diff --git a/docs/dws/dev/ALL_META.TXT.json b/docs/dws/dev/ALL_META.TXT.json new file mode 100644 index 00000000..b69d9060 --- /dev/null +++ b/docs/dws/dev/ALL_META.TXT.json @@ -0,0 +1,9452 @@ +[ + { + "uri":"dws_04_1000.html", + "product_code":"dws", + "code":"1", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Developer Guide", + "title":"Developer Guide", + "githuburl":"" + }, + { + "uri":"dws_04_0001.html", + "product_code":"dws", + "code":"2", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Welcome", + "title":"Welcome", + "githuburl":"" + }, + { + "uri":"dws_04_0002.html", + "product_code":"dws", + "code":"3", + "des":"This document is intended for database designers, application developers, and database administrators, and provides information required for designing, building, querying", + "doc_type":"devg", + "kw":"Target Readers,Welcome,Developer Guide", + "title":"Target Readers", + "githuburl":"" + }, + { + "uri":"dws_04_0004.html", + "product_code":"dws", + "code":"4", + "des":"If you are a new GaussDB(DWS) user, you are advised to read the following contents first:Sections describing the features, functions, and application scenarios of GaussDB", + "doc_type":"devg", + "kw":"Reading Guide,Welcome,Developer Guide", + "title":"Reading Guide", + "githuburl":"" + }, + { + "uri":"dws_04_0005.html", + "product_code":"dws", + "code":"5", + "des":"SQL examples in this manual are developed based on the TPC-DS model. Before you execute the examples, install the TPC-DS benchmark by following the instructions on the of", + "doc_type":"devg", + "kw":"Conventions,Welcome,Developer Guide", + "title":"Conventions", + "githuburl":"" + }, + { + "uri":"dws_04_0006.html", + "product_code":"dws", + "code":"6", + "des":"Complete the following tasks before you perform operations described in this document:Create a GaussDB(DWS) cluster.Install an SQL client.Connect the SQL client to the de", + "doc_type":"devg", + "kw":"Prerequisites,Welcome,Developer Guide", + "title":"Prerequisites", + "githuburl":"" + }, + { + "uri":"dws_04_0007.html", + "product_code":"dws", + "code":"7", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"System Overview", + "title":"System Overview", + "githuburl":"" + }, + { + "uri":"dws_04_0011.html", + "product_code":"dws", + "code":"8", + "des":"GaussDB(DWS) manages cluster transactions, the basis of HA and failovers. 
This ensures speedy fault recovery, guarantees the Atomicity, Consistency, Isolation, Durability", + "doc_type":"devg", + "kw":"Highly Reliable Transaction Processing,System Overview,Developer Guide", + "title":"Highly Reliable Transaction Processing", + "githuburl":"" + }, + { + "uri":"dws_04_0012.html", + "product_code":"dws", + "code":"9", + "des":"The following GaussDB(DWS) features help achieve high query performance.GaussDB(DWS) is an MPP system with the shared-nothing architecture. It consists of multiple indepe", + "doc_type":"devg", + "kw":"High Query Performance,System Overview,Developer Guide", + "title":"High Query Performance", + "githuburl":"" + }, + { + "uri":"dws_04_0015.html", + "product_code":"dws", + "code":"10", + "des":"A database manages data objects and is isolated from other databases. While creating a database, you can specify a tablespace. If you do not specify it, database objects ", + "doc_type":"devg", + "kw":"Related Concepts,System Overview,Developer Guide", + "title":"Related Concepts", + "githuburl":"" + }, + { + "uri":"dws_04_0985.html", + "product_code":"dws", + "code":"11", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Data Migration", + "title":"Data Migration", + "githuburl":"" + }, + { + "uri":"dws_04_0180.html", + "product_code":"dws", + "code":"12", + "des":"GaussDB(DWS) provides flexible methods for importing data. You can import data from different sources to GaussDB(DWS). The features of each method are listed in Table 1. ", + "doc_type":"devg", + "kw":"Data Migration to GaussDB(DWS),Data Migration,Developer Guide", + "title":"Data Migration to GaussDB(DWS)", + "githuburl":"" + }, + { + "uri":"dws_04_0179.html", + "product_code":"dws", + "code":"13", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Data Import", + "title":"Data Import", + "githuburl":"" + }, + { + "uri":"dws_04_0181.html", + "product_code":"dws", + "code":"14", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Importing Data from OBS in Parallel", + "title":"Importing Data from OBS in Parallel", + "githuburl":"" + }, + { + "uri":"dws_04_0182.html", + "product_code":"dws", + "code":"15", + "des":"The object storage service (OBS) is an object-based cloud storage service, featuring data storage of high security, proven reliability, and cost-effectiveness. OBS provid", + "doc_type":"devg", + "kw":"About Parallel Data Import from OBS,Importing Data from OBS in Parallel,Developer Guide", + "title":"About Parallel Data Import from OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0154.html", + "product_code":"dws", + "code":"16", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Importing CSV/TXT Data from the OBS", + "title":"Importing CSV/TXT Data from the OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0183.html", + "product_code":"dws", + "code":"17", + "des":"In this example, OBS data is imported to GaussDB(DWS) databases. When users who have registered with the cloud platform access OBS using clients, call APIs, or SDKs, acce", + "doc_type":"devg", + "kw":"Creating Access Keys (AK and SK),Importing CSV/TXT Data from the OBS,Developer Guide", + "title":"Creating Access Keys (AK and SK)", + "githuburl":"" + }, + { + "uri":"dws_04_0184.html", + "product_code":"dws", + "code":"18", + "des":"Before importing data from OBS to a cluster, prepare source data files and upload these files to OBS. If the data files have been stored on OBS, you only need to complete", + "doc_type":"devg", + "kw":"Uploading Data to OBS,Importing CSV/TXT Data from the OBS,Developer Guide", + "title":"Uploading Data to OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0185.html", + "product_code":"dws", + "code":"19", + "des":"format: format of the source data file in the foreign table. OBS foreign tables support CSV and TEXT formats. The default value is TEXT.header: Whether the data file cont", + "doc_type":"devg", + "kw":"Creating an OBS Foreign Table,Importing CSV/TXT Data from the OBS,Developer Guide", + "title":"Creating an OBS Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0186.html", + "product_code":"dws", + "code":"20", + "des":"Before importing data, you are advised to optimize your design and deployment based on the following excellent practices, helping maximize system resource utilization and", + "doc_type":"devg", + "kw":"Importing Data,Importing CSV/TXT Data from the OBS,Developer Guide", + "title":"Importing Data", + "githuburl":"" + }, + { + "uri":"dws_04_0187.html", + "product_code":"dws", + "code":"21", + "des":"Handle errors that occurred during data import.Errors that occur when data is imported are divided into data format errors and non-data format errors.Data format errorWhe", + "doc_type":"devg", + "kw":"Handling Import Errors,Importing CSV/TXT Data from the OBS,Developer Guide", + "title":"Handling Import Errors", + "githuburl":"" + }, + { + "uri":"dws_04_0155.html", + "product_code":"dws", + "code":"22", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Importing ORC/CarbonData Data from OBS", + "title":"Importing ORC/CarbonData Data from OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0243.html", + "product_code":"dws", + "code":"23", + "des":"Before you use the SQL on OBS feature to query OBS data:You have stored the ORC data on OBS.For example, the ORC table has been created when you use the Hive or Spark com", + "doc_type":"devg", + "kw":"Preparing Data on OBS,Importing ORC/CarbonData Data from OBS,Developer Guide", + "title":"Preparing Data on OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0244.html", + "product_code":"dws", + "code":"24", + "des":"This section describes how to create a foreign server that is used to define the information about OBS servers and is invoked by foreign tables. 
For details about the syn", + "doc_type":"devg", + "kw":"Creating a Foreign Server,Importing ORC/CarbonData Data from OBS,Developer Guide", + "title":"Creating a Foreign Server", + "githuburl":"" + }, + { + "uri":"dws_04_0245.html", + "product_code":"dws", + "code":"25", + "des":"After performing steps in Creating a Foreign Server, create an OBS foreign table in the GaussDB(DWS) database to access the data stored in OBS. An OBS foreign table is re", + "doc_type":"devg", + "kw":"Creating a Foreign Table,Importing ORC/CarbonData Data from OBS,Developer Guide", + "title":"Creating a Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0246.html", + "product_code":"dws", + "code":"26", + "des":"If the data amount is small, you can directly run SELECT to query the foreign table and view the data on OBS.If the query result is the same as the data in Original Data,", + "doc_type":"devg", + "kw":"Querying Data on OBS Through Foreign Tables,Importing ORC/CarbonData Data from OBS,Developer Guide", + "title":"Querying Data on OBS Through Foreign Tables", + "githuburl":"" + }, + { + "uri":"dws_04_0247.html", + "product_code":"dws", + "code":"27", + "des":"After completing operations in this tutorial, if you no longer need to use the resources created during the operations, you can delete them to avoid resource waste or quo", + "doc_type":"devg", + "kw":"Deleting Resources,Importing ORC/CarbonData Data from OBS,Developer Guide", + "title":"Deleting Resources", + "githuburl":"" + }, + { + "uri":"dws_04_0156.html", + "product_code":"dws", + "code":"28", + "des":"In the big data field, the mainstream file format is ORC, which is supported by GaussDB(DWS). You can use Hive to export data to an ORC file and use a read-only foreign t", + "doc_type":"devg", + "kw":"Supported Data Types,Importing ORC/CarbonData Data from OBS,Developer Guide", + "title":"Supported Data Types", + "githuburl":"" + }, + { + "uri":"dws_04_0189.html", + "product_code":"dws", + "code":"29", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Using GDS to Import Data from a Remote Server", + "title":"Using GDS to Import Data from a Remote Server", + "githuburl":"" + }, + { + "uri":"dws_04_0190.html", + "product_code":"dws", + "code":"30", + "des":"INSERT and COPY statements are serially executed to import a small volume of data. To import a large volume of data to GaussDB(DWS), you can use GDS to import data in par", + "doc_type":"devg", + "kw":"Importing Data In Parallel Using GDS,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Importing Data In Parallel Using GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0192.html", + "product_code":"dws", + "code":"31", + "des":"Generally, the data to be imported has been uploaded to the data server. In this case, you only need to check the communication between the data server and GaussDB(DWS), ", + "doc_type":"devg", + "kw":"Preparing Source Data,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Preparing Source Data", + "githuburl":"" + }, + { + "uri":"dws_04_0193.html", + "product_code":"dws", + "code":"32", + "des":"GaussDB(DWS) uses GDS to allocate the source data for parallel data import. 
Deploy GDS on the data server.If a large volume of data is stored on multiple data servers, in", + "doc_type":"devg", + "kw":"Installing, Configuring, and Starting GDS,Using GDS to Import Data from a Remote Server,Developer Gu", + "title":"Installing, Configuring, and Starting GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0194.html", + "product_code":"dws", + "code":"33", + "des":"The source data information and GDS access information are configured in a foreign table. Then, GaussDB(DWS) can import data from a data server to a database table based ", + "doc_type":"devg", + "kw":"Creating a GDS Foreign Table,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Creating a GDS Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0195.html", + "product_code":"dws", + "code":"34", + "des":"This section describes how to create tables in GaussDB(DWS) and import data to the tables.Before importing all the data from a table containing over 10 million records, y", + "doc_type":"devg", + "kw":"Importing Data,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Importing Data", + "githuburl":"" + }, + { + "uri":"dws_04_0196.html", + "product_code":"dws", + "code":"35", + "des":"Handle errors that occurred during data import.Errors that occur when data is imported are divided into data format errors and non-data format errors.Data format errorWhe", + "doc_type":"devg", + "kw":"Handling Import Errors,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Handling Import Errors", + "githuburl":"" + }, + { + "uri":"dws_04_0197.html", + "product_code":"dws", + "code":"36", + "des":"Stop GDS after data is imported successfully.If GDS is started using the gds command, perform the following operations to stop GDS:Query the GDS process ID:ps -ef|grep gd", + "doc_type":"devg", + "kw":"Stopping GDS,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Stopping GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0198.html", + "product_code":"dws", + "code":"37", + "des":"The data servers and the cluster reside on the same intranet. The IP addresses are 192.168.0.90 and 192.168.0.91. Source data files are in CSV format.Create the target ta", + "doc_type":"devg", + "kw":"Example of Importing Data Using GDS,Using GDS to Import Data from a Remote Server,Developer Guide", + "title":"Example of Importing Data Using GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0210.html", + "product_code":"dws", + "code":"38", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Importing Data from MRS to a Cluster", + "title":"Importing Data from MRS to a Cluster", + "githuburl":"" + }, + { + "uri":"dws_04_0066.html", + "product_code":"dws", + "code":"39", + "des":"MRS is a big data cluster running based on the open-source Hadoop ecosystem. 
It provides the industry's latest cutting-edge storage and analytical capabilities of massive", + "doc_type":"devg", + "kw":"Overview,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dws_04_0212.html", + "product_code":"dws", + "code":"40", + "des":"Before importing data from MRS to a GaussDB(DWS) cluster, you must have:Created an MRS cluster.Created the Hive/Spark ORC table in the MRS cluster and stored the table da", + "doc_type":"devg", + "kw":"Preparing Data in an MRS Cluster,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Preparing Data in an MRS Cluster", + "githuburl":"" + }, + { + "uri":"dws_04_0213.html", + "product_code":"dws", + "code":"41", + "des":"In the syntax CREATE FOREIGN TABLE (SQL on Hadoop or OBS) for creating a foreign table, you need to specify a foreign server associated with the MRS data source connectio", + "doc_type":"devg", + "kw":"Manually Creating a Foreign Server,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Manually Creating a Foreign Server", + "githuburl":"" + }, + { + "uri":"dws_04_0214.html", + "product_code":"dws", + "code":"42", + "des":"This section describes how to create a Hadoop foreign table in the GaussDB(DWS) database to access the Hadoop structured data stored on MRS HDFS. A Hadoop foreign table i", + "doc_type":"devg", + "kw":"Creating a Foreign Table,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Creating a Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0215.html", + "product_code":"dws", + "code":"43", + "des":"If the data amount is small, you can directly run SELECT to query the foreign table and view the data in the MRS data source.If the query result is the same as the data i", + "doc_type":"devg", + "kw":"Importing Data,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Importing Data", + "githuburl":"" + }, + { + "uri":"dws_04_0216.html", + "product_code":"dws", + "code":"44", + "des":"After completing operations in this tutorial, if you no longer need to use the resources created during the operations, you can delete them to avoid resource waste or quo", + "doc_type":"devg", + "kw":"Deleting Resources,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Deleting Resources", + "githuburl":"" + }, + { + "uri":"dws_04_0217.html", + "product_code":"dws", + "code":"45", + "des":"The following error information indicates that GaussDB(DWS) is to read an ORC data file but the actual file is in text format. 
Therefore, create a table of the Hive ORC t", + "doc_type":"devg", + "kw":"Error Handling,Importing Data from MRS to a Cluster,Developer Guide", + "title":"Error Handling", + "githuburl":"" + }, + { + "uri":"dws_04_0949.html", + "product_code":"dws", + "code":"46", + "des":"You can create foreign tables to perform associated queries and import data between clusters.Import data from one GaussDB(DWS) cluster to another.Perform associated queri", + "doc_type":"devg", + "kw":"Importing Data from One GaussDB(DWS) Cluster to Another,Data Import,Developer Guide", + "title":"Importing Data from One GaussDB(DWS) Cluster to Another", + "githuburl":"" + }, + { + "uri":"dws_04_0208.html", + "product_code":"dws", + "code":"47", + "des":"The gsql tool of GaussDB(DWS) provides the \\copy meta-command to import data.For details about the \\copy command, see Table 1.tableSpecifies the name (possibly schema-qua", + "doc_type":"devg", + "kw":"Using the gsql Meta-Command \\COPY to Import Data,Data Import,Developer Guide", + "title":"Using the gsql Meta-Command \\COPY to Import Data", + "githuburl":"" + }, + { + "uri":"dws_04_0203.html", + "product_code":"dws", + "code":"48", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Running the COPY FROM STDIN Statement to Import Data", + "title":"Running the COPY FROM STDIN Statement to Import Data", + "githuburl":"" + }, + { + "uri":"dws_04_0204.html", + "product_code":"dws", + "code":"49", + "des":"This method is applicable to low-concurrency scenarios where a small volume of data is to be imported.Use either of the following methods to write data to GaussDB(DWS) us", + "doc_type":"devg", + "kw":"Data Import Using COPY FROM STDIN,Running the COPY FROM STDIN Statement to Import Data,Developer Gui", + "title":"Data Import Using COPY FROM STDIN", + "githuburl":"" + }, + { + "uri":"dws_04_0205.html", + "product_code":"dws", + "code":"50", + "des":"CopyManager is an API interface class provided by the JDBC driver in GaussDB(DWS). 
It is used to import data to GaussDB(DWS) in batches.The CopyManager class is in the or", + "doc_type":"devg", + "kw":"Introduction to the CopyManager Class,Running the COPY FROM STDIN Statement to Import Data,Developer", + "title":"Introduction to the CopyManager Class", + "githuburl":"" + }, + { + "uri":"dws_04_0206.html", + "product_code":"dws", + "code":"51", + "des":"When the Java language is used for secondary development based on GaussDB(DWS), you can use the CopyManager interface to export data from the database to a local file or ", + "doc_type":"devg", + "kw":"Example: Importing and Exporting Data Through Local Files,Running the COPY FROM STDIN Statement to I", + "title":"Example: Importing and Exporting Data Through Local Files", + "githuburl":"" + }, + { + "uri":"dws_04_0207.html", + "product_code":"dws", + "code":"52", + "des":"The following example shows how to use CopyManager to migrate data from MySQL to GaussDB(DWS).", + "doc_type":"devg", + "kw":"Example: Migrating Data from MySQL to GaussDB(DWS),Running the COPY FROM STDIN Statement to Import D", + "title":"Example: Migrating Data from MySQL to GaussDB(DWS)", + "githuburl":"" + }, + { + "uri":"dws_04_0986.html", + "product_code":"dws", + "code":"53", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Full Database Migration", + "title":"Full Database Migration", + "githuburl":"" + }, + { + "uri":"dws_04_0219.html", + "product_code":"dws", + "code":"54", + "des":"You can use CDM to migrate data from other data sources (for example, MySQL) to the databases in clusters on GaussDB(DWS).For details about scenarios where CDM is used to", + "doc_type":"devg", + "kw":"Using CDM to Migrate Data to GaussDB(DWS),Full Database Migration,Developer Guide", + "title":"Using CDM to Migrate Data to GaussDB(DWS)", + "githuburl":"" + }, + { + "uri":"dws_01_0127.html", + "product_code":"dws", + "code":"55", + "des":"The DSC is a CLI tool running on the Linux or Windows OS. It is dedicated to providing customers with simple, fast, and reliable application SQL script migration services", + "doc_type":"devg", + "kw":"Using DSC to Migrate SQL Scripts,Full Database Migration,Developer Guide", + "title":"Using DSC to Migrate SQL Scripts", + "githuburl":"" + }, + { + "uri":"dws_04_0987.html", + "product_code":"dws", + "code":"56", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Metadata Migration", + "title":"Metadata Migration", + "githuburl":"" + }, + { + "uri":"dws_04_0269.html", + "product_code":"dws", + "code":"57", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Using gs_dump and gs_dumpall to Export Metadata", + "title":"Using gs_dump and gs_dumpall to Export Metadata", + "githuburl":"" + }, + { + "uri":"dws_04_0270.html", + "product_code":"dws", + "code":"58", + "des":"GaussDB(DWS) provides gs_dump and gs_dumpall to export required database objects and related information. To migrate database information, you can use a tool to import th", + "doc_type":"devg", + "kw":"Overview,Using gs_dump and gs_dumpall to Export Metadata,Developer Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dws_04_0271.html", + "product_code":"dws", + "code":"59", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Exporting a Single Database", + "title":"Exporting a Single Database", + "githuburl":"" + }, + { + "uri":"dws_04_0272.html", + "product_code":"dws", + "code":"60", + "des":"You can use gs_dump to export data and all object definitions of a database from GaussDB(DWS). You can specify the information to be exported as follows:Export full infor", + "doc_type":"devg", + "kw":"Exporting a Database,Exporting a Single Database,Developer Guide", + "title":"Exporting a Database", + "githuburl":"" + }, + { + "uri":"dws_04_0273.html", + "product_code":"dws", + "code":"61", + "des":"You can use gs_dump to export data and all object definitions of a schema from GaussDB(DWS). You can export one or more specified schemas as needed. You can specify the i", + "doc_type":"devg", + "kw":"Exporting a Schema,Exporting a Single Database,Developer Guide", + "title":"Exporting a Schema", + "githuburl":"" + }, + { + "uri":"dws_04_0274.html", + "product_code":"dws", + "code":"62", + "des":"You can use gs_dump to export data and all object definitions of a table-level object from GaussDB(DWS). Views, sequences, and foreign tables are special tables. You can ", + "doc_type":"devg", + "kw":"Exporting a Table,Exporting a Single Database,Developer Guide", + "title":"Exporting a Table", + "githuburl":"" + }, + { + "uri":"dws_04_0275.html", + "product_code":"dws", + "code":"63", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Exporting All Databases", + "title":"Exporting All Databases", + "githuburl":"" + }, + { + "uri":"dws_04_0276.html", + "product_code":"dws", + "code":"64", + "des":"You can use gs_dumpall to export full information of all databases in a cluster from GaussDB(DWS), including information about each database and global objects in the clu", + "doc_type":"devg", + "kw":"Exporting All Databases,Exporting All Databases,Developer Guide", + "title":"Exporting All Databases", + "githuburl":"" + }, + { + "uri":"dws_04_0277.html", + "product_code":"dws", + "code":"65", + "des":"You can use gs_dumpall to export global objects from GaussDB(DWS), including database users, user groups, tablespaces, and attributes (for example, global access permissi", + "doc_type":"devg", + "kw":"Exporting Global Objects,Exporting All Databases,Developer Guide", + "title":"Exporting Global Objects", + "githuburl":"" + }, + { + "uri":"dws_04_0278.html", + "product_code":"dws", + "code":"66", + "des":"gs_dump and gs_dumpall use -U to specify the user that performs the export. If the specified user does not have the required permission, data cannot be exported. In this ", + "doc_type":"devg", + "kw":"Data Export By a User Without Required Permissions,Using gs_dump and gs_dumpall to Export Metadata,D", + "title":"Data Export By a User Without Required Permissions", + "githuburl":"" + }, + { + "uri":"dws_04_0209.html", + "product_code":"dws", + "code":"67", + "des":"gs_restore is an import tool provided by GaussDB(DWS). You can use gs_restore to import the files exported by gs_dump to a database. gs_restore can import the files in .t", + "doc_type":"devg", + "kw":"Using gs_restore to Import Data,Metadata Migration,Developer Guide", + "title":"Using gs_restore to Import Data", + "githuburl":"" + }, + { + "uri":"dws_04_0249.html", + "product_code":"dws", + "code":"68", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Data Export", + "title":"Data Export", + "githuburl":"" + }, + { + "uri":"dws_04_0250.html", + "product_code":"dws", + "code":"69", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Exporting Data to OBS", + "title":"Exporting Data to OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0251.html", + "product_code":"dws", + "code":"70", + "des":"GaussDB(DWS) databases allow you to export data in parallel using OBS foreign tables, in which the export mode and the exported data format are specified. Data is exporte", + "doc_type":"devg", + "kw":"Parallel OBS Data Export,Exporting Data to OBS,Developer Guide", + "title":"Parallel OBS Data Export", + "githuburl":"" + }, + { + "uri":"dws_04_0157.html", + "product_code":"dws", + "code":"71", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Exporting CSV/TXT Data to OBS", + "title":"Exporting CSV/TXT Data to OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0252.html", + "product_code":"dws", + "code":"72", + "des":"Plan the storage location of exported data in OBS.You need to specify the OBS path (to directory) for storing data that you want to export. The exported data can be saved", + "doc_type":"devg", + "kw":"Planning Data Export,Exporting CSV/TXT Data to OBS,Developer Guide", + "title":"Planning Data Export", + "githuburl":"" + }, + { + "uri":"dws_04_0253.html", + "product_code":"dws", + "code":"73", + "des":"To obtain access keys, log in to the management console, click the username in the upper right corner, and select My Credential from the menu. Then choose Access Keys in ", + "doc_type":"devg", + "kw":"Creating an OBS Foreign Table,Exporting CSV/TXT Data to OBS,Developer Guide", + "title":"Creating an OBS Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0254.html", + "product_code":"dws", + "code":"74", + "des":"Example 1: Export data from table product_info_output to a data file through the product_info_output_ext foreign table.INSERT INTO product_info_output_ext SELECT * FROM p", + "doc_type":"devg", + "kw":"Exporting Data,Exporting CSV/TXT Data to OBS,Developer Guide", + "title":"Exporting Data", + "githuburl":"" + }, + { + "uri":"dws_04_0255.html", + "product_code":"dws", + "code":"75", + "des":"Create two foreign tables and use them to export tables from a database to two buckets in OBS.OBS and the database are in the same region. The example GaussDB(DWS) table ", + "doc_type":"devg", + "kw":"Examples,Exporting CSV/TXT Data to OBS,Developer Guide", + "title":"Examples", + "githuburl":"" + }, + { + "uri":"dws_04_0256.html", + "product_code":"dws", + "code":"76", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Exporting ORC Data to OBS", + "title":"Exporting ORC Data to OBS", + "githuburl":"" + }, + { + "uri":"dws_04_0258.html", + "product_code":"dws", + "code":"77", + "des":"For details about exporting data to OBS, see Planning Data Export.For details about the data types that can be exported to OBS, see Table 2.For details about HDFS data ex", + "doc_type":"devg", + "kw":"Planning Data Export,Exporting ORC Data to OBS,Developer Guide", + "title":"Planning Data Export", + "githuburl":"" + }, + { + "uri":"dws_04_0259.html", + "product_code":"dws", + "code":"78", + "des":"For details about creating a foreign server on OBS, see Creating a Foreign Server.For details about creating a foreign server in HDFS, see Manually Creating a Foreign Ser", + "doc_type":"devg", + "kw":"Creating a Foreign Server,Exporting ORC Data to OBS,Developer Guide", + "title":"Creating a Foreign Server", + "githuburl":"" + }, + { + "uri":"dws_04_0260.html", + "product_code":"dws", + "code":"79", + "des":"After operations in Creating a Foreign Server are complete, create an OBS/HDFS write-only foreign table in the GaussDB(DWS) database to access data stored in OBS/HDFS. 
Th", + "doc_type":"devg", + "kw":"Creating a Foreign Table,Exporting ORC Data to OBS,Developer Guide", + "title":"Creating a Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0158.html", + "product_code":"dws", + "code":"80", + "des":"Example 1: Export data from table product_info_output to a data file using the product_info_output_ext foreign table.INSERT INTO product_info_output_ext SELECT * FROM pro", + "doc_type":"devg", + "kw":"Exporting Data,Exporting ORC Data to OBS,Developer Guide", + "title":"Exporting Data", + "githuburl":"" + }, + { + "uri":"dws_04_0159.html", + "product_code":"dws", + "code":"81", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Exporting ORC Data to MRS", + "title":"Exporting ORC Data to MRS", + "githuburl":"" + }, + { + "uri":"dws_04_0160.html", + "product_code":"dws", + "code":"82", + "des":"GaussDB(DWS) allows you to export ORC data to MRS using an HDFS foreign table. You can specify the export mode and export data format in the foreign table. Data is export", + "doc_type":"devg", + "kw":"Overview,Exporting ORC Data to MRS,Developer Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dws_04_0161.html", + "product_code":"dws", + "code":"83", + "des":"For details about the data types that can be exported to MRS, see Table 2.For details about HDFS data export or MRS configuration, see the MapReduce Service User Guide.", + "doc_type":"devg", + "kw":"Planning Data Export,Exporting ORC Data to MRS,Developer Guide", + "title":"Planning Data Export", + "githuburl":"" + }, + { + "uri":"dws_04_0162.html", + "product_code":"dws", + "code":"84", + "des":"For details about creating a foreign server on HDFS, see Manually Creating a Foreign Server.", + "doc_type":"devg", + "kw":"Creating a Foreign Server,Exporting ORC Data to MRS,Developer Guide", + "title":"Creating a Foreign Server", + "githuburl":"" + }, + { + "uri":"dws_04_0163.html", + "product_code":"dws", + "code":"85", + "des":"After operations in Creating a Foreign Server are complete, create an HDFS write-only foreign table in the GaussDB(DWS) database to access data stored in HDFS. The foreig", + "doc_type":"devg", + "kw":"Creating a Foreign Table,Exporting ORC Data to MRS,Developer Guide", + "title":"Creating a Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0164.html", + "product_code":"dws", + "code":"86", + "des":"Example 1: Export data from table product_info_output to a data file using the product_info_output_ext foreign table.INSERT INTO product_info_output_ext SELECT * FROM pro", + "doc_type":"devg", + "kw":"Exporting Data,Exporting ORC Data to MRS,Developer Guide", + "title":"Exporting Data", + "githuburl":"" + }, + { + "uri":"dws_04_0261.html", + "product_code":"dws", + "code":"87", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Using GDS to Export Data to a Remote Server", + "title":"Using GDS to Export Data to a Remote Server", + "githuburl":"" + }, + { + "uri":"dws_04_0262.html", + "product_code":"dws", + "code":"88", + "des":"In high-concurrency scenarios, you can use GDS to export data from a database to a common file system.In the current GDS version, data can be exported from a database to ", + "doc_type":"devg", + "kw":"Exporting Data In Parallel Using GDS,Using GDS to Export Data to a Remote Server,Developer Guide", + "title":"Exporting Data In Parallel Using GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0263.html", + "product_code":"dws", + "code":"89", + "des":"Before you use GDS to export data from a cluster, prepare data to be exported and plan the export path.Remote modeIf the following information is displayed, the user and ", + "doc_type":"devg", + "kw":"Planning Data Export,Using GDS to Export Data to a Remote Server,Developer Guide", + "title":"Planning Data Export", + "githuburl":"" + }, + { + "uri":"dws_04_0264.html", + "product_code":"dws", + "code":"90", + "des":"GDS is a data service tool provided by GaussDB(DWS). Using the foreign table mechanism, this tool helps export data at a high speed.For details, see Installing, Configuri", + "doc_type":"devg", + "kw":"Installing, Configuring, and Starting GDS,Using GDS to Export Data to a Remote Server,Developer Guid", + "title":"Installing, Configuring, and Starting GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0265.html", + "product_code":"dws", + "code":"91", + "des":"Remote modeSet the location parameter to the URL of the directory that stores the data files.You do not need to specify any file.For example:The IP address of the GDS dat", + "doc_type":"devg", + "kw":"Creating a GDS Foreign Table,Using GDS to Export Data to a Remote Server,Developer Guide", + "title":"Creating a GDS Foreign Table", + "githuburl":"" + }, + { + "uri":"dws_04_0266.html", + "product_code":"dws", + "code":"92", + "des":"Ensure that the IP addresses and ports of servers where CNs and DNs are deployed can connect to those of the GDS server.Create batch processing scripts to export data in ", + "doc_type":"devg", + "kw":"Exporting Data,Using GDS to Export Data to a Remote Server,Developer Guide", + "title":"Exporting Data", + "githuburl":"" + }, + { + "uri":"dws_04_0267.html", + "product_code":"dws", + "code":"93", + "des":"GDS is a data service tool provided by GaussDB(DWS). Using the foreign table mechanism, this tool helps export data at a high speed.For details, see Stopping GDS.", + "doc_type":"devg", + "kw":"Stopping GDS,Using GDS to Export Data to a Remote Server,Developer Guide", + "title":"Stopping GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0268.html", + "product_code":"dws", + "code":"94", + "des":"The data server and the cluster reside on the same intranet, the IP address of the data server is 192.168.0.90, and data source files are in CSV format. In this scenario,", + "doc_type":"devg", + "kw":"Examples of Exporting Data Using GDS,Using GDS to Export Data to a Remote Server,Developer Guide", + "title":"Examples of Exporting Data Using GDS", + "githuburl":"" + }, + { + "uri":"dws_04_0988.html", + "product_code":"dws", + "code":"95", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Other Operations", + "title":"Other Operations", + "githuburl":"" + }, + { + "uri":"dws_04_0279.html", + "product_code":"dws", + "code":"96", + "des":"GDS supports concurrent import and export. The gds -t parameter is used to set the size of the thread pool and control the maximum number of concurrent working threads. B", + "doc_type":"devg", + "kw":"GDS Pipe FAQs,Other Operations,Developer Guide", + "title":"GDS Pipe FAQs", + "githuburl":"" + }, + { + "uri":"dws_04_0228.html", + "product_code":"dws", + "code":"97", + "des":"Data skew causes the query performance to deteriorate. Before importing all the data from a table consisting of over 10 million records, you are advised to import some of", + "doc_type":"devg", + "kw":"Checking for Data Skew,Other Operations,Developer Guide", + "title":"Checking for Data Skew", + "githuburl":"" + }, + { + "uri":"dws_04_0042.html", + "product_code":"dws", + "code":"98", + "des":"GaussDB(DWS) is compatible with Oracle, Teradata, and MySQL syntax, but the syntax behavior differs among the three.", + "doc_type":"devg", + "kw":"Syntax Compatibility Differences Among Oracle, Teradata, and MySQL,Developer Guide,Developer Guide", + "title":"Syntax Compatibility Differences Among Oracle, Teradata, and MySQL", + "githuburl":"" + }, + { + "uri":"dws_04_0043.html", + "product_code":"dws", + "code":"99", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Database Security Management", + "title":"Database Security Management", + "githuburl":"" + }, + { + "uri":"dws_04_0053.html", + "product_code":"dws", + "code":"100", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Managing Users and Their Permissions", + "title":"Managing Users and Their Permissions", + "githuburl":"" + }, + { + "uri":"dws_04_0054.html", + "product_code":"dws", + "code":"101", + "des":"A user who creates an object is the owner of this object. By default, Separation of Permissions is disabled after cluster installation. A database system administrator ha", + "doc_type":"devg", + "kw":"Default Permission Mechanism,Managing Users and Their Permissions,Developer Guide", + "title":"Default Permission Mechanism", + "githuburl":"" + }, + { + "uri":"dws_04_0055.html", + "product_code":"dws", + "code":"102", + "des":"A system administrator is an account with the SYSADMIN permission. After a cluster is installed, a system administrator has the permissions of all object owners by defaul", + "doc_type":"devg", + "kw":"System Administrator,Managing Users and Their Permissions,Developer Guide", + "title":"System Administrator", + "githuburl":"" + }, + { + "uri":"dws_04_0056.html", + "product_code":"dws", + "code":"103", + "des":"Descriptions in Default Permission Mechanism and System Administrator are about the initial situation after a cluster is created. 
By default, a system administrator with ", + "doc_type":"devg", + "kw":"Separation of Permissions,Managing Users and Their Permissions,Developer Guide", + "title":"Separation of Permissions", + "githuburl":"" + }, + { + "uri":"dws_04_0057.html", + "product_code":"dws", + "code":"104", + "des":"You can use CREATE USER and ALTER USER to create and manage database users, respectively. The database cluster has one or more named databases. Users and roles are shared", + "doc_type":"devg", + "kw":"Users,Managing Users and Their Permissions,Developer Guide", + "title":"Users", + "githuburl":"" + }, + { + "uri":"dws_04_0058.html", + "product_code":"dws", + "code":"105", + "des":"A role is a set of permissions. After a role is granted to a user through GRANT, the user will have all the permissions of the role. It is recommended that roles be used ", + "doc_type":"devg", + "kw":"Roles,Managing Users and Their Permissions,Developer Guide", + "title":"Roles", + "githuburl":"" + }, + { + "uri":"dws_04_0059.html", + "product_code":"dws", + "code":"106", + "des":"Schemas function as models. Schema management allows multiple users to use the same database without mutual impacts, to organize database objects as manageable logical gr", + "doc_type":"devg", + "kw":"Schema,Managing Users and Their Permissions,Developer Guide", + "title":"Schema", + "githuburl":"" + }, + { + "uri":"dws_04_0060.html", + "product_code":"dws", + "code":"107", + "des":"To grant the permission for an object directly to a user, use GRANT.When permissions for a table or view in a schema are granted to a user or role, the USAGE permission o", + "doc_type":"devg", + "kw":"User Permission Setting,Managing Users and Their Permissions,Developer Guide", + "title":"User Permission Setting", + "githuburl":"" + }, + { + "uri":"dws_04_0063.html", + "product_code":"dws", + "code":"108", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Setting Security Policies", + "title":"Setting Security Policies", + "githuburl":"" + }, + { + "uri":"dws_04_0064.html", + "product_code":"dws", + "code":"109", + "des":"For data security purposes, GaussDB(DWS) provides a series of security measures, such as automatically locking and unlocking accounts, manually locking and unlocking abno", + "doc_type":"devg", + "kw":"Setting Account Security Policies,Setting Security Policies,Developer Guide", + "title":"Setting Account Security Policies", + "githuburl":"" + }, + { + "uri":"dws_04_0065.html", + "product_code":"dws", + "code":"110", + "des":"When creating a user, you need to specify the validity period of the user, including the start time and end time.To enable a user not within the validity period to use it", + "doc_type":"devg", + "kw":"Setting the Validity Period of an Account,Setting Security Policies,Developer Guide", + "title":"Setting the Validity Period of an Account", + "githuburl":"" + }, + { + "uri":"dws_04_0067.html", + "product_code":"dws", + "code":"111", + "des":"User passwords are stored in the system catalog pg_authid. 
To prevent password leakage, GaussDB(DWS) encrypts and stores the user passwords.Password complexityThe passwor", + "doc_type":"devg", + "kw":"Setting a User Password,Setting Security Policies,Developer Guide", + "title":"Setting a User Password", + "githuburl":"" + }, + { + "uri":"dws_04_0994.html", + "product_code":"dws", + "code":"112", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Sensitive Data Management", + "title":"Sensitive Data Management", + "githuburl":"" + }, + { + "uri":"dws_04_0061.html", + "product_code":"dws", + "code":"113", + "des":"The row-level access control feature enables database access control to be accurate to each row of data tables. In this way, the same SQL query may return different resul", + "doc_type":"devg", + "kw":"Row-Level Access Control,Sensitive Data Management,Developer Guide", + "title":"Row-Level Access Control", + "githuburl":"" + }, + { + "uri":"dws_04_0062.html", + "product_code":"dws", + "code":"114", + "des":"GaussDB(DWS) provides the column-level dynamic data masking (DDM) function. For sensitive data, such as the ID card number, mobile number, and bank card number, the DDM f", + "doc_type":"devg", + "kw":"Data Redaction,Sensitive Data Management,Developer Guide", + "title":"Data Redaction", + "githuburl":"" + }, + { + "uri":"dws_04_0995.html", + "product_code":"dws", + "code":"115", + "des":"GaussDB(DWS) supports encryption and decryption of strings using the following functions:gs_encrypt(encryptstr, keystr, cryptotype, cryptomode, hashmethod)Description: En", + "doc_type":"devg", + "kw":"Using Functions for Encryption and Decryption,Sensitive Data Management,Developer Guide", + "title":"Using Functions for Encryption and Decryption", + "githuburl":"" + }, + { + "uri":"dws_04_0074.html", + "product_code":"dws", + "code":"116", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Development and Design Proposal", + "title":"Development and Design Proposal", + "githuburl":"" + }, + { + "uri":"dws_04_0075.html", + "product_code":"dws", + "code":"117", + "des":"This chapter describes the design specifications for database modeling and application development. Modeling compliant with these specifications fits the distributed proc", + "doc_type":"devg", + "kw":"Development and Design Proposal,Development and Design Proposal,Developer Guide", + "title":"Development and Design Proposal", + "githuburl":"" + }, + { + "uri":"dws_04_0076.html", + "product_code":"dws", + "code":"118", + "des":"The name of a database object must contain 1 to 63 characters, start with a letter or underscore (_), and can contain letters, digits, underscores (_), dollar signs ($), ", + "doc_type":"devg", + "kw":"Database Object Naming Conventions,Development and Design Proposal,Developer Guide", + "title":"Database Object Naming Conventions", + "githuburl":"" + }, + { + "uri":"dws_04_0077.html", + "product_code":"dws", + "code":"119", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Database Object Design", + "title":"Database Object Design", + "githuburl":"" + }, + { + "uri":"dws_04_0078.html", + "product_code":"dws", + "code":"120", + "des":"In GaussDB(DWS), services can be isolated by databases and schemas. Databases share few resources and cannot directly access each other. Connections to and permissions", + "doc_type":"devg", + "kw":"Database and Schema Design,Database Object Design,Developer Guide", + "title":"Database and Schema Design", + "githuburl":"" + }, + { + "uri":"dws_04_0079.html", + "product_code":"dws", + "code":"121", + "des":"GaussDB(DWS) uses a distributed architecture. Data is distributed on DNs. Comply with the following principles to properly design a table:[Notice] Evenly distribute data ", + "doc_type":"devg", + "kw":"Table Design,Database Object Design,Developer Guide", + "title":"Table Design", + "githuburl":"" + }, + { + "uri":"dws_04_0080.html", + "product_code":"dws", + "code":"122", + "des":"Comply with the following rules to improve query efficiency when you design columns:[Proposal] Use the most efficient data types allowed.If all of the following number ty", + "doc_type":"devg", + "kw":"Column Design,Database Object Design,Developer Guide", + "title":"Column Design", + "githuburl":"" + }, + { + "uri":"dws_04_0081.html", + "product_code":"dws", + "code":"123", + "des":"[Proposal] If all the column values can be obtained from services, you are not advised to use the DEFAULT constraint, because doing so will generate unexpected results du", + "doc_type":"devg", + "kw":"Constraint Design,Database Object Design,Developer Guide", + "title":"Constraint Design", + "githuburl":"" + }, + { + "uri":"dws_04_0082.html", + "product_code":"dws", + "code":"124", + "des":"[Proposal] Do not nest views unless they have strong dependency on each other.[Proposal] Try to avoid sort operations in a view definition.[Proposal] Minimize joined colu", + "doc_type":"devg", + "kw":"View and Joined Table Design,Database Object Design,Developer Guide", + "title":"View and Joined Table Design", + "githuburl":"" + }, + { + "uri":"dws_04_0083.html", + "product_code":"dws", + "code":"125", + "des":"Currently, third-party tools are connected to GaussDB(DWS) through JDBC. This section describes the precautions for configuring the tools.[Notice] When a third-party tool ", + "doc_type":"devg", + "kw":"JDBC Configuration,Development and Design Proposal,Developer Guide", + "title":"JDBC Configuration", + "githuburl":"" + }, + { + "uri":"dws_04_0084.html", + "product_code":"dws", + "code":"126", + "des":"[Proposal] In GaussDB(DWS), you are advised to execute DDL operations, such as creating tables or making comments, separately from batch processing jobs to avoid performan", + "doc_type":"devg", + "kw":"SQL Compilation,Development and Design Proposal,Developer Guide", + "title":"SQL Compilation", + "githuburl":"" + }, + { + "uri":"dws_04_0971.html", + "product_code":"dws", + "code":"127", + "des":"[Notice] Java UDFs can perform some Java logic calculation. 
Do not encapsulate services in Java UDFs.[Notice] Do not connect to a database in any way (for example, by usi", + "doc_type":"devg", + "kw":"PL/Java Usage,Development and Design Proposal,Developer Guide", + "title":"PL/Java Usage", + "githuburl":"" + }, + { + "uri":"dws_04_0972.html", + "product_code":"dws", + "code":"128", + "des":"Development shall strictly comply with design documents.Program modules shall be highly cohesive and loosely coupled.Proper, comprehensive troubleshooting measures shall ", + "doc_type":"devg", + "kw":"PL/pgSQL Usage,Development and Design Proposal,Developer Guide", + "title":"PL/pgSQL Usage", + "githuburl":"" + }, + { + "uri":"dws_04_0085.html", + "product_code":"dws", + "code":"129", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Guide: JDBC- or ODBC-Based Development", + "title":"Guide: JDBC- or ODBC-Based Development", + "githuburl":"" + }, + { + "uri":"dws_04_0086.html", + "product_code":"dws", + "code":"130", + "des":"If the connection pool mechanism is used during application development, comply with the following specifications:If GUC parameters are set in the connection, before you ", + "doc_type":"devg", + "kw":"Development Specifications,Guide: JDBC- or ODBC-Based Development,Developer Guide", + "title":"Development Specifications", + "githuburl":"" + }, + { + "uri":"dws_04_0087.html", + "product_code":"dws", + "code":"131", + "des":"For details, see section \"Downloading the JDBC or ODBC Driver\" in the Data Warehouse Service User Guide.", + "doc_type":"devg", + "kw":"Downloading Drivers,Guide: JDBC- or ODBC-Based Development,Developer Guide", + "title":"Downloading Drivers", + "githuburl":"" + }, + { + "uri":"dws_04_0088.html", + "product_code":"dws", + "code":"132", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"JDBC-Based Development", + "title":"JDBC-Based Development", + "githuburl":"" + }, + { + "uri":"dws_04_0090.html", + "product_code":"dws", + "code":"133", + "des":"Obtain the package dws_8.1.x_jdbc_driver.zip from the management console. For details, see Downloading Drivers.Compressed in it is the JDBC driver JAR package:gsjdbc4.jar", + "doc_type":"devg", + "kw":"JDBC Package and Driver Class,JDBC-Based Development,Developer Guide", + "title":"JDBC Package and Driver Class", + "githuburl":"" + }, + { + "uri":"dws_04_0091.html", + "product_code":"dws", + "code":"134", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Development Process,JDBC-Based Development,Developer Guide", + "title":"Development Process", + "githuburl":"" + }, + { + "uri":"dws_04_0092.html", + "product_code":"dws", + "code":"135", + "des":"Load the database driver before creating a database connection.You can load the driver in the following ways:Implicitly loading the driver before creating a connection in", + "doc_type":"devg", + "kw":"Loading a Driver,JDBC-Based Development,Developer Guide", + "title":"Loading a Driver", + "githuburl":"" + }, + { + "uri":"dws_04_0093.html", + "product_code":"dws", + "code":"136", + "des":"After a database is connected, you can execute SQL statements in the database.If you use an open-source Java Database Connectivity (JDBC) driver, ensure that the database", + "doc_type":"devg", + "kw":"Connecting to a Database,JDBC-Based Development,Developer Guide", + "title":"Connecting to a Database", + "githuburl":"" + }, + { + "uri":"dws_04_0095.html", + "product_code":"dws", + "code":"137", + "des":"The application operates on data in the database by running SQL statements that do not transfer parameters. To do so, perform the following steps:", + "doc_type":"devg", + "kw":"Executing SQL Statements,JDBC-Based Development,Developer Guide", + "title":"Executing SQL Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0096.html", + "product_code":"dws", + "code":"138", + "des":"Different types of result sets are applicable to different application scenarios. Applications select proper types of result sets based on requirements. Before executing ", + "doc_type":"devg", + "kw":"Processing Data in a Result Set,JDBC-Based Development,Developer Guide", + "title":"Processing Data in a Result Set", + "githuburl":"" + }, + { + "uri":"dws_04_0097.html", + "product_code":"dws", + "code":"139", + "des":"After you complete required data operations in the database, close the database connection.Call the close method to close the connection, for example, conn.close().", + "doc_type":"devg", + "kw":"Closing the Connection,JDBC-Based Development,Developer Guide", + "title":"Closing the Connection", + "githuburl":"" + }, + { + "uri":"dws_04_0098.html", + "product_code":"dws", + "code":"140", + "des":"Before completing the following example, you need to create a stored procedure.This example illustrates how to develop applications based on the GaussDB(DWS) JDBC interfa", + "doc_type":"devg", + "kw":"Example: Common Operations,JDBC-Based Development,Developer Guide", + "title":"Example: Common Operations", + "githuburl":"" + }, + { + "uri":"dws_04_0099.html", + "product_code":"dws", + "code":"141", + "des":"If the primary DN is faulty and cannot be restored within 40s, its standby is automatically promoted to primary to ensure the normal running of the cluster. 
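The execute, process, and close steps described above can be sketched end to end as follows; the query and connection settings are illustrative only.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class QuerySketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery(
                "SELECT table_name FROM information_schema.tables LIMIT 5");
        while (rs.next()) {                 // iterate the result set row by row
            System.out.println(rs.getString(1));
        }
        rs.close();
        stmt.close();
        conn.close();                       // release the connection when done
    }
}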
Jobs running ", + "doc_type":"devg", + "kw":"Example: Retrying SQL Queries for Applications,JDBC-Based Development,Developer Guide", + "title":"Example: Retrying SQL Queries for Applications", + "githuburl":"" + }, + { + "uri":"dws_04_0100.html", + "product_code":"dws", + "code":"142", + "des":"When the Java language is used for secondary development based on GaussDB(DWS), you can use the CopyManager interface to export data from the database to a local file or ", + "doc_type":"devg", + "kw":"Example: Importing and Exporting Data Through Local Files,JDBC-Based Development,Developer Guide", + "title":"Example: Importing and Exporting Data Through Local Files", + "githuburl":"" + }, + { + "uri":"dws_04_0101.html", + "product_code":"dws", + "code":"143", + "des":"The following example shows how to use CopyManager to migrate data from MySQL to GaussDB(DWS).", + "doc_type":"devg", + "kw":"Example: Migrating Data from MySQL to GaussDB(DWS),JDBC-Based Development,Developer Guide", + "title":"Example: Migrating Data from MySQL to GaussDB(DWS)", + "githuburl":"" + }, + { + "uri":"dws_04_0102.html", + "product_code":"dws", + "code":"144", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"JDBC Interface Reference", + "title":"JDBC Interface Reference", + "githuburl":"" + }, + { + "uri":"dws_04_0103.html", + "product_code":"dws", + "code":"145", + "des":"This section describes java.sql.Connection, the interface for connecting to a database.The AutoCommit mode is used by default within the interface. If you disable it runn", + "doc_type":"devg", + "kw":"java.sql.Connection,JDBC Interface Reference,Developer Guide", + "title":"java.sql.Connection", + "githuburl":"" + }, + { + "uri":"dws_04_0104.html", + "product_code":"dws", + "code":"146", + "des":"This section describes java.sql.CallableStatement, the stored procedure execution interface.The batch operation of statements containing OUT parameters is not allowed.The ", + "doc_type":"devg", + "kw":"java.sql.CallableStatement,JDBC Interface Reference,Developer Guide", + "title":"java.sql.CallableStatement", + "githuburl":"" + }, + { + "uri":"dws_04_0105.html", + "product_code":"dws", + "code":"147", + "des":"This section describes java.sql.DatabaseMetaData, the interface for obtaining database metadata.", + "doc_type":"devg", + "kw":"java.sql.DatabaseMetaData,JDBC Interface Reference,Developer Guide", + "title":"java.sql.DatabaseMetaData", + "githuburl":"" + }, + { + "uri":"dws_04_0106.html", + "product_code":"dws", + "code":"148", + "des":"This section describes java.sql.Driver, the database driver interface.", + "doc_type":"devg", + "kw":"java.sql.Driver,JDBC Interface Reference,Developer Guide", + "title":"java.sql.Driver", + "githuburl":"" + }, + { + "uri":"dws_04_0107.html", + "product_code":"dws", + "code":"149", + "des":"This section describes java.sql.PreparedStatement, the interface for preparing statements.Execute addBatch() and execute() only after running clearBatch().Batch is not cl", + "doc_type":"devg", + "kw":"java.sql.PreparedStatement,JDBC Interface Reference,Developer Guide", + "title":"java.sql.PreparedStatement", + "githuburl":"" + }, + { + "uri":"dws_04_0108.html", + "product_code":"dws", + "code":"150", + "des":"This section describes java.sql.ResultSet, the interface for execution result 
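A hedged sketch of the CopyManager-based export and import mentioned above, assuming the driver exposes the PostgreSQL-style org.postgresql.copy.CopyManager API; the file path, table, and connection details are invented.

import java.io.FileReader;
import java.io.FileWriter;
import java.sql.Connection;
import java.sql.DriverManager;

import org.postgresql.copy.CopyManager;
import org.postgresql.core.BaseConnection;

public class CopySketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password")) {
            CopyManager copy = new CopyManager((BaseConnection) conn);
            // Export a table to a local CSV file...
            try (FileWriter out = new FileWriter("/tmp/orders.csv")) {
                copy.copyOut("COPY sales.orders TO STDOUT WITH (FORMAT csv)", out);
            }
            // ...and load it back in batches.
            try (FileReader in = new FileReader("/tmp/orders.csv")) {
                copy.copyIn("COPY sales.orders FROM STDIN WITH (FORMAT csv)", in);
            }
        }
    }
}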
sets.One Statement cannot have multiple open ResultSets.The cursor that is used for traversi", + "doc_type":"devg", + "kw":"java.sql.ResultSet,JDBC Interface Reference,Developer Guide", + "title":"java.sql.ResultSet", + "githuburl":"" + }, + { + "uri":"dws_04_0109.html", + "product_code":"dws", + "code":"151", + "des":"This section describes java.sql.ResultSetMetaData, which provides details about ResultSet object information.", + "doc_type":"devg", + "kw":"java.sql.ResultSetMetaData,JDBC Interface Reference,Developer Guide", + "title":"java.sql.ResultSetMetaData", + "githuburl":"" + }, + { + "uri":"dws_04_0110.html", + "product_code":"dws", + "code":"152", + "des":"This section describes java.sql.Statement, the interface for executing SQL statements.Using setFetchSize can reduce the memory occupied by result sets on the client. Resu", + "doc_type":"devg", + "kw":"java.sql.Statement,JDBC Interface Reference,Developer Guide", + "title":"java.sql.Statement", + "githuburl":"" + }, + { + "uri":"dws_04_0111.html", + "product_code":"dws", + "code":"153", + "des":"This section describes javax.sql.ConnectionPoolDataSource, the interface for data source connection pools.", + "doc_type":"devg", + "kw":"javax.sql.ConnectionPoolDataSource,JDBC Interface Reference,Developer Guide", + "title":"javax.sql.ConnectionPoolDataSource", + "githuburl":"" + }, + { + "uri":"dws_04_0112.html", + "product_code":"dws", + "code":"154", + "des":"This section describes javax.sql.DataSource, the interface for data sources.", + "doc_type":"devg", + "kw":"javax.sql.DataSource,JDBC Interface Reference,Developer Guide", + "title":"javax.sql.DataSource", + "githuburl":"" + }, + { + "uri":"dws_04_0113.html", + "product_code":"dws", + "code":"155", + "des":"This section describes javax.sql.PooledConnection, the connection interface created by a connection pool.", + "doc_type":"devg", + "kw":"javax.sql.PooledConnection,JDBC Interface Reference,Developer Guide", + "title":"javax.sql.PooledConnection", + "githuburl":"" + }, + { + "uri":"dws_04_0114.html", + "product_code":"dws", + "code":"156", + "des":"This section describes javax.naming.Context, the context interface for connection configuration.", + "doc_type":"devg", + "kw":"javax.naming.Context,JDBC Interface Reference,Developer Guide", + "title":"javax.naming.Context", + "githuburl":"" + }, + { + "uri":"dws_04_0115.html", + "product_code":"dws", + "code":"157", + "des":"This section describes javax.naming.spi.InitialContextFactory, the initial context factory interface.", + "doc_type":"devg", + "kw":"javax.naming.spi.InitialContextFactory,JDBC Interface Reference,Developer Guide", + "title":"javax.naming.spi.InitialContextFactory", + "githuburl":"" + }, + { + "uri":"dws_04_0116.html", + "product_code":"dws", + "code":"158", + "des":"CopyManager is an API interface class provided by the JDBC driver in GaussDB(DWS). It is used to import data to GaussDB(DWS) in batches.The CopyManager class is in the or", + "doc_type":"devg", + "kw":"CopyManager,JDBC Interface Reference,Developer Guide", + "title":"CopyManager", + "githuburl":"" + }, + { + "uri":"dws_04_0117.html", + "product_code":"dws", + "code":"159", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
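To illustrate the setFetchSize note above: with autocommit disabled, the driver can fetch the result set in batches instead of caching it entirely on the client. Connection settings and the table name are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FetchSizeSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password")) {
            conn.setAutoCommit(false);        // cursor-based fetching needs autocommit off
            try (Statement stmt = conn.createStatement()) {
                stmt.setFetchSize(1000);      // pull 1000 rows per round trip
                try (ResultSet rs = stmt.executeQuery("SELECT * FROM sales.orders")) {
                    long rows = 0;
                    while (rs.next()) {
                        rows++;               // process each row without caching the whole set
                    }
                    System.out.println(rows + " rows streamed");
                }
            }
            conn.commit();
        }
    }
}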
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"ODBC-Based Development", + "title":"ODBC-Based Development", + "githuburl":"" + }, + { + "uri":"dws_04_0118.html", + "product_code":"dws", + "code":"160", + "des":"Obtain the dws_8.1.x_odbc_driver_for_xxx_xxx.zip package from the release package. In the Linux OS, header files (including sql.h and sqlext.h) and library (libodbc.so) a", + "doc_type":"devg", + "kw":"ODBC Package and Its Dependent Libraries and Header Files,ODBC-Based Development,Developer Guide", + "title":"ODBC Package and Its Dependent Libraries and Header Files", + "githuburl":"" + }, + { + "uri":"dws_04_0119.html", + "product_code":"dws", + "code":"161", + "des":"The ODBC DRIVER (psqlodbcw.so) provided by GaussDB(DWS) can be used after it has been configured in the data source. To configure data sources, users must configure the o", + "doc_type":"devg", + "kw":"Configuring a Data Source in the Linux OS,ODBC-Based Development,Developer Guide", + "title":"Configuring a Data Source in the Linux OS", + "githuburl":"" + }, + { + "uri":"dws_04_0120.html", + "product_code":"dws", + "code":"162", + "des":"Configure the ODBC data source using the ODBC data source manager preinstalled in the Windows OS.Decompress GaussDB-8.1.1-Windows-Odbc.tar.gz and install psqlodbc.msi (fo", + "doc_type":"devg", + "kw":"Configuring a Data Source in the Windows OS,ODBC-Based Development,Developer Guide", + "title":"Configuring a Data Source in the Windows OS", + "githuburl":"" + }, + { + "uri":"dws_04_0123.html", + "product_code":"dws", + "code":"163", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"ODBC Development Example,ODBC-Based Development,Developer Guide", + "title":"ODBC Development Example", + "githuburl":"" + }, + { + "uri":"dws_04_0124.html", + "product_code":"dws", + "code":"164", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"ODBC Interfaces", + "title":"ODBC Interfaces", + "githuburl":"" + }, + { + "uri":"dws_04_0125.html", + "product_code":"dws", + "code":"165", + "des":"In ODBC 3.x, SQLAllocEnv (an ODBC 2.x function) was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.", + "doc_type":"devg", + "kw":"SQLAllocEnv,ODBC Interfaces,Developer Guide", + "title":"SQLAllocEnv", + "githuburl":"" + }, + { + "uri":"dws_04_0126.html", + "product_code":"dws", + "code":"166", + "des":"In ODBC 3.x, SQLAllocConnect (an ODBC 2.x function) was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.", + "doc_type":"devg", + "kw":"SQLAllocConnect,ODBC Interfaces,Developer Guide", + "title":"SQLAllocConnect", + "githuburl":"" + }, + { + "uri":"dws_04_0127.html", + "product_code":"dws", + "code":"167", + "des":"SQLAllocHandle allocates environment, connection, or statement handles. 
This function is a generic function for allocating handles that replaces the deprecated ODBC 2.x f", + "doc_type":"devg", + "kw":"SQLAllocHandle,ODBC Interfaces,Developer Guide", + "title":"SQLAllocHandle", + "githuburl":"" + }, + { + "uri":"dws_04_0128.html", + "product_code":"dws", + "code":"168", + "des":"In ODBC 3.x, SQLAllocStmt was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.", + "doc_type":"devg", + "kw":"SQLAllocStmt,ODBC Interfaces,Developer Guide", + "title":"SQLAllocStmt", + "githuburl":"" + }, + { + "uri":"dws_04_0129.html", + "product_code":"dws", + "code":"169", + "des":"SQLBindCol is used to associate (bind) columns in a result set to an application data buffer.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates", + "doc_type":"devg", + "kw":"SQLBindCol,ODBC Interfaces,Developer Guide", + "title":"SQLBindCol", + "githuburl":"" + }, + { + "uri":"dws_04_0130.html", + "product_code":"dws", + "code":"170", + "des":"SQLBindParameter is used to associate (bind) parameter markers in an SQL statement to a buffer.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicat", + "doc_type":"devg", + "kw":"SQLBindParameter,ODBC Interfaces,Developer Guide", + "title":"SQLBindParameter", + "githuburl":"" + }, + { + "uri":"dws_04_0131.html", + "product_code":"dws", + "code":"171", + "des":"SQLColAttribute returns the descriptor information about a column in the result set.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some war", + "doc_type":"devg", + "kw":"SQLColAttribute,ODBC Interfaces,Developer Guide", + "title":"SQLColAttribute", + "githuburl":"" + }, + { + "uri":"dws_04_0132.html", + "product_code":"dws", + "code":"172", + "des":"SQLConnect establishes a connection between a driver and a data source. After the connection, the connection handle can be used to access all information about the data s", + "doc_type":"devg", + "kw":"SQLConnect,ODBC Interfaces,Developer Guide", + "title":"SQLConnect", + "githuburl":"" + }, + { + "uri":"dws_04_0133.html", + "product_code":"dws", + "code":"173", + "des":"SQLDisconnect closes the connection associated with the database connection handle.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warn", + "doc_type":"devg", + "kw":"SQLDisconnect,ODBC Interfaces,Developer Guide", + "title":"SQLDisconnect", + "githuburl":"" + }, + { + "uri":"dws_04_0134.html", + "product_code":"dws", + "code":"174", + "des":"SQLExecDirect executes a prepared SQL statement specified in this parameter. This is the fastest execution method for executing only one SQL statement at a time.SQL_SUCCE", + "doc_type":"devg", + "kw":"SQLExecDirect,ODBC Interfaces,Developer Guide", + "title":"SQLExecDirect", + "githuburl":"" + }, + { + "uri":"dws_04_0135.html", + "product_code":"dws", + "code":"175", + "des":"The SQLExecute function executes a prepared SQL statement using SQLPrepare. 
The statement is executed using the current value of any application variables that were bound", + "doc_type":"devg", + "kw":"SQLExecute,ODBC Interfaces,Developer Guide", + "title":"SQLExecute", + "githuburl":"" + }, + { + "uri":"dws_04_0136.html", + "product_code":"dws", + "code":"176", + "des":"SQLFetch advances the cursor to the next row of the result set and retrieves any bound columns.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicat", + "doc_type":"devg", + "kw":"SQLFetch,ODBC Interfaces,Developer Guide", + "title":"SQLFetch", + "githuburl":"" + }, + { + "uri":"dws_04_0137.html", + "product_code":"dws", + "code":"177", + "des":"In ODBC 3.x, SQLFreeStmt (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.", + "doc_type":"devg", + "kw":"SQLFreeStmt,ODBC Interfaces,Developer Guide", + "title":"SQLFreeStmt", + "githuburl":"" + }, + { + "uri":"dws_04_0138.html", + "product_code":"dws", + "code":"178", + "des":"In ODBC 3.x, SQLFreeConnect (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.", + "doc_type":"devg", + "kw":"SQLFreeConnect,ODBC Interfaces,Developer Guide", + "title":"SQLFreeConnect", + "githuburl":"" + }, + { + "uri":"dws_04_0139.html", + "product_code":"dws", + "code":"179", + "des":"SQLFreeHandle releases resources associated with a specific environment, connection, or statement handle. It replaces the ODBC 2.x functions: SQLFreeEnv, SQLFreeConnect, ", + "doc_type":"devg", + "kw":"SQLFreeHandle,ODBC Interfaces,Developer Guide", + "title":"SQLFreeHandle", + "githuburl":"" + }, + { + "uri":"dws_04_0140.html", + "product_code":"dws", + "code":"180", + "des":"In ODBC 3.x, SQLFreeEnv (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.", + "doc_type":"devg", + "kw":"SQLFreeEnv,ODBC Interfaces,Developer Guide", + "title":"SQLFreeEnv", + "githuburl":"" + }, + { + "uri":"dws_04_0141.html", + "product_code":"dws", + "code":"181", + "des":"SQLPrepare prepares an SQL statement to be executed.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.SQ", + "doc_type":"devg", + "kw":"SQLPrepare,ODBC Interfaces,Developer Guide", + "title":"SQLPrepare", + "githuburl":"" + }, + { + "uri":"dws_04_0142.html", + "product_code":"dws", + "code":"182", + "des":"SQLGetData retrieves data for a single column in the current row of the result set. 
It can be called multiple times to retrieve data of variable length.SQL_SUCCESS indic", + "doc_type":"devg", + "kw":"SQLGetData,ODBC Interfaces,Developer Guide", + "title":"SQLGetData", + "githuburl":"" + }, + { + "uri":"dws_04_0143.html", + "product_code":"dws", + "code":"183", + "des":"SQLGetDiagRec returns the current values of multiple fields of a diagnostic record that contains error, warning, and status information.SQL_SUCCESS indicates that the cal", + "doc_type":"devg", + "kw":"SQLGetDiagRec,ODBC Interfaces,Developer Guide", + "title":"SQLGetDiagRec", + "githuburl":"" + }, + { + "uri":"dws_04_0144.html", + "product_code":"dws", + "code":"184", + "des":"SQLSetConnectAttr sets connection attributes.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.SQL_ERROR", + "doc_type":"devg", + "kw":"SQLSetConnectAttr,ODBC Interfaces,Developer Guide", + "title":"SQLSetConnectAttr", + "githuburl":"" + }, + { + "uri":"dws_04_0145.html", + "product_code":"dws", + "code":"185", + "des":"SQLSetEnvAttr sets environment attributes.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.SQL_ERROR in", + "doc_type":"devg", + "kw":"SQLSetEnvAttr,ODBC Interfaces,Developer Guide", + "title":"SQLSetEnvAttr", + "githuburl":"" + }, + { + "uri":"dws_04_0146.html", + "product_code":"dws", + "code":"186", + "des":"SQLSetStmtAttr sets attributes related to a statement.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.", + "doc_type":"devg", + "kw":"SQLSetStmtAttr,ODBC Interfaces,Developer Guide", + "title":"SQLSetStmtAttr", + "githuburl":"" + }, + { + "uri":"dws_04_0301.html", + "product_code":"dws", + "code":"187", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"PostGIS Extension", + "title":"PostGIS Extension", + "githuburl":"" + }, + { + "uri":"dws_04_0302.html", + "product_code":"dws", + "code":"188", + "des":"The third-party software that the PostGIS Extension depends on needs to be installed separately. If you need to use PostGIS, submit a service ticket or contact technical ", + "doc_type":"devg", + "kw":"PostGIS,PostGIS Extension,Developer Guide", + "title":"PostGIS", + "githuburl":"" + }, + { + "uri":"dws_04_0304.html", + "product_code":"dws", + "code":"189", + "des":"The third-party software that the PostGIS Extension depends on needs to be installed separately. If you need to use PostGIS, submit a service ticket or contact technical ", + "doc_type":"devg", + "kw":"Using PostGIS,PostGIS Extension,Developer Guide", + "title":"Using PostGIS", + "githuburl":"" + }, + { + "uri":"dws_04_0305.html", + "product_code":"dws", + "code":"190", + "des":"In GaussDB(DWS), the PostGIS Extension supports the following data types: box2d, box3d, geometry_dump, geometry, geography, and raster. If PostGIS is used by a user other than the creator of t", + "doc_type":"devg", + "kw":"PostGIS Support and Constraints,PostGIS Extension,Developer Guide", + "title":"PostGIS Support and Constraints", + "githuburl":"" + }, + { + "uri":"dws_04_0306.html", + "product_code":"dws", + "code":"191", + "des":"This document contains the open source software notice for the product. 
It is confidential information of the copyright holder. The recipient shall protect it in due c", + "doc_type":"devg", + "kw":"OPEN SOURCE SOFTWARE NOTICE (For PostGIS),PostGIS Extension,Developer Guide", + "title":"OPEN SOURCE SOFTWARE NOTICE (For PostGIS)", + "githuburl":"" + }, + { + "uri":"dws_04_0393.html", + "product_code":"dws", + "code":"192", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Resource Monitoring", + "title":"Resource Monitoring", + "githuburl":"" + }, + { + "uri":"dws_04_0394.html", + "product_code":"dws", + "code":"193", + "des":"In the multi-tenant management framework, you can query the real-time or historical usage of all user resources (including memory, CPU cores, storage space, temporary spa", + "doc_type":"devg", + "kw":"User Resource Query,Resource Monitoring,Developer Guide", + "title":"User Resource Query", + "githuburl":"" + }, + { + "uri":"dws_04_0395.html", + "product_code":"dws", + "code":"194", + "des":"GaussDB(DWS) provides a view for monitoring the memory usage of the entire cluster.Query the pgxc_total_memory_detail view as a user with sysadmin permissions.SELECT * FR", + "doc_type":"devg", + "kw":"Monitoring Memory Resources,Resource Monitoring,Developer Guide", + "title":"Monitoring Memory Resources", + "githuburl":"" + }, + { + "uri":"dws_04_0396.html", + "product_code":"dws", + "code":"195", + "des":"GaussDB(DWS) provides system catalogs for monitoring the resource usage of CNs and DNs (including memory, CPU usage, disk I/O, process physical I/O, and process logical I", + "doc_type":"devg", + "kw":"Instance Resource Monitoring,Resource Monitoring,Developer Guide", + "title":"Instance Resource Monitoring", + "githuburl":"" + }, + { + "uri":"dws_04_0397.html", + "product_code":"dws", + "code":"196", + "des":"You can query real-time Top SQL in real-time resource monitoring views at different levels. The real-time resource monitoring view records the resource usage (including m", + "doc_type":"devg", + "kw":"Real-time TopSQL,Resource Monitoring,Developer Guide", + "title":"Real-time TopSQL", + "githuburl":"" + }, + { + "uri":"dws_04_0398.html", + "product_code":"dws", + "code":"197", + "des":"You can query historical Top SQL in historical resource monitoring views. The historical resource monitoring view records the resource usage (of memory, disk, CPU time, a", + "doc_type":"devg", + "kw":"Historical TopSQL,Resource Monitoring,Developer Guide", + "title":"Historical TopSQL", + "githuburl":"" + }, + { + "uri":"dws_04_0399.html", + "product_code":"dws", + "code":"198", + "des":"In this section, TPC-DS sample data is used as an example to describe how to query Real-time TopSQL and Historical TopSQL.To query for historical or archived resource mon", + "doc_type":"devg", + "kw":"TopSQL Query Example,Resource Monitoring,Developer Guide", + "title":"TopSQL Query Example", + "githuburl":"" + }, + { + "uri":"dws_04_0400.html", + "product_code":"dws", + "code":"199", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
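A small sketch of querying the pgxc_total_memory_detail monitoring view over JDBC, printing columns generically so that no column names are assumed; run it as a user with sysadmin permissions, per the note above. Connection settings are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;

public class MemoryMonitorSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT * FROM pgxc_total_memory_detail")) {
            ResultSetMetaData md = rs.getMetaData();
            while (rs.next()) {
                StringBuilder row = new StringBuilder();
                for (int i = 1; i <= md.getColumnCount(); i++) {
                    row.append(md.getColumnName(i)).append('=')
                       .append(rs.getString(i)).append(' ');
                }
                System.out.println(row); // one memory entry per row
            }
        }
    }
}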
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Query Performance Optimization", + "title":"Query Performance Optimization", + "githuburl":"" + }, + { + "uri":"dws_04_0402.html", + "product_code":"dws", + "code":"200", + "des":"The aim of SQL optimization is to maximize the utilization of resources, including CPU, memory, disk I/O, and network I/O. To maximize resource utilization is to run SQL ", + "doc_type":"devg", + "kw":"Overview of Query Performance Optimization,Query Performance Optimization,Developer Guide", + "title":"Overview of Query Performance Optimization", + "githuburl":"" + }, + { + "uri":"dws_04_0403.html", + "product_code":"dws", + "code":"201", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Query Analysis", + "title":"Query Analysis", + "githuburl":"" + }, + { + "uri":"dws_04_0409.html", + "product_code":"dws", + "code":"202", + "des":"The process from receiving SQL statements to the statement execution by the SQL engine is shown in Figure 1 and Table 1. The texts in red are steps where database adminis", + "doc_type":"devg", + "kw":"Query Execution Process,Query Analysis,Developer Guide", + "title":"Query Execution Process", + "githuburl":"" + }, + { + "uri":"dws_04_0410.html", + "product_code":"dws", + "code":"203", + "des":"The SQL execution plan is a node tree, which displays detailed procedure when GaussDB(DWS) runs an SQL statement. A database operator indicates one step.You can run the E", + "doc_type":"devg", + "kw":"Overview of the SQL Execution Plan,Query Analysis,Developer Guide", + "title":"Overview of the SQL Execution Plan", + "githuburl":"" + }, + { + "uri":"dws_04_0411.html", + "product_code":"dws", + "code":"204", + "des":"As described in Overview of the SQL Execution Plan, EXPLAIN displays the execution plan, but will not actually run SQL statements. EXPLAIN ANALYZE and EXPLAIN PERFORMANCE", + "doc_type":"devg", + "kw":"Deep Dive on the SQL Execution Plan,Query Analysis,Developer Guide", + "title":"Deep Dive on the SQL Execution Plan", + "githuburl":"" + }, + { + "uri":"dws_04_0412.html", + "product_code":"dws", + "code":"205", + "des":"This section describes how to query SQL statements whose execution takes a long time, leading to poor system performance.After the query, query statements are returned as", + "doc_type":"devg", + "kw":"Querying SQL Statements That Affect Performance Most,Query Analysis,Developer Guide", + "title":"Querying SQL Statements That Affect Performance Most", + "githuburl":"" + }, + { + "uri":"dws_04_0413.html", + "product_code":"dws", + "code":"206", + "des":"During database running, query statements are blocked in some service scenarios and run for an excessively long time. In this case, you can forcibly terminate the faulty ", + "doc_type":"devg", + "kw":"Checking Blocked Statements,Query Analysis,Developer Guide", + "title":"Checking Blocked Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0430.html", + "product_code":"dws", + "code":"207", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
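EXPLAIN output can be read like any other result set, one plan line per row, which makes plan inspection easy to script; the query below is illustrative only.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ExplainSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement();
             // EXPLAIN prints the plan tree without running the query;
             // EXPLAIN ANALYZE or EXPLAIN PERFORMANCE would also execute it.
             ResultSet rs = stmt.executeQuery(
                     "EXPLAIN SELECT * FROM sales.orders WHERE amount > 100")) {
            while (rs.next()) {
                System.out.println(rs.getString(1)); // one plan line per row
            }
        }
    }
}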
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Query Improvement", + "title":"Query Improvement", + "githuburl":"" + }, + { + "uri":"dws_04_0435.html", + "product_code":"dws", + "code":"208", + "des":"You can analyze slow SQL statements to optimize them.", + "doc_type":"devg", + "kw":"Optimization Process,Query Improvement,Developer Guide", + "title":"Optimization Process", + "githuburl":"" + }, + { + "uri":"dws_04_0436.html", + "product_code":"dws", + "code":"209", + "des":"In a database, statistics indicate the source data of a plan generated by a planner. If no statistics have been collected or they are out of date, the execution plan may seri", + "doc_type":"devg", + "kw":"Updating Statistics,Query Improvement,Developer Guide", + "title":"Updating Statistics", + "githuburl":"" + }, + { + "uri":"dws_04_0437.html", + "product_code":"dws", + "code":"210", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Reviewing and Modifying a Table Definition", + "title":"Reviewing and Modifying a Table Definition", + "githuburl":"" + }, + { + "uri":"dws_04_0438.html", + "product_code":"dws", + "code":"211", + "des":"In a distributed framework, data is distributed on DNs. Data on one or more DNs is stored on a physical storage device. To properly define a table, you must:Evenly distri", + "doc_type":"devg", + "kw":"Reviewing and Modifying a Table Definition,Reviewing and Modifying a Table Definition,Developer Guid", + "title":"Reviewing and Modifying a Table Definition", + "githuburl":"" + }, + { + "uri":"dws_04_0439.html", + "product_code":"dws", + "code":"212", + "des":"During database design, some key factors about table design will greatly affect the subsequent query performance of the database. Table design affects data storage as wel", + "doc_type":"devg", + "kw":"Selecting a Storage Model,Reviewing and Modifying a Table Definition,Developer Guide", + "title":"Selecting a Storage Model", + "githuburl":"" + }, + { + "uri":"dws_04_0440.html", + "product_code":"dws", + "code":"213", + "des":"In replication mode, full data in a table is copied to each DN in the cluster. This mode is used for tables containing a small volume of data. Full data in a table stored", + "doc_type":"devg", + "kw":"Selecting a Distribution Mode,Reviewing and Modifying a Table Definition,Developer Guide", + "title":"Selecting a Distribution Mode", + "githuburl":"" + }, + { + "uri":"dws_04_0441.html", + "product_code":"dws", + "code":"214", + "des":"The distribution column in a hash table must meet the following requirements, which are ranked by priority in descending order:The value of the distribution column should", + "doc_type":"devg", + "kw":"Selecting a Distribution Column,Reviewing and Modifying a Table Definition,Developer Guide", + "title":"Selecting a Distribution Column", + "githuburl":"" + }, + { + "uri":"dws_04_0442.html", + "product_code":"dws", + "code":"215", + "des":"Partial Cluster Key is a column-based technology. It can minimize or maximize sparse indexes to quickly filter base tables. 
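A sketch of the storage-model and distribution choices above in DDL form, assuming the common GaussDB(DWS) CREATE TABLE options ORIENTATION and DISTRIBUTE BY HASH; all object names and settings are invented for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TableDefinitionSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement()) {
            // Column store suits analytic scans; hash-distribute on a
            // high-cardinality, evenly distributed column so every DN
            // holds a similar share of rows.
            stmt.executeUpdate("CREATE TABLE sales.orders_col ("
                    + "order_id    integer,"
                    + "customer_id integer,"
                    + "amount      numeric(12,2))"
                    + " WITH (ORIENTATION = COLUMN)"
                    + " DISTRIBUTE BY HASH(order_id)");
        }
    }
}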
Partial cluster key can specify multiple col", + "doc_type":"devg", + "kw":"Using Partial Clustering,Reviewing and Modifying a Table Definition,Developer Guide", + "title":"Using Partial Clustering", + "githuburl":"" + }, + { + "uri":"dws_04_0443.html", + "product_code":"dws", + "code":"216", + "des":"Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The table based on the logic is called a partit", + "doc_type":"devg", + "kw":"Using Partitioned Tables,Reviewing and Modifying a Table Definition,Developer Guide", + "title":"Using Partitioned Tables", + "githuburl":"" + }, + { + "uri":"dws_04_0444.html", + "product_code":"dws", + "code":"217", + "des":"Use the following principles to obtain efficient data types:Using the data type that can be efficiently executedGenerally, calculation of integers (including common compa", + "doc_type":"devg", + "kw":"Selecting a Data Type,Reviewing and Modifying a Table Definition,Developer Guide", + "title":"Selecting a Data Type", + "githuburl":"" + }, + { + "uri":"dws_04_0445.html", + "product_code":"dws", + "code":"218", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Typical SQL Optimization Methods", + "title":"Typical SQL Optimization Methods", + "githuburl":"" + }, + { + "uri":"dws_04_0446.html", + "product_code":"dws", + "code":"219", + "des":"Performance issues may occur when you query data or run the INSERT, DELETE, UPDATE, or CREATE TABLE AS statement. You can query the warning column in the GS_WLM_SESSION_S", + "doc_type":"devg", + "kw":"SQL Self-Diagnosis,Typical SQL Optimization Methods,Developer Guide", + "title":"SQL Self-Diagnosis", + "githuburl":"" + }, + { + "uri":"dws_04_0447.html", + "product_code":"dws", + "code":"220", + "des":"Currently, the GaussDB(DWS) optimizer can use three methods to develop statement execution policies in the distributed framework: generating a statement pushdown plan, a ", + "doc_type":"devg", + "kw":"Optimizing Statement Pushdown,Typical SQL Optimization Methods,Developer Guide", + "title":"Optimizing Statement Pushdown", + "githuburl":"" + }, + { + "uri":"dws_04_0448.html", + "product_code":"dws", + "code":"221", + "des":"When an application runs an SQL statement to operate the database, a large number of subqueries are used because they are clearer than table joins. Especially in complic", + "doc_type":"devg", + "kw":"Optimizing Subqueries,Typical SQL Optimization Methods,Developer Guide", + "title":"Optimizing Subqueries", + "githuburl":"" + }, + { + "uri":"dws_04_0449.html", + "product_code":"dws", + "code":"222", + "des":"GaussDB(DWS) generates optimal execution plans based on the cost estimation. Optimizers need to estimate the number of data rows and the cost based on statistics collecte", + "doc_type":"devg", + "kw":"Optimizing Statistics,Typical SQL Optimization Methods,Developer Guide", + "title":"Optimizing Statistics", + "githuburl":"" + }, + { + "uri":"dws_04_0450.html", + "product_code":"dws", + "code":"223", + "des":"A query statement needs to go through multiple operator procedures to generate the final result. 
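A hedged sketch of a range-partitioned table as described above, so that date predicates can prune partitions; the partition bounds and all names are illustrative.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class PartitionSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement()) {
            // Range partitions let the planner skip partitions that a
            // date predicate cannot touch, so scans read far less data.
            stmt.executeUpdate("CREATE TABLE sales.orders_part ("
                    + "order_id integer, order_date date, amount numeric(12,2))"
                    + " DISTRIBUTE BY HASH(order_id)"
                    + " PARTITION BY RANGE (order_date) ("
                    + "  PARTITION p2022 VALUES LESS THAN ('2023-01-01'),"
                    + "  PARTITION p2023 VALUES LESS THAN ('2024-01-01'),"
                    + "  PARTITION pmax  VALUES LESS THAN (MAXVALUE))");
        }
    }
}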
Sometimes, the overall query performance deteriorates due to long executi", + "doc_type":"devg", + "kw":"Optimizing Operators,Typical SQL Optimization Methods,Developer Guide", + "title":"Optimizing Operators", + "githuburl":"" + }, + { + "uri":"dws_04_0451.html", + "product_code":"dws", + "code":"224", + "des":"Data skew breaks the balance among nodes in the distributed MPP architecture. If the amount of data stored or processed by a node is much greater than that by other nodes", + "doc_type":"devg", + "kw":"Optimizing Data Skew,Typical SQL Optimization Methods,Developer Guide", + "title":"Optimizing Data Skew", + "githuburl":"" + }, + { + "uri":"dws_04_0452.html", + "product_code":"dws", + "code":"225", + "des":"Based on the database SQL execution mechanism and a large amount of practice, it has been found that rewriting SQL statements according to certain rules, while keeping the results correct, can improve execution efficiency.", + "doc_type":"devg", + "kw":"Experience in Rewriting SQL Statements,Query Improvement,Developer Guide", + "title":"Experience in Rewriting SQL Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0453.html", + "product_code":"dws", + "code":"226", + "des":"This section describes the key CN parameters that affect GaussDB(DWS) SQL tuning performance. For details about how to configure these parameters, see Configuring GUC Par", + "doc_type":"devg", + "kw":"Adjusting Key Parameters During SQL Tuning,Query Improvement,Developer Guide", + "title":"Adjusting Key Parameters During SQL Tuning", + "githuburl":"" + }, + { + "uri":"dws_04_0454.html", + "product_code":"dws", + "code":"227", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Hint-based Tuning", + "title":"Hint-based Tuning", + "githuburl":"" + }, + { + "uri":"dws_04_0455.html", + "product_code":"dws", + "code":"228", + "des":"In plan hints, you can specify a join order, join, stream, and scan operations, the number of rows in a result, and redistribution skew information to tune an execution p", + "doc_type":"devg", + "kw":"Plan Hint Optimization,Hint-based Tuning,Developer Guide", + "title":"Plan Hint Optimization", + "githuburl":"" + }, + { + "uri":"dws_04_0456.html", + "product_code":"dws", + "code":"229", + "des":"These hints specify the join order and outer/inner tables.Specify only the join order.Specify the join order and outer/inner tables. The outer/inner tables are specified", + "doc_type":"devg", + "kw":"Join Order Hints,Hint-based Tuning,Developer Guide", + "title":"Join Order Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0457.html", + "product_code":"dws", + "code":"230", + "des":"Specifies the join method. It can be nested loop join, hash join, or merge join.no indicates that the specified hint will not be used for a join.table_list specifies the ", + "doc_type":"devg", + "kw":"Join Operation Hints,Hint-based Tuning,Developer Guide", + "title":"Join Operation Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0458.html", + "product_code":"dws", + "code":"231", + "des":"These hints specify the number of rows in an intermediate result set. 
Both absolute values and relative values are supported. #, +, -, and * are operators used for hinting t", + "doc_type":"devg", + "kw":"Rows Hints,Hint-based Tuning,Developer Guide", + "title":"Rows Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0459.html", + "product_code":"dws", + "code":"232", + "des":"These hints specify a stream operation, which can be broadcast or redistribute.no indicates that the specified hint will not be used for a join.table_list specifies the t", + "doc_type":"devg", + "kw":"Stream Operation Hints,Hint-based Tuning,Developer Guide", + "title":"Stream Operation Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0460.html", + "product_code":"dws", + "code":"233", + "des":"These hints specify a scan operation, which can be tablescan, indexscan, or indexonlyscan.no indicates that the specified hint will not be used for a join.table specifies", + "doc_type":"devg", + "kw":"Scan Operation Hints,Hint-based Tuning,Developer Guide", + "title":"Scan Operation Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0461.html", + "product_code":"dws", + "code":"234", + "des":"These hints specify the name of a sublink block.table indicates the name you have specified for a sublink block.This hint is used by an outer query only when a sublink is", + "doc_type":"devg", + "kw":"Sublink Name Hints,Hint-based Tuning,Developer Guide", + "title":"Sublink Name Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0462.html", + "product_code":"dws", + "code":"235", + "des":"These hints specify redistribution keys containing skew data and skew values, and are used to optimize redistribution involving Join or HashAgg.Specify single-table skew", + "doc_type":"devg", + "kw":"Skew Hints,Hint-based Tuning,Developer Guide", + "title":"Skew Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0463.html", + "product_code":"dws", + "code":"236", + "des":"A hint, or a GUC hint, specifies a configuration parameter value when a plan is generated. Currently, only the following parameters are supported:agg_redistribute_enhance", + "doc_type":"devg", + "kw":"Configuration Parameter Hints,Hint-based Tuning,Developer Guide", + "title":"Configuration Parameter Hints", + "githuburl":"" + }, + { + "uri":"dws_04_0464.html", + "product_code":"dws", + "code":"237", + "des":"Plan hints change an execution plan. You can run EXPLAIN to view the changes.Hints containing errors are invalid and do not affect statement execution. The errors will be", + "doc_type":"devg", + "kw":"Hint Errors, Conflicts, and Other Warnings,Hint-based Tuning,Developer Guide", + "title":"Hint Errors, Conflicts, and Other Warnings", + "githuburl":"" + }, + { + "uri":"dws_04_0465.html", + "product_code":"dws", + "code":"238", + "des":"This section takes the statements in TPC-DS (Q24) as an example to describe how to optimize an execution plan by using hints in 1000X+24DN environments. 
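Putting the hint sections above together, a hedged example of a hinted query checked with EXPLAIN; the leading() and rows() spellings follow the hint syntax sketched above (an assumption to verify against the hint reference), and the tables and values are invented.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class HintSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement();
             // leading((t1 t2)) fixes the join order; rows(t1 #10000)
             // overrides t1's row estimate with an absolute value.
             ResultSet rs = stmt.executeQuery(
                     "EXPLAIN SELECT /*+ leading((t1 t2)) rows(t1 #10000) */ *"
                     + " FROM sales.orders t1 JOIN sales.orders_col t2"
                     + " ON t1.order_id = t2.order_id")) {
            while (rs.next()) {
                System.out.println(rs.getString(1)); // confirm the hints took effect
            }
        }
    }
}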
For example:The o", + "doc_type":"devg", + "kw":"Plan Hint Cases,Hint-based Tuning,Developer Guide", + "title":"Plan Hint Cases", + "githuburl":"" + }, + { + "uri":"dws_04_0466.html", + "product_code":"dws", + "code":"239", + "des":"To ensure proper database running, after INSERT and DELETE operations, you need to routinely run VACUUM FULL and ANALYZE as appropriate for customer scenarios and update s", + "doc_type":"devg", + "kw":"Routinely Maintaining Tables,Query Improvement,Developer Guide", + "title":"Routinely Maintaining Tables", + "githuburl":"" + }, + { + "uri":"dws_04_0467.html", + "product_code":"dws", + "code":"240", + "des":"When data deletion is repeatedly performed in the database, index keys will be deleted from the index page, resulting in index bloat. Recreating an index routinely i", + "doc_type":"devg", + "kw":"Routinely Recreating an Index,Query Improvement,Developer Guide", + "title":"Routinely Recreating an Index", + "githuburl":"" + }, + { + "uri":"dws_04_0468.html", + "product_code":"dws", + "code":"241", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Configuring the SMP", + "title":"Configuring the SMP", + "githuburl":"" + }, + { + "uri":"dws_04_0469.html", + "product_code":"dws", + "code":"242", + "des":"The SMP feature improves the performance through operator parallelism and occupies more system resources, including CPU, memory, network, and I/O. Actually, SMP is a meth", + "doc_type":"devg", + "kw":"Application Scenarios and Restrictions,Configuring the SMP,Developer Guide", + "title":"Application Scenarios and Restrictions", + "githuburl":"" + }, + { + "uri":"dws_04_0470.html", + "product_code":"dws", + "code":"243", + "des":"The SMP architecture trades abundant resources for time. After a plan is parallelized, resource consumption increases, including the CPU, memory, I/O, an", + "doc_type":"devg", + "kw":"Resource Impact on SMP Performance,Configuring the SMP,Developer Guide", + "title":"Resource Impact on SMP Performance", + "githuburl":"" + }, + { + "uri":"dws_04_0471.html", + "product_code":"dws", + "code":"244", + "des":"Besides resource factors, there are other factors that impact the SMP parallelism performance, such as uneven data distribution in a partitioned table and system paralle", + "doc_type":"devg", + "kw":"Other Factors Affecting SMP Performance,Configuring the SMP,Developer Guide", + "title":"Other Factors Affecting SMP Performance", + "githuburl":"" + }, + { + "uri":"dws_04_0472.html", + "product_code":"dws", + "code":"245", + "des":"Starting from this version, SMP auto adaptation is enabled. For newly deployed clusters, the default value of query_dop is 0, and SMP parameters have been adjusted. To en", + "doc_type":"devg", + "kw":"Suggestions for SMP Parameter Settings,Configuring the SMP,Developer Guide", + "title":"Suggestions for SMP Parameter Settings", + "githuburl":"" + }, + { + "uri":"dws_04_0473.html", + "product_code":"dws", + "code":"246", + "des":"To manually optimize SMP, you need to be familiar with Suggestions for SMP Parameter Settings. 
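The routine maintenance described above (ANALYZE, VACUUM FULL, and index rebuilds) can be scripted over JDBC; REINDEX TABLE is assumed here as the rebuild statement, and the table name is a placeholder.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MaintenanceSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement()) {
            stmt.execute("ANALYZE sales.orders");       // refresh optimizer statistics
            stmt.execute("VACUUM FULL sales.orders");   // reclaim space after heavy DML
            stmt.execute("REINDEX TABLE sales.orders"); // rebuild bloated indexes
        }
    }
}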
This section describes how to optimize SMP.The CPU, memory, I/O, and networ", + "doc_type":"devg", + "kw":"SMP Manual Optimization Suggestions,Configuring the SMP,Developer Guide", + "title":"SMP Manual Optimization Suggestions", + "githuburl":"" + }, + { + "uri":"dws_04_0474.html", + "product_code":"dws", + "code":"247", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Optimization Cases", + "title":"Optimization Cases", + "githuburl":"" + }, + { + "uri":"dws_04_0475.html", + "product_code":"dws", + "code":"248", + "des":"Tables are defined as follows:The following query is executed:If a is the distribution column of t1 and t2:Then Streaming exists in the execution plan and the data volume", + "doc_type":"devg", + "kw":"Case: Selecting an Appropriate Distribution Column,Optimization Cases,Developer Guide", + "title":"Case: Selecting an Appropriate Distribution Column", + "githuburl":"" + }, + { + "uri":"dws_04_0476.html", + "product_code":"dws", + "code":"249", + "des":"Query the information about all personnel in the sales department.The original execution plan is as follows before creating the places.place_id and states.state_id indexe", + "doc_type":"devg", + "kw":"Case: Creating an Appropriate Index,Optimization Cases,Developer Guide", + "title":"Case: Creating an Appropriate Index", + "githuburl":"" + }, + { + "uri":"dws_04_0477.html", + "product_code":"dws", + "code":"250", + "des":"Figure 1 shows the execution plan.As shown in Figure 1, the sequential scan phase is time consuming.The JOIN performance is poor because a large number of null values exi", + "doc_type":"devg", + "kw":"Case: Adding NOT NULL for JOIN Columns,Optimization Cases,Developer Guide", + "title":"Case: Adding NOT NULL for JOIN Columns", + "githuburl":"" + }, + { + "uri":"dws_04_0478.html", + "product_code":"dws", + "code":"251", + "des":"In an execution plan, more than 95% of the execution time is spent on window agg performed on the CN. In this case, sum is performed for the two columns separately, and t", + "doc_type":"devg", + "kw":"Case: Pushing Down Sort Operations to DNs,Optimization Cases,Developer Guide", + "title":"Case: Pushing Down Sort Operations to DNs", + "githuburl":"" + }, + { + "uri":"dws_04_0479.html", + "product_code":"dws", + "code":"252", + "des":"If bit0 of cost_param is set to 1, an improved mechanism is used for estimating the selection rate of non-equi-joins. 
This method is more accurate for estimating the sele", + "doc_type":"devg", + "kw":"Case: Configuring cost_param for Better Query Performance,Optimization Cases,Developer Guide", + "title":"Case: Configuring cost_param for Better Query Performance", + "githuburl":"" + }, + { + "uri":"dws_04_0480.html", + "product_code":"dws", + "code":"253", + "des":"During a site test, the information is displayed after EXPLAIN ANALYZE is executed:According to the execution information, HashJoin becomes the performance bottleneck of ", + "doc_type":"devg", + "kw":"Case: Adjusting the Distribution Key,Optimization Cases,Developer Guide", + "title":"Case: Adjusting the Distribution Key", + "githuburl":"" + }, + { + "uri":"dws_04_0481.html", + "product_code":"dws", + "code":"254", + "des":"Information on the EXPLAIN PERFORMANCE at a site is as follows: As shown in the red boxes, two performance bottlenecks are scan operations in a table.After further analys", + "doc_type":"devg", + "kw":"Case: Adjusting the Partial Clustering Key,Optimization Cases,Developer Guide", + "title":"Case: Adjusting the Partial Clustering Key", + "githuburl":"" + }, + { + "uri":"dws_04_0482.html", + "product_code":"dws", + "code":"255", + "des":"In the GaussDB(DWS) database, row-store tables use the row execution engine, and column-store tables use the column execution engine. If both row-store table and column-s", + "doc_type":"devg", + "kw":"Case: Adjusting the Table Storage Mode in a Medium Table,Optimization Cases,Developer Guide", + "title":"Case: Adjusting the Table Storage Mode in a Medium Table", + "githuburl":"" + }, + { + "uri":"dws_04_0483.html", + "product_code":"dws", + "code":"256", + "des":"During the test at a site, if the following execution plan is performed, the customer expects that the performance can be improved and the result can be returned within 3", + "doc_type":"devg", + "kw":"Case: Adjusting the Local Clustering Column,Optimization Cases,Developer Guide", + "title":"Case: Adjusting the Local Clustering Column", + "githuburl":"" + }, + { + "uri":"dws_04_0484.html", + "product_code":"dws", + "code":"257", + "des":"In the following simple SQL statements, the performance bottlenecks exist in the scan operation of dwcjk.Obviously, there are date features in the cjrq field of table dat", + "doc_type":"devg", + "kw":"Case: Reconstructing Partition Tables,Optimization Cases,Developer Guide", + "title":"Case: Reconstructing Partition Tables", + "githuburl":"" + }, + { + "uri":"dws_04_0485.html", + "product_code":"dws", + "code":"258", + "des":"The t1 table is defined as follows:Assume that the distribution column of the result set provided by the agg lower-layer operator is setA, and the group by column of the ", + "doc_type":"devg", + "kw":"Case: Adjusting the GUC Parameter best_agg_plan,Optimization Cases,Developer Guide", + "title":"Case: Adjusting the GUC Parameter best_agg_plan", + "githuburl":"" + }, + { + "uri":"dws_04_0486.html", + "product_code":"dws", + "code":"259", + "des":"This SQL performance is poor. SubPlan exists in the execution plan as follows:The core of this optimization is to eliminate subqueries. 
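A sketch of trying a GUC such as best_agg_plan at session level before settling on a value, as in the case above; the value shown is arbitrary, and the meaning of each setting should be checked against the GUC reference.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class GucSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement()) {
            // Session-level change only; 0 is assumed to let the optimizer
            // choose the aggregation strategy by cost.
            stmt.execute("SET best_agg_plan = 1");
            // Run the aggregation here and compare EXPLAIN output per setting.
            stmt.execute("RESET best_agg_plan"); // restore the default afterwards
        }
    }
}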
Based on the service scenario anal", + "doc_type":"devg", + "kw":"Case: Rewriting SQL and Deleting Subqueries (Case 1),Optimization Cases,Developer Guide", + "title":"Case: Rewriting SQL and Deleting Subqueries (Case 1)", + "githuburl":"" + }, + { + "uri":"dws_04_0487.html", + "product_code":"dws", + "code":"260", + "des":"On a site, the customer gave the feedback saying that the execution time of the following SQL statements lasted over one day and did not end:The corresponding execution p", + "doc_type":"devg", + "kw":"Case: Rewriting SQL and Deleting Subqueries (Case 2),Optimization Cases,Developer Guide", + "title":"Case: Rewriting SQL and Deleting Subqueries (Case 2)", + "githuburl":"" + }, + { + "uri":"dws_04_0488.html", + "product_code":"dws", + "code":"261", + "des":"In a test at a site, ddw_f10_op_cust_asset_mon is a partitioned table and the partition key is year_mth whose value is a combined string of month and year values.The foll", + "doc_type":"devg", + "kw":"Case: Rewriting SQL Statements and Eliminating Prune Interference,Optimization Cases,Developer Guide", + "title":"Case: Rewriting SQL Statements and Eliminating Prune Interference", + "githuburl":"" + }, + { + "uri":"dws_04_0489.html", + "product_code":"dws", + "code":"262", + "des":"in-clause/any-clause is a common SQL statement constraint. Sometimes, the clause following in or any is a constant. For example:orSome special usages are as follows:Where", + "doc_type":"devg", + "kw":"Case: Rewriting SQL Statements and Deleting in-clause,Optimization Cases,Developer Guide", + "title":"Case: Rewriting SQL Statements and Deleting in-clause", + "githuburl":"" + }, + { + "uri":"dws_04_0490.html", + "product_code":"dws", + "code":"263", + "des":"You can add PARTIAL CLUSTER KEY(column_name[,...]) to the definition of a column-store table to set one or more columns of this table as partial cluster keys. In this way", + "doc_type":"devg", + "kw":"Case: Setting Partial Cluster Keys,Optimization Cases,Developer Guide", + "title":"Case: Setting Partial Cluster Keys", + "githuburl":"" + }, + { + "uri":"dws_04_0491.html", + "product_code":"dws", + "code":"264", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"SQL Execution Troubleshooting", + "title":"SQL Execution Troubleshooting", + "githuburl":"" + }, + { + "uri":"dws_04_0492.html", + "product_code":"dws", + "code":"265", + "des":"A query task that used to take a few milliseconds to complete is now requiring several seconds, and that used to take several seconds is now requiring even half an hour. ", + "doc_type":"devg", + "kw":"Low Query Efficiency,SQL Execution Troubleshooting,Developer Guide", + "title":"Low Query Efficiency", + "githuburl":"" + }, + { + "uri":"dws_04_0494.html", + "product_code":"dws", + "code":"266", + "des":"DROP TABLE fails to be executed in the following scenarios:A user runs the \\dt+ command using gsql and finds that the table_name table does not exist. 
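A sketch of the PARTIAL CLUSTER KEY clause described above, added to a column-store table definition; all object names are invented.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class PckSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://dws-host.example.com:8000/gaussdb"; // hypothetical
        try (Connection conn = DriverManager.getConnection(url, "dbadmin", "password");
             Statement stmt = conn.createStatement()) {
            // Rows are locally clustered on the PCK columns inside each storage
            // unit, so min/max sparse indexes can filter scans on order_date.
            stmt.executeUpdate("CREATE TABLE sales.orders_pck ("
                    + "order_id integer, order_date date, amount numeric(12,2),"
                    + "PARTIAL CLUSTER KEY(order_date))"
                    + " WITH (ORIENTATION = COLUMN)"
                    + " DISTRIBUTE BY HASH(order_id)");
        }
    }
}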
When the user runs ", + "doc_type":"devg", + "kw":"DROP TABLE Fails to Be Executed,SQL Execution Troubleshooting,Developer Guide", + "title":"DROP TABLE Fails to Be Executed", + "githuburl":"" + }, + { + "uri":"dws_04_0495.html", + "product_code":"dws", + "code":"267", + "des":"Two users log in to the same database human_resource and run the select count(*) from areas statement separately to query the areas table, but obtain different results.Ch", + "doc_type":"devg", + "kw":"Different Data Is Displayed for the Same Table Queried By Multiple Users,SQL Execution Troubleshooti", + "title":"Different Data Is Displayed for the Same Table Queried By Multiple Users", + "githuburl":"" + }, + { + "uri":"dws_04_0496.html", + "product_code":"dws", + "code":"268", + "des":"The following error is reported during the integer conversion:Some data types cannot be converted to the target data type.Gradually narrow down the range of SQL statement", + "doc_type":"devg", + "kw":"An Error Occurs During the Integer Conversion,SQL Execution Troubleshooting,Developer Guide", + "title":"An Error Occurs During the Integer Conversion", + "githuburl":"" + }, + { + "uri":"dws_04_0497.html", + "product_code":"dws", + "code":"269", + "des":"With automatic retry (referred to as CN retry), GaussDB(DWS) retries an SQL statement when the execution of this statement fails. If an SQL statement sent from the gsql c", + "doc_type":"devg", + "kw":"Automatic Retry upon SQL Statement Execution Errors,SQL Execution Troubleshooting,Developer Guide", + "title":"Automatic Retry upon SQL Statement Execution Errors", + "githuburl":"" + }, + { + "uri":"dws_04_0970.html", + "product_code":"dws", + "code":"270", + "des":"To improve the cluster performance, you can use multiple methods to optimize the database, including hardware configuration, software driver upgrade, and internal paramet", + "doc_type":"devg", + "kw":"Common Performance Parameter Optimization Design,Query Performance Optimization,Developer Guide", + "title":"Common Performance Parameter Optimization Design", + "githuburl":"" + }, + { + "uri":"dws_04_0507.html", + "product_code":"dws", + "code":"271", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"User-Defined Functions", + "title":"User-Defined Functions", + "githuburl":"" + }, + { + "uri":"dws_04_0509.html", + "product_code":"dws", + "code":"272", + "des":"With the GaussDB(DWS) PL/Java functions, you can choose your favorite Java IDE to write Java methods and install the JAR files containing these methods into the GaussDB(D", + "doc_type":"devg", + "kw":"PL/Java Functions,User-Defined Functions,Developer Guide", + "title":"PL/Java Functions", + "githuburl":"" + }, + { + "uri":"dws_04_0511.html", + "product_code":"dws", + "code":"273", + "des":"PL/pgSQL is similar to PL/SQL of Oracle. It is a loadable procedural language.The functions created using PL/pgSQL can be used in any place where you can use built-in fun", + "doc_type":"devg", + "kw":"PL/pgSQL Functions,User-Defined Functions,Developer Guide", + "title":"PL/pgSQL Functions", + "githuburl":"" + }, + { + "uri":"dws_04_0512.html", + "product_code":"dws", + "code":"274", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
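Complementing the automatic CN retry described above, an application-side sketch that reconnects and retries an idempotent query a bounded number of times; the connection settings and retry limits are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class RetrySketch {
    static final int MAX_RETRIES = 3;

    public static void main(String[] args) throws Exception {
        for (int attempt = 1; attempt <= MAX_RETRIES; attempt++) {
            try (Connection conn = DriverManager.getConnection(
                     "jdbc:postgresql://dws-host.example.com:8000/gaussdb", // hypothetical
                     "dbadmin", "password");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("SELECT count(*) FROM sales.orders");
                return; // success
            } catch (SQLException e) {
                // A failover (e.g., standby promotion) breaks in-flight statements;
                // reconnect and retry idempotent queries a bounded number of times.
                System.err.println("attempt " + attempt + " failed: " + e.getMessage());
                Thread.sleep(5000L * attempt);
            }
        }
        throw new SQLException("query failed after " + MAX_RETRIES + " attempts");
    }
}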
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Stored Procedures", + "title":"Stored Procedures", + "githuburl":"" + }, + { + "uri":"dws_04_0513.html", + "product_code":"dws", + "code":"275", + "des":"In GaussDB(DWS), business rules and logics are saved as stored procedures.A stored procedure is a combination of SQL, PL/SQL, and Java statements, enabling business rule ", + "doc_type":"devg", + "kw":"Stored Procedure,Stored Procedures,Developer Guide", + "title":"Stored Procedure", + "githuburl":"" + }, + { + "uri":"dws_04_0514.html", + "product_code":"dws", + "code":"276", + "des":"A data type refers to a value set and an operation set defined on the value set. A GaussDB(DWS) database consists of tables, each of which is defined by its own columns. ", + "doc_type":"devg", + "kw":"Data Types,Stored Procedures,Developer Guide", + "title":"Data Types", + "githuburl":"" + }, + { + "uri":"dws_04_0515.html", + "product_code":"dws", + "code":"277", + "des":"Certain data types in the database support implicit data type conversions, such as assignments and parameters invoked by functions. For other data types, you can use the ", + "doc_type":"devg", + "kw":"Data Type Conversion,Stored Procedures,Developer Guide", + "title":"Data Type Conversion", + "githuburl":"" + }, + { + "uri":"dws_04_0516.html", + "product_code":"dws", + "code":"278", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Arrays and Records", + "title":"Arrays and Records", + "githuburl":"" + }, + { + "uri":"dws_04_0517.html", + "product_code":"dws", + "code":"279", + "des":"Before the use of arrays, an array type needs to be defined:Define an array type immediately after the AS keyword in a stored procedure. Run the following statement:TYPE ", + "doc_type":"devg", + "kw":"Arrays,Arrays and Records,Developer Guide", + "title":"Arrays", + "githuburl":"" + }, + { + "uri":"dws_04_0518.html", + "product_code":"dws", + "code":"280", + "des":"Perform the following operations to create a record variable:Define a record type and use this type to declare a variable.For the syntax of the record type, see Figure 1.", + "doc_type":"devg", + "kw":"record,Arrays and Records,Developer Guide", + "title":"record", + "githuburl":"" + }, + { + "uri":"dws_04_0519.html", + "product_code":"dws", + "code":"281", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Syntax", + "title":"Syntax", + "githuburl":"" + }, + { + "uri":"dws_04_0520.html", + "product_code":"dws", + "code":"282", + "des":"A PL/SQL block can contain a sub-block which can be placed in any section. The following describes the architecture of a PL/SQL block:DECLARE: declares variables, types, ", + "doc_type":"devg", + "kw":"Basic Structure,Syntax,Developer Guide", + "title":"Basic Structure", + "githuburl":"" + }, + { + "uri":"dws_04_0521.html", + "product_code":"dws", + "code":"283", + "des":"An anonymous block applies to a script infrequently executed or a one-off activity. 
An anonymous block is executed in a session and is not stored.Figure 1 shows the synta", + "doc_type":"devg", + "kw":"Anonymous Block,Syntax,Developer Guide", + "title":"Anonymous Block", + "githuburl":"" + }, + { + "uri":"dws_04_0522.html", + "product_code":"dws", + "code":"284", + "des":"A subprogram stores stored procedures, functions, operators, and advanced packages. A subprogram created in a database can be called by other programs.", + "doc_type":"devg", + "kw":"Subprogram,Syntax,Developer Guide", + "title":"Subprogram", + "githuburl":"" + }, + { + "uri":"dws_04_0523.html", + "product_code":"dws", + "code":"285", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Basic Statements", + "title":"Basic Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0524.html", + "product_code":"dws", + "code":"286", + "des":"This section describes the declaration of variables in the PL/SQL and the scope of this variable in codes.For details about the variable declaration syntax, see Figure 1.", + "doc_type":"devg", + "kw":"Variable Definition Statement,Basic Statements,Developer Guide", + "title":"Variable Definition Statement", + "githuburl":"" + }, + { + "uri":"dws_04_0525.html", + "product_code":"dws", + "code":"287", + "des":"Figure 1 shows the syntax diagram for assigning a value to a variable.The above syntax diagram is explained as follows:variable_name indicates the name of a variable.valu", + "doc_type":"devg", + "kw":"Assignment Statement,Basic Statements,Developer Guide", + "title":"Assignment Statement", + "githuburl":"" + }, + { + "uri":"dws_04_0526.html", + "product_code":"dws", + "code":"288", + "des":"Figure 1 shows the syntax diagram for calling a clause.The above syntax diagram is explained as follows:procedure_name specifies the name of a stored procedure.parameter ", + "doc_type":"devg", + "kw":"Call Statement,Basic Statements,Developer Guide", + "title":"Call Statement", + "githuburl":"" + }, + { + "uri":"dws_04_0527.html", + "product_code":"dws", + "code":"289", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Dynamic Statements", + "title":"Dynamic Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0528.html", + "product_code":"dws", + "code":"290", + "des":"You can perform dynamic queries using EXECUTE IMMEDIATE or OPEN FOR in GaussDB(DWS). 
EXECUTE IMMEDIATE dynamically executes SELECT statements and OPEN FOR combines use of", + "doc_type":"devg", + "kw":"Executing Dynamic Query Statements,Dynamic Statements,Developer Guide", + "title":"Executing Dynamic Query Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0529.html", + "product_code":"dws", + "code":"291", + "des":"Figure 1 shows the syntax diagram.Figure 2 shows the syntax diagram for using_clause.The above syntax diagram is explained as follows:USING IN bind_argument is used to sp", + "doc_type":"devg", + "kw":"Executing Dynamic Non-query Statements,Dynamic Statements,Developer Guide", + "title":"Executing Dynamic Non-query Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0530.html", + "product_code":"dws", + "code":"292", + "des":"This section describes how to dynamically call stored procedures. You must use anonymous statement blocks to package stored procedures or statement blocks and append IN an", + "doc_type":"devg", + "kw":"Dynamically Calling Stored Procedures,Dynamic Statements,Developer Guide", + "title":"Dynamically Calling Stored Procedures", + "githuburl":"" + }, + { + "uri":"dws_04_0531.html", + "product_code":"dws", + "code":"293", + "des":"This section describes how to execute anonymous blocks in dynamic statements. Append IN and OUT after the EXECUTE IMMEDIATE...USING statement to input and output paramet", + "doc_type":"devg", + "kw":"Dynamically Calling Anonymous Blocks,Dynamic Statements,Developer Guide", + "title":"Dynamically Calling Anonymous Blocks", + "githuburl":"" + }, + { + "uri":"dws_04_0532.html", + "product_code":"dws", + "code":"294", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Control Statements", + "title":"Control Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0533.html", + "product_code":"dws", + "code":"295", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"RETURN Statements", + "title":"RETURN Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0534.html", + "product_code":"dws", + "code":"296", + "des":"Figure 1 shows the syntax diagram for a return statement.The syntax details are as follows:This statement returns control from a stored procedure or function to a caller.", + "doc_type":"devg", + "kw":"RETURN,RETURN Statements,Developer Guide", + "title":"RETURN", + "githuburl":"" + }, + { + "uri":"dws_04_0535.html", + "product_code":"dws", + "code":"297", + "des":"When creating a function, specify SETOF datatype for the return values.return_next_clause::=return_query_clause::=The syntax details are as follows:If a function needs to", + "doc_type":"devg", + "kw":"RETURN NEXT and RETURN QUERY,RETURN Statements,Developer Guide", + "title":"RETURN NEXT and RETURN QUERY", + "githuburl":"" + }, + { + "uri":"dws_04_0536.html", + "product_code":"dws", + "code":"298", + "des":"Conditional statements are used to decide whether given conditions are met. 
Operations are executed based on the decisions made.GaussDB(DWS) supports five usages of IF:IF", + "doc_type":"devg", + "kw":"Conditional Statements,Control Statements,Developer Guide", + "title":"Conditional Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0537.html", + "product_code":"dws", + "code":"299", + "des":"The syntax diagram is as follows.Example:The loop must be used together with EXIT; otherwise, an infinite loop occurs.The syntax diagram is as follows.If the conditional ", + "doc_type":"devg", + "kw":"Loop Statements,Control Statements,Developer Guide", + "title":"Loop Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0538.html", + "product_code":"dws", + "code":"300", + "des":"Figure 1 shows the syntax diagram.Figure 2 shows the syntax diagram for when_clause.Parameter description:case_expression: specifies the variable or expression.when_expre", + "doc_type":"devg", + "kw":"Branch Statements,Control Statements,Developer Guide", + "title":"Branch Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0539.html", + "product_code":"dws", + "code":"301", + "des":"In PL/SQL programs, NULL statements are used to indicate \"nothing should be done\", equal to placeholders. They grant meanings to some statements and improve program reada", + "doc_type":"devg", + "kw":"NULL Statements,Control Statements,Developer Guide", + "title":"NULL Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0540.html", + "product_code":"dws", + "code":"302", + "des":"By default, any error occurring in a PL/SQL function aborts execution of the function, and indeed of the surrounding transaction as well. You can trap errors and restore ", + "doc_type":"devg", + "kw":"Error Trapping Statements,Control Statements,Developer Guide", + "title":"Error Trapping Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0541.html", + "product_code":"dws", + "code":"303", + "des":"The GOTO statement unconditionally transfers control from the current statement to a labeled statement. The GOTO statement changes the execution logic. Therefore, use", + "doc_type":"devg", + "kw":"GOTO Statements,Control Statements,Developer Guide", + "title":"GOTO Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0542.html", + "product_code":"dws", + "code":"304", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Other Statements", + "title":"Other Statements", + "githuburl":"" + }, + { + "uri":"dws_04_0543.html", + "product_code":"dws", + "code":"305", + "des":"GaussDB(DWS) provides multiple lock modes to control concurrent access to table data. These modes are used when Multi-Version Concurrency Control (MVCC) cannot give exp", + "doc_type":"devg", + "kw":"Lock Operations,Other Statements,Developer Guide", + "title":"Lock Operations", + "githuburl":"" + }, + { + "uri":"dws_04_0544.html", + "product_code":"dws", + "code":"306", + "des":"GaussDB(DWS) provides cursors as a data buffer for users to store execution results of SQL statements. Each cursor region has a name. 
Users can use SQL statements to obta", + "doc_type":"devg", + "kw":"Cursor Operations,Other Statements,Developer Guide", + "title":"Cursor Operations", + "githuburl":"" + }, + { + "uri":"dws_04_0545.html", + "product_code":"dws", + "code":"307", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Cursors", + "title":"Cursors", + "githuburl":"" + }, + { + "uri":"dws_04_0546.html", + "product_code":"dws", + "code":"308", + "des":"To process SQL statements, the stored procedure process assigns a memory segment to store context association. Cursors are handles or pointers to context areas. With curs", + "doc_type":"devg", + "kw":"Overview,Cursors,Developer Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dws_04_0547.html", + "product_code":"dws", + "code":"309", + "des":"An explicit cursor is used to process query statements, particularly when the query results contain multiple records.An explicit cursor performs the following six PL/SQL ", + "doc_type":"devg", + "kw":"Explicit Cursor,Cursors,Developer Guide", + "title":"Explicit Cursor", + "githuburl":"" + }, + { + "uri":"dws_04_0548.html", + "product_code":"dws", + "code":"310", + "des":"The system automatically sets implicit cursors for non-query statements, such as ALTER and DROP, and creates work areas for these statements. These implicit cursors are n", + "doc_type":"devg", + "kw":"Implicit Cursor,Cursors,Developer Guide", + "title":"Implicit Cursor", + "githuburl":"" + }, + { + "uri":"dws_04_0549.html", + "product_code":"dws", + "code":"311", + "des":"The use of cursors in WHILE and LOOP statements is called a cursor loop. Generally, OPEN, FETCH, and CLOSE statements are needed in cursor loop. The following describes a", + "doc_type":"devg", + "kw":"Cursor Loop,Cursors,Developer Guide", + "title":"Cursor Loop", + "githuburl":"" + }, + { + "uri":"dws_04_0550.html", + "product_code":"dws", + "code":"312", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Advanced Packages", + "title":"Advanced Packages", + "githuburl":"" + }, + { + "uri":"dws_04_0551.html", + "product_code":"dws", + "code":"313", + "des":"Table 1 provides all interfaces supported by the DBMS_LOB package.DBMS_LOB.GETLENGTHSpecifies the length of a LOB type object obtained and returned by the stored procedur", + "doc_type":"devg", + "kw":"DBMS_LOB,Advanced Packages,Developer Guide", + "title":"DBMS_LOB", + "githuburl":"" + }, + { + "uri":"dws_04_0552.html", + "product_code":"dws", + "code":"314", + "des":"Table 1 provides all interfaces supported by the DBMS_RANDOM package.DBMS_RANDOM.SEEDThe stored procedure SEED is used to set a seed for a random number. 
The DBMS_RANDOM.", + "doc_type":"devg", + "kw":"DBMS_RANDOM,Advanced Packages,Developer Guide", + "title":"DBMS_RANDOM", + "githuburl":"" + }, + { + "uri":"dws_04_0553.html", + "product_code":"dws", + "code":"315", + "des":"Table 1 provides all interfaces supported by the DBMS_OUTPUT package.DBMS_OUTPUT.PUT_LINEThe PUT_LINE procedure writes a row of text carrying a line end symbol in the buf", + "doc_type":"devg", + "kw":"DBMS_OUTPUT,Advanced Packages,Developer Guide", + "title":"DBMS_OUTPUT", + "githuburl":"" + }, + { + "uri":"dws_04_0554.html", + "product_code":"dws", + "code":"316", + "des":"Table 1 provides all interfaces supported by the UTL_RAW package.The external representation of the RAW type data is hexadecimal and its internal storage form is binary. ", + "doc_type":"devg", + "kw":"UTL_RAW,Advanced Packages,Developer Guide", + "title":"UTL_RAW", + "githuburl":"" + }, + { + "uri":"dws_04_0555.html", + "product_code":"dws", + "code":"317", + "des":"Table 1 lists all interfaces supported by the DBMS_JOB package.DBMS_JOB.SUBMITThe stored procedure SUBMIT submits a job provided by the system.A prototype of the DBMS_JOB", + "doc_type":"devg", + "kw":"DBMS_JOB,Advanced Packages,Developer Guide", + "title":"DBMS_JOB", + "githuburl":"" + }, + { + "uri":"dws_04_0556.html", + "product_code":"dws", + "code":"318", + "des":"Table 1 lists interfaces supported by the DBMS_SQL package.You are advised to use dbms_sql.define_column and dbms_sql.column_value to define columns.If the size of the re", + "doc_type":"devg", + "kw":"DBMS_SQL,Advanced Packages,Developer Guide", + "title":"DBMS_SQL", + "githuburl":"" + }, + { + "uri":"dws_04_0558.html", + "product_code":"dws", + "code":"319", + "des":"RAISE has the following five syntax formats:Parameter description:The level option is used to specify the error level, that is, DEBUG, LOG, INFO, NOTICE, WARNING, or EXCE", + "doc_type":"devg", + "kw":"Debugging,Stored Procedures,Developer Guide", + "title":"Debugging", + "githuburl":"" + }, + { + "uri":"dws_04_0559.html", + "product_code":"dws", + "code":"320", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"System Catalogs and System Views", + "title":"System Catalogs and System Views", + "githuburl":"" + }, + { + "uri":"dws_04_0560.html", + "product_code":"dws", + "code":"321", + "des":"System catalogs are used by GaussDB(DWS) to store structure metadata. They are a core component of the GaussDB(DWS) database system and provide control information for the d", + "doc_type":"devg", + "kw":"Overview of System Catalogs and System Views,System Catalogs and System Views,Developer Guide", + "title":"Overview of System Catalogs and System Views", + "githuburl":"" + }, + { + "uri":"dws_04_0561.html", + "product_code":"dws", + "code":"322", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"System Catalogs", + "title":"System Catalogs", + "githuburl":"" + }, + { + "uri":"dws_04_0562.html", + "product_code":"dws", + "code":"323", + "des":"GS_OBSSCANINFO defines the OBS runtime information scanned in cluster acceleration scenarios. 
Each record corresponds to a piece of runtime information of a foreign table", + "doc_type":"devg", + "kw":"GS_OBSSCANINFO,System Catalogs,Developer Guide", + "title":"GS_OBSSCANINFO", + "githuburl":"" + }, + { + "uri":"dws_04_0564.html", + "product_code":"dws", + "code":"324", + "des":"The GS_WLM_INSTANCE_HISTORY system catalog stores information about resource usage related to CN or DN instances. Each record in the system table indicates the resource u", + "doc_type":"devg", + "kw":"GS_WLM_INSTANCE_HISTORY,System Catalogs,Developer Guide", + "title":"GS_WLM_INSTANCE_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0565.html", + "product_code":"dws", + "code":"325", + "des":"GS_WLM_OPERATOR_INFO records operators of completed jobs. The data is dumped from the kernel to a system catalog.This system catalog's schema is dbms_om.This system catal", + "doc_type":"devg", + "kw":"GS_WLM_OPERATOR_INFO,System Catalogs,Developer Guide", + "title":"GS_WLM_OPERATOR_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0566.html", + "product_code":"dws", + "code":"326", + "des":"GS_WLM_SESSION_INFO records load management information about a completed job executed on all CNs. The data is dumped from the kernel to a system catalog.This system cata", + "doc_type":"devg", + "kw":"GS_WLM_SESSION_INFO,System Catalogs,Developer Guide", + "title":"GS_WLM_SESSION_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0567.html", + "product_code":"dws", + "code":"327", + "des":"The GS_WLM_USER_RESOURCE_HISTORY system table stores information about resources used by users and is valid only on CNs. Each record in the system table indicates the res", + "doc_type":"devg", + "kw":"GS_WLM_USER_RESOURCE_HISTORY,System Catalogs,Developer Guide", + "title":"GS_WLM_USER_RESOURCE_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0568.html", + "product_code":"dws", + "code":"328", + "des":"pg_aggregate records information about aggregation functions. Each entry in pg_aggregate is an extension of an entry in pg_proc. The pg_proc entry carries the aggregate's", + "doc_type":"devg", + "kw":"PG_AGGREGATE,System Catalogs,Developer Guide", + "title":"PG_AGGREGATE", + "githuburl":"" + }, + { + "uri":"dws_04_0569.html", + "product_code":"dws", + "code":"329", + "des":"PG_AM records information about index access methods. There is one row for each index access method supported by the system.", + "doc_type":"devg", + "kw":"PG_AM,System Catalogs,Developer Guide", + "title":"PG_AM", + "githuburl":"" + }, + { + "uri":"dws_04_0570.html", + "product_code":"dws", + "code":"330", + "des":"PG_AMOP records information about operators associated with access method operator families. There is one row for each operator that is a member of an operator family. A ", + "doc_type":"devg", + "kw":"PG_AMOP,System Catalogs,Developer Guide", + "title":"PG_AMOP", + "githuburl":"" + }, + { + "uri":"dws_04_0571.html", + "product_code":"dws", + "code":"331", + "des":"PG_AMPROC records information about the support procedures associated with the access method operator families. 
There is one row for each support procedure belonging to a", + "doc_type":"devg", + "kw":"PG_AMPROC,System Catalogs,Developer Guide", + "title":"PG_AMPROC", + "githuburl":"" + }, + { + "uri":"dws_04_0572.html", + "product_code":"dws", + "code":"332", + "des":"PG_ATTRDEF stores default values of columns.", + "doc_type":"devg", + "kw":"PG_ATTRDEF,System Catalogs,Developer Guide", + "title":"PG_ATTRDEF", + "githuburl":"" + }, + { + "uri":"dws_04_0573.html", + "product_code":"dws", + "code":"333", + "des":"PG_ATTRIBUTE records information about table columns.", + "doc_type":"devg", + "kw":"PG_ATTRIBUTE,System Catalogs,Developer Guide", + "title":"PG_ATTRIBUTE", + "githuburl":"" + }, + { + "uri":"dws_04_0574.html", + "product_code":"dws", + "code":"334", + "des":"PG_AUTHID records information about the database authentication identifiers (roles). The concept of users is contained in that of roles. A user is actually a role whose r", + "doc_type":"devg", + "kw":"PG_AUTHID,System Catalogs,Developer Guide", + "title":"PG_AUTHID", + "githuburl":"" + }, + { + "uri":"dws_04_0575.html", + "product_code":"dws", + "code":"335", + "des":"PG_AUTH_HISTORY records the authentication history of the role. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"PG_AUTH_HISTORY,System Catalogs,Developer Guide", + "title":"PG_AUTH_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0576.html", + "product_code":"dws", + "code":"336", + "des":"PG_AUTH_MEMBERS records the membership relations between roles.", + "doc_type":"devg", + "kw":"PG_AUTH_MEMBERS,System Catalogs,Developer Guide", + "title":"PG_AUTH_MEMBERS", + "githuburl":"" + }, + { + "uri":"dws_04_0577.html", + "product_code":"dws", + "code":"337", + "des":"PG_CAST records conversion relationships between data types.", + "doc_type":"devg", + "kw":"PG_CAST,System Catalogs,Developer Guide", + "title":"PG_CAST", + "githuburl":"" + }, + { + "uri":"dws_04_0578.html", + "product_code":"dws", + "code":"338", + "des":"PG_CLASS records database objects and their relations.View the OID and relfilenode of a table.Count row-store tables.Count column-store tables.", + "doc_type":"devg", + "kw":"PG_CLASS,System Catalogs,Developer Guide", + "title":"PG_CLASS", + "githuburl":"" + }, + { + "uri":"dws_04_0579.html", + "product_code":"dws", + "code":"339", + "des":"PG_COLLATION records the available collations, which are essentially mappings from an SQL name to operating system locale categories.", + "doc_type":"devg", + "kw":"PG_COLLATION,System Catalogs,Developer Guide", + "title":"PG_COLLATION", + "githuburl":"" + }, + { + "uri":"dws_04_0580.html", + "product_code":"dws", + "code":"340", + "des":"PG_CONSTRAINT records check, primary key, unique, and foreign key constraints on the tables.consrc is not updated when referenced objects change; for example, it will not", + "doc_type":"devg", + "kw":"PG_CONSTRAINT,System Catalogs,Developer Guide", + "title":"PG_CONSTRAINT", + "githuburl":"" + }, + { + "uri":"dws_04_0581.html", + "product_code":"dws", + "code":"341", + "des":"PG_CONVERSION records encoding conversion information.", + "doc_type":"devg", + "kw":"PG_CONVERSION,System Catalogs,Developer Guide", + "title":"PG_CONVERSION", + "githuburl":"" + }, + { + "uri":"dws_04_0582.html", + "product_code":"dws", + "code":"342", + "des":"PG_DATABASE records information about the available databases.", + "doc_type":"devg", + "kw":"PG_DATABASE,System Catalogs,Developer Guide", + "title":"PG_DATABASE", + "githuburl":"" + }, + { + 
"uri":"dws_04_0583.html", + "product_code":"dws", + "code":"343", + "des":"PG_DB_ROLE_SETTING records the default values of configuration items bonded to each role and database when the database is running.", + "doc_type":"devg", + "kw":"PG_DB_ROLE_SETTING,System Catalogs,Developer Guide", + "title":"PG_DB_ROLE_SETTING", + "githuburl":"" + }, + { + "uri":"dws_04_0584.html", + "product_code":"dws", + "code":"344", + "des":"PG_DEFAULT_ACL records the initial privileges assigned to the newly created objects.Run the following command to view the initial permissions of the new user role1:You ca", + "doc_type":"devg", + "kw":"PG_DEFAULT_ACL,System Catalogs,Developer Guide", + "title":"PG_DEFAULT_ACL", + "githuburl":"" + }, + { + "uri":"dws_04_0585.html", + "product_code":"dws", + "code":"345", + "des":"PG_DEPEND records the dependency relationships between database objects. This information allows DROP commands to find which other objects must be dropped by DROP CASCADE", + "doc_type":"devg", + "kw":"PG_DEPEND,System Catalogs,Developer Guide", + "title":"PG_DEPEND", + "githuburl":"" + }, + { + "uri":"dws_04_0586.html", + "product_code":"dws", + "code":"346", + "des":"PG_DESCRIPTION records optional descriptions (comments) for each database object. Descriptions of many built-in system objects are provided in the initial contents of PG_", + "doc_type":"devg", + "kw":"PG_DESCRIPTION,System Catalogs,Developer Guide", + "title":"PG_DESCRIPTION", + "githuburl":"" + }, + { + "uri":"dws_04_0588.html", + "product_code":"dws", + "code":"347", + "des":"PG_ENUM records entries showing the values and labels for each enum type. The internal representation of a given enum value is actually the OID of its associated row in p", + "doc_type":"devg", + "kw":"PG_ENUM,System Catalogs,Developer Guide", + "title":"PG_ENUM", + "githuburl":"" + }, + { + "uri":"dws_04_0589.html", + "product_code":"dws", + "code":"348", + "des":"PG_EXTENSION records information about the installed extensions. By default, GaussDB(DWS) has 12 extensions, that is, PLPGSQL, DIST_FDW, FILE_FDW, HDFS_FDW, HSTORE, PLDBG", + "doc_type":"devg", + "kw":"PG_EXTENSION,System Catalogs,Developer Guide", + "title":"PG_EXTENSION", + "githuburl":"" + }, + { + "uri":"dws_04_0590.html", + "product_code":"dws", + "code":"349", + "des":"PG_EXTENSION_DATA_SOURCE records information about external data source. An external data source contains information about an external database, such as its password enc", + "doc_type":"devg", + "kw":"PG_EXTENSION_DATA_SOURCE,System Catalogs,Developer Guide", + "title":"PG_EXTENSION_DATA_SOURCE", + "githuburl":"" + }, + { + "uri":"dws_04_0591.html", + "product_code":"dws", + "code":"350", + "des":"PG_FOREIGN_DATA_WRAPPER records foreign-data wrapper definitions. A foreign-data wrapper is the mechanism by which external data, residing on foreign servers, is accessed", + "doc_type":"devg", + "kw":"PG_FOREIGN_DATA_WRAPPER,System Catalogs,Developer Guide", + "title":"PG_FOREIGN_DATA_WRAPPER", + "githuburl":"" + }, + { + "uri":"dws_04_0592.html", + "product_code":"dws", + "code":"351", + "des":"PG_FOREIGN_SERVER records the foreign server definitions. A foreign server describes a source of external data, such as a remote server. 
Foreign servers are accessed via ", + "doc_type":"devg", + "kw":"PG_FOREIGN_SERVER,System Catalogs,Developer Guide", + "title":"PG_FOREIGN_SERVER", + "githuburl":"" + }, + { + "uri":"dws_04_0593.html", + "product_code":"dws", + "code":"352", + "des":"PG_FOREIGN_TABLE records auxiliary information about foreign tables.", + "doc_type":"devg", + "kw":"PG_FOREIGN_TABLE,System Catalogs,Developer Guide", + "title":"PG_FOREIGN_TABLE", + "githuburl":"" + }, + { + "uri":"dws_04_0594.html", + "product_code":"dws", + "code":"353", + "des":"PG_INDEX records part of the information about indexes. The rest is mostly in PG_CLASS.", + "doc_type":"devg", + "kw":"PG_INDEX,System Catalogs,Developer Guide", + "title":"PG_INDEX", + "githuburl":"" + }, + { + "uri":"dws_04_0595.html", + "product_code":"dws", + "code":"354", + "des":"PG_INHERITS records information about table inheritance hierarchies. There is one entry for each direct child table in the database. Indirect inheritance can be determine", + "doc_type":"devg", + "kw":"PG_INHERITS,System Catalogs,Developer Guide", + "title":"PG_INHERITS", + "githuburl":"" + }, + { + "uri":"dws_04_0596.html", + "product_code":"dws", + "code":"355", + "des":"PG_JOBS records detailed information about jobs created by users. Dedicated threads poll the pg_jobs table and trigger jobs based on scheduled job execution time. This ta", + "doc_type":"devg", + "kw":"PG_JOBS,System Catalogs,Developer Guide", + "title":"PG_JOBS", + "githuburl":"" + }, + { + "uri":"dws_04_0597.html", + "product_code":"dws", + "code":"356", + "des":"PG_LANGUAGE records programming languages. You can use these languages and their interfaces to write functions or stored procedures.", + "doc_type":"devg", + "kw":"PG_LANGUAGE,System Catalogs,Developer Guide", + "title":"PG_LANGUAGE", + "githuburl":"" + }, + { + "uri":"dws_04_0598.html", + "product_code":"dws", + "code":"357", + "des":"PG_LARGEOBJECT records the data making up large objects. A large object is identified by an OID assigned when it is created. Each large object is broken into segments or \"", + "doc_type":"devg", + "kw":"PG_LARGEOBJECT,System Catalogs,Developer Guide", + "title":"PG_LARGEOBJECT", + "githuburl":"" + }, + { + "uri":"dws_04_0599.html", + "product_code":"dws", + "code":"358", + "des":"PG_LARGEOBJECT_METADATA records metadata associated with large objects. The actual large object data is stored in PG_LARGEOBJECT.", + "doc_type":"devg", + "kw":"PG_LARGEOBJECT_METADATA,System Catalogs,Developer Guide", + "title":"PG_LARGEOBJECT_METADATA", + "githuburl":"" + }, + { + "uri":"dws_04_0600.html", + "product_code":"dws", + "code":"359", + "des":"PG_NAMESPACE records the namespaces, that is, schema-related information.", + "doc_type":"devg", + "kw":"PG_NAMESPACE,System Catalogs,Developer Guide", + "title":"PG_NAMESPACE", + "githuburl":"" + }, + { + "uri":"dws_04_0601.html", + "product_code":"dws", + "code":"360", + "des":"PG_OBJECT records the user creation, creation time, last modification time, and last analyzing time of objects of specified types (types existing in object_type).Only nor", + "doc_type":"devg", + "kw":"PG_OBJECT,System Catalogs,Developer Guide", + "title":"PG_OBJECT", + "githuburl":"" + }, + { + "uri":"dws_04_0602.html", + "product_code":"dws", + "code":"361", + "des":"PG_OBSSCANINFO defines the OBS runtime information scanned in cluster acceleration scenarios. 
Each record corresponds to a piece of runtime information of a foreign table", + "doc_type":"devg", + "kw":"PG_OBSSCANINFO,System Catalogs,Developer Guide", + "title":"PG_OBSSCANINFO", + "githuburl":"" + }, + { + "uri":"dws_04_0603.html", + "product_code":"dws", + "code":"362", + "des":"PG_OPCLASS defines index access method operator classes.Each operator class defines semantics for index columns of a particular data type and a particular index access me", + "doc_type":"devg", + "kw":"PG_OPCLASS,System Catalogs,Developer Guide", + "title":"PG_OPCLASS", + "githuburl":"" + }, + { + "uri":"dws_04_0604.html", + "product_code":"dws", + "code":"363", + "des":"PG_OPERATOR records information about operators.", + "doc_type":"devg", + "kw":"PG_OPERATOR,System Catalogs,Developer Guide", + "title":"PG_OPERATOR", + "githuburl":"" + }, + { + "uri":"dws_04_0605.html", + "product_code":"dws", + "code":"364", + "des":"PG_OPFAMILY defines operator families.Each operator family is a collection of operators and associated support routines that implement the semantics specified for a parti", + "doc_type":"devg", + "kw":"PG_OPFAMILY,System Catalogs,Developer Guide", + "title":"PG_OPFAMILY", + "githuburl":"" + }, + { + "uri":"dws_04_0606.html", + "product_code":"dws", + "code":"365", + "des":"PG_PARTITION records all partitioned tables, table partitions, toast tables on table partitions, and index partitions in the database. Partitioned index information is no", + "doc_type":"devg", + "kw":"PG_PARTITION,System Catalogs,Developer Guide", + "title":"PG_PARTITION", + "githuburl":"" + }, + { + "uri":"dws_04_0607.html", + "product_code":"dws", + "code":"366", + "des":"PG_PLTEMPLATE records template information for procedural languages.", + "doc_type":"devg", + "kw":"PG_PLTEMPLATE,System Catalogs,Developer Guide", + "title":"PG_PLTEMPLATE", + "githuburl":"" + }, + { + "uri":"dws_04_0608.html", + "product_code":"dws", + "code":"367", + "des":"PG_PROC records information about functions or procedures.Query the OID of a specified function. 
For example, obtain the OID 1295 of the justify_days function.Query wheth", + "doc_type":"devg", + "kw":"PG_PROC,System Catalogs,Developer Guide", + "title":"PG_PROC", + "githuburl":"" + }, + { + "uri":"dws_04_0609.html", + "product_code":"dws", + "code":"368", + "des":"PG_RANGE records information about range types.This is in addition to the types' entries in PG_TYPE.rngsubopc (plus rngcollation, if the element type is collatable) deter", + "doc_type":"devg", + "kw":"PG_RANGE,System Catalogs,Developer Guide", + "title":"PG_RANGE", + "githuburl":"" + }, + { + "uri":"dws_04_0610.html", + "product_code":"dws", + "code":"369", + "des":"PG_REDACTION_COLUMN records the information about the redacted columns.", + "doc_type":"devg", + "kw":"PG_REDACTION_COLUMN,System Catalogs,Developer Guide", + "title":"PG_REDACTION_COLUMN", + "githuburl":"" + }, + { + "uri":"dws_04_0611.html", + "product_code":"dws", + "code":"370", + "des":"PG_REDACTION_POLICY records information about the object to be redacted.", + "doc_type":"devg", + "kw":"PG_REDACTION_POLICY,System Catalogs,Developer Guide", + "title":"PG_REDACTION_POLICY", + "githuburl":"" + }, + { + "uri":"dws_04_0612.html", + "product_code":"dws", + "code":"371", + "des":"PG_RLSPOLICY displays the information about row-level access control policies.", + "doc_type":"devg", + "kw":"PG_RLSPOLICY,System Catalogs,Developer Guide", + "title":"PG_RLSPOLICY", + "githuburl":"" + }, + { + "uri":"dws_04_0613.html", + "product_code":"dws", + "code":"372", + "des":"PG_RESOURCE_POOL records the information about database resource pool.", + "doc_type":"devg", + "kw":"PG_RESOURCE_POOL,System Catalogs,Developer Guide", + "title":"PG_RESOURCE_POOL", + "githuburl":"" + }, + { + "uri":"dws_04_0614.html", + "product_code":"dws", + "code":"373", + "des":"PG_REWRITE records rewrite rules defined for tables and views.", + "doc_type":"devg", + "kw":"PG_REWRITE,System Catalogs,Developer Guide", + "title":"PG_REWRITE", + "githuburl":"" + }, + { + "uri":"dws_04_0615.html", + "product_code":"dws", + "code":"374", + "des":"PG_SECLABEL records security labels on database objects.See also PG_SHSECLABEL, which performs a similar function for security labels of database objects that are shared ", + "doc_type":"devg", + "kw":"PG_SECLABEL,System Catalogs,Developer Guide", + "title":"PG_SECLABEL", + "githuburl":"" + }, + { + "uri":"dws_04_0616.html", + "product_code":"dws", + "code":"375", + "des":"PG_SHDEPEND records the dependency relationships between database objects and shared objects, such as roles. This information allows GaussDB(DWS) to ensure that those obj", + "doc_type":"devg", + "kw":"PG_SHDEPEND,System Catalogs,Developer Guide", + "title":"PG_SHDEPEND", + "githuburl":"" + }, + { + "uri":"dws_04_0617.html", + "product_code":"dws", + "code":"376", + "des":"PG_SHDESCRIPTION records optional comments for shared database objects. Descriptions can be manipulated with the COMMENT command and viewed with psql's \\d commands.See al", + "doc_type":"devg", + "kw":"PG_SHDESCRIPTION,System Catalogs,Developer Guide", + "title":"PG_SHDESCRIPTION", + "githuburl":"" + }, + { + "uri":"dws_04_0618.html", + "product_code":"dws", + "code":"377", + "des":"PG_SHSECLABEL records security labels on shared database objects. 
Security labels can be manipulated with the SECURITY LABEL command.For an easier way to view security la", + "doc_type":"devg", + "kw":"PG_SHSECLABEL,System Catalogs,Developer Guide", + "title":"PG_SHSECLABEL", + "githuburl":"" + }, + { + "uri":"dws_04_0619.html", + "product_code":"dws", + "code":"378", + "des":"PG_STATISTIC records statistics about tables and index columns in a database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"PG_STATISTIC,System Catalogs,Developer Guide", + "title":"PG_STATISTIC", + "githuburl":"" + }, + { + "uri":"dws_04_0620.html", + "product_code":"dws", + "code":"379", + "des":"PG_STATISTIC_EXT records the extended statistics of tables in a database, such as statistics of multiple columns. Statistics of expressions will be supported later. You c", + "doc_type":"devg", + "kw":"PG_STATISTIC_EXT,System Catalogs,Developer Guide", + "title":"PG_STATISTIC_EXT", + "githuburl":"" + }, + { + "uri":"dws_04_0621.html", + "product_code":"dws", + "code":"380", + "des":"PG_SYNONYM records the mapping between synonym object names and other database object names.", + "doc_type":"devg", + "kw":"PG_SYNONYM,System Catalogs,Developer Guide", + "title":"PG_SYNONYM", + "githuburl":"" + }, + { + "uri":"dws_04_0622.html", + "product_code":"dws", + "code":"381", + "des":"PG_TABLESPACE records tablespace information.", + "doc_type":"devg", + "kw":"PG_TABLESPACE,System Catalogs,Developer Guide", + "title":"PG_TABLESPACE", + "githuburl":"" + }, + { + "uri":"dws_04_0623.html", + "product_code":"dws", + "code":"382", + "des":"PG_TRIGGER records the trigger information.", + "doc_type":"devg", + "kw":"PG_TRIGGER,System Catalogs,Developer Guide", + "title":"PG_TRIGGER", + "githuburl":"" + }, + { + "uri":"dws_04_0624.html", + "product_code":"dws", + "code":"383", + "des":"PG_TS_CONFIG records entries representing text search configurations. A configuration specifies a particular text search parser and a list of dictionaries to use for each", + "doc_type":"devg", + "kw":"PG_TS_CONFIG,System Catalogs,Developer Guide", + "title":"PG_TS_CONFIG", + "githuburl":"" + }, + { + "uri":"dws_04_0625.html", + "product_code":"dws", + "code":"384", + "des":"PG_TS_CONFIG_MAP records entries showing which text search dictionaries should be consulted, and in what order, for each output token type of each text search configurati", + "doc_type":"devg", + "kw":"PG_TS_CONFIG_MAP,System Catalogs,Developer Guide", + "title":"PG_TS_CONFIG_MAP", + "githuburl":"" + }, + { + "uri":"dws_04_0626.html", + "product_code":"dws", + "code":"385", + "des":"PG_TS_DICT records entries that define text search dictionaries. A dictionary depends on a text search template, which specifies all the implementation functions needed. ", + "doc_type":"devg", + "kw":"PG_TS_DICT,System Catalogs,Developer Guide", + "title":"PG_TS_DICT", + "githuburl":"" + }, + { + "uri":"dws_04_0627.html", + "product_code":"dws", + "code":"386", + "des":"PG_TS_PARSER records entries defining text search parsers. A parser splits input text into lexemes and assigns a token type to each lexeme. Since a parser must be impleme", + "doc_type":"devg", + "kw":"PG_TS_PARSER,System Catalogs,Developer Guide", + "title":"PG_TS_PARSER", + "githuburl":"" + }, + { + "uri":"dws_04_0628.html", + "product_code":"dws", + "code":"387", + "des":"PG_TS_TEMPLATE records entries defining text search templates. A template provides a framework for text search dictionaries. 
Since a template must be implemented by C fun", + "doc_type":"devg", + "kw":"PG_TS_TEMPLATE,System Catalogs,Developer Guide", + "title":"PG_TS_TEMPLATE", + "githuburl":"" + }, + { + "uri":"dws_04_0629.html", + "product_code":"dws", + "code":"388", + "des":"PG_TYPE records the information about data types.", + "doc_type":"devg", + "kw":"PG_TYPE,System Catalogs,Developer Guide", + "title":"PG_TYPE", + "githuburl":"" + }, + { + "uri":"dws_04_0630.html", + "product_code":"dws", + "code":"389", + "des":"PG_USER_MAPPING records the mappings from local users to remote.It is accessible only to users with system administrator rights. You can use the PG_USER_MAPPINGS view to quer", + "doc_type":"devg", + "kw":"PG_USER_MAPPING,System Catalogs,Developer Guide", + "title":"PG_USER_MAPPING", + "githuburl":"" + }, + { + "uri":"dws_04_0631.html", + "product_code":"dws", + "code":"390", + "des":"PG_USER_STATUS records the states of users that access the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"PG_USER_STATUS,System Catalogs,Developer Guide", + "title":"PG_USER_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0632.html", + "product_code":"dws", + "code":"391", + "des":"PG_WORKLOAD_ACTION records information about query_band.", + "doc_type":"devg", + "kw":"PG_WORKLOAD_ACTION,System Catalogs,Developer Guide", + "title":"PG_WORKLOAD_ACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0633.html", + "product_code":"dws", + "code":"392", + "des":"PGXC_CLASS records the replicated or distributed information for each table.", + "doc_type":"devg", + "kw":"PGXC_CLASS,System Catalogs,Developer Guide", + "title":"PGXC_CLASS", + "githuburl":"" + }, + { + "uri":"dws_04_0634.html", + "product_code":"dws", + "code":"393", + "des":"PGXC_GROUP records information about node groups.", + "doc_type":"devg", + "kw":"PGXC_GROUP,System Catalogs,Developer Guide", + "title":"PGXC_GROUP", + "githuburl":"" + }, + { + "uri":"dws_04_0635.html", + "product_code":"dws", + "code":"394", + "des":"PGXC_NODE records information about cluster nodes.Query the CN and DN information of the cluster:", + "doc_type":"devg", + "kw":"PGXC_NODE,System Catalogs,Developer Guide", + "title":"PGXC_NODE", + "githuburl":"" + }, + { + "uri":"dws_04_0639.html", + "product_code":"dws", + "code":"395", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"System Views", + "title":"System Views", + "githuburl":"" + }, + { + "uri":"dws_04_0640.html", + "product_code":"dws", + "code":"396", + "des":"ALL_ALL_TABLES displays the tables or views accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_ALL_TABLES,System Views,Developer Guide", + "title":"ALL_ALL_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0641.html", + "product_code":"dws", + "code":"397", + "des":"ALL_CONSTRAINTS displays information about constraints accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_CONSTRAINTS,System Views,Developer Guide", + "title":"ALL_CONSTRAINTS", + "githuburl":"" + }, + { + "uri":"dws_04_0642.html", + "product_code":"dws", + "code":"398", + "des":"ALL_CONS_COLUMNS displays information about constraint columns accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_CONS_COLUMNS,System Views,Developer Guide", + "title":"ALL_CONS_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0643.html", + "product_code":"dws", + "code":"399", + "des":"ALL_COL_COMMENTS displays the comment information about table columns accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_COL_COMMENTS,System Views,Developer Guide", + "title":"ALL_COL_COMMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0644.html", + "product_code":"dws", + "code":"400", + "des":"ALL_DEPENDENCIES displays dependencies between functions and advanced packages accessible to the current user.Currently in GaussDB(DWS), this table is empty without any r", + "doc_type":"devg", + "kw":"ALL_DEPENDENCIES,System Views,Developer Guide", + "title":"ALL_DEPENDENCIES", + "githuburl":"" + }, + { + "uri":"dws_04_0645.html", + "product_code":"dws", + "code":"401", + "des":"ALL_IND_COLUMNS displays all index columns accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_IND_COLUMNS,System Views,Developer Guide", + "title":"ALL_IND_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0646.html", + "product_code":"dws", + "code":"402", + "des":"ALL_IND_EXPRESSIONS displays information about the expression indexes accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_IND_EXPRESSIONS,System Views,Developer Guide", + "title":"ALL_IND_EXPRESSIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0647.html", + "product_code":"dws", + "code":"403", + "des":"ALL_INDEXES displays information about indexes accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_INDEXES,System Views,Developer Guide", + "title":"ALL_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0648.html", + "product_code":"dws", + "code":"404", + "des":"ALL_OBJECTS displays all database objects accessible to the current user.For details about the value range of last_ddl_time, see PG_OBJECT.", + "doc_type":"devg", + "kw":"ALL_OBJECTS,System Views,Developer Guide", + "title":"ALL_OBJECTS", + "githuburl":"" + }, + { + "uri":"dws_04_0649.html", + "product_code":"dws", + "code":"405", + "des":"ALL_PROCEDURES displays information about all stored procedures or functions accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_PROCEDURES,System Views,Developer Guide", + "title":"ALL_PROCEDURES", + "githuburl":"" + }, + { + "uri":"dws_04_0650.html", + "product_code":"dws", + "code":"406", + "des":"ALL_SEQUENCES displays all sequences accessible to the current user.", + 
"doc_type":"devg", + "kw":"ALL_SEQUENCES,System Views,Developer Guide", + "title":"ALL_SEQUENCES", + "githuburl":"" + }, + { + "uri":"dws_04_0651.html", + "product_code":"dws", + "code":"407", + "des":"ALL_SOURCE displays information about stored procedures or functions accessible to the current user, and provides the columns defined by the stored procedures and functio", + "doc_type":"devg", + "kw":"ALL_SOURCE,System Views,Developer Guide", + "title":"ALL_SOURCE", + "githuburl":"" + }, + { + "uri":"dws_04_0652.html", + "product_code":"dws", + "code":"408", + "des":"ALL_SYNONYMS displays all synonyms accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_SYNONYMS,System Views,Developer Guide", + "title":"ALL_SYNONYMS", + "githuburl":"" + }, + { + "uri":"dws_04_0653.html", + "product_code":"dws", + "code":"409", + "des":"ALL_TAB_COLUMNS displays description information about columns of the tables accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_TAB_COLUMNS,System Views,Developer Guide", + "title":"ALL_TAB_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0654.html", + "product_code":"dws", + "code":"410", + "des":"ALL_TAB_COMMENTS displays comments about all tables and views accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_TAB_COMMENTS,System Views,Developer Guide", + "title":"ALL_TAB_COMMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0655.html", + "product_code":"dws", + "code":"411", + "des":"ALL_TABLES displays all the tables accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_TABLES,System Views,Developer Guide", + "title":"ALL_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0656.html", + "product_code":"dws", + "code":"412", + "des":"ALL_USERS displays all users of the database visible to the current user, however, it does not describe the users.", + "doc_type":"devg", + "kw":"ALL_USERS,System Views,Developer Guide", + "title":"ALL_USERS", + "githuburl":"" + }, + { + "uri":"dws_04_0657.html", + "product_code":"dws", + "code":"413", + "des":"ALL_VIEWS displays the description about all views accessible to the current user.", + "doc_type":"devg", + "kw":"ALL_VIEWS,System Views,Developer Guide", + "title":"ALL_VIEWS", + "githuburl":"" + }, + { + "uri":"dws_04_0658.html", + "product_code":"dws", + "code":"414", + "des":"DBA_DATA_FILES displays the description of database files. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_DATA_FILES,System Views,Developer Guide", + "title":"DBA_DATA_FILES", + "githuburl":"" + }, + { + "uri":"dws_04_0659.html", + "product_code":"dws", + "code":"415", + "des":"DBA_USERS displays all user names in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_USERS,System Views,Developer Guide", + "title":"DBA_USERS", + "githuburl":"" + }, + { + "uri":"dws_04_0660.html", + "product_code":"dws", + "code":"416", + "des":"DBA_COL_COMMENTS displays information about table colum comments in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_COL_COMMENTS,System Views,Developer Guide", + "title":"DBA_COL_COMMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0661.html", + "product_code":"dws", + "code":"417", + "des":"DBA_CONSTRAINTS displays information about table constraints in database. 
It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_CONSTRAINTS,System Views,Developer Guide", + "title":"DBA_CONSTRAINTS", + "githuburl":"" + }, + { + "uri":"dws_04_0662.html", + "product_code":"dws", + "code":"418", + "des":"DBA_CONS_COLUMNS displays information about constraint columns in database tables. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_CONS_COLUMNS,System Views,Developer Guide", + "title":"DBA_CONS_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0663.html", + "product_code":"dws", + "code":"419", + "des":"DBA_IND_COLUMNS displays column information about all indexes in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_IND_COLUMNS,System Views,Developer Guide", + "title":"DBA_IND_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0664.html", + "product_code":"dws", + "code":"420", + "des":"DBA_IND_EXPRESSIONS displays the information about expression indexes in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_IND_EXPRESSIONS,System Views,Developer Guide", + "title":"DBA_IND_EXPRESSIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0665.html", + "product_code":"dws", + "code":"421", + "des":"DBA_IND_PARTITIONS displays information about all index partitions in the database. Each index partition of a partitioned table in the database, if present, has a row of ", + "doc_type":"devg", + "kw":"DBA_IND_PARTITIONS,System Views,Developer Guide", + "title":"DBA_IND_PARTITIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0666.html", + "product_code":"dws", + "code":"422", + "des":"DBA_INDEXES displays all indexes in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_INDEXES,System Views,Developer Guide", + "title":"DBA_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0667.html", + "product_code":"dws", + "code":"423", + "des":"DBA_OBJECTS displays all database objects in the database. It is accessible only to users with system administrator rights.For details about the value ranges of last_ddl_", + "doc_type":"devg", + "kw":"DBA_OBJECTS,System Views,Developer Guide", + "title":"DBA_OBJECTS", + "githuburl":"" + }, + { + "uri":"dws_04_0668.html", + "product_code":"dws", + "code":"424", + "des":"DBA_PART_INDEXES displays information about all partitioned table indexes in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_PART_INDEXES,System Views,Developer Guide", + "title":"DBA_PART_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0669.html", + "product_code":"dws", + "code":"425", + "des":"DBA_PART_TABLES displays information about all partitioned tables in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_PART_TABLES,System Views,Developer Guide", + "title":"DBA_PART_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0670.html", + "product_code":"dws", + "code":"426", + "des":"DBA_PROCEDURES displays information about all stored procedures and functions in the database. 
It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_PROCEDURES,System Views,Developer Guide", + "title":"DBA_PROCEDURES", + "githuburl":"" + }, + { + "uri":"dws_04_0671.html", + "product_code":"dws", + "code":"427", + "des":"DBA_SEQUENCES displays information about all sequences in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_SEQUENCES,System Views,Developer Guide", + "title":"DBA_SEQUENCES", + "githuburl":"" + }, + { + "uri":"dws_04_0672.html", + "product_code":"dws", + "code":"428", + "des":"DBA_SOURCE displays all stored procedures or functions in the database, and it provides the columns defined by the stored procedures or functions. It is accessible only t", + "doc_type":"devg", + "kw":"DBA_SOURCE,System Views,Developer Guide", + "title":"DBA_SOURCE", + "githuburl":"" + }, + { + "uri":"dws_04_0673.html", + "product_code":"dws", + "code":"429", + "des":"DBA_SYNONYMS displays all synonyms in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_SYNONYMS,System Views,Developer Guide", + "title":"DBA_SYNONYMS", + "githuburl":"" + }, + { + "uri":"dws_04_0674.html", + "product_code":"dws", + "code":"430", + "des":"DBA_TAB_COLUMNS displays the columns of tables. Each column of a table in the database has a row in DBA_TAB_COLUMNS. It is accessible only to users with system administra", + "doc_type":"devg", + "kw":"DBA_TAB_COLUMNS,System Views,Developer Guide", + "title":"DBA_TAB_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0675.html", + "product_code":"dws", + "code":"431", + "des":"DBA_TAB_COMMENTS displays comments about all tables and views in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_TAB_COMMENTS,System Views,Developer Guide", + "title":"DBA_TAB_COMMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0676.html", + "product_code":"dws", + "code":"432", + "des":"DBA_TAB_PARTITIONS displays information about all partitions in the database.", + "doc_type":"devg", + "kw":"DBA_TAB_PARTITIONS,System Views,Developer Guide", + "title":"DBA_TAB_PARTITIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0677.html", + "product_code":"dws", + "code":"433", + "des":"DBA_TABLES displays all tables in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_TABLES,System Views,Developer Guide", + "title":"DBA_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0678.html", + "product_code":"dws", + "code":"434", + "des":"DBA_TABLESPACES displays information about available tablespaces. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_TABLESPACES,System Views,Developer Guide", + "title":"DBA_TABLESPACES", + "githuburl":"" + }, + { + "uri":"dws_04_0679.html", + "product_code":"dws", + "code":"435", + "des":"DBA_TRIGGERS displays information about triggers in the database. It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_TRIGGERS,System Views,Developer Guide", + "title":"DBA_TRIGGERS", + "githuburl":"" + }, + { + "uri":"dws_04_0680.html", + "product_code":"dws", + "code":"436", + "des":"DBA_VIEWS displays views in the database. 
It is accessible only to users with system administrator rights.", + "doc_type":"devg", + "kw":"DBA_VIEWS,System Views,Developer Guide", + "title":"DBA_VIEWS", + "githuburl":"" + }, + { + "uri":"dws_04_0681.html", + "product_code":"dws", + "code":"437", + "des":"DUAL is automatically created by the database based on the data dictionary. It has only one text column in only one row for storing expression calculation results. It is ", + "doc_type":"devg", + "kw":"DUAL,System Views,Developer Guide", + "title":"DUAL", + "githuburl":"" + }, + { + "uri":"dws_04_0682.html", + "product_code":"dws", + "code":"438", + "des":"GLOBAL_REDO_STAT displays the total statistics of XLOG redo operations on all nodes in a cluster. Except the avgiotim column (indicating the average redo write time of al", + "doc_type":"devg", + "kw":"GLOBAL_REDO_STAT,System Views,Developer Guide", + "title":"GLOBAL_REDO_STAT", + "githuburl":"" + }, + { + "uri":"dws_04_0683.html", + "product_code":"dws", + "code":"439", + "des":"GLOBAL_REL_IOSTAT displays the total disk I/O statistics of all nodes in a cluster. The name of each column in this view is the same as that in the GS_REL_IOSTAT view, bu", + "doc_type":"devg", + "kw":"GLOBAL_REL_IOSTAT,System Views,Developer Guide", + "title":"GLOBAL_REL_IOSTAT", + "githuburl":"" + }, + { + "uri":"dws_04_0684.html", + "product_code":"dws", + "code":"440", + "des":"GLOBAL_STAT_DATABASE displays the status and statistics of databases on all nodes in a cluster. When you query the GLOBAL_STAT_DATABASE view on a CN, the respective values", + "doc_type":"devg", + "kw":"GLOBAL_STAT_DATABASE,System Views,Developer Guide", + "title":"GLOBAL_STAT_DATABASE", + "githuburl":"" + }, + { + "uri":"dws_04_0685.html", + "product_code":"dws", + "code":"441", + "des":"GLOBAL_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in all workload Cgroups in a cluster, including the number of SELECT, UPDATE, INSER", + "doc_type":"devg", + "kw":"GLOBAL_WORKLOAD_SQL_COUNT,System Views,Developer Guide", + "title":"GLOBAL_WORKLOAD_SQL_COUNT", + "githuburl":"" + }, + { + "uri":"dws_04_0686.html", + "product_code":"dws", + "code":"442", + "des":"GLOBAL_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in all workload Cgroups in a cluster, including the maximum, minimum, average, ", + "doc_type":"devg", + "kw":"GLOBAL_WORKLOAD_SQL_ELAPSE_TIME,System Views,Developer Guide", + "title":"GLOBAL_WORKLOAD_SQL_ELAPSE_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0687.html", + "product_code":"dws", + "code":"443", + "des":"GLOBAL_WORKLOAD_TRANSACTION provides the total transaction information about workload Cgroups on all CNs in the cluster. 
This view is accessible only to users with system", + "doc_type":"devg", + "kw":"GLOBAL_WORKLOAD_TRANSACTION,System Views,Developer Guide", + "title":"GLOBAL_WORKLOAD_TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0688.html", + "product_code":"dws", + "code":"444", + "des":"GS_ALL_CONTROL_GROUP_INFO displays all Cgroup information in a database.", + "doc_type":"devg", + "kw":"GS_ALL_CONTROL_GROUP_INFO,System Views,Developer Guide", + "title":"GS_ALL_CONTROL_GROUP_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0689.html", + "product_code":"dws", + "code":"445", + "des":"GS_CLUSTER_RESOURCE_INFO displays a DN resource summary.", + "doc_type":"devg", + "kw":"GS_CLUSTER_RESOURCE_INFO,System Views,Developer Guide", + "title":"GS_CLUSTER_RESOURCE_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0690.html", + "product_code":"dws", + "code":"446", + "des":"The database parses each received SQL text string and generates an internal parsing tree. The database traverses the parsing tree and ignores constant values in the parsi", + "doc_type":"devg", + "kw":"GS_INSTR_UNIQUE_SQL,System Views,Developer Guide", + "title":"GS_INSTR_UNIQUE_SQL", + "githuburl":"" + }, + { + "uri":"dws_04_0691.html", + "product_code":"dws", + "code":"447", + "des":"GS_REL_IOSTAT displays disk I/O statistics on the current node. In the current version, only one page is read or written in each read or write operation. Therefore, the n", + "doc_type":"devg", + "kw":"GS_REL_IOSTAT,System Views,Developer Guide", + "title":"GS_REL_IOSTAT", + "githuburl":"" + }, + { + "uri":"dws_04_0692.html", + "product_code":"dws", + "code":"448", + "des":"The GS_NODE_STAT_RESET_TIME view provides the reset time of statistics on the current node and returns the timestamp with the time zone. For details, see the get_node_sta", + "doc_type":"devg", + "kw":"GS_NODE_STAT_RESET_TIME,System Views,Developer Guide", + "title":"GS_NODE_STAT_RESET_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0693.html", + "product_code":"dws", + "code":"449", + "des":"GS_SESSION_CPU_STATISTICS displays load management information about CPU usage of ongoing complex jobs executed by the current user.", + "doc_type":"devg", + "kw":"GS_SESSION_CPU_STATISTICS,System Views,Developer Guide", + "title":"GS_SESSION_CPU_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0694.html", + "product_code":"dws", + "code":"450", + "des":"GS_SESSION_MEMORY_STATISTICS displays load management information about memory usage of ongoing complex jobs executed by the current user.", + "doc_type":"devg", + "kw":"GS_SESSION_MEMORY_STATISTICS,System Views,Developer Guide", + "title":"GS_SESSION_MEMORY_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0695.html", + "product_code":"dws", + "code":"451", + "des":"GS_SQL_COUNT displays statistics about the five types of statements (SELECT, INSERT, UPDATE, DELETE, and MERGE INTO) executed on the current node of the database, includi", + "doc_type":"devg", + "kw":"GS_SQL_COUNT,System Views,Developer Guide", + "title":"GS_SQL_COUNT", + "githuburl":"" + }, + { + "uri":"dws_04_0696.html", + "product_code":"dws", + "code":"452", + "des":"GS_WAIT_EVENTS displays statistics about waiting status and events on the current node. The values of statistical columns in this view are accumulated only when the enable", + "doc_type":"devg", + "kw":"GS_WAIT_EVENTS,System Views,Developer Guide", + "title":"GS_WAIT_EVENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0701.html", + "product_code":"dws", + "code":"453", + "des":"This view displays the 
execution information about operators in the query statements that have been executed on the current CN. The information comes from the system cata", + "doc_type":"devg", + "kw":"GS_WLM_OPERATOR_INFO,System Views,Developer Guide", + "title":"GS_WLM_OPERATOR_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0702.html", + "product_code":"dws", + "code":"454", + "des":"This view displays the records of operators in jobs that have been executed by the current user on the current CN. This view is used by Database Manager to query data from", + "doc_type":"devg", + "kw":"GS_WLM_OPERATOR_HISTORY,System Views,Developer Guide", + "title":"GS_WLM_OPERATOR_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0703.html", + "product_code":"dws", + "code":"455", + "des":"GS_WLM_OPERATOR_STATISTICS displays the operators of the jobs that are being executed by the current user.", + "doc_type":"devg", + "kw":"GS_WLM_OPERATOR_STATISTICS,System Views,Developer Guide", + "title":"GS_WLM_OPERATOR_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0704.html", + "product_code":"dws", + "code":"456", + "des":"This view displays the execution information about the query statements that have been executed on the current CN. The information comes from the system catalog dbms_om. ", + "doc_type":"devg", + "kw":"GS_WLM_SESSION_INFO,System Views,Developer Guide", + "title":"GS_WLM_SESSION_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0705.html", + "product_code":"dws", + "code":"457", + "des":"GS_WLM_SESSION_HISTORY displays load management information about a completed job executed by the current user on the current CN. This view is used by Database Manager to", + "doc_type":"devg", + "kw":"GS_WLM_SESSION_HISTORY,System Views,Developer Guide", + "title":"GS_WLM_SESSION_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0706.html", + "product_code":"dws", + "code":"458", + "des":"GS_WLM_SESSION_STATISTICS displays load management information about jobs being executed by the current user on the current CN.", + "doc_type":"devg", + "kw":"GS_WLM_SESSION_STATISTICS,System Views,Developer Guide", + "title":"GS_WLM_SESSION_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0708.html", + "product_code":"dws", + "code":"459", + "des":"GS_WLM_SQL_ALLOW displays the configured resource management SQL whitelist, including the default SQL whitelist and the SQL whitelist configured using the GUC parameter w", + "doc_type":"devg", + "kw":"GS_WLM_SQL_ALLOW,System Views,Developer Guide", + "title":"GS_WLM_SQL_ALLOW", + "githuburl":"" + }, + { + "uri":"dws_04_0709.html", + "product_code":"dws", + "code":"460", + "des":"GS_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in workload Cgroups on the current node, including the number of SELECT, UPDATE, INSERT", + "doc_type":"devg", + "kw":"GS_WORKLOAD_SQL_COUNT,System Views,Developer Guide", + "title":"GS_WORKLOAD_SQL_COUNT", + "githuburl":"" + }, + { + "uri":"dws_04_0710.html", + "product_code":"dws", + "code":"461", + "des":"GS_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in workload Cgroups on the current node, including the maximum, minimum, average, a", + "doc_type":"devg", + "kw":"GS_WORKLOAD_SQL_ELAPSE_TIME,System Views,Developer Guide", + "title":"GS_WORKLOAD_SQL_ELAPSE_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0711.html", + "product_code":"dws", + "code":"462", + "des":"GS_WORKLOAD_TRANSACTION provides transaction information about workload Cgroups on a single CN. 
The database records the number of times that each workload Cgroup commits", + "doc_type":"devg", + "kw":"GS_WORKLOAD_TRANSACTION,System Views,Developer Guide", + "title":"GS_WORKLOAD_TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0712.html", + "product_code":"dws", + "code":"463", + "des":"GS_STAT_DB_CU displays CU hits in a database and in each node in a cluster. You can clear it using gs_stat_reset().", + "doc_type":"devg", + "kw":"GS_STAT_DB_CU,System Views,Developer Guide", + "title":"GS_STAT_DB_CU", + "githuburl":"" + }, + { + "uri":"dws_04_0713.html", + "product_code":"dws", + "code":"464", + "des":"GS_STAT_SESSION_CU displays the CU hit rate of running sessions on each node in a cluster. This data about a session is cleared when you exit this session or restart the ", + "doc_type":"devg", + "kw":"GS_STAT_SESSION_CU,System Views,Developer Guide", + "title":"GS_STAT_SESSION_CU", + "githuburl":"" + }, + { + "uri":"dws_04_0714.html", + "product_code":"dws", + "code":"465", + "des":"GS_TOTAL_NODEGROUP_MEMORY_DETAIL displays statistics about memory usage of the logical cluster that the current database belongs to in the unit of MB.", + "doc_type":"devg", + "kw":"GS_TOTAL_NODEGROUP_MEMORY_DETAIL,System Views,Developer Guide", + "title":"GS_TOTAL_NODEGROUP_MEMORY_DETAIL", + "githuburl":"" + }, + { + "uri":"dws_04_0715.html", + "product_code":"dws", + "code":"466", + "des":"GS_USER_TRANSACTION provides transaction information about users on a single CN. The database records the number of times that each user commits and rolls back transactio", + "doc_type":"devg", + "kw":"GS_USER_TRANSACTION,System Views,Developer Guide", + "title":"GS_USER_TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0716.html", + "product_code":"dws", + "code":"467", + "des":"GS_VIEW_DEPENDENCY allows you to query the direct dependencies of all views visible to the current user.", + "doc_type":"devg", + "kw":"GS_VIEW_DEPENDENCY,System Views,Developer Guide", + "title":"GS_VIEW_DEPENDENCY", + "githuburl":"" + }, + { + "uri":"dws_04_0948.html", + "product_code":"dws", + "code":"468", + "des":"GS_VIEW_DEPENDENCY_PATH allows you to query the direct dependencies of all views visible to the current user. If the base table on which the view depends exists and the d", + "doc_type":"devg", + "kw":"GS_VIEW_DEPENDENCY_PATH,System Views,Developer Guide", + "title":"GS_VIEW_DEPENDENCY_PATH", + "githuburl":"" + }, + { + "uri":"dws_04_0717.html", + "product_code":"dws", + "code":"469", + "des":"GS_VIEW_INVALID queries all unavailable views visible to the current user. 
If the base table, function, or synonym that the view depends on is abnormal, the validtype col", + "doc_type":"devg", + "kw":"GS_VIEW_INVALID,System Views,Developer Guide", + "title":"GS_VIEW_INVALID", + "githuburl":"" + }, + { + "uri":"dws_04_0998.html", + "product_code":"dws", + "code":"470", + "des":"MPP_TABLES displays information about tables in PGXC_CLASS.", + "doc_type":"devg", + "kw":"MPP_TABLES,System Views,Developer Guide", + "title":"MPP_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0718.html", + "product_code":"dws", + "code":"471", + "des":"PG_AVAILABLE_EXTENSION_VERSIONS displays the extension versions of certain database features.", + "doc_type":"devg", + "kw":"PG_AVAILABLE_EXTENSION_VERSIONS,System Views,Developer Guide", + "title":"PG_AVAILABLE_EXTENSION_VERSIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0719.html", + "product_code":"dws", + "code":"472", + "des":"PG_AVAILABLE_EXTENSIONS displays the extended information about certain database features.", + "doc_type":"devg", + "kw":"PG_AVAILABLE_EXTENSIONS,System Views,Developer Guide", + "title":"PG_AVAILABLE_EXTENSIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0720.html", + "product_code":"dws", + "code":"473", + "des":"On any normal node in a cluster, PG_BULKLOAD_STATISTICS displays the execution status of the import and export services. Each import or export service corresponds to a re", + "doc_type":"devg", + "kw":"PG_BULKLOAD_STATISTICS,System Views,Developer Guide", + "title":"PG_BULKLOAD_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0721.html", + "product_code":"dws", + "code":"474", + "des":"PG_COMM_CLIENT_INFO stores the client connection information of a single node. (You can query this view on a DN to view the information about the connection between the C", + "doc_type":"devg", + "kw":"PG_COMM_CLIENT_INFO,System Views,Developer Guide", + "title":"PG_COMM_CLIENT_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0722.html", + "product_code":"dws", + "code":"475", + "des":"PG_COMM_DELAY displays the communication library delay status for a single DN.", + "doc_type":"devg", + "kw":"PG_COMM_DELAY,System Views,Developer Guide", + "title":"PG_COMM_DELAY", + "githuburl":"" + }, + { + "uri":"dws_04_0723.html", + "product_code":"dws", + "code":"476", + "des":"PG_COMM_STATUS displays the communication library status for a single DN.", + "doc_type":"devg", + "kw":"PG_COMM_STATUS,System Views,Developer Guide", + "title":"PG_COMM_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0724.html", + "product_code":"dws", + "code":"477", + "des":"PG_COMM_RECV_STREAM displays the receiving stream status of all the communication libraries for a single DN.", + "doc_type":"devg", + "kw":"PG_COMM_RECV_STREAM,System Views,Developer Guide", + "title":"PG_COMM_RECV_STREAM", + "githuburl":"" + }, + { + "uri":"dws_04_0725.html", + "product_code":"dws", + "code":"478", + "des":"PG_COMM_SEND_STREAM displays the sending stream status of all the communication libraries for a single DN.", + "doc_type":"devg", + "kw":"PG_COMM_SEND_STREAM,System Views,Developer Guide", + "title":"PG_COMM_SEND_STREAM", + "githuburl":"" + }, + { + "uri":"dws_04_0726.html", + "product_code":"dws", + "code":"479", + "des":"PG_CONTROL_GROUP_CONFIG displays the Cgroup configuration information in the system.", + "doc_type":"devg", + "kw":"PG_CONTROL_GROUP_CONFIG,System Views,Developer Guide", + "title":"PG_CONTROL_GROUP_CONFIG", + "githuburl":"" + }, + { + "uri":"dws_04_0727.html", + "product_code":"dws", + "code":"480", + "des":"PG_CURSORS displays 
the cursors that are currently available.", + "doc_type":"devg", + "kw":"PG_CURSORS,System Views,Developer Guide", + "title":"PG_CURSORS", + "githuburl":"" + }, + { + "uri":"dws_04_0728.html", + "product_code":"dws", + "code":"481", + "des":"PG_EXT_STATS displays extension statistics stored in the PG_STATISTIC_EXT table. Extension statistics are statistics on multiple columns.", + "doc_type":"devg", + "kw":"PG_EXT_STATS,System Views,Developer Guide", + "title":"PG_EXT_STATS", + "githuburl":"" + }, + { + "uri":"dws_04_0729.html", + "product_code":"dws", + "code":"482", + "des":"PG_GET_INVALID_BACKENDS displays the information about backend threads on the CN that are connected to the current standby DN.", + "doc_type":"devg", + "kw":"PG_GET_INVALID_BACKENDS,System Views,Developer Guide", + "title":"PG_GET_INVALID_BACKENDS", + "githuburl":"" + }, + { + "uri":"dws_04_0730.html", + "product_code":"dws", + "code":"483", + "des":"PG_GET_SENDERS_CATCHUP_TIME displays the catchup information of the currently active primary/standby instance sending thread on a single DN.", + "doc_type":"devg", + "kw":"PG_GET_SENDERS_CATCHUP_TIME,System Views,Developer Guide", + "title":"PG_GET_SENDERS_CATCHUP_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0731.html", + "product_code":"dws", + "code":"484", + "des":"PG_GROUP displays the database role authentication and the relationship between roles.", + "doc_type":"devg", + "kw":"PG_GROUP,System Views,Developer Guide", + "title":"PG_GROUP", + "githuburl":"" + }, + { + "uri":"dws_04_0732.html", + "product_code":"dws", + "code":"485", + "des":"PG_INDEXES provides access to useful information about each index in the database.", + "doc_type":"devg", + "kw":"PG_INDEXES,System Views,Developer Guide", + "title":"PG_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0733.html", + "product_code":"dws", + "code":"486", + "des":"The PG_JOB view replaces the PG_JOB system catalog in earlier versions and provides forward compatibility with earlier versions. The original PG_JOB system catalog is cha", + "doc_type":"devg", + "kw":"PG_JOB,System Views,Developer Guide", + "title":"PG_JOB", + "githuburl":"" + }, + { + "uri":"dws_04_0734.html", + "product_code":"dws", + "code":"487", + "des":"The PG_JOB_PROC view replaces the PG_JOB_PROC system catalog in earlier versions and provides forward compatibility with earlier versions. 
The original PG_JOB_PROC and PG", + "doc_type":"devg", + "kw":"PG_JOB_PROC,System Views,Developer Guide", + "title":"PG_JOB_PROC", + "githuburl":"" + }, + { + "uri":"dws_04_0735.html", + "product_code":"dws", + "code":"488", + "des":"PG_JOB_SINGLE displays job information about the current node.", + "doc_type":"devg", + "kw":"PG_JOB_SINGLE,System Views,Developer Guide", + "title":"PG_JOB_SINGLE", + "githuburl":"" + }, + { + "uri":"dws_04_0736.html", + "product_code":"dws", + "code":"489", + "des":"PG_LIFECYCLE_DATA_DISTRIBUTE displays the distribution of cold and hot data in a multi-temperature table of OBS.", + "doc_type":"devg", + "kw":"PG_LIFECYCLE_DATA_DISTRIBUTE,System Views,Developer Guide", + "title":"PG_LIFECYCLE_DATA_DISTRIBUTE", + "githuburl":"" + }, + { + "uri":"dws_04_0737.html", + "product_code":"dws", + "code":"490", + "des":"PG_LOCKS displays information about the locks held by open transactions.", + "doc_type":"devg", + "kw":"PG_LOCKS,System Views,Developer Guide", + "title":"PG_LOCKS", + "githuburl":"" + }, + { + "uri":"dws_04_0738.html", + "product_code":"dws", + "code":"491", + "des":"PG_NODE_ENV displays the environmental variable information about the current node.", + "doc_type":"devg", + "kw":"PG_NODE_ENV,System Views,Developer Guide", + "title":"PG_NODE_ENV", + "githuburl":"" + }, + { + "uri":"dws_04_0739.html", + "product_code":"dws", + "code":"492", + "des":"PG_OS_THREADS displays the status information about all the threads under the current node.", + "doc_type":"devg", + "kw":"PG_OS_THREADS,System Views,Developer Guide", + "title":"PG_OS_THREADS", + "githuburl":"" + }, + { + "uri":"dws_04_0740.html", + "product_code":"dws", + "code":"493", + "des":"PG_POOLER_STATUS displays the cache connection status in the pooler. PG_POOLER_STATUS can be queried only on the CN, and displays the connection cache information about the po", + "doc_type":"devg", + "kw":"PG_POOLER_STATUS,System Views,Developer Guide", + "title":"PG_POOLER_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0741.html", + "product_code":"dws", + "code":"494", + "des":"PG_PREPARED_STATEMENTS displays all prepared statements that are available in the current session.", + "doc_type":"devg", + "kw":"PG_PREPARED_STATEMENTS,System Views,Developer Guide", + "title":"PG_PREPARED_STATEMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0742.html", + "product_code":"dws", + "code":"495", + "des":"PG_PREPARED_XACTS displays information about transactions that are currently prepared for two-phase commit.", + "doc_type":"devg", + "kw":"PG_PREPARED_XACTS,System Views,Developer Guide", + "title":"PG_PREPARED_XACTS", + "githuburl":"" + }, + { + "uri":"dws_04_0743.html", + "product_code":"dws", + "code":"496", + "des":"PG_QUERYBAND_ACTION displays information about the object associated with query_band and the query_band query order.", + "doc_type":"devg", + "kw":"PG_QUERYBAND_ACTION,System Views,Developer Guide", + "title":"PG_QUERYBAND_ACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0744.html", + "product_code":"dws", + "code":"497", + "des":"PG_REPLICATION_SLOTS displays the replication node information.", + "doc_type":"devg", + "kw":"PG_REPLICATION_SLOTS,System Views,Developer Guide", + "title":"PG_REPLICATION_SLOTS", + "githuburl":"" + }, + { + "uri":"dws_04_0745.html", + "product_code":"dws", + "code":"498", + "des":"PG_ROLES displays information about database roles.", + "doc_type":"devg", + "kw":"PG_ROLES,System Views,Developer Guide", + "title":"PG_ROLES", + "githuburl":"" + }, + { + "uri":"dws_04_0746.html", + 
"product_code":"dws", + "code":"499", + "des":"PG_RULES displays information about rewrite rules.", + "doc_type":"devg", + "kw":"PG_RULES,System Views,Developer Guide", + "title":"PG_RULES", + "githuburl":"" + }, + { + "uri":"dws_04_0747.html", + "product_code":"dws", + "code":"500", + "des":"PG_RUNNING_XACTS displays the running transaction information on the current node.", + "doc_type":"devg", + "kw":"PG_RUNNING_XACTS,System Views,Developer Guide", + "title":"PG_RUNNING_XACTS", + "githuburl":"" + }, + { + "uri":"dws_04_0748.html", + "product_code":"dws", + "code":"501", + "des":"PG_SECLABELS displays information about security labels.", + "doc_type":"devg", + "kw":"PG_SECLABELS,System Views,Developer Guide", + "title":"PG_SECLABELS", + "githuburl":"" + }, + { + "uri":"dws_04_0749.html", + "product_code":"dws", + "code":"502", + "des":"PG_SESSION_WLMSTAT displays the corresponding load management information about the task currently executed by the user.", + "doc_type":"devg", + "kw":"PG_SESSION_WLMSTAT,System Views,Developer Guide", + "title":"PG_SESSION_WLMSTAT", + "githuburl":"" + }, + { + "uri":"dws_04_0750.html", + "product_code":"dws", + "code":"503", + "des":"PG_SESSION_IOSTAT displays the I/O load management information about the task currently executed by the user.IOPS is counted by ones for column storage and by thousands f", + "doc_type":"devg", + "kw":"PG_SESSION_IOSTAT,System Views,Developer Guide", + "title":"PG_SESSION_IOSTAT", + "githuburl":"" + }, + { + "uri":"dws_04_0751.html", + "product_code":"dws", + "code":"504", + "des":"PG_SETTINGS displays information about parameters of the running database.", + "doc_type":"devg", + "kw":"PG_SETTINGS,System Views,Developer Guide", + "title":"PG_SETTINGS", + "githuburl":"" + }, + { + "uri":"dws_04_0752.html", + "product_code":"dws", + "code":"505", + "des":"PG_SHADOW displays properties of all roles that are marked as rolcanlogin in PG_AUTHID.The name stems from the fact that this table should not be readable by the public s", + "doc_type":"devg", + "kw":"PG_SHADOW,System Views,Developer Guide", + "title":"PG_SHADOW", + "githuburl":"" + }, + { + "uri":"dws_04_0753.html", + "product_code":"dws", + "code":"506", + "des":"PG_SHARED_MEMORY_DETAIL displays usage information about all the shared memory contexts.", + "doc_type":"devg", + "kw":"PG_SHARED_MEMORY_DETAIL,System Views,Developer Guide", + "title":"PG_SHARED_MEMORY_DETAIL", + "githuburl":"" + }, + { + "uri":"dws_04_0754.html", + "product_code":"dws", + "code":"507", + "des":"PG_STATS displays the single-column statistics stored in the pg_statistic table.", + "doc_type":"devg", + "kw":"PG_STATS,System Views,Developer Guide", + "title":"PG_STATS", + "githuburl":"" + }, + { + "uri":"dws_04_0755.html", + "product_code":"dws", + "code":"508", + "des":"PG_STAT_ACTIVITY displays information about the current user's queries.", + "doc_type":"devg", + "kw":"PG_STAT_ACTIVITY,System Views,Developer Guide", + "title":"PG_STAT_ACTIVITY", + "githuburl":"" + }, + { + "uri":"dws_04_0757.html", + "product_code":"dws", + "code":"509", + "des":"PG_STAT_ALL_INDEXES displays access informaton about all indexes in the database, with information about each index displayed in a row.Indexes can be used via either simp", + "doc_type":"devg", + "kw":"PG_STAT_ALL_INDEXES,System Views,Developer Guide", + "title":"PG_STAT_ALL_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0758.html", + "product_code":"dws", + "code":"510", + "des":"PG_STAT_ALL_TABLES displays access information about all rows in all 
tables (including TOAST tables) in the database.", + "doc_type":"devg", + "kw":"PG_STAT_ALL_TABLES,System Views,Developer Guide", + "title":"PG_STAT_ALL_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0759.html", + "product_code":"dws", + "code":"511", + "des":"PG_STAT_BAD_BLOCK displays statistics about page or CU verification failures after a node is started.", + "doc_type":"devg", + "kw":"PG_STAT_BAD_BLOCK,System Views,Developer Guide", + "title":"PG_STAT_BAD_BLOCK", + "githuburl":"" + }, + { + "uri":"dws_04_0760.html", + "product_code":"dws", + "code":"512", + "des":"PG_STAT_BGWRITER displays statistics about the background writer process's activity.", + "doc_type":"devg", + "kw":"PG_STAT_BGWRITER,System Views,Developer Guide", + "title":"PG_STAT_BGWRITER", + "githuburl":"" + }, + { + "uri":"dws_04_0761.html", + "product_code":"dws", + "code":"513", + "des":"PG_STAT_DATABASE displays the status and statistics of each database on the current node.", + "doc_type":"devg", + "kw":"PG_STAT_DATABASE,System Views,Developer Guide", + "title":"PG_STAT_DATABASE", + "githuburl":"" + }, + { + "uri":"dws_04_0762.html", + "product_code":"dws", + "code":"514", + "des":"PG_STAT_DATABASE_CONFLICTS displays statistics about database conflicts.", + "doc_type":"devg", + "kw":"PG_STAT_DATABASE_CONFLICTS,System Views,Developer Guide", + "title":"PG_STAT_DATABASE_CONFLICTS", + "githuburl":"" + }, + { + "uri":"dws_04_0763.html", + "product_code":"dws", + "code":"515", + "des":"PG_STAT_GET_MEM_MBYTES_RESERVED displays the current activity information of a thread stored in memory. You need to specify the thread ID (pid in PG_STAT_ACTIVITY) for qu", + "doc_type":"devg", + "kw":"PG_STAT_GET_MEM_MBYTES_RESERVED,System Views,Developer Guide", + "title":"PG_STAT_GET_MEM_MBYTES_RESERVED", + "githuburl":"" + }, + { + "uri":"dws_04_0764.html", + "product_code":"dws", + "code":"516", + "des":"PG_STAT_USER_FUNCTIONS displays user-defined function status information in the namespace. 
(Only functions written in non-internal languages are included.)", + "doc_type":"devg", + "kw":"PG_STAT_USER_FUNCTIONS,System Views,Developer Guide", + "title":"PG_STAT_USER_FUNCTIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0765.html", + "product_code":"dws", + "code":"517", + "des":"PG_STAT_USER_INDEXES displays information about the index status of user-defined ordinary tables and TOAST tables.", + "doc_type":"devg", + "kw":"PG_STAT_USER_INDEXES,System Views,Developer Guide", + "title":"PG_STAT_USER_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0766.html", + "product_code":"dws", + "code":"518", + "des":"PG_STAT_USER_TABLES displays status information about user-defined ordinary tables and TOAST tables in all namespaces.", + "doc_type":"devg", + "kw":"PG_STAT_USER_TABLES,System Views,Developer Guide", + "title":"PG_STAT_USER_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0767.html", + "product_code":"dws", + "code":"519", + "des":"PG_STAT_REPLICATION displays information about log synchronization status, such as the locations of the sender sending logs and the receiver receiving logs.", + "doc_type":"devg", + "kw":"PG_STAT_REPLICATION,System Views,Developer Guide", + "title":"PG_STAT_REPLICATION", + "githuburl":"" + }, + { + "uri":"dws_04_0768.html", + "product_code":"dws", + "code":"520", + "des":"PG_STAT_SYS_INDEXES displays the index status information about all the system catalogs in the pg_catalog and information_schema schemas.", + "doc_type":"devg", + "kw":"PG_STAT_SYS_INDEXES,System Views,Developer Guide", + "title":"PG_STAT_SYS_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0769.html", + "product_code":"dws", + "code":"521", + "des":"PG_STAT_SYS_TABLES displays the statistics about the system catalogs of all the namespaces in pg_catalog and information_schema schemas.", + "doc_type":"devg", + "kw":"PG_STAT_SYS_TABLES,System Views,Developer Guide", + "title":"PG_STAT_SYS_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0770.html", + "product_code":"dws", + "code":"522", + "des":"PG_STAT_XACT_ALL_TABLES displays the transaction status information about all ordinary tables and TOAST tables in the namespaces.", + "doc_type":"devg", + "kw":"PG_STAT_XACT_ALL_TABLES,System Views,Developer Guide", + "title":"PG_STAT_XACT_ALL_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0771.html", + "product_code":"dws", + "code":"523", + "des":"PG_STAT_XACT_SYS_TABLES displays the transaction status information of the system catalog in the namespace.", + "doc_type":"devg", + "kw":"PG_STAT_XACT_SYS_TABLES,System Views,Developer Guide", + "title":"PG_STAT_XACT_SYS_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0772.html", + "product_code":"dws", + "code":"524", + "des":"PG_STAT_XACT_USER_FUNCTIONS displays statistics about function executions, with statistics about each execution displayed in a row.", + "doc_type":"devg", + "kw":"PG_STAT_XACT_USER_FUNCTIONS,System Views,Developer Guide", + "title":"PG_STAT_XACT_USER_FUNCTIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0773.html", + "product_code":"dws", + "code":"525", + "des":"PG_STAT_XACT_USER_TABLES displays the transaction status information of the user table in the namespace.", + "doc_type":"devg", + "kw":"PG_STAT_XACT_USER_TABLES,System Views,Developer Guide", + "title":"PG_STAT_XACT_USER_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0774.html", + "product_code":"dws", + "code":"526", + "des":"PG_STATIO_ALL_INDEXES contains one row for each index in the current database, showing I/O statistics about accesses to that 
specific index.", + "doc_type":"devg", + "kw":"PG_STATIO_ALL_INDEXES,System Views,Developer Guide", + "title":"PG_STATIO_ALL_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0775.html", + "product_code":"dws", + "code":"527", + "des":"PG_STATIO_ALL_SEQUENCES contains one row for each sequence in the current database, showing I/O statistics about accesses to that specific sequence.", + "doc_type":"devg", + "kw":"PG_STATIO_ALL_SEQUENCES,System Views,Developer Guide", + "title":"PG_STATIO_ALL_SEQUENCES", + "githuburl":"" + }, + { + "uri":"dws_04_0776.html", + "product_code":"dws", + "code":"528", + "des":"PG_STATIO_ALL_TABLES contains one row for each table in the current database (including TOAST tables), showing I/O statistics about accesses to that specific table.", + "doc_type":"devg", + "kw":"PG_STATIO_ALL_TABLES,System Views,Developer Guide", + "title":"PG_STATIO_ALL_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0777.html", + "product_code":"dws", + "code":"529", + "des":"PG_STATIO_SYS_INDEXES displays the I/O status information about all system catalog indexes in the namespace.", + "doc_type":"devg", + "kw":"PG_STATIO_SYS_INDEXES,System Views,Developer Guide", + "title":"PG_STATIO_SYS_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0778.html", + "product_code":"dws", + "code":"530", + "des":"PG_STATIO_SYS_SEQUENCES displays the I/O status information about all the system sequences in the namespace.", + "doc_type":"devg", + "kw":"PG_STATIO_SYS_SEQUENCES,System Views,Developer Guide", + "title":"PG_STATIO_SYS_SEQUENCES", + "githuburl":"" + }, + { + "uri":"dws_04_0779.html", + "product_code":"dws", + "code":"531", + "des":"PG_STATIO_SYS_TABLES displays the I/O status information about all the system catalogs in the namespace.", + "doc_type":"devg", + "kw":"PG_STATIO_SYS_TABLES,System Views,Developer Guide", + "title":"PG_STATIO_SYS_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0780.html", + "product_code":"dws", + "code":"532", + "des":"PG_STATIO_USER_INDEXES displays the I/O status information about all the user relationship table indexes in the namespace.", + "doc_type":"devg", + "kw":"PG_STATIO_USER_INDEXES,System Views,Developer Guide", + "title":"PG_STATIO_USER_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0781.html", + "product_code":"dws", + "code":"533", + "des":"PG_STATIO_USER_SEQUENCES displays the I/O status information about all the user relation table sequences in the namespace.", + "doc_type":"devg", + "kw":"PG_STATIO_USER_SEQUENCES,System Views,Developer Guide", + "title":"PG_STATIO_USER_SEQUENCES", + "githuburl":"" + }, + { + "uri":"dws_04_0782.html", + "product_code":"dws", + "code":"534", + "des":"PG_STATIO_USER_TABLES displays the I/O status information about all the user relation tables in the namespace.", + "doc_type":"devg", + "kw":"PG_STATIO_USER_TABLES,System Views,Developer Guide", + "title":"PG_STATIO_USER_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0783.html", + "product_code":"dws", + "code":"535", + "des":"PG_THREAD_WAIT_STATUS allows you to test the block waiting status of the backend thread and auxiliary thread of the current instance. The waiting statuses in the wait_s", + "doc_type":"devg", + "kw":"PG_THREAD_WAIT_STATUS,System Views,Developer Guide", + "title":"PG_THREAD_WAIT_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0784.html", + "product_code":"dws", + "code":"536", + "des":"PG_TABLES provides access to useful information about each table in the database.", + "doc_type":"devg", + "kw":"PG_TABLES,System Views,Developer Guide", + 
"title":"PG_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0785.html", + "product_code":"dws", + "code":"537", + "des":"PG_TDE_INFO displays the encryption information about the current cluster.Check whether the current cluster is encrypted, and check the encryption algorithm (if any) used", + "doc_type":"devg", + "kw":"PG_TDE_INFO,System Views,Developer Guide", + "title":"PG_TDE_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0786.html", + "product_code":"dws", + "code":"538", + "des":"PG_TIMEZONE_ABBREVS displays all time zone abbreviations that can be recognized by the input routines.", + "doc_type":"devg", + "kw":"PG_TIMEZONE_ABBREVS,System Views,Developer Guide", + "title":"PG_TIMEZONE_ABBREVS", + "githuburl":"" + }, + { + "uri":"dws_04_0787.html", + "product_code":"dws", + "code":"539", + "des":"PG_TIMEZONE_NAMES displays all time zone names that can be recognized by SET TIMEZONE, along with their associated abbreviations, UTC offsets, and daylight saving time st", + "doc_type":"devg", + "kw":"PG_TIMEZONE_NAMES,System Views,Developer Guide", + "title":"PG_TIMEZONE_NAMES", + "githuburl":"" + }, + { + "uri":"dws_04_0788.html", + "product_code":"dws", + "code":"540", + "des":"PG_TOTAL_MEMORY_DETAIL displays the memory usage of a certain node in the database.", + "doc_type":"devg", + "kw":"PG_TOTAL_MEMORY_DETAIL,System Views,Developer Guide", + "title":"PG_TOTAL_MEMORY_DETAIL", + "githuburl":"" + }, + { + "uri":"dws_04_0789.html", + "product_code":"dws", + "code":"541", + "des":"PG_TOTAL_SCHEMA_INFO displays the storage usage of all schemas in each database. This view is valid only if use_workload_manager is set to on.", + "doc_type":"devg", + "kw":"PG_TOTAL_SCHEMA_INFO,System Views,Developer Guide", + "title":"PG_TOTAL_SCHEMA_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0790.html", + "product_code":"dws", + "code":"542", + "des":"PG_TOTAL_USER_RESOURCE_INFO displays the resource usage of all users. Only administrators can query this view. 
This view is valid only if use_workload_manager is set to o", + "doc_type":"devg", + "kw":"PG_TOTAL_USER_RESOURCE_INFO,System Views,Developer Guide", + "title":"PG_TOTAL_USER_RESOURCE_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0791.html", + "product_code":"dws", + "code":"543", + "des":"PG_USER displays information about users who can access the database.", + "doc_type":"devg", + "kw":"PG_USER,System Views,Developer Guide", + "title":"PG_USER", + "githuburl":"" + }, + { + "uri":"dws_04_0792.html", + "product_code":"dws", + "code":"544", + "des":"PG_USER_MAPPINGS displays information about user mappings. This is essentially a publicly readable view of PG_USER_MAPPING that leaves out the options column if the user h", + "doc_type":"devg", + "kw":"PG_USER_MAPPINGS,System Views,Developer Guide", + "title":"PG_USER_MAPPINGS", + "githuburl":"" + }, + { + "uri":"dws_04_0793.html", + "product_code":"dws", + "code":"545", + "des":"PG_VIEWS displays basic information about each view in the database.", + "doc_type":"devg", + "kw":"PG_VIEWS,System Views,Developer Guide", + "title":"PG_VIEWS", + "githuburl":"" + }, + { + "uri":"dws_04_0794.html", + "product_code":"dws", + "code":"546", + "des":"PG_WLM_STATISTICS displays information about workload management after the task is complete or the exception has been handled.", + "doc_type":"devg", + "kw":"PG_WLM_STATISTICS,System Views,Developer Guide", + "title":"PG_WLM_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0795.html", + "product_code":"dws", + "code":"547", + "des":"PGXC_BULKLOAD_PROGRESS displays the progress of the service import. Only GDS common files can be imported. This view is accessible only to users with system administrator", + "doc_type":"devg", + "kw":"PGXC_BULKLOAD_PROGRESS,System Views,Developer Guide", + "title":"PGXC_BULKLOAD_PROGRESS", + "githuburl":"" + }, + { + "uri":"dws_04_0796.html", + "product_code":"dws", + "code":"548", + "des":"PGXC_BULKLOAD_STATISTICS displays real-time statistics about service execution, such as GDS, COPY, and \\COPY, on a CN. This view summarizes the real-time execution status", + "doc_type":"devg", + "kw":"PGXC_BULKLOAD_STATISTICS,System Views,Developer Guide", + "title":"PGXC_BULKLOAD_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0797.html", + "product_code":"dws", + "code":"549", + "des":"PGXC_COMM_CLIENT_INFO stores the client connection information of all nodes. 
(You can query this view on a DN to view the information about the connection between the CN ", + "doc_type":"devg", + "kw":"PGXC_COMM_CLIENT_INFO,System Views,Developer Guide", + "title":"PGXC_COMM_CLIENT_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0798.html", + "product_code":"dws", + "code":"550", + "des":"PGXC_COMM_DELAY displays the communication library delay status for all the DNs.", + "doc_type":"devg", + "kw":"PGXC_COMM_DELAY,System Views,Developer Guide", + "title":"PGXC_COMM_DELAY", + "githuburl":"" + }, + { + "uri":"dws_04_0799.html", + "product_code":"dws", + "code":"551", + "des":"PGXC_COMM_RECV_STREAM displays the receiving stream status of the communication libraries for all the DNs.", + "doc_type":"devg", + "kw":"PGXC_COMM_RECV_STREAM,System Views,Developer Guide", + "title":"PGXC_COMM_RECV_STREAM", + "githuburl":"" + }, + { + "uri":"dws_04_0800.html", + "product_code":"dws", + "code":"552", + "des":"PGXC_COMM_SEND_STREAM displays the sending stream status of the communication libraries for all the DNs.", + "doc_type":"devg", + "kw":"PGXC_COMM_SEND_STREAM,System Views,Developer Guide", + "title":"PGXC_COMM_SEND_STREAM", + "githuburl":"" + }, + { + "uri":"dws_04_0801.html", + "product_code":"dws", + "code":"553", + "des":"PGXC_COMM_STATUS displays the communication library status for all the DNs.", + "doc_type":"devg", + "kw":"PGXC_COMM_STATUS,System Views,Developer Guide", + "title":"PGXC_COMM_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0802.html", + "product_code":"dws", + "code":"554", + "des":"PGXC_DEADLOCK displays lock wait information generated due to distributed deadlocks. Currently, PGXC_DEADLOCK collects only lock wait information about locks whose locktyp", + "doc_type":"devg", + "kw":"PGXC_DEADLOCK,System Views,Developer Guide", + "title":"PGXC_DEADLOCK", + "githuburl":"" + }, + { + "uri":"dws_04_0803.html", + "product_code":"dws", + "code":"555", + "des":"PGXC_GET_STAT_ALL_TABLES displays information about insertion, update, and deletion operations on tables and the dirty page rate of tables. Before running VACUUM FULL to a", + "doc_type":"devg", + "kw":"PGXC_GET_STAT_ALL_TABLES,System Views,Developer Guide", + "title":"PGXC_GET_STAT_ALL_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0804.html", + "product_code":"dws", + "code":"556", + "des":"PGXC_GET_STAT_ALL_PARTITIONS displays information about insertion, update, and deletion operations on partitions of partitioned tables and the dirty page rate of tables.T", + "doc_type":"devg", + "kw":"PGXC_GET_STAT_ALL_PARTITIONS,System Views,Developer Guide", + "title":"PGXC_GET_STAT_ALL_PARTITIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0805.html", + "product_code":"dws", + "code":"557", + "des":"PGXC_GET_TABLE_SKEWNESS displays the data skew on tables in the current database.", + "doc_type":"devg", + "kw":"PGXC_GET_TABLE_SKEWNESS,System Views,Developer Guide", + "title":"PGXC_GET_TABLE_SKEWNESS", + "githuburl":"" + }, + { + "uri":"dws_04_0806.html", + "product_code":"dws", + "code":"558", + "des":"PGXC_GTM_SNAPSHOT_STATUS displays transaction information on the current GTM.", + "doc_type":"devg", + "kw":"PGXC_GTM_SNAPSHOT_STATUS,System Views,Developer Guide", + "title":"PGXC_GTM_SNAPSHOT_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0807.html", + "product_code":"dws", + "code":"559", + "des":"PGXC_INSTANCE_TIME displays the running time of processes on each node in the cluster and the time consumed in each execution phase. 
Except the node_name column, the othe", + "doc_type":"devg", + "kw":"PGXC_INSTANCE_TIME,System Views,Developer Guide", + "title":"PGXC_INSTANCE_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0808.html", + "product_code":"dws", + "code":"560", + "des":"PGXC_INSTR_UNIQUE_SQL displays the complete Unique SQL statistics of all CN nodes in the cluster. Only the system administrator can access this view. For details about the", + "doc_type":"devg", + "kw":"PGXC_INSTR_UNIQUE_SQL,System Views,Developer Guide", + "title":"PGXC_INSTR_UNIQUE_SQL", + "githuburl":"" + }, + { + "uri":"dws_04_0809.html", + "product_code":"dws", + "code":"561", + "des":"PGXC_LOCK_CONFLICTS displays information about conflicting locks in the cluster. When a lock is waiting for another lock or another lock is waiting for this one, a lock co", + "doc_type":"devg", + "kw":"PGXC_LOCK_CONFLICTS,System Views,Developer Guide", + "title":"PGXC_LOCK_CONFLICTS", + "githuburl":"" + }, + { + "uri":"dws_04_0810.html", + "product_code":"dws", + "code":"562", + "des":"PGXC_NODE_ENV displays the environmental variable information about all nodes in a cluster.", + "doc_type":"devg", + "kw":"PGXC_NODE_ENV,System Views,Developer Guide", + "title":"PGXC_NODE_ENV", + "githuburl":"" + }, + { + "uri":"dws_04_0811.html", + "product_code":"dws", + "code":"563", + "des":"PGXC_NODE_STAT_RESET_TIME displays the time when statistics of each node in the cluster are reset. All columns except node_name are the same as those in the GS_NODE_STAT_", + "doc_type":"devg", + "kw":"PGXC_NODE_STAT_RESET_TIME,System Views,Developer Guide", + "title":"PGXC_NODE_STAT_RESET_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0812.html", + "product_code":"dws", + "code":"564", + "des":"PGXC_OS_RUN_INFO displays the OS running status of each node in the cluster. All columns except node_name are the same as those in the PV_OS_RUN_INFO view. This view is a", + "doc_type":"devg", + "kw":"PGXC_OS_RUN_INFO,System Views,Developer Guide", + "title":"PGXC_OS_RUN_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0813.html", + "product_code":"dws", + "code":"565", + "des":"PGXC_OS_THREADS displays thread status information under all normal nodes in the current cluster.", + "doc_type":"devg", + "kw":"PGXC_OS_THREADS,System Views,Developer Guide", + "title":"PGXC_OS_THREADS", + "githuburl":"" + }, + { + "uri":"dws_04_0814.html", + "product_code":"dws", + "code":"566", + "des":"PGXC_PREPARED_XACTS displays the two-phase transactions in the prepared phase.", + "doc_type":"devg", + "kw":"PGXC_PREPARED_XACTS,System Views,Developer Guide", + "title":"PGXC_PREPARED_XACTS", + "githuburl":"" + }, + { + "uri":"dws_04_0815.html", + "product_code":"dws", + "code":"567", + "des":"PGXC_REDO_STAT displays statistics on redoing Xlogs of each node in the cluster. All columns except node_name are the same as those in the PV_REDO_STAT view. This view is", + "doc_type":"devg", + "kw":"PGXC_REDO_STAT,System Views,Developer Guide", + "title":"PGXC_REDO_STAT", + "githuburl":"" + }, + { + "uri":"dws_04_0816.html", + "product_code":"dws", + "code":"568", + "des":"PGXC_REL_IOSTAT displays statistics on disk read and write of each node in the cluster. All columns except node_name are the same as those in the GS_REL_IOSTAT view. 
This", + "doc_type":"devg", + "kw":"PGXC_REL_IOSTAT,System Views,Developer Guide", + "title":"PGXC_REL_IOSTAT", + "githuburl":"" + }, + { + "uri":"dws_04_0817.html", + "product_code":"dws", + "code":"569", + "des":"PGXC_REPLICATION_SLOTS displays the replication information of DNs in the cluster. All columns except node_name are the same as those in the PG_REPLICATION_SLOTS view. Th", + "doc_type":"devg", + "kw":"PGXC_REPLICATION_SLOTS,System Views,Developer Guide", + "title":"PGXC_REPLICATION_SLOTS", + "githuburl":"" + }, + { + "uri":"dws_04_0818.html", + "product_code":"dws", + "code":"570", + "des":"PGXC_RUNNING_XACTS displays information about running transactions on each node in the cluster. The content is the same as that displayed in PG_RUNNING_XACTS.", + "doc_type":"devg", + "kw":"PGXC_RUNNING_XACTS,System Views,Developer Guide", + "title":"PGXC_RUNNING_XACTS", + "githuburl":"" + }, + { + "uri":"dws_04_0819.html", + "product_code":"dws", + "code":"571", + "des":"PGXC_SETTINGS displays the database running status of each node in the cluster. All columns except node_name are the same as those in the PG_SETTINGS view. This view is a", + "doc_type":"devg", + "kw":"PGXC_SETTINGS,System Views,Developer Guide", + "title":"PGXC_SETTINGS", + "githuburl":"" + }, + { + "uri":"dws_04_0820.html", + "product_code":"dws", + "code":"572", + "des":"PGXC_STAT_ACTIVITY displays information about the query performed by the current user on all the CNs in the current cluster. Run the following command to view blocked quer", + "doc_type":"devg", + "kw":"PGXC_STAT_ACTIVITY,System Views,Developer Guide", + "title":"PGXC_STAT_ACTIVITY", + "githuburl":"" + }, + { + "uri":"dws_04_0821.html", + "product_code":"dws", + "code":"573", + "des":"PGXC_STAT_BAD_BLOCK displays statistics about page or CU verification failures after all nodes in a cluster are started.", + "doc_type":"devg", + "kw":"PGXC_STAT_BAD_BLOCK,System Views,Developer Guide", + "title":"PGXC_STAT_BAD_BLOCK", + "githuburl":"" + }, + { + "uri":"dws_04_0822.html", + "product_code":"dws", + "code":"574", + "des":"PGXC_STAT_BGWRITER displays statistics on the background writer of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_BGWRITER vi", + "doc_type":"devg", + "kw":"PGXC_STAT_BGWRITER,System Views,Developer Guide", + "title":"PGXC_STAT_BGWRITER", + "githuburl":"" + }, + { + "uri":"dws_04_0823.html", + "product_code":"dws", + "code":"575", + "des":"PGXC_STAT_DATABASE displays the database status and statistics of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_DATABASE vie", + "doc_type":"devg", + "kw":"PGXC_STAT_DATABASE,System Views,Developer Guide", + "title":"PGXC_STAT_DATABASE", + "githuburl":"" + }, + { + "uri":"dws_04_0824.html", + "product_code":"dws", + "code":"576", + "des":"PGXC_STAT_REPLICATION displays the log synchronization status of each node in the cluster. 
All columns except node_name are the same as those in the PG_STAT_REPLICATION v", + "doc_type":"devg", + "kw":"PGXC_STAT_REPLICATION,System Views,Developer Guide", + "title":"PGXC_STAT_REPLICATION", + "githuburl":"" + }, + { + "uri":"dws_04_0825.html", + "product_code":"dws", + "code":"577", + "des":"PGXC_SQL_COUNT displays the node-level and user-level statistics for the SQL statements of SELECT, INSERT, UPDATE, DELETE, and MERGE INTO and DDL, DML, and DCL statements", + "doc_type":"devg", + "kw":"PGXC_SQL_COUNT,System Views,Developer Guide", + "title":"PGXC_SQL_COUNT", + "githuburl":"" + }, + { + "uri":"dws_04_0826.html", + "product_code":"dws", + "code":"578", + "des":"PGXC_THREAD_WAIT_STATUS displays all the call layer hierarchy relationships between threads of the SQL statements on all the nodes in a cluster, and the waiting status of ", + "doc_type":"devg", + "kw":"PGXC_THREAD_WAIT_STATUS,System Views,Developer Guide", + "title":"PGXC_THREAD_WAIT_STATUS", + "githuburl":"" + }, + { + "uri":"dws_04_0827.html", + "product_code":"dws", + "code":"579", + "des":"PGXC_TOTAL_MEMORY_DETAIL displays the memory usage in the cluster.", + "doc_type":"devg", + "kw":"PGXC_TOTAL_MEMORY_DETAIL,System Views,Developer Guide", + "title":"PGXC_TOTAL_MEMORY_DETAIL", + "githuburl":"" + }, + { + "uri":"dws_04_0828.html", + "product_code":"dws", + "code":"580", + "des":"PGXC_TOTAL_SCHEMA_INFO displays the schema space information of all instances in the cluster, providing visibility into the schema space usage of each instance. This view", + "doc_type":"devg", + "kw":"PGXC_TOTAL_SCHEMA_INFO,System Views,Developer Guide", + "title":"PGXC_TOTAL_SCHEMA_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0829.html", + "product_code":"dws", + "code":"581", + "des":"PGXC_TOTAL_SCHEMA_INFO_ANALYZE displays the overall schema space information of the cluster, including the total cluster space, average space of instances, skew ratio, ma", + "doc_type":"devg", + "kw":"PGXC_TOTAL_SCHEMA_INFO_ANALYZE,System Views,Developer Guide", + "title":"PGXC_TOTAL_SCHEMA_INFO_ANALYZE", + "githuburl":"" + }, + { + "uri":"dws_04_0830.html", + "product_code":"dws", + "code":"582", + "des":"PGXC_USER_TRANSACTION provides transaction information about users on all CNs. It is accessible only to users with system administrator rights. This view is valid only wh", + "doc_type":"devg", + "kw":"PGXC_USER_TRANSACTION,System Views,Developer Guide", + "title":"PGXC_USER_TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0831.html", + "product_code":"dws", + "code":"583", + "des":"PGXC_VARIABLE_INFO displays information about transaction IDs and OIDs of all nodes in a cluster.", + "doc_type":"devg", + "kw":"PGXC_VARIABLE_INFO,System Views,Developer Guide", + "title":"PGXC_VARIABLE_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0832.html", + "product_code":"dws", + "code":"584", + "des":"PGXC_WAIT_EVENTS displays statistics on the waiting status and events of each node in the cluster. The content is the same as that displayed in GS_WAIT_EVENTS. This view ", + "doc_type":"devg", + "kw":"PGXC_WAIT_EVENTS,System Views,Developer Guide", + "title":"PGXC_WAIT_EVENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0836.html", + "product_code":"dws", + "code":"585", + "des":"PGXC_WLM_OPERATOR_HISTORY displays the operator information of completed jobs executed on all CNs. This view is used by Database Manager to query data from a database. 
Dat", + "doc_type":"devg", + "kw":"PGXC_WLM_OPERATOR_HISTORY,System Views,Developer Guide", + "title":"PGXC_WLM_OPERATOR_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0837.html", + "product_code":"dws", + "code":"586", + "des":"PGXC_WLM_OPERATOR_INFO displays the operator information of completed jobs executed on CNs. The data in this view is obtained from GS_WLM_OPERATOR_INFO. This view is acces", + "doc_type":"devg", + "kw":"PGXC_WLM_OPERATOR_INFO,System Views,Developer Guide", + "title":"PGXC_WLM_OPERATOR_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0838.html", + "product_code":"dws", + "code":"587", + "des":"PGXC_WLM_OPERATOR_STATISTICS displays the operator information of jobs being executed on CNs. This view is accessible only to users with system administrator rights. For ", + "doc_type":"devg", + "kw":"PGXC_WLM_OPERATOR_STATISTICS,System Views,Developer Guide", + "title":"PGXC_WLM_OPERATOR_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0839.html", + "product_code":"dws", + "code":"588", + "des":"PGXC_WLM_SESSION_INFO displays load management information for completed jobs executed on all CNs. The data in this view is obtained from GS_WLM_SESSION_INFO. This view is", + "doc_type":"devg", + "kw":"PGXC_WLM_SESSION_INFO,System Views,Developer Guide", + "title":"PGXC_WLM_SESSION_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0840.html", + "product_code":"dws", + "code":"589", + "des":"PGXC_WLM_SESSION_HISTORY displays load management information for completed jobs executed on all CNs. This view is used by Data Manager to query data from a database. Dat", + "doc_type":"devg", + "kw":"PGXC_WLM_SESSION_HISTORY,System Views,Developer Guide", + "title":"PGXC_WLM_SESSION_HISTORY", + "githuburl":"" + }, + { + "uri":"dws_04_0841.html", + "product_code":"dws", + "code":"590", + "des":"PGXC_WLM_SESSION_STATISTICS displays load management information about jobs that are being executed on CNs. This view is accessible only to users with system administrator", + "doc_type":"devg", + "kw":"PGXC_WLM_SESSION_STATISTICS,System Views,Developer Guide", + "title":"PGXC_WLM_SESSION_STATISTICS", + "githuburl":"" + }, + { + "uri":"dws_04_0842.html", + "product_code":"dws", + "code":"591", + "des":"PGXC_WLM_WORKLOAD_RECORDS displays the status of jobs executed by the current user on CNs. It is accessible only to users with system administrator rights. This view is av", + "doc_type":"devg", + "kw":"PGXC_WLM_WORKLOAD_RECORDS,System Views,Developer Guide", + "title":"PGXC_WLM_WORKLOAD_RECORDS", + "githuburl":"" + }, + { + "uri":"dws_04_0843.html", + "product_code":"dws", + "code":"592", + "des":"PGXC_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in workload Cgroups on all CNs in a cluster, including the number of SELECT, UPDATE, ", + "doc_type":"devg", + "kw":"PGXC_WORKLOAD_SQL_COUNT,System Views,Developer Guide", + "title":"PGXC_WORKLOAD_SQL_COUNT", + "githuburl":"" + }, + { + "uri":"dws_04_0844.html", + "product_code":"dws", + "code":"593", + "des":"PGXC_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in workload Cgroups on all CNs in a cluster, including the maximum, minimum, aver", + "doc_type":"devg", + "kw":"PGXC_WORKLOAD_SQL_ELAPSE_TIME,System Views,Developer Guide", + "title":"PGXC_WORKLOAD_SQL_ELAPSE_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0845.html", + "product_code":"dws", + "code":"594", + "des":"PGXC_WORKLOAD_TRANSACTION provides transaction information about workload Cgroups on all CNs. 
It is accessible only to users with system administrator rights. This view i", + "doc_type":"devg", + "kw":"PGXC_WORKLOAD_TRANSACTION,System Views,Developer Guide", + "title":"PGXC_WORKLOAD_TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_04_0846.html", + "product_code":"dws", + "code":"595", + "des":"PLAN_TABLE displays the plan information collected by EXPLAIN PLAN. Plan information is in a session-level life cycle. After the session exits, the data will be deleted. ", + "doc_type":"devg", + "kw":"PLAN_TABLE,System Views,Developer Guide", + "title":"PLAN_TABLE", + "githuburl":"" + }, + { + "uri":"dws_04_0847.html", + "product_code":"dws", + "code":"596", + "des":"PLAN_TABLE_DATA displays the plan information collected by EXPLAIN PLAN. Different from the PLAN_TABLE view, the system catalog PLAN_TABLE_DATA stores the plan informatio", + "doc_type":"devg", + "kw":"PLAN_TABLE_DATA,System Views,Developer Guide", + "title":"PLAN_TABLE_DATA", + "githuburl":"" + }, + { + "uri":"dws_04_0848.html", + "product_code":"dws", + "code":"597", + "des":"By collecting statistics about the data file I/Os, PV_FILE_STAT displays the I/O performance of the data files to detect performance problems, such as abnormal I/O operatio", + "doc_type":"devg", + "kw":"PV_FILE_STAT,System Views,Developer Guide", + "title":"PV_FILE_STAT", + "githuburl":"" + }, + { + "uri":"dws_04_0849.html", + "product_code":"dws", + "code":"598", + "des":"PV_INSTANCE_TIME collects statistics on the running time of processes and the time consumed in each execution phase, in microseconds. PV_INSTANCE_TIME records time consump", + "doc_type":"devg", + "kw":"PV_INSTANCE_TIME,System Views,Developer Guide", + "title":"PV_INSTANCE_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0850.html", + "product_code":"dws", + "code":"599", + "des":"PV_OS_RUN_INFO displays the running status of the current operating system.", + "doc_type":"devg", + "kw":"PV_OS_RUN_INFO,System Views,Developer Guide", + "title":"PV_OS_RUN_INFO", + "githuburl":"" + }, + { + "uri":"dws_04_0851.html", + "product_code":"dws", + "code":"600", + "des":"PV_SESSION_MEMORY displays statistics about memory usage at the session level in the unit of MB, including all the memory allocated to Postgres and Stream threads on DNs ", + "doc_type":"devg", + "kw":"PV_SESSION_MEMORY,System Views,Developer Guide", + "title":"PV_SESSION_MEMORY", + "githuburl":"" + }, + { + "uri":"dws_04_0852.html", + "product_code":"dws", + "code":"601", + "des":"PV_SESSION_MEMORY_DETAIL displays statistics about thread memory usage by memory context. The memory context TempSmallContextGroup collects information about all memory co", + "doc_type":"devg", + "kw":"PV_SESSION_MEMORY_DETAIL,System Views,Developer Guide", + "title":"PV_SESSION_MEMORY_DETAIL", + "githuburl":"" + }, + { + "uri":"dws_04_0853.html", + "product_code":"dws", + "code":"602", + "des":"PV_SESSION_STAT displays session state statistics based on session threads or the AutoVacuum thread.", + "doc_type":"devg", + "kw":"PV_SESSION_STAT,System Views,Developer Guide", + "title":"PV_SESSION_STAT", + "githuburl":"" + }, + { + "uri":"dws_04_0854.html", + "product_code":"dws", + "code":"603", + "des":"PV_SESSION_TIME displays statistics about the running time of session threads and time consumed in each execution phase, in microseconds.", + "doc_type":"devg", + "kw":"PV_SESSION_TIME,System Views,Developer Guide", + "title":"PV_SESSION_TIME", + "githuburl":"" + }, + { + "uri":"dws_04_0855.html", + "product_code":"dws", + "code":"604", + 
"des":"PV_TOTAL_MEMORY_DETAIL displays statistics about memory usage of the current database node in the unit of MB.", + "doc_type":"devg", + "kw":"PV_TOTAL_MEMORY_DETAIL,System Views,Developer Guide", + "title":"PV_TOTAL_MEMORY_DETAIL", + "githuburl":"" + }, + { + "uri":"dws_04_0856.html", + "product_code":"dws", + "code":"605", + "des":"PV_REDO_STAT displays statistics on redoing Xlogs on the current node.", + "doc_type":"devg", + "kw":"PV_REDO_STAT,System Views,Developer Guide", + "title":"PV_REDO_STAT", + "githuburl":"" + }, + { + "uri":"dws_04_0857.html", + "product_code":"dws", + "code":"606", + "des":"REDACTION_COLUMNS displays information about all redaction columns in the current database.", + "doc_type":"devg", + "kw":"REDACTION_COLUMNS,System Views,Developer Guide", + "title":"REDACTION_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0858.html", + "product_code":"dws", + "code":"607", + "des":"REDACTION_POLICIES displays information about all redaction objects in the current database.", + "doc_type":"devg", + "kw":"REDACTION_POLICIES,System Views,Developer Guide", + "title":"REDACTION_POLICIES", + "githuburl":"" + }, + { + "uri":"dws_04_0859.html", + "product_code":"dws", + "code":"608", + "des":"USER_COL_COMMENTS displays the column comments of the table accessible to the current user.", + "doc_type":"devg", + "kw":"USER_COL_COMMENTS,System Views,Developer Guide", + "title":"USER_COL_COMMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0860.html", + "product_code":"dws", + "code":"609", + "des":"USER_CONSTRAINTS displays the table constraint information accessible to the current user.", + "doc_type":"devg", + "kw":"USER_CONSTRAINTS,System Views,Developer Guide", + "title":"USER_CONSTRAINTS", + "githuburl":"" + }, + { + "uri":"dws_04_0861.html", + "product_code":"dws", + "code":"610", + "des":"USER_CONSTRAINTS displays the information about constraint columns of the tables accessible to the current user.", + "doc_type":"devg", + "kw":"USER_CONS_COLUMNS,System Views,Developer Guide", + "title":"USER_CONS_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0862.html", + "product_code":"dws", + "code":"611", + "des":"USER_INDEXES displays index information in the current schema.", + "doc_type":"devg", + "kw":"USER_INDEXES,System Views,Developer Guide", + "title":"USER_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0863.html", + "product_code":"dws", + "code":"612", + "des":"USER_IND_COLUMNS displays column information about all indexes accessible to the current user.", + "doc_type":"devg", + "kw":"USER_IND_COLUMNS,System Views,Developer Guide", + "title":"USER_IND_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0864.html", + "product_code":"dws", + "code":"613", + "des":"USER_IND_EXPRESSIONSdisplays information about the function-based expression index accessible to the current user.", + "doc_type":"devg", + "kw":"USER_IND_EXPRESSIONS,System Views,Developer Guide", + "title":"USER_IND_EXPRESSIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0865.html", + "product_code":"dws", + "code":"614", + "des":"USER_IND_PARTITIONS displays information about index partitions accessible to the current user.", + "doc_type":"devg", + "kw":"USER_IND_PARTITIONS,System Views,Developer Guide", + "title":"USER_IND_PARTITIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0866.html", + "product_code":"dws", + "code":"615", + "des":"USER_JOBS displays all jobs owned by the user.", + "doc_type":"devg", + "kw":"USER_JOBS,System Views,Developer Guide", + "title":"USER_JOBS", + "githuburl":"" + }, 
+ { + "uri":"dws_04_0867.html", + "product_code":"dws", + "code":"616", + "des":"USER_OBJECTS displays all database objects accessible to the current user.For details about the value ranges of last_ddl_time and last_ddl_time, see PG_OBJECT.", + "doc_type":"devg", + "kw":"USER_OBJECTS,System Views,Developer Guide", + "title":"USER_OBJECTS", + "githuburl":"" + }, + { + "uri":"dws_04_0868.html", + "product_code":"dws", + "code":"617", + "des":"USER_PART_INDEXES displays information about partitioned table indexes accessible to the current user.", + "doc_type":"devg", + "kw":"USER_PART_INDEXES,System Views,Developer Guide", + "title":"USER_PART_INDEXES", + "githuburl":"" + }, + { + "uri":"dws_04_0869.html", + "product_code":"dws", + "code":"618", + "des":"USER_PART_TABLES displays information about partitioned tables accessible to the current user.", + "doc_type":"devg", + "kw":"USER_PART_TABLES,System Views,Developer Guide", + "title":"USER_PART_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0870.html", + "product_code":"dws", + "code":"619", + "des":"USER_PROCEDURES displays information about all stored procedures and functions in the current schema.", + "doc_type":"devg", + "kw":"USER_PROCEDURES,System Views,Developer Guide", + "title":"USER_PROCEDURES", + "githuburl":"" + }, + { + "uri":"dws_04_0871.html", + "product_code":"dws", + "code":"620", + "des":"USER_SEQUENCES displays sequence information in the current schema.", + "doc_type":"devg", + "kw":"USER_SEQUENCES,System Views,Developer Guide", + "title":"USER_SEQUENCES", + "githuburl":"" + }, + { + "uri":"dws_04_0872.html", + "product_code":"dws", + "code":"621", + "des":"USER_SOURCE displays information about stored procedures or functions in this mode, and provides the columns defined by the stored procedures or the functions.", + "doc_type":"devg", + "kw":"USER_SOURCE,System Views,Developer Guide", + "title":"USER_SOURCE", + "githuburl":"" + }, + { + "uri":"dws_04_0873.html", + "product_code":"dws", + "code":"622", + "des":"USER_SYNONYMS displays synonyms accessible to the current user.", + "doc_type":"devg", + "kw":"USER_SYNONYMS,System Views,Developer Guide", + "title":"USER_SYNONYMS", + "githuburl":"" + }, + { + "uri":"dws_04_0874.html", + "product_code":"dws", + "code":"623", + "des":"USER_TAB_COLUMNS displays information about table columns accessible to the current user.", + "doc_type":"devg", + "kw":"USER_TAB_COLUMNS,System Views,Developer Guide", + "title":"USER_TAB_COLUMNS", + "githuburl":"" + }, + { + "uri":"dws_04_0875.html", + "product_code":"dws", + "code":"624", + "des":"USER_TAB_COMMENTS displays comments about all tables and views accessible to the current user.", + "doc_type":"devg", + "kw":"USER_TAB_COMMENTS,System Views,Developer Guide", + "title":"USER_TAB_COMMENTS", + "githuburl":"" + }, + { + "uri":"dws_04_0876.html", + "product_code":"dws", + "code":"625", + "des":"USER_TAB_PARTITIONS displays all table partitions accessible to the current user. 
Each partition of a partitioned table accessible to the current user has a piece of reco", + "doc_type":"devg", + "kw":"USER_TAB_PARTITIONS,System Views,Developer Guide", + "title":"USER_TAB_PARTITIONS", + "githuburl":"" + }, + { + "uri":"dws_04_0877.html", + "product_code":"dws", + "code":"626", + "des":"USER_TABLES displays table information in the current schema.", + "doc_type":"devg", + "kw":"USER_TABLES,System Views,Developer Guide", + "title":"USER_TABLES", + "githuburl":"" + }, + { + "uri":"dws_04_0878.html", + "product_code":"dws", + "code":"627", + "des":"USER_TRIGGERS displays the information about triggers accessible to the current user.", + "doc_type":"devg", + "kw":"USER_TRIGGERS,System Views,Developer Guide", + "title":"USER_TRIGGERS", + "githuburl":"" + }, + { + "uri":"dws_04_0879.html", + "product_code":"dws", + "code":"628", + "des":"USER_VIEWS displays information about all views in the current schema.", + "doc_type":"devg", + "kw":"USER_VIEWS,System Views,Developer Guide", + "title":"USER_VIEWS", + "githuburl":"" + }, + { + "uri":"dws_04_0880.html", + "product_code":"dws", + "code":"629", + "des":"V$SESSION displays all session information about the current session.", + "doc_type":"devg", + "kw":"V$SESSION,System Views,Developer Guide", + "title":"V$SESSION", + "githuburl":"" + }, + { + "uri":"dws_04_0881.html", + "product_code":"dws", + "code":"630", + "des":"V$SESSION_LONGOPS displays the progress of ongoing operations.", + "doc_type":"devg", + "kw":"V$SESSION_LONGOPS,System Views,Developer Guide", + "title":"V$SESSION_LONGOPS", + "githuburl":"" + }, + { + "uri":"dws_04_0883.html", + "product_code":"dws", + "code":"631", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"GUC Parameters", + "title":"GUC Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0884.html", + "product_code":"dws", + "code":"632", + "des":"GaussDB(DWS) GUC parameters can control database system behaviors. You can check and adjust the GUC parameters based on your business scenario and data volume.After a clu", + "doc_type":"devg", + "kw":"Viewing GUC Parameters,GUC Parameters,Developer Guide", + "title":"Viewing GUC Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0885.html", + "product_code":"dws", + "code":"633", + "des":"To ensure the optimal performance of GaussDB(DWS), you can adjust the GUC parameters in the database.The GUC parameters of GaussDB(DWS) are classified into the following ", + "doc_type":"devg", + "kw":"Configuring GUC Parameters,GUC Parameters,Developer Guide", + "title":"Configuring GUC Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0886.html", + "product_code":"dws", + "code":"634", + "des":"The database provides many operation parameters. Configuration of these parameters affects the behavior of the database system. Before modifying these parameters, learn t", + "doc_type":"devg", + "kw":"GUC Parameter Usage,GUC Parameters,Developer Guide", + "title":"GUC Parameter Usage", + "githuburl":"" + }, + { + "uri":"dws_04_0888.html", + "product_code":"dws", + "code":"635", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Connection and Authentication", + "title":"Connection and Authentication", + "githuburl":"" + }, + { + "uri":"dws_04_0889.html", + "product_code":"dws", + "code":"636", + "des":"This section describes parameters related to the connection mode between the client and server.Parameter description: Specifies the maximum number of allowed parallel con", + "doc_type":"devg", + "kw":"Connection Settings,Connection and Authentication,Developer Guide", + "title":"Connection Settings", + "githuburl":"" + }, + { + "uri":"dws_04_0890.html", + "product_code":"dws", + "code":"637", + "des":"This section describes parameters about how to securely authenticate the client and server.Parameter description: Specifies the longest duration to wait before the client", + "doc_type":"devg", + "kw":"Security and Authentication (postgresql.conf),Connection and Authentication,Developer Guide", + "title":"Security and Authentication (postgresql.conf)", + "githuburl":"" + }, + { + "uri":"dws_04_0891.html", + "product_code":"dws", + "code":"638", + "des":"This section describes parameter settings and value ranges for communication libraries.Parameter description: Specifies whether the communication library uses the TCP or ", + "doc_type":"devg", + "kw":"Communication Library Parameters,Connection and Authentication,Developer Guide", + "title":"Communication Library Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0892.html", + "product_code":"dws", + "code":"639", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Resource Consumption", + "title":"Resource Consumption", + "githuburl":"" + }, + { + "uri":"dws_04_0893.html", + "product_code":"dws", + "code":"640", + "des":"This section describes memory parameters.Parameters described in this section take effect only after the database service restarts.Parameter description: Specifies whethe", + "doc_type":"devg", + "kw":"Memory,Resource Consumption,Developer Guide", + "title":"Memory", + "githuburl":"" + }, + { + "uri":"dws_04_0894.html", + "product_code":"dws", + "code":"641", + "des":"This section describes parameters related to statement disk space control, which are used to limit the disk space usage of statements.Parameter description: Specifies the", + "doc_type":"devg", + "kw":"Statement Disk Space Control,Resource Consumption,Developer Guide", + "title":"Statement Disk Space Control", + "githuburl":"" + }, + { + "uri":"dws_04_0895.html", + "product_code":"dws", + "code":"642", + "des":"This section describes kernel resource parameters. Whether these parameters take effect depends on OS settings.Parameter description: Specifies the maximum number of simu", + "doc_type":"devg", + "kw":"Kernel Resources,Resource Consumption,Developer Guide", + "title":"Kernel Resources", + "githuburl":"" + }, + { + "uri":"dws_04_0896.html", + "product_code":"dws", + "code":"643", + "des":"This feature allows administrators to reduce the I/O impact of the VACUUM and ANALYZE statements on concurrent database activities. 
It is often more important to prevent ", + "doc_type":"devg", + "kw":"Cost-based Vacuum Delay,Resource Consumption,Developer Guide", + "title":"Cost-based Vacuum Delay", + "githuburl":"" + }, + { + "uri":"dws_04_0898.html", + "product_code":"dws", + "code":"644", + "des":"Parameter description: Specifies whether O&M personnel are allowed to generate some ADIO logs to locate ADIO issues. This parameter is used only by developers. Common use", + "doc_type":"devg", + "kw":"Asynchronous I/O Operations,Resource Consumption,Developer Guide", + "title":"Asynchronous I/O Operations", + "githuburl":"" + }, + { + "uri":"dws_04_0899.html", + "product_code":"dws", + "code":"645", + "des":"GaussDB(DWS) provides a parallel data import function that enables a large amount of data to be imported in a fast and efficient manner. This section describes parameters", + "doc_type":"devg", + "kw":"Parallel Data Import,GUC Parameters,Developer Guide", + "title":"Parallel Data Import", + "githuburl":"" + }, + { + "uri":"dws_04_0900.html", + "product_code":"dws", + "code":"646", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Write Ahead Logs", + "title":"Write Ahead Logs", + "githuburl":"" + }, + { + "uri":"dws_04_0901.html", + "product_code":"dws", + "code":"647", + "des":"Parameter description: Specifies the level of the information that is written to WALs.Type: POSTMASTERValue range: enumerated valuesminimalAdvantages: Certain bulk operat", + "doc_type":"devg", + "kw":"Settings,Write Ahead Logs,Developer Guide", + "title":"Settings", + "githuburl":"" + }, + { + "uri":"dws_04_0902.html", + "product_code":"dws", + "code":"648", + "des":"Parameter description: Specifies the minimum number of WAL segment files in the period specified by checkpoint_timeout. The size of each log file is 16 MB.Type: SIGHUPVal", + "doc_type":"devg", + "kw":"Checkpoints,Write Ahead Logs,Developer Guide", + "title":"Checkpoints", + "githuburl":"" + }, + { + "uri":"dws_04_0903.html", + "product_code":"dws", + "code":"649", + "des":"Parameter description: When archive_mode is enabled, completed WAL segments are sent to archive storage by setting archive_command.Type: SIGHUPValue range: Booleanon: The", + "doc_type":"devg", + "kw":"Archiving,Write Ahead Logs,Developer Guide", + "title":"Archiving", + "githuburl":"" + }, + { + "uri":"dws_04_0904.html", + "product_code":"dws", + "code":"650", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"HA Replication", + "title":"HA Replication", + "githuburl":"" + }, + { + "uri":"dws_04_0905.html", + "product_code":"dws", + "code":"651", + "des":"Parameter description: Specifies the number of Xlog file segments, that is, the minimum number of transaction log files stored in the pg_xlog directory. 
The standby serv", + "doc_type":"devg", + "kw":"Sending Server,HA Replication,Developer Guide", + "title":"Sending Server", + "githuburl":"" + }, + { + "uri":"dws_04_0906.html", + "product_code":"dws", + "code":"652", + "des":"Parameter description: Specifies the number of transactions by which VACUUM will defer the cleanup of invalid row-store table records, so that VACUUM and VACUUM FULL do n", + "doc_type":"devg", + "kw":"Primary Server,HA Replication,Developer Guide", + "title":"Primary Server", + "githuburl":"" + }, + { + "uri":"dws_04_0908.html", + "product_code":"dws", + "code":"653", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Query Planning", + "title":"Query Planning", + "githuburl":"" + }, + { + "uri":"dws_04_0909.html", + "product_code":"dws", + "code":"654", + "des":"These configuration parameters provide a crude method of influencing the query plans chosen by the query optimizer. If the default plan chosen by the optimizer for a part", + "doc_type":"devg", + "kw":"Optimizer Method Configuration,Query Planning,Developer Guide", + "title":"Optimizer Method Configuration", + "githuburl":"" + }, + { + "uri":"dws_04_0910.html", + "product_code":"dws", + "code":"655", + "des":"This section describes the optimizer cost constants. The cost variables described in this section are measured on an arbitrary scale. Only their relative values matter, t", + "doc_type":"devg", + "kw":"Optimizer Cost Constants,Query Planning,Developer Guide", + "title":"Optimizer Cost Constants", + "githuburl":"" + }, + { + "uri":"dws_04_0911.html", + "product_code":"dws", + "code":"656", + "des":"This section describes parameters related to genetic query optimizer. The genetic query optimizer (GEQO) is an algorithm that plans queries by using heuristic searching. ", + "doc_type":"devg", + "kw":"Genetic Query Optimizer,Query Planning,Developer Guide", + "title":"Genetic Query Optimizer", + "githuburl":"" + }, + { + "uri":"dws_04_0912.html", + "product_code":"dws", + "code":"657", + "des":"Parameter description: Specifies the default statistics target for table columns without a column-specific target set via ALTER TABLE SET STATISTICS. If this parameter is", + "doc_type":"devg", + "kw":"Other Optimizer Options,Query Planning,Developer Guide", + "title":"Other Optimizer Options", + "githuburl":"" + }, + { + "uri":"dws_04_0913.html", + "product_code":"dws", + "code":"658", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Error Reporting and Logging", + "title":"Error Reporting and Logging", + "githuburl":"" + }, + { + "uri":"dws_04_0914.html", + "product_code":"dws", + "code":"659", + "des":"Parameter description: Specifies the writing mode of the log files when logging_collector is set to on.Type: SIGHUPValue range: Booleanon indicates that GaussDB(DWS) over", + "doc_type":"devg", + "kw":"Logging Destination,Error Reporting and Logging,Developer Guide", + "title":"Logging Destination", + "githuburl":"" + }, + { + "uri":"dws_04_0915.html", + "product_code":"dws", + "code":"660", + "des":"Parameter description: Specifies which level of messages are sent to the client. Each level covers all the levels following it. The lower the level is, the fewer messages", + "doc_type":"devg", + "kw":"Logging Time,Error Reporting and Logging,Developer Guide", + "title":"Logging Time", + "githuburl":"" + }, + { + "uri":"dws_04_0916.html", + "product_code":"dws", + "code":"661", + "des":"Parameter description: Specifies whether to print parsing tree results.Type: SIGHUPValue range: Booleanon indicates the printing result function is enabled.off indicates ", + "doc_type":"devg", + "kw":"Logging Content,Error Reporting and Logging,Developer Guide", + "title":"Logging Content", + "githuburl":"" + }, + { + "uri":"dws_04_0918.html", + "product_code":"dws", + "code":"662", + "des":"During cluster running, error scenarios can be detected in a timely manner to inform users as soon as possible.Parameter description: Enables the alarm detection thread t", + "doc_type":"devg", + "kw":"Alarm Detection,GUC Parameters,Developer Guide", + "title":"Alarm Detection", + "githuburl":"" + }, + { + "uri":"dws_04_0919.html", + "product_code":"dws", + "code":"663", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Statistics During the Database Running", + "title":"Statistics During the Database Running", + "githuburl":"" + }, + { + "uri":"dws_04_0920.html", + "product_code":"dws", + "code":"664", + "des":"The query and index statistics collector is used to collect statistics during database running. The statistics include the times of inserting and updating a table and an ", + "doc_type":"devg", + "kw":"Query and Index Statistics Collector,Statistics During the Database Running,Developer Guide", + "title":"Query and Index Statistics Collector", + "githuburl":"" + }, + { + "uri":"dws_04_0921.html", + "product_code":"dws", + "code":"665", + "des":"During the running of the database, the lock access, disk I/O operation, and invalid message process are involved. All these operations are the bottleneck of the database", + "doc_type":"devg", + "kw":"Performance Statistics,Statistics During the Database Running,Developer Guide", + "title":"Performance Statistics", + "githuburl":"" + }, + { + "uri":"dws_04_0922.html", + "product_code":"dws", + "code":"666", + "des":"If database resource usage is not controlled, concurrent tasks easily preempt resources. 
As a result, the OS will be overloaded and cannot respond to user tasks; or even ", + "doc_type":"devg", + "kw":"Workload Management,GUC Parameters,Developer Guide", + "title":"Workload Management", + "githuburl":"" + }, + { + "uri":"dws_04_0923.html", + "product_code":"dws", + "code":"667", + "des":"The automatic cleanup process (autovacuum) in the system automatically runs the VACUUM and ANALYZE commands to recycle the record space marked by the deleted status and u", + "doc_type":"devg", + "kw":"Automatic Cleanup,GUC Parameters,Developer Guide", + "title":"Automatic Cleanup", + "githuburl":"" + }, + { + "uri":"dws_04_0924.html", + "product_code":"dws", + "code":"668", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Default Settings of Client Connection", + "title":"Default Settings of Client Connection", + "githuburl":"" + }, + { + "uri":"dws_04_0925.html", + "product_code":"dws", + "code":"669", + "des":"This section describes related default parameters involved in the execution of SQL statements.Parameter description: Specifies the order in which schemas are searched whe", + "doc_type":"devg", + "kw":"Statement Behavior,Default Settings of Client Connection,Developer Guide", + "title":"Statement Behavior", + "githuburl":"" + }, + { + "uri":"dws_04_0926.html", + "product_code":"dws", + "code":"670", + "des":"This section describes parameters related to the time format setting.Parameter description: Specifies the display format for date and time values, as well as the rules fo", + "doc_type":"devg", + "kw":"Zone and Formatting,Default Settings of Client Connection,Developer Guide", + "title":"Zone and Formatting", + "githuburl":"" + }, + { + "uri":"dws_04_0927.html", + "product_code":"dws", + "code":"671", + "des":"This section describes the default database loading parameters of the database system.Parameter description: Specifies the path for saving the shared database files that ", + "doc_type":"devg", + "kw":"Other Default Parameters,Default Settings of Client Connection,Developer Guide", + "title":"Other Default Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0928.html", + "product_code":"dws", + "code":"672", + "des":"In GaussDB(DWS), a deadlock may occur when concurrently executed transactions compete for resources. This section describes parameters used for managing transaction lock ", + "doc_type":"devg", + "kw":"Lock Management,GUC Parameters,Developer Guide", + "title":"Lock Management", + "githuburl":"" + }, + { + "uri":"dws_04_0929.html", + "product_code":"dws", + "code":"673", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Version and Platform Compatibility", + "title":"Version and Platform Compatibility", + "githuburl":"" + }, + { + "uri":"dws_04_0930.html", + "product_code":"dws", + "code":"674", + "des":"This section describes the parameter control of the downward compatibility and external compatibility features of GaussDB(DWS). 
Backward compatibility of the database sys", + "doc_type":"devg", + "kw":"Compatibility with Earlier Versions,Version and Platform Compatibility,Developer Guide", + "title":"Compatibility with Earlier Versions", + "githuburl":"" + }, + { + "uri":"dws_04_0931.html", + "product_code":"dws", + "code":"675", + "des":"Many platforms use the database system. External compatibility of the database system provides a lot of convenience for platforms.Parameter description: Determines whethe", + "doc_type":"devg", + "kw":"Platform and Client Compatibility,Version and Platform Compatibility,Developer Guide", + "title":"Platform and Client Compatibility", + "githuburl":"" + }, + { + "uri":"dws_04_0932.html", + "product_code":"dws", + "code":"676", + "des":"This section describes parameters used for controlling the methods that the server processes an error occurring in the database system.Parameter description: Specifies wh", + "doc_type":"devg", + "kw":"Fault Tolerance,GUC Parameters,Developer Guide", + "title":"Fault Tolerance", + "githuburl":"" + }, + { + "uri":"dws_04_0933.html", + "product_code":"dws", + "code":"677", + "des":"When a connection pool is used to access the database, database connections are established and then stored in the memory as objects during system running. When you need ", + "doc_type":"devg", + "kw":"Connection Pool Parameters,GUC Parameters,Developer Guide", + "title":"Connection Pool Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0934.html", + "product_code":"dws", + "code":"678", + "des":"This section describes the settings and value ranges of cluster transaction parameters.Parameter description: Specifies the isolation level of the current transaction.Typ", + "doc_type":"devg", + "kw":"Cluster Transaction Parameters,GUC Parameters,Developer Guide", + "title":"Cluster Transaction Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0936.html", + "product_code":"dws", + "code":"679", + "des":"Parameter description: Specifies whether to enable the lightweight column-store update.Type: USERSETValue range: Booleanon indicates that the lightweight column-store upd", + "doc_type":"devg", + "kw":"Developer Operations,GUC Parameters,Developer Guide", + "title":"Developer Operations", + "githuburl":"" + }, + { + "uri":"dws_04_0937.html", + "product_code":"dws", + "code":"680", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Auditing", + "title":"Auditing", + "githuburl":"" + }, + { + "uri":"dws_04_0938.html", + "product_code":"dws", + "code":"681", + "des":"Parameter description: Specifies whether to enable or disable the audit process. After the audit process is enabled, the auditing information written by the background pr", + "doc_type":"devg", + "kw":"Audit Switch,Auditing,Developer Guide", + "title":"Audit Switch", + "githuburl":"" + }, + { + "uri":"dws_04_0940.html", + "product_code":"dws", + "code":"682", + "des":"Parameter description: Specifies whether to audit successful operations in GaussDB(DWS). 
Set this parameter as required.Type: SIGHUPValue range: a stringnone: indicates t", + "doc_type":"devg", + "kw":"Operation Audit,Auditing,Developer Guide", + "title":"Operation Audit", + "githuburl":"" + }, + { + "uri":"dws_04_0941.html", + "product_code":"dws", + "code":"683", + "des":"The automatic rollback transaction can be monitored and its statement problems can be located by setting the transaction timeout warning. In addition, the statements with", + "doc_type":"devg", + "kw":"Transaction Monitoring,GUC Parameters,Developer Guide", + "title":"Transaction Monitoring", + "githuburl":"" + }, + { + "uri":"dws_04_0945.html", + "product_code":"dws", + "code":"684", + "des":"Parameter description: If an SQL statement involves tables belonging to different groups, you can enable this parameter to push the execution plan of the statement to imp", + "doc_type":"devg", + "kw":"Miscellaneous Parameters,GUC Parameters,Developer Guide", + "title":"Miscellaneous Parameters", + "githuburl":"" + }, + { + "uri":"dws_04_0946.html", + "product_code":"dws", + "code":"685", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Glossary,Developer Guide,Developer Guide", + "title":"Glossary", + "githuburl":"" + }, + { + "uri":"dws_04_2000.html", + "product_code":"dws", + "code":"686", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"SQL Syntax Reference", + "title":"SQL Syntax Reference", + "githuburl":"" + }, + { + "uri":"dws_06_0001.html", + "product_code":"dws", + "code":"687", + "des":"SQL is a standard computer language used to control the access to databases and manage data in databases.SQL provides different statements to enable you to:Query data.Ins", + "doc_type":"devg", + "kw":"GaussDB(DWS) SQL,SQL Syntax Reference,Developer Guide", + "title":"GaussDB(DWS) SQL", + "githuburl":"" + }, + { + "uri":"dws_06_0002.html", + "product_code":"dws", + "code":"688", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Differences Between GaussDB(DWS) and PostgreSQL", + "title":"Differences Between GaussDB(DWS) and PostgreSQL", + "githuburl":"" + }, + { + "uri":"dws_06_0003.html", + "product_code":"dws", + "code":"689", + "des":"GaussDB(DWS) gsql differs from PostgreSQL psql in that the former has made the following changes to enhance security:User passwords cannot be set by running the \\password", + "doc_type":"devg", + "kw":"GaussDB(DWS) gsql, PostgreSQL psql, and libpq,Differences Between GaussDB(DWS) and PostgreSQL,Develo", + "title":"GaussDB(DWS) gsql, PostgreSQL psql, and libpq", + "githuburl":"" + }, + { + "uri":"dws_06_0004.html", + "product_code":"dws", + "code":"690", + "des":"For details about supported data types by GaussDB(DWS), see Data Types.The following PostgreSQL data type is not supported:Lines, a geometric typepg_node_tree", + "doc_type":"devg", + "kw":"Data Type Differences,Differences Between GaussDB(DWS) and PostgreSQL,Developer Guide", + "title":"Data Type Differences", + "githuburl":"" + }, + { + "uri":"dws_06_0005.html", + "product_code":"dws", + "code":"691", + "des":"For details about the functions supported by GaussDB(DWS), see Functions and Operators.The following PostgreSQL functions are not supported:Enum support functionsAccess p", + "doc_type":"devg", + "kw":"Function Differences,Differences Between GaussDB(DWS) and PostgreSQL,Developer Guide", + "title":"Function Differences", + "githuburl":"" + }, + { + "uri":"dws_06_0006.html", + "product_code":"dws", + "code":"692", + "des":"Table inheritanceTable creation features:Use REFERENCES reftable [ (refcolumn) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] t", + "doc_type":"devg", + "kw":"PostgreSQL Features Unsupported by GaussDB(DWS),Differences Between GaussDB(DWS) and PostgreSQL,Deve", + "title":"PostgreSQL Features Unsupported by GaussDB(DWS)", + "githuburl":"" + }, + { + "uri":"dws_06_0007.html", + "product_code":"dws", + "code":"693", + "des":"The SQL contains reserved and non-reserved words. Standards require that reserved keywords not be used as other identifiers. Non-reserved keywords have special meanings o", + "doc_type":"devg", + "kw":"Keyword,SQL Syntax Reference,Developer Guide", + "title":"Keyword", + "githuburl":"" + }, + { + "uri":"dws_06_0008.html", + "product_code":"dws", + "code":"694", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Data Types", + "title":"Data Types", + "githuburl":"" + }, + { + "uri":"dws_06_0009.html", + "product_code":"dws", + "code":"695", + "des":"Numeric types consist of two-, four-, and eight-byte integers, four- and eight-byte floating-point numbers, and selectable-precision decimals.For details about numeric op", + "doc_type":"devg", + "kw":"Numeric Types,Data Types,Developer Guide", + "title":"Numeric Types", + "githuburl":"" + }, + { + "uri":"dws_06_0010.html", + "product_code":"dws", + "code":"696", + "des":"The money type stores a currency amount with fixed fractional precision. The range shown in Table 1 assumes there are two fractional digits. 
Input is accepted in a variet", + "doc_type":"devg", + "kw":"Monetary Types,Data Types,Developer Guide", + "title":"Monetary Types", + "githuburl":"" + }, + { + "uri":"dws_06_0011.html", + "product_code":"dws", + "code":"697", + "des":"Valid literal values for the \"true\" state are:TRUE, 't', 'true', 'y', 'yes', '1'Valid literal values for the \"false\" state include:FALSE, 'f', 'false', 'n', 'no', '0'TRUE", + "doc_type":"devg", + "kw":"Boolean Type,Data Types,Developer Guide", + "title":"Boolean Type", + "githuburl":"" + }, + { + "uri":"dws_06_0012.html", + "product_code":"dws", + "code":"698", + "des":"Table 1 lists the character types that can be used in GaussDB(DWS). For string operators and related built-in functions, see Character Processing Functions and Operators.", + "doc_type":"devg", + "kw":"Character Types,Data Types,Developer Guide", + "title":"Character Types", + "githuburl":"" + }, + { + "uri":"dws_06_0013.html", + "product_code":"dws", + "code":"699", + "des":"Table 1 lists the binary data types that can be used in GaussDB(DWS).In addition to the size limitation on each column, the total size of each tuple is 8203 bytes less th", + "doc_type":"devg", + "kw":"Binary Data Types,Data Types,Developer Guide", + "title":"Binary Data Types", + "githuburl":"" + }, + { + "uri":"dws_06_0014.html", + "product_code":"dws", + "code":"700", + "des":"Table 1 lists date and time types supported by GaussDB(DWS). For the operators and built-in functions of the types, see Date and Time Processing Functions and Operators.I", + "doc_type":"devg", + "kw":"Date/Time Types,Data Types,Developer Guide", + "title":"Date/Time Types", + "githuburl":"" + }, + { + "uri":"dws_06_0015.html", + "product_code":"dws", + "code":"701", + "des":"Table 1 lists the geometric types that can be used in GaussDB(DWS). The most fundamental type, the point, forms the basis for all of the other types.A rich set of functio", + "doc_type":"devg", + "kw":"Geometric Types,Data Types,Developer Guide", + "title":"Geometric Types", + "githuburl":"" + }, + { + "uri":"dws_06_0016.html", + "product_code":"dws", + "code":"702", + "des":"GaussDB(DWS) offers data types to store IPv4, IPv6, and MAC addresses.It is better to use network address types instead of plaintext types to store IPv4, IPv6, and MAC ad", + "doc_type":"devg", + "kw":"Network Address Types,Data Types,Developer Guide", + "title":"Network Address Types", + "githuburl":"" + }, + { + "uri":"dws_06_0017.html", + "product_code":"dws", + "code":"703", + "des":"Bit strings are strings of 1's and 0's. They can be used to store bit masks.GaussDB(DWS) supports two SQL bit types: bit(n) and bit varying(n), where n is a positive inte", + "doc_type":"devg", + "kw":"Bit String Types,Data Types,Developer Guide", + "title":"Bit String Types", + "githuburl":"" + }, + { + "uri":"dws_06_0018.html", + "product_code":"dws", + "code":"704", + "des":"GaussDB(DWS) offers two data types that are designed to support full text search. The tsvector type represents a document in a form optimized for text search. The tsquery", + "doc_type":"devg", + "kw":"Text Search Types,Data Types,Developer Guide", + "title":"Text Search Types", + "githuburl":"" + }, + { + "uri":"dws_06_0019.html", + "product_code":"dws", + "code":"705", + "des":"The data type UUID stores Universally Unique Identifiers (UUID) as defined by RFC 4122, ISO/IEC 9834-8:2005, and related standards. 
This identifier is a 128-bit quantity ", + "doc_type":"devg", + "kw":"UUID Type,Data Types,Developer Guide", + "title":"UUID Type", + "githuburl":"" + }, + { + "uri":"dws_06_0020.html", + "product_code":"dws", + "code":"706", + "des":"JSON data types are for storing JavaScript Object Notation (JSON) data. Such data can also be stored as TEXT, but the JSON data type has the advantage of checking that ea", + "doc_type":"devg", + "kw":"JSON Types,Data Types,Developer Guide", + "title":"JSON Types", + "githuburl":"" + }, + { + "uri":"dws_06_0021.html", + "product_code":"dws", + "code":"707", + "des":"HyperLoglog (HLL) is an approximation algorithm for efficiently counting the number of distinct values in a data set. It features faster computing and lower space usage. ", + "doc_type":"devg", + "kw":"HLL Data Types,Data Types,Developer Guide", + "title":"HLL Data Types", + "githuburl":"" + }, + { + "uri":"dws_06_0022.html", + "product_code":"dws", + "code":"708", + "des":"Object identifiers (OIDs) are used internally by GaussDB(DWS) as primary keys for various system catalogs. OIDs are not added to user-created tables by the system. The OI", + "doc_type":"devg", + "kw":"Object Identifier Types,Data Types,Developer Guide", + "title":"Object Identifier Types", + "githuburl":"" + }, + { + "uri":"dws_06_0023.html", + "product_code":"dws", + "code":"709", + "des":"GaussDB(DWS) has a number of special-purpose entries that are collectively called pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to ", + "doc_type":"devg", + "kw":"Pseudo-Types,Data Types,Developer Guide", + "title":"Pseudo-Types", + "githuburl":"" + }, + { + "uri":"dws_06_0024.html", + "product_code":"dws", + "code":"710", + "des":"Table 1 lists the data types supported by column-store tables.", + "doc_type":"devg", + "kw":"Data Types Supported by Column-Store Tables,Data Types,Developer Guide", + "title":"Data Types Supported by Column-Store Tables", + "githuburl":"" + }, + { + "uri":"dws_06_0025.html", + "product_code":"dws", + "code":"711", + "des":"XML data type stores Extensible Markup Language (XML) formatted data. Such data can also be stored as text, but the advantage of the XML data type is that it checks wheth", + "doc_type":"devg", + "kw":"XML,Data Types,Developer Guide", + "title":"XML", + "githuburl":"" + }, + { + "uri":"dws_06_0026.html", + "product_code":"dws", + "code":"712", + "des":"Table 1 lists the constants and macros that can be used in GaussDB(DWS).", + "doc_type":"devg", + "kw":"Constant and Macro,SQL Syntax Reference,Developer Guide", + "title":"Constant and Macro", + "githuburl":"" + }, + { + "uri":"dws_06_0027.html", + "product_code":"dws", + "code":"713", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Functions and Operators", + "title":"Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0028.html", + "product_code":"dws", + "code":"714", + "des":"The usual logical operators include AND, OR, and NOT. SQL uses a three-valued logical system with true, false, and null, which represents \"unknown\". 
Their priorities are ", + "doc_type":"devg", + "kw":"Logical Operators,Functions and Operators,Developer Guide", + "title":"Logical Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0029.html", + "product_code":"dws", + "code":"715", + "des":"Comparison operators are available for all data types and return Boolean values.All comparison operators are binary operators. Only data types that are the same or can be", + "doc_type":"devg", + "kw":"Comparison Operators,Functions and Operators,Developer Guide", + "title":"Comparison Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0030.html", + "product_code":"dws", + "code":"716", + "des":"String functions and operators provided by GaussDB(DWS) are for concatenating strings with each other, concatenating strings with non-strings, and matching the patterns o", + "doc_type":"devg", + "kw":"Character Processing Functions and Operators,Functions and Operators,Developer Guide", + "title":"Character Processing Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0031.html", + "product_code":"dws", + "code":"717", + "des":"SQL defines some string functions that use keywords, rather than commas, to separate arguments.octet_length(string)Description: Number of bytes in binary stringReturn typ", + "doc_type":"devg", + "kw":"Binary String Functions and Operators,Functions and Operators,Developer Guide", + "title":"Binary String Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0032.html", + "product_code":"dws", + "code":"718", + "des":"Aside from the usual comparison operators, the following operators can be used. Bit string operands of &, |, and # must be of equal length. When bit shifting, the origina", + "doc_type":"devg", + "kw":"Bit String Functions and Operators,Functions and Operators,Developer Guide", + "title":"Bit String Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0033.html", + "product_code":"dws", + "code":"719", + "des":"There are three separate approaches to pattern matching provided by the database: the traditional SQL LIKE operator, the more recent SIMILAR TO operator, and POSIX-style ", + "doc_type":"devg", + "kw":"Pattern Matching Operators,Functions and Operators,Developer Guide", + "title":"Pattern Matching Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0034.html", + "product_code":"dws", + "code":"720", + "des":"+Description: AdditionFor example:SELECT 2+3 AS RESULT;\n result \n--------\n 5\n(1 row)Description: AdditionFor example:-Description: SubtractionFor example:SELECT 2-3 ", + "doc_type":"devg", + "kw":"Mathematical Functions and Operators,Functions and Operators,Developer Guide", + "title":"Mathematical Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0035.html", + "product_code":"dws", + "code":"721", + "des":"When the user uses date/time operators, explicit type prefixes are modified for corresponding operands to ensure that the operands parsed by the database are consistent w", + "doc_type":"devg", + "kw":"Date and Time Processing Functions and Operators,Functions and Operators,Developer Guide", + "title":"Date and Time Processing Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0036.html", + "product_code":"dws", + "code":"722", + "des":"cast(x as y)Description: Converts x into the type specified by y.For example:SELECT cast('22-oct-1997' as timestamp);\n timestamp \n---------------------\n 1997-10", + "doc_type":"devg", + "kw":"Type Conversion Functions,Functions and Operators,Developer Guide", + "title":"Type 
Conversion Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0037.html", + "product_code":"dws", + "code":"723", + "des":"+Description: TranslationFor example:SELECT box '((0,0),(1,1))' + point '(2.0,0)' AS RESULT;\n result \n-------------\n (3,1),(2,0)\n(1 row)Description: TranslationFor e", + "doc_type":"devg", + "kw":"Geometric Functions and Operators,Functions and Operators,Developer Guide", + "title":"Geometric Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0038.html", + "product_code":"dws", + "code":"724", + "des":"The operators <<, <<=, >>, and >>= test for subnet inclusion. They consider only the network parts of the two addresses (ignoring any host part) and determine whether one", + "doc_type":"devg", + "kw":"Network Address Functions and Operators,Functions and Operators,Developer Guide", + "title":"Network Address Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0039.html", + "product_code":"dws", + "code":"725", + "des":"@@Description: Specifies whether the tsvector-typed words match the tsquery-typed words.For example:SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') AS ", + "doc_type":"devg", + "kw":"Text Search Functions and Operators,Functions and Operators,Developer Guide", + "title":"Text Search Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0040.html", + "product_code":"dws", + "code":"726", + "des":"UUID functions are used to generate UUID data (see UUID Type).uuid_generate_v1()Description: Generates a UUID sequence number.Return type: UUIDExample:SELECT uuid_generat", + "doc_type":"devg", + "kw":"UUID Functions,Functions and Operators,Developer Guide", + "title":"UUID Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0041.html", + "product_code":"dws", + "code":"727", + "des":"JSON functions are used to generate JSON data (see JSON Types).array_to_json(anyarray [, pretty_bool])Description: Returns the array as JSON. 
A multi-dimensional array be", + "doc_type":"devg", + "kw":"JSON Functions,Functions and Operators,Developer Guide", + "title":"JSON Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0042.html", + "product_code":"dws", + "code":"728", + "des":"hll_hash_boolean(bool)Description: Hashes data of the bool type.Return type: hll_hashvalFor example:SELECT hll_hash_boolean(FALSE);\n hll_hash_boolean \n----------------", + "doc_type":"devg", + "kw":"HLL Functions and Operators,Functions and Operators,Developer Guide", + "title":"HLL Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0043.html", + "product_code":"dws", + "code":"729", + "des":"The sequence functions provide simple, multiuser-safe methods for obtaining sequence values from sequence objects.The hybrid data warehouse (s", + "doc_type":"devg", + "kw":"SEQUENCE Functions,Functions and Operators,Developer Guide", + "title":"SEQUENCE Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0044.html", + "product_code":"dws", + "code":"730", + "des":"=Description: Specifies whether two arrays are equal.For example:SELECT ARRAY[1.1,2.1,3.1]::int[] = ARRAY[1,2,3] AS RESULT ;\n result \n--------\n t\n(1 row)Description: Spec", + "doc_type":"devg", + "kw":"Array Functions and Operators,Functions and Operators,Developer Guide", + "title":"Array Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0045.html", + "product_code":"dws", + "code":"731", + "des":"=Description: EqualsFor example:SELECT int4range(1,5) = '[1,4]'::int4range AS RESULT;\n result\n--------\n t\n(1 row)Description: EqualsFor example:<>Description: Does not eq", + "doc_type":"devg", + "kw":"Range Functions and Operators,Functions and Operators,Developer Guide", + "title":"Range Functions and Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0046.html", + "product_code":"dws", + "code":"732", + "des":"sum(expression)Description: Sum of expression across all input valuesReturn type:Generally, same as the argument data type. In the following cases, type conversion occurs", + "doc_type":"devg", + "kw":"Aggregate Functions,Functions and Operators,Developer Guide", + "title":"Aggregate Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0047.html", + "product_code":"dws", + "code":"733", + "des":"Regular aggregate functions return a single value calculated from values in a row, or group all rows into a single output row. Window functions perform a calculation acro", + "doc_type":"devg", + "kw":"Window Functions,Functions and Operators,Developer Guide", + "title":"Window Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0048.html", + "product_code":"dws", + "code":"734", + "des":"gs_password_deadline()Description: Indicates the number of remaining days before the password of the current user expires. 
After the password expires, the system prompts ", + "doc_type":"devg", + "kw":"Security Functions,Functions and Operators,Developer Guide", + "title":"Security Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0049.html", + "product_code":"dws", + "code":"735", + "des":"generate_series(start, stop)Description: Generates a series of values, from start to stop with a step size of one.Parameter type: int, bigint, or numericReturn type: seto", + "doc_type":"devg", + "kw":"Set Returning Functions,Functions and Operators,Developer Guide", + "title":"Set Returning Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0050.html", + "product_code":"dws", + "code":"736", + "des":"coalesce(expr1, expr2, ..., exprn)Description: Returns the first argument that is not NULL in the argument list.COALESCE(expr1, expr2) is equivalent to CASE WHEN expr1 IS", + "doc_type":"devg", + "kw":"Conditional Expression Functions,Functions and Operators,Developer Guide", + "title":"Conditional Expression Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0051.html", + "product_code":"dws", + "code":"737", + "des":"current_catalogDescription: Name of the current database (called \"catalog\" in the SQL standard)Return type: nameFor example:SELECT current_catalog;\n current_database\n----", + "doc_type":"devg", + "kw":"System Information Functions,Functions and Operators,Developer Guide", + "title":"System Information Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0052.html", + "product_code":"dws", + "code":"738", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"System Administration Functions", + "title":"System Administration Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0053.html", + "product_code":"dws", + "code":"739", + "des":"Configuration setting functions are used for querying and modifying configuration parameters during running.current_setting(setting_name)Description: Specifies the curren", + "doc_type":"devg", + "kw":"Configuration Settings Functions,System Administration Functions,Developer Guide", + "title":"Configuration Settings Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0054.html", + "product_code":"dws", + "code":"740", + "des":"Universal file access functions provide local access interfaces for files on a database server. Only files in the database cluster directory and the log_directory directo", + "doc_type":"devg", + "kw":"Universal File Access Functions,System Administration Functions,Developer Guide", + "title":"Universal File Access Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0055.html", + "product_code":"dws", + "code":"741", + "des":"Server signaling functions send control signals to other server processes. 
Only system administrators can use these functions.pg_cancel_backend(pid int)Description: Cance", + "doc_type":"devg", + "kw":"Server Signaling Functions,System Administration Functions,Developer Guide", + "title":"Server Signaling Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0056.html", + "product_code":"dws", + "code":"742", + "des":"Backup control functions help online backup.pg_create_restore_point(name text)Description: Creates a named point for performing the restore operation (restricted to syste", + "doc_type":"devg", + "kw":"Backup and Restoration Control Functions,System Administration Functions,Developer Guide", + "title":"Backup and Restoration Control Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0057.html", + "product_code":"dws", + "code":"743", + "des":"Snapshot synchronization functions save the current snapshot and return its identifier.pg_export_snapshot()Description: Saves the current snapshot and returns its identif", + "doc_type":"devg", + "kw":"Snapshot Synchronization Functions,System Administration Functions,Developer Guide", + "title":"Snapshot Synchronization Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0058.html", + "product_code":"dws", + "code":"744", + "des":"Database object size functions calculate the actual disk space used by database objects.pg_column_size(any)Description: Specifies the number of bytes used to store a part", + "doc_type":"devg", + "kw":"Database Object Functions,System Administration Functions,Developer Guide", + "title":"Database Object Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0059.html", + "product_code":"dws", + "code":"745", + "des":"Advisory lock functions manage advisory locks. These functions are only for internal use currently.pg_advisory_lock(key bigint)Description: Obtains an exclusive session-l", + "doc_type":"devg", + "kw":"Advisory Lock Functions,System Administration Functions,Developer Guide", + "title":"Advisory Lock Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0060.html", + "product_code":"dws", + "code":"746", + "des":"pg_get_residualfiles()Description: Obtains all residual file records of the current node. This function is an instance-level function and is irrelevant to the current dat", + "doc_type":"devg", + "kw":"Residual File Management Functions,System Administration Functions,Developer Guide", + "title":"Residual File Management Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0061.html", + "product_code":"dws", + "code":"747", + "des":"A replication function synchronizes logs and data between instances. 
It is a statistics or operation method provided by the system to implement HA.Replication functions e", + "doc_type":"devg", + "kw":"Replication Functions,System Administration Functions,Developer Guide", + "title":"Replication Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0062.html", + "product_code":"dws", + "code":"748", + "des":"pgxc_pool_check()Description: Checks whether the connection data buffered in the pool is consistent with pgxc_node.Return type: booleanDescription: Checks whether the con", + "doc_type":"devg", + "kw":"Other Functions,System Administration Functions,Developer Guide", + "title":"Other Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0063.html", + "product_code":"dws", + "code":"749", + "des":"This section describes the functions of the resource management module.gs_wlm_readjust_user_space(oid)Description: This function calibrates the permanent storage space of", + "doc_type":"devg", + "kw":"Resource Management Functions,System Administration Functions,Developer Guide", + "title":"Resource Management Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0064.html", + "product_code":"dws", + "code":"750", + "des":"Data redaction functions are used to mask and protect sensitive data. Generally, you are advised to bind these functions to the columns to be redacted based on the data r", + "doc_type":"devg", + "kw":"Data Redaction Functions,Functions and Operators,Developer Guide", + "title":"Data Redaction Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0065.html", + "product_code":"dws", + "code":"751", + "des":"Statistics information functions are divided into the following two categories: functions that access databases, using the OID of each table or index in a database to mar", + "doc_type":"devg", + "kw":"Statistics Information Functions,Functions and Operators,Developer Guide", + "title":"Statistics Information Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0066.html", + "product_code":"dws", + "code":"752", + "des":"pg_get_triggerdef(oid)Description: Obtains the definition information of a trigger.Parameter: OID of the trigger to be queriedReturn type: textExample:select pg_get_trigg", + "doc_type":"devg", + "kw":"Trigger Functions,Functions and Operators,Developer Guide", + "title":"Trigger Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0067.html", + "product_code":"dws", + "code":"753", + "des":"XMLPARSE ( { DOCUMENT | CONTENT } value)Description: Generates an XML value from character data.Return type: XMLExample:XMLSERIALIZE ( { DOCUMENT | CONTENT } value AS typ", + "doc_type":"devg", + "kw":"XML Functions,Functions and Operators,Developer Guide", + "title":"XML Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0068.html", + "product_code":"dws", + "code":"754", + "des":"The pv_memory_profiling(type int) and environment variable MALLOC_CONF are used by GaussDB(DWS) to control the enabling and disabling of the memory allocation call stack ", + "doc_type":"devg", + "kw":"Call Stack Recording Functions,Functions and Operators,Developer Guide", + "title":"Call Stack Recording Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0069.html", + "product_code":"dws", + "code":"755", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Expressions", + "title":"Expressions", + "githuburl":"" + }, + { + "uri":"dws_06_0070.html", + "product_code":"dws", + "code":"756", + "des":"Logical Operators lists the operators and calculation rules of logical expressions.Comparison Operators lists the common comparative operators.In addition to comparative ", + "doc_type":"devg", + "kw":"Simple Expressions,Expressions,Developer Guide", + "title":"Simple Expressions", + "githuburl":"" + }, + { + "uri":"dws_06_0071.html", + "product_code":"dws", + "code":"757", + "des":"Data that meets the requirements specified by conditional expressions is filtered during SQL statement execution.Conditional expressions include the following types:CASE", + "doc_type":"devg", + "kw":"Conditional Expressions,Expressions,Developer Guide", + "title":"Conditional Expressions", + "githuburl":"" + }, + { + "uri":"dws_06_0072.html", + "product_code":"dws", + "code":"758", + "des":"Subquery expressions include the following types:EXISTS/NOT EXISTSFigure 1 shows the syntax of an EXISTS/NOT EXISTS expression.EXISTS/NOT EXISTS::=The parameter of an EXI", + "doc_type":"devg", + "kw":"Subquery Expressions,Expressions,Developer Guide", + "title":"Subquery Expressions", + "githuburl":"" + }, + { + "uri":"dws_06_0073.html", + "product_code":"dws", + "code":"759", + "des":"expressionIN(value [, ...])The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list.", + "doc_type":"devg", + "kw":"Array Expressions,Expressions,Developer Guide", + "title":"Array Expressions", + "githuburl":"" + }, + { + "uri":"dws_06_0074.html", + "product_code":"dws", + "code":"760", + "des":"Syntax:row_constructor operator row_constructorBoth sides of the row expression are row constructors. The values of both rows must have the same number of fields and they", + "doc_type":"devg", + "kw":"Row Expressions,Expressions,Developer Guide", + "title":"Row Expressions", + "githuburl":"" + }, + { + "uri":"dws_06_0075.html", + "product_code":"dws", + "code":"761", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Type Conversion", + "title":"Type Conversion", + "githuburl":"" + }, + { + "uri":"dws_06_0076.html", + "product_code":"dws", + "code":"762", + "des":"SQL is a typed language. That is, every data item has an associated data type which determines its behavior and allowed usage. GaussDB(DWS) has an extensible type system ", + "doc_type":"devg", + "kw":"Overview,Type Conversion,Developer Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0077.html", + "product_code":"dws", + "code":"763", + "des":"Select the operators to be considered from the pg_operator system catalog. Considered operators are those with the matching name and argument count. If the search path fi", + "doc_type":"devg", + "kw":"Operators,Type Conversion,Developer Guide", + "title":"Operators", + "githuburl":"" + }, + { + "uri":"dws_06_0078.html", + "product_code":"dws", + "code":"764", + "des":"Select the functions to be considered from the pg_proc system catalog. 
If a non-schema-qualified function name was used, the functions in the current search path are cons", + "doc_type":"devg", + "kw":"Functions,Type Conversion,Developer Guide", + "title":"Functions", + "githuburl":"" + }, + { + "uri":"dws_06_0079.html", + "product_code":"dws", + "code":"765", + "des":"Search for an exact match with the target column.Try to convert the expression to the target type. This will succeed if there is a registered cast between the two types. ", + "doc_type":"devg", + "kw":"Value Storage,Type Conversion,Developer Guide", + "title":"Value Storage", + "githuburl":"" + }, + { + "uri":"dws_06_0080.html", + "product_code":"dws", + "code":"766", + "des":"SQL UNION constructs must match up possibly dissimilar types to become a single result set. Since all query results from a SELECT UNION statement must appear in a single ", + "doc_type":"devg", + "kw":"UNION, CASE, and Related Constructs,Type Conversion,Developer Guide", + "title":"UNION, CASE, and Related Constructs", + "githuburl":"" + }, + { + "uri":"dws_06_0081.html", + "product_code":"dws", + "code":"767", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Full Text Search", + "title":"Full Text Search", + "githuburl":"" + }, + { + "uri":"dws_06_0082.html", + "product_code":"dws", + "code":"768", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Introduction", + "title":"Introduction", + "githuburl":"" + }, + { + "uri":"dws_06_0083.html", + "product_code":"dws", + "code":"769", + "des":"Textual search operators have been used in databases for years. GaussDB(DWS) has ~, ~*, LIKE, and ILIKE operators for textual data types, but they lack many essential pro", + "doc_type":"devg", + "kw":"Full-Text Retrieval,Introduction,Developer Guide", + "title":"Full-Text Retrieval", + "githuburl":"" + }, + { + "uri":"dws_06_0084.html", + "product_code":"dws", + "code":"770", + "des":"A document is the unit of searching in a full text search system; for example, a magazine article or email message. The text search engine must be able to parse documents", + "doc_type":"devg", + "kw":"What Is a Document?,Introduction,Developer Guide", + "title":"What Is a Document?", + "githuburl":"" + }, + { + "uri":"dws_06_0085.html", + "product_code":"dws", + "code":"771", + "des":"Full text search in GaussDB(DWS) is based on the match operator @@, which returns true if a tsvector (document) matches a tsquery (query). 
It does not matter which data t", + "doc_type":"devg", + "kw":"Basic Text Matching,Introduction,Developer Guide", + "title":"Basic Text Matching", + "githuburl":"" + }, + { + "uri":"dws_06_0086.html", + "product_code":"dws", + "code":"772", + "des":"Full text search functionality includes the ability to do many more things: skip indexing certain words (stop words), process synonyms, and use sophisticated parsing, for", + "doc_type":"devg", + "kw":"Configurations,Introduction,Developer Guide", + "title":"Configurations", + "githuburl":"" + }, + { + "uri":"dws_06_0087.html", + "product_code":"dws", + "code":"773", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Table and index", + "title":"Table and index", + "githuburl":"" + }, + { + "uri":"dws_06_0088.html", + "product_code":"dws", + "code":"774", + "des":"It is possible to do a full text search without an index.A simple query to print each row that contains the word science in its body column is as follows:DROP SCHEMA IF E", + "doc_type":"devg", + "kw":"Searching a Table,Table and index,Developer Guide", + "title":"Searching a Table", + "githuburl":"" + }, + { + "uri":"dws_06_0089.html", + "product_code":"dws", + "code":"775", + "des":"You can create a GIN index to speed up text searches:The to_tsvector() function accepts one or two arguments.If the one-argument version of the index is used, the system wi", + "doc_type":"devg", + "kw":"Creating an Index,Table and index,Developer Guide", + "title":"Creating an Index", + "githuburl":"" + }, + { + "uri":"dws_06_0090.html", + "product_code":"dws", + "code":"776", + "des":"The following is an example of using an index. Run the following statements in a database that uses the UTF-8 or GBK encoding:In this example, table1 has two GIN indexes ", + "doc_type":"devg", + "kw":"Constraints on Index Use,Table and index,Developer Guide", + "title":"Constraints on Index Use", + "githuburl":"" + }, + { + "uri":"dws_06_0091.html", + "product_code":"dws", + "code":"777", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Controlling Text Search", + "title":"Controlling Text Search", + "githuburl":"" + }, + { + "uri":"dws_06_0092.html", + "product_code":"dws", + "code":"778", + "des":"GaussDB(DWS) provides the function to_tsvector for converting a document to the tsvector data type.to_tsvector parses a textual document into tokens, reduces the tokens to le", + "doc_type":"devg", + "kw":"Parsing Documents,Controlling Text Search,Developer Guide", + "title":"Parsing Documents", + "githuburl":"" + }, + { + "uri":"dws_06_0093.html", + "product_code":"dws", + "code":"779", + "des":"GaussDB(DWS) provides functions to_tsquery and plainto_tsquery for converting a query to the tsquery data type. 
to_tsquery offers access to more features than plainto_tsq", + "doc_type":"devg", + "kw":"Parsing Queries,Controlling Text Search,Developer Guide", + "title":"Parsing Queries", + "githuburl":"" + }, + { + "uri":"dws_06_0094.html", + "product_code":"dws", + "code":"780", + "des":"Ranking attempts to measure how relevant documents are to a particular query, so that when there are many matches the most relevant ones can be shown first. GaussDB(DWS) ", + "doc_type":"devg", + "kw":"Ranking Search Results,Controlling Text Search,Developer Guide", + "title":"Ranking Search Results", + "githuburl":"" + }, + { + "uri":"dws_06_0095.html", + "product_code":"dws", + "code":"781", + "des":"To present search results it is ideal to show a part of each document and how it is related to the query. Usually, search engines show fragments of the document with mark", + "doc_type":"devg", + "kw":"Highlighting Results,Controlling Text Search,Developer Guide", + "title":"Highlighting Results", + "githuburl":"" + }, + { + "uri":"dws_06_0096.html", + "product_code":"dws", + "code":"782", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Additional Features", + "title":"Additional Features", + "githuburl":"" + }, + { + "uri":"dws_06_0097.html", + "product_code":"dws", + "code":"783", + "des":"GaussDB(DWS) provides functions and operators that can be used to manipulate documents that are already in tsvector type.tsvector || tsvectorThe tsvector concatenation op", + "doc_type":"devg", + "kw":"Manipulating tsvector,Additional Features,Developer Guide", + "title":"Manipulating tsvector", + "githuburl":"" + }, + { + "uri":"dws_06_0098.html", + "product_code":"dws", + "code":"784", + "des":"GaussDB(DWS) provides functions and operators that can be used to manipulate queries that are already in tsquery type.tsquery && tsqueryReturns the AND-combination of the", + "doc_type":"devg", + "kw":"Manipulating Queries,Additional Features,Developer Guide", + "title":"Manipulating Queries", + "githuburl":"" + }, + { + "uri":"dws_06_0099.html", + "product_code":"dws", + "code":"785", + "des":"The ts_rewrite family of functions searches a given tsquery for occurrences of a target subquery, and replaces each occurrence with a substitute subquery. 
In essence this ", + "doc_type":"devg", + "kw":"Rewriting Queries,Additional Features,Developer Guide", + "title":"Rewriting Queries", + "githuburl":"" + }, + { + "uri":"dws_06_0100.html", + "product_code":"dws", + "code":"786", + "des":"The function ts_stat is useful for checking your configuration and for finding stop-word candidates.sqlquery is a text value containing an SQL query which must return a s", + "doc_type":"devg", + "kw":"Gathering Document Statistics,Additional Features,Developer Guide", + "title":"Gathering Document Statistics", + "githuburl":"" + }, + { + "uri":"dws_06_0101.html", + "product_code":"dws", + "code":"787", + "des":"Text search parsers are responsible for splitting raw document text into tokens and identifying each token's type, where the set of types is defined by the parser itself.", + "doc_type":"devg", + "kw":"Parsers,Full Text Search,Developer Guide", + "title":"Parsers", + "githuburl":"" + }, + { + "uri":"dws_06_0102.html", + "product_code":"dws", + "code":"788", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Dictionaries", + "title":"Dictionaries", + "githuburl":"" + }, + { + "uri":"dws_06_0103.html", + "product_code":"dws", + "code":"789", + "des":"A dictionary is used to define stop words, that is, words to be ignored in full-text retrieval.A dictionary can also be used to normalize words so that different derived ", + "doc_type":"devg", + "kw":"Overview,Dictionaries,Developer Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0104.html", + "product_code":"dws", + "code":"790", + "des":"Stop words are words that are very common, appear in almost every document, and have no discrimination value. Therefore, they can be ignored in the context of full text s", + "doc_type":"devg", + "kw":"Stop Words,Dictionaries,Developer Guide", + "title":"Stop Words", + "githuburl":"" + }, + { + "uri":"dws_06_0105.html", + "product_code":"dws", + "code":"791", + "des":"A Simple dictionary operates by converting the input token to lower case and checking it against a list of stop words. If the token is found in the list, an empty array w", + "doc_type":"devg", + "kw":"Simple Dictionary,Dictionaries,Developer Guide", + "title":"Simple Dictionary", + "githuburl":"" + }, + { + "uri":"dws_06_0106.html", + "product_code":"dws", + "code":"792", + "des":"A synonym dictionary is used to define, identify, and convert synonyms of tokens. Phrases are not supported (use the thesaurus dictionary in Thesaurus Dictionary).A synon", + "doc_type":"devg", + "kw":"Synonym Dictionary,Dictionaries,Developer Guide", + "title":"Synonym Dictionary", + "githuburl":"" + }, + { + "uri":"dws_06_0107.html", + "product_code":"dws", + "code":"793", + "des":"A thesaurus dictionary (sometimes abbreviated as TZ) is a collection of words that include relationships between words and phrases, such as broader terms (BT), narrower t", + "doc_type":"devg", + "kw":"Thesaurus Dictionary,Dictionaries,Developer Guide", + "title":"Thesaurus Dictionary", + "githuburl":"" + }, + { + "uri":"dws_06_0108.html", + "product_code":"dws", + "code":"794", + "des":"The Ispell dictionary template supports morphological dictionaries, which can normalize many different linguistic forms of a word into the same lexeme. 
For example, an En", + "doc_type":"devg", + "kw":"Ispell Dictionary,Dictionaries,Developer Guide", + "title":"Ispell Dictionary", + "githuburl":"" + }, + { + "uri":"dws_06_0109.html", + "product_code":"dws", + "code":"795", + "des":"A Snowball dictionary is based on a project by Martin Porter and is used for stem analysis, providing stemming algorithms for many languages. GaussDB(DWS) provides predef", + "doc_type":"devg", + "kw":"Snowball Dictionary,Dictionaries,Developer Guide", + "title":"Snowball Dictionary", + "githuburl":"" + }, + { + "uri":"dws_06_0110.html", + "product_code":"dws", + "code":"796", + "des":"Text search configuration specifies the following components required for converting a document into a tsvector:A parser, decomposes a text into tokens.Dictionary list, c", + "doc_type":"devg", + "kw":"Configuration Examples,Full Text Search,Developer Guide", + "title":"Configuration Examples", + "githuburl":"" + }, + { + "uri":"dws_06_0111.html", + "product_code":"dws", + "code":"797", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Testing and Debugging Text Search", + "title":"Testing and Debugging Text Search", + "githuburl":"" + }, + { + "uri":"dws_06_0112.html", + "product_code":"dws", + "code":"798", + "des":"The function ts_debug allows easy testing of a text search configuration.ts_debug displays information about every token of document as produced by the parser and process", + "doc_type":"devg", + "kw":"Testing a Configuration,Testing and Debugging Text Search,Developer Guide", + "title":"Testing a Configuration", + "githuburl":"" + }, + { + "uri":"dws_06_0113.html", + "product_code":"dws", + "code":"799", + "des":"The ts_parse function allows direct testing of a text search parser.ts_parse parses the given document and returns a series of records, one for each token produced by par", + "doc_type":"devg", + "kw":"Testing a Parser,Testing and Debugging Text Search,Developer Guide", + "title":"Testing a Parser", + "githuburl":"" + }, + { + "uri":"dws_06_0114.html", + "product_code":"dws", + "code":"800", + "des":"The ts_lexize function facilitates dictionary testing.ts_lexize(dict regdictionary, token text) returns text[] ts_lexize returns an array of lexemes if the input token is", + "doc_type":"devg", + "kw":"Testing a Dictionary,Testing and Debugging Text Search,Developer Guide", + "title":"Testing a Dictionary", + "githuburl":"" + }, + { + "uri":"dws_06_0115.html", + "product_code":"dws", + "code":"801", + "des":"The current limitations of GaussDB(DWS)'s full text search are:The length of each lexeme must be less than 2 KB.The length of a tsvector (lexemes + positions) must be les", + "doc_type":"devg", + "kw":"Limitations,Full Text Search,Developer Guide", + "title":"Limitations", + "githuburl":"" + }, + { + "uri":"dws_06_0116.html", + "product_code":"dws", + "code":"802", + "des":"GaussDB(DWS) runs SQL statements to perform different system operations, such as setting variables, displaying the execution plan, and collecting garbage data.For details", + "doc_type":"devg", + "kw":"System Operation,SQL Syntax Reference,Developer Guide", + "title":"System Operation", + "githuburl":"" + }, + { + "uri":"dws_06_0117.html", + "product_code":"dws", + "code":"803", + "des":"A transaction is a user-defined sequence of database 
operations, which form an integral unit of work.GaussDB(DWS) starts a transaction using START TRANSACTION and BEGIN. ", + "doc_type":"devg", + "kw":"Controlling Transactions,SQL Syntax Reference,Developer Guide", + "title":"Controlling Transactions", + "githuburl":"" + }, + { + "uri":"dws_06_0118.html", + "product_code":"dws", + "code":"804", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"DDL Syntax", + "title":"DDL Syntax", + "githuburl":"" + }, + { + "uri":"dws_06_0119.html", + "product_code":"dws", + "code":"805", + "des":"Data definition language (DDL) is used to define or modify an object in a database, such as a table, index, or view.GaussDB(DWS) does not support DDL if its CN is unavail", + "doc_type":"devg", + "kw":"DDL Syntax Overview,DDL Syntax,Developer Guide", + "title":"DDL Syntax Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0120.html", + "product_code":"dws", + "code":"806", + "des":"This command is used to modify the attributes of a database, including the database name, owner, maximum number of connections, and object isolation attribute.Only the ow", + "doc_type":"devg", + "kw":"ALTER DATABASE,DDL Syntax,Developer Guide", + "title":"ALTER DATABASE", + "githuburl":"" + }, + { + "uri":"dws_06_0123.html", + "product_code":"dws", + "code":"807", + "des":"ALTER FOREIGN TABLE modifies a foreign table.NoneSet the attributes of a foreign table.ALTER FOREIGN TABLE [ IF EXISTS ] table_name\n OPTIONS ( {[ ADD | SET | DROP ] o", + "doc_type":"devg", + "kw":"ALTER FOREIGN TABLE (for GDS),DDL Syntax,Developer Guide", + "title":"ALTER FOREIGN TABLE (for GDS)", + "githuburl":"" + }, + { + "uri":"dws_06_0124.html", + "product_code":"dws", + "code":"808", + "des":"ALTER FOREIGN TABLE modifies an HDFS or OBS foreign table.NoneSet a foreign table's attributes.ALTER FOREIGN TABLE [ IF EXISTS ] table_name\n OPTIONS ( {[ ADD | SET | ", + "doc_type":"devg", + "kw":"ALTER FOREIGN TABLE (for HDFS or OBS),DDL Syntax,Developer Guide", + "title":"ALTER FOREIGN TABLE (for HDFS or OBS)", + "githuburl":"" + }, + { + "uri":"dws_06_0126.html", + "product_code":"dws", + "code":"809", + "des":"ALTER FUNCTION modifies the attributes of a customized function.Only the owner of a function or a system administrator can run this statement. If a function involves oper", + "doc_type":"devg", + "kw":"ALTER FUNCTION,DDL Syntax,Developer Guide", + "title":"ALTER FUNCTION", + "githuburl":"" + }, + { + "uri":"dws_06_0127.html", + "product_code":"dws", + "code":"810", + "des":"ALTER GROUP modifies the attributes of a user group.ALTER GROUP is an alias for ALTER ROLE, and it is not a standard SQL command and not recommended. Users can use ALTER ", + "doc_type":"devg", + "kw":"ALTER GROUP,DDL Syntax,Developer Guide", + "title":"ALTER GROUP", + "githuburl":"" + }, + { + "uri":"dws_06_0128.html", + "product_code":"dws", + "code":"811", + "des":"ALTER INDEX modifies the definition of an existing index.There are several sub-forms:IF EXISTSIf the specified index does not exist, a notice instead of an error is sent.", + "doc_type":"devg", + "kw":"ALTER INDEX,DDL Syntax,Developer Guide", + "title":"ALTER INDEX", + "githuburl":"" + }, + { + "uri":"dws_06_0129.html", + "product_code":"dws", + "code":"812", + "des":"ALTER LARGE OBJECT modifies the definition of a large object. 
It can only assign a new owner to a large object.Only the administrator or the owner of the to-be-modified l", + "doc_type":"devg", + "kw":"ALTER LARGE OBJECT,DDL Syntax,Developer Guide", + "title":"ALTER LARGE OBJECT", + "githuburl":"" + }, + { + "uri":"dws_06_0132.html", + "product_code":"dws", + "code":"813", + "des":"ALTER REDACTION POLICY modifies a data redaction policy applied to a specified table.Only the owner of the table to which the redaction policy is applied has the permissi", + "doc_type":"devg", + "kw":"ALTER REDACTION POLICY,DDL Syntax,Developer Guide", + "title":"ALTER REDACTION POLICY", + "githuburl":"" + }, + { + "uri":"dws_06_0133.html", + "product_code":"dws", + "code":"814", + "des":"ALTER RESOURCE POOL changes the Cgroup of a resource pool.Users having the ALTER permission can modify resource pools.pool_nameSpecifies the name of the resource pool.The", + "doc_type":"devg", + "kw":"ALTER RESOURCE POOL,DDL Syntax,Developer Guide", + "title":"ALTER RESOURCE POOL", + "githuburl":"" + }, + { + "uri":"dws_06_0134.html", + "product_code":"dws", + "code":"815", + "des":"ALTER ROLE changes the attributes of a role.NoneModifying the Rights of a RoleALTER ROLE role_name [ [ WITH ] option [ ... ] ];The option clause for granting rights is as", + "doc_type":"devg", + "kw":"ALTER ROLE,DDL Syntax,Developer Guide", + "title":"ALTER ROLE", + "githuburl":"" + }, + { + "uri":"dws_06_0135.html", + "product_code":"dws", + "code":"816", + "des":"ALTER ROW LEVEL SECURITY POLICY modifies an existing row-level access control policy, including the policy name and the users and expressions affected by the policy.Only ", + "doc_type":"devg", + "kw":"ALTER ROW LEVEL SECURITY POLICY,DDL Syntax,Developer Guide", + "title":"ALTER ROW LEVEL SECURITY POLICY", + "githuburl":"" + }, + { + "uri":"dws_06_0136.html", + "product_code":"dws", + "code":"817", + "des":"ALTER SCHEMA changes the attributes of a schema.Only the schema owner or a system administrator can run this statement.Rename a schema.ALTER SCHEMA schema_name \n ", + "doc_type":"devg", + "kw":"ALTER SCHEMA,DDL Syntax,Developer Guide", + "title":"ALTER SCHEMA", + "githuburl":"" + }, + { + "uri":"dws_06_0137.html", + "product_code":"dws", + "code":"818", + "des":"ALTER SEQUENCE modifies the parameters of an existing sequence.You must be the owner of the sequence to use ALTER SEQUENCE.In the current version, you can modify only the", + "doc_type":"devg", + "kw":"ALTER SEQUENCE,DDL Syntax,Developer Guide", + "title":"ALTER SEQUENCE", + "githuburl":"" + }, + { + "uri":"dws_06_0138.html", + "product_code":"dws", + "code":"819", + "des":"ALTER SERVER adds, modifies, or deletes the parameters of an existing server. You can query existing servers from the pg_foreign_server system catalog.Only the owner of a", + "doc_type":"devg", + "kw":"ALTER SERVER,DDL Syntax,Developer Guide", + "title":"ALTER SERVER", + "githuburl":"" + }, + { + "uri":"dws_06_0139.html", + "product_code":"dws", + "code":"820", + "des":"ALTER SESSION defines or modifies the conditions or parameters that affect the current session. 
Modified session parameters are kept until the current session is disconne", + "doc_type":"devg", + "kw":"ALTER SESSION,DDL Syntax,Developer Guide", + "title":"ALTER SESSION", + "githuburl":"" + }, + { + "uri":"dws_06_0140.html", + "product_code":"dws", + "code":"821", + "des":"ALTER SYNONYM is used to modify the attribute of a synonym.Only the owner of a synonym can be changed.Only the system administrator and the synonym owner have the permission to", + "doc_type":"devg", + "kw":"ALTER SYNONYM,DDL Syntax,Developer Guide", + "title":"ALTER SYNONYM", + "githuburl":"" + }, + { + "uri":"dws_06_0141.html", + "product_code":"dws", + "code":"822", + "des":"ALTER SYSTEM KILL SESSION ends a session.Nonesession_sid, serialSpecifies SID and SERIAL of a session (see examples for format).Value range: The SIDs and SERIALs of all s", + "doc_type":"devg", + "kw":"ALTER SYSTEM KILL SESSION,DDL Syntax,Developer Guide", + "title":"ALTER SYSTEM KILL SESSION", + "githuburl":"" + }, + { + "uri":"dws_06_0142.html", + "product_code":"dws", + "code":"823", + "des":"ALTER TABLE is used to modify tables, including modifying table definitions, renaming tables, renaming specified columns in tables, renaming table constraints, setting ta", + "doc_type":"devg", + "kw":"ALTER TABLE,DDL Syntax,Developer Guide", + "title":"ALTER TABLE", + "githuburl":"" + }, + { + "uri":"dws_06_0143.html", + "product_code":"dws", + "code":"824", + "des":"ALTER TABLE PARTITION modifies table partitioning, including adding, deleting, splitting, merging partitions, and modifying partition attributes.The name of the added par", + "doc_type":"devg", + "kw":"ALTER TABLE PARTITION,DDL Syntax,Developer Guide", + "title":"ALTER TABLE PARTITION", + "githuburl":"" + }, + { + "uri":"dws_06_0145.html", + "product_code":"dws", + "code":"825", + "des":"ALTER TEXT SEARCH CONFIGURATION modifies the definition of a text search configuration. You can modify its mappings from token types to dictionaries, change the configura", + "doc_type":"devg", + "kw":"ALTER TEXT SEARCH CONFIGURATION,DDL Syntax,Developer Guide", + "title":"ALTER TEXT SEARCH CONFIGURATION", + "githuburl":"" + }, + { + "uri":"dws_06_0146.html", + "product_code":"dws", + "code":"826", + "des":"ALTER TEXT SEARCH DICTIONARY modifies the definition of a full-text retrieval dictionary, including its parameters, name, owner, and schema.ALTER is not supported by pred", + "doc_type":"devg", + "kw":"ALTER TEXT SEARCH DICTIONARY,DDL Syntax,Developer Guide", + "title":"ALTER TEXT SEARCH DICTIONARY", + "githuburl":"" + }, + { + "uri":"dws_06_0147.html", + "product_code":"dws", + "code":"827", + "des":"ALTER TRIGGER modifies the definition of a trigger.Only the owner of a table where a trigger is created and system administrators can run the ALTER TRIGGER statement.trig", + "doc_type":"devg", + "kw":"ALTER TRIGGER,DDL Syntax,Developer Guide", + "title":"ALTER TRIGGER", + "githuburl":"" + }, + { + "uri":"dws_06_0148.html", + "product_code":"dws", + "code":"828", + "des":"ALTER TYPE modifies the definition of a type.Modify a type.ALTER TYPE name action [, ... 
]\nALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER }\nALTER TYPE", + "doc_type":"devg", + "kw":"ALTER TYPE,DDL Syntax,Developer Guide", + "title":"ALTER TYPE", + "githuburl":"" + }, + { + "uri":"dws_06_0149.html", + "product_code":"dws", + "code":"829", + "des":"ALTER USER modifies the attributes of a database user.Session parameters modified by ALTER USER apply to a specified user and take effect in the next session.Modify user ", + "doc_type":"devg", + "kw":"ALTER USER,DDL Syntax,Developer Guide", + "title":"ALTER USER", + "githuburl":"" + }, + { + "uri":"dws_06_0150.html", + "product_code":"dws", + "code":"830", + "des":"ALTER VIEW modifies all auxiliary attributes of a view. (To modify the query definition of a view, use CREATE OR REPLACE VIEW.)Only the view owner can modify a view by ru", + "doc_type":"devg", + "kw":"ALTER VIEW,DDL Syntax,Developer Guide", + "title":"ALTER VIEW", + "githuburl":"" + }, + { + "uri":"dws_06_0151.html", + "product_code":"dws", + "code":"831", + "des":"CLEAN CONNECTION clears database connections when a database is abnormal. You may use this statement to delete a specific user's connections to a specified database.NoneC", + "doc_type":"devg", + "kw":"CLEAN CONNECTION,DDL Syntax,Developer Guide", + "title":"CLEAN CONNECTION", + "githuburl":"" + }, + { + "uri":"dws_06_0152.html", + "product_code":"dws", + "code":"832", + "des":"CLOSE frees the resources associated with an open cursor.After a cursor is closed, no subsequent operations are allowed on it.A cursor should be closed when it is no long", + "doc_type":"devg", + "kw":"CLOSE,DDL Syntax,Developer Guide", + "title":"CLOSE", + "githuburl":"" + }, + { + "uri":"dws_06_0153.html", + "product_code":"dws", + "code":"833", + "des":"Cluster a table according to an index.CLUSTER instructs GaussDB(DWS) to cluster the table specified by table_name based on the index specified by index_name. The index mu", + "doc_type":"devg", + "kw":"CLUSTER,DDL Syntax,Developer Guide", + "title":"CLUSTER", + "githuburl":"" + }, + { + "uri":"dws_06_0154.html", + "product_code":"dws", + "code":"834", + "des":"COMMENT defines or changes the comment of an object.Only one comment string is stored for each object. To modify a comment, issue a new COMMENT command for the same objec", + "doc_type":"devg", + "kw":"COMMENT,DDL Syntax,Developer Guide", + "title":"COMMENT", + "githuburl":"" + }, + { + "uri":"dws_06_0155.html", + "product_code":"dws", + "code":"835", + "des":"Creates a barrier for cluster nodes. The barrier can be used for data restoration.Before creating a barrier, ensure that gtm_backup_barrier and enable_cbm_tracking are se", + "doc_type":"devg", + "kw":"CREATE BARRIER,DDL Syntax,Developer Guide", + "title":"CREATE BARRIER", + "githuburl":"" + }, + { + "uri":"dws_06_0156.html", + "product_code":"dws", + "code":"836", + "des":"CREATE DATABASE creates a database. By default, the new database will be created by cloning the standard system database template1. A different template can be specified ", + "doc_type":"devg", + "kw":"CREATE DATABASE,DDL Syntax,Developer Guide", + "title":"CREATE DATABASE", + "githuburl":"" + }, + { + "uri":"dws_06_0159.html", + "product_code":"dws", + "code":"837", + "des":"CREATE FOREIGN TABLE creates a GDS foreign table.CREATE FOREIGN TABLE creates a GDS foreign table in the current database for concurrent data import and export. 
The GDS f", + "doc_type":"devg", + "kw":"CREATE FOREIGN TABLE (for GDS Import and Export),DDL Syntax,Developer Guide", + "title":"CREATE FOREIGN TABLE (for GDS Import and Export)", + "githuburl":"" + }, + { + "uri":"dws_06_0161.html", + "product_code":"dws", + "code":"838", + "des":"CREATE FOREIGN TABLE creates an HDFS or OBS foreign table in the current database to access or export structured data stored on HDFS or OBS. You can also export data in O", + "doc_type":"devg", + "kw":"CREATE FOREIGN TABLE (SQL on OBS or Hadoop),DDL Syntax,Developer Guide", + "title":"CREATE FOREIGN TABLE (SQL on OBS or Hadoop)", + "githuburl":"" + }, + { + "uri":"dws_06_0160.html", + "product_code":"dws", + "code":"839", + "des":"CREATE FOREIGN TABLE creates a foreign table in the current database for parallel data import and export of OBS data. The server used is gsmpp_server, which is created by", + "doc_type":"devg", + "kw":"CREATE FOREIGN TABLE (for OBS Import and Export),DDL Syntax,Developer Guide", + "title":"CREATE FOREIGN TABLE (for OBS Import and Export)", + "githuburl":"" + }, + { + "uri":"dws_06_0163.html", + "product_code":"dws", + "code":"840", + "des":"CREATE FUNCTION creates a function.The precision values (if any) of the parameters or return values of a function are not checked.When creating a function, you are advise", + "doc_type":"devg", + "kw":"CREATE FUNCTION,DDL Syntax,Developer Guide", + "title":"CREATE FUNCTION", + "githuburl":"" + }, + { + "uri":"dws_06_0164.html", + "product_code":"dws", + "code":"841", + "des":"CREATE GROUP creates a user group.CREATE GROUP is an alias for CREATE ROLE, and it is not a standard SQL command and not recommended. Users can use CREATE ROLE directly.T", + "doc_type":"devg", + "kw":"CREATE GROUP,DDL Syntax,Developer Guide", + "title":"CREATE GROUP", + "githuburl":"" + }, + { + "uri":"dws_06_0165.html", + "product_code":"dws", + "code":"842", + "des":"CREATE INDEX defines a new index.Indexes are primarily used to enhance database performance (though inappropriate use can result in slower database performance). 
You ", + "doc_type":"devg", + "kw":"CREATE INDEX,DDL Syntax,Developer Guide", + "title":"CREATE INDEX", + "githuburl":"" + }, + { + "uri":"dws_06_0168.html", + "product_code":"dws", + "code":"843", + "des":"CREATE REDACTION POLICY creates a data redaction policy for a table.Only the table owner has the permission to create a data redaction policy.You can create data redactio", + "doc_type":"devg", + "kw":"CREATE REDACTION POLICY,DDL Syntax,Developer Guide", + "title":"CREATE REDACTION POLICY", + "githuburl":"" + }, + { + "uri":"dws_06_0169.html", + "product_code":"dws", + "code":"844", + "des":"CREATE ROW LEVEL SECURITY POLICY creates a row-level access control policy for a table.The policy takes effect only after row-level access control is enabled (by running ", + "doc_type":"devg", + "kw":"CREATE ROW LEVEL SECURITY POLICY,DDL Syntax,Developer Guide", + "title":"CREATE ROW LEVEL SECURITY POLICY", + "githuburl":"" + }, + { + "uri":"dws_06_0170.html", + "product_code":"dws", + "code":"845", + "des":"CREATE PROCEDURE creates a stored procedure.The precision values (if any) of the parameters or return values of a stored procedure are not checked.When creating a stored ", + "doc_type":"devg", + "kw":"CREATE PROCEDURE,DDL Syntax,Developer Guide", + "title":"CREATE PROCEDURE", + "githuburl":"" + }, + { + "uri":"dws_06_0171.html", + "product_code":"dws", + "code":"846", + "des":"CREATE RESOURCE POOL creates a resource pool and specifies the Cgroup for the resource pool.As long as the current user has CREATE permission, it can create a resource po", + "doc_type":"devg", + "kw":"CREATE RESOURCE POOL,DDL Syntax,Developer Guide", + "title":"CREATE RESOURCE POOL", + "githuburl":"" + }, + { + "uri":"dws_06_0172.html", + "product_code":"dws", + "code":"847", + "des":"Create a role.A role is an entity that has its own database objects and permissions. In different environments, a role can be considered a user, a group, or both.CREATE ROLE ", + "doc_type":"devg", + "kw":"CREATE ROLE,DDL Syntax,Developer Guide", + "title":"CREATE ROLE", + "githuburl":"" + }, + { + "uri":"dws_06_0173.html", + "product_code":"dws", + "code":"848", + "des":"CREATE SCHEMA creates a schema.Named objects are accessed either by \"qualifying\" their names with the schema name as a prefix, or by setting a search path that includes t", + "doc_type":"devg", + "kw":"CREATE SCHEMA,DDL Syntax,Developer Guide", + "title":"CREATE SCHEMA", + "githuburl":"" + }, + { + "uri":"dws_06_0174.html", + "product_code":"dws", + "code":"849", + "des":"CREATE SEQUENCE adds a sequence to the current database. The owner of a sequence is the user who creates the sequence.A sequence is a special table that stores arithmetic", + "doc_type":"devg", + "kw":"CREATE SEQUENCE,DDL Syntax,Developer Guide", + "title":"CREATE SEQUENCE", + "githuburl":"" + }, + { + "uri":"dws_06_0175.html", + "product_code":"dws", + "code":"850", + "des":"CREATE SERVER creates an external server.An external server stores information of HDFS clusters, OBS servers, DLI connections, or other homogeneous clusters.By default, o", + "doc_type":"devg", + "kw":"CREATE SERVER,DDL Syntax,Developer Guide", + "title":"CREATE SERVER", + "githuburl":"" + }, + { + "uri":"dws_06_0176.html", + "product_code":"dws", + "code":"851", + "des":"CREATE SYNONYM is used to create a synonym object. A synonym is an alias of a database object and is used to record the mapping between database object names. 
You can use", + "doc_type":"devg", + "kw":"CREATE SYNONYM,DDL Syntax,Developer Guide", + "title":"CREATE SYNONYM", + "githuburl":"" + }, + { + "uri":"dws_06_0177.html", + "product_code":"dws", + "code":"852", + "des":"CREATE TABLE creates a table in the current database. The table will be owned by the user who created it.For details about the data types supported by column-store tables", + "doc_type":"devg", + "kw":"CREATE TABLE,DDL Syntax,Developer Guide", + "title":"CREATE TABLE", + "githuburl":"" + }, + { + "uri":"dws_06_0178.html", + "product_code":"dws", + "code":"853", + "des":"CREATE TABLE AS creates a table based on the results of a query.It creates a table and fills it with data obtained using SELECT. The table columns have the names and data", + "doc_type":"devg", + "kw":"CREATE TABLE AS,DDL Syntax,Developer Guide", + "title":"CREATE TABLE AS", + "githuburl":"" + }, + { + "uri":"dws_06_0179.html", + "product_code":"dws", + "code":"854", + "des":"CREATE TABLE PARTITION creates a partitioned table. Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific sche", + "doc_type":"devg", + "kw":"CREATE TABLE PARTITION,DDL Syntax,Developer Guide", + "title":"CREATE TABLE PARTITION", + "githuburl":"" + }, + { + "uri":"dws_06_0182.html", + "product_code":"dws", + "code":"855", + "des":"CREATE TEXT SEARCH CONFIGURATION creates a text search configuration. A text search configuration specifies a text search parser that can divide a string into tokens, plu", + "doc_type":"devg", + "kw":"CREATE TEXT SEARCH CONFIGURATION,DDL Syntax,Developer Guide", + "title":"CREATE TEXT SEARCH CONFIGURATION", + "githuburl":"" + }, + { + "uri":"dws_06_0183.html", + "product_code":"dws", + "code":"856", + "des":"CREATE TEXT SEARCH DICTIONARY creates a full-text search dictionary. A dictionary is used to identify and process specified words during full-text search.Dictionaries are", + "doc_type":"devg", + "kw":"CREATE TEXT SEARCH DICTIONARY,DDL Syntax,Developer Guide", + "title":"CREATE TEXT SEARCH DICTIONARY", + "githuburl":"" + }, + { + "uri":"dws_06_0184.html", + "product_code":"dws", + "code":"857", + "des":"CREATE TRIGGER creates a trigger. The trigger will be associated with a specified table or view, and will execute a specified function when certain events occur.Currently", + "doc_type":"devg", + "kw":"CREATE TRIGGER,DDL Syntax,Developer Guide", + "title":"CREATE TRIGGER", + "githuburl":"" + }, + { + "uri":"dws_06_0185.html", + "product_code":"dws", + "code":"858", + "des":"CREATE TYPE defines a new data type in the current database. The user who defines a new data type becomes its owner. Types are designed only for row-store tables.Four typ", + "doc_type":"devg", + "kw":"CREATE TYPE,DDL Syntax,Developer Guide", + "title":"CREATE TYPE", + "githuburl":"" + }, + { + "uri":"dws_06_0186.html", + "product_code":"dws", + "code":"859", + "des":"CREATE USER creates a user.A user created using the CREATE USER statement has the LOGIN permission by default.A schema named after the user is automatically created in th", + "doc_type":"devg", + "kw":"CREATE USER,DDL Syntax,Developer Guide", + "title":"CREATE USER", + "githuburl":"" + }, + { + "uri":"dws_06_0187.html", + "product_code":"dws", + "code":"860", + "des":"CREATE VIEW creates a view. A view is a virtual table, not a base table. A database only stores the definition of a view and does not store its data. 
The data is still st", + "doc_type":"devg", + "kw":"CREATE VIEW,DDL Syntax,Developer Guide", + "title":"CREATE VIEW", + "githuburl":"" + }, + { + "uri":"dws_06_0188.html", + "product_code":"dws", + "code":"861", + "des":"CURSOR defines a cursor. This command retrieves a few rows of data in a query.To process SQL statements, the stored procedure process assigns a memory segment to store cont", + "doc_type":"devg", + "kw":"CURSOR,DDL Syntax,Developer Guide", + "title":"CURSOR", + "githuburl":"" + }, + { + "uri":"dws_06_0189.html", + "product_code":"dws", + "code":"862", + "des":"DROP DATABASE deletes a database.Only the owner of a database or a system administrator has the permission to run the DROP DATABASE command.DROP DATABASE does not take ef", + "doc_type":"devg", + "kw":"DROP DATABASE,DDL Syntax,Developer Guide", + "title":"DROP DATABASE", + "githuburl":"" + }, + { + "uri":"dws_06_0192.html", + "product_code":"dws", + "code":"863", + "des":"DROP FOREIGN TABLE deletes a specified foreign table.DROP FOREIGN TABLE forcibly deletes a specified table. After a table is deleted, any indexes that exist for the table", + "doc_type":"devg", + "kw":"DROP FOREIGN TABLE,DDL Syntax,Developer Guide", + "title":"DROP FOREIGN TABLE", + "githuburl":"" + }, + { + "uri":"dws_06_0193.html", + "product_code":"dws", + "code":"864", + "des":"DROP FUNCTION deletes an existing function.If a function involves operations on temporary tables, the function cannot be deleted by running DROP FUNCTION.IF EXISTSSends a", + "doc_type":"devg", + "kw":"DROP FUNCTION,DDL Syntax,Developer Guide", + "title":"DROP FUNCTION", + "githuburl":"" + }, + { + "uri":"dws_06_0194.html", + "product_code":"dws", + "code":"865", + "des":"DROP GROUP deletes a user group.DROP GROUP is the alias for DROP ROLE.DROP GROUP is the internal interface encapsulated in the gs_om tool. 
You are not advised to use this", + "doc_type":"devg", + "kw":"DROP GROUP,DDL Syntax,Developer Guide", + "title":"DROP GROUP", + "githuburl":"" + }, + { + "uri":"dws_06_0195.html", + "product_code":"dws", + "code":"866", + "des":"DROP INDEX deletes an index.Only the owner of an index or a system administrator can run the DROP INDEX command.IF EXISTSSends a notice instead of an error if the specified i", + "doc_type":"devg", + "kw":"DROP INDEX,DDL Syntax,Developer Guide", + "title":"DROP INDEX", + "githuburl":"" + }, + { + "uri":"dws_06_0198.html", + "product_code":"dws", + "code":"867", + "des":"DROP OWNED deletes the database objects of a database role.The role's permissions on all the database objects in the current database and shared objects (databases and ta", + "doc_type":"devg", + "kw":"DROP OWNED,DDL Syntax,Developer Guide", + "title":"DROP OWNED", + "githuburl":"" + }, + { + "uri":"dws_06_0199.html", + "product_code":"dws", + "code":"868", + "des":"DROP REDACTION POLICY deletes a data redaction policy applied to a specified table.Only the table owner has the permission to delete a data redaction policy.IF EXISTSSend", + "doc_type":"devg", + "kw":"DROP REDACTION POLICY,DDL Syntax,Developer Guide", + "title":"DROP REDACTION POLICY", + "githuburl":"" + }, + { + "uri":"dws_06_0200.html", + "product_code":"dws", + "code":"869", + "des":"DROP ROW LEVEL SECURITY POLICY deletes a row-level access control policy from a table.Only the table owner or administrators can delete a row-level access control policy ", + "doc_type":"devg", + "kw":"DROP ROW LEVEL SECURITY POLICY,DDL Syntax,Developer Guide", + "title":"DROP ROW LEVEL SECURITY POLICY", + "githuburl":"" + }, + { + "uri":"dws_06_0201.html", + "product_code":"dws", + "code":"870", + "des":"DROP PROCEDURE deletes an existing stored procedure.None.IF EXISTSSends a notice instead of an error if the stored procedure does not exist.Sends a notice instead of an e", + "doc_type":"devg", + "kw":"DROP PROCEDURE,DDL Syntax,Developer Guide", + "title":"DROP PROCEDURE", + "githuburl":"" + }, + { + "uri":"dws_06_0202.html", + "product_code":"dws", + "code":"871", + "des":"DROP RESOURCE POOL deletes a resource pool.The resource pool cannot be deleted if it is associated with a role.The user must have the DROP permission in order to delete a", + "doc_type":"devg", + "kw":"DROP RESOURCE POOL,DDL Syntax,Developer Guide", + "title":"DROP RESOURCE POOL", + "githuburl":"" + }, + { + "uri":"dws_06_0203.html", + "product_code":"dws", + "code":"872", + "des":"DROP ROLE deletes a specified role.If a \"role is being used by other users\" error is displayed when you run DROP ROLE, it might be that threads cannot respond to signals ", + "doc_type":"devg", + "kw":"DROP ROLE,DDL Syntax,Developer Guide", + "title":"DROP ROLE", + "githuburl":"" + }, + { + "uri":"dws_06_0204.html", + "product_code":"dws", + "code":"873", + "des":"DROP SCHEMA deletes a schema in a database.Only a schema owner or a system administrator can run the DROP SCHEMA command.IF EXISTSSends a notice instead of an error if th", + "doc_type":"devg", + "kw":"DROP SCHEMA,DDL Syntax,Developer Guide", + "title":"DROP SCHEMA", + "githuburl":"" + }, + { + "uri":"dws_06_0205.html", + "product_code":"dws", + "code":"874", + "des":"DROP SEQUENCE deletes a sequence from the current database.Only a sequence owner or a system administrator can delete a sequence.IF EXISTSSends a notice instead of an err", + "doc_type":"devg", + "kw":"DROP SEQUENCE,DDL Syntax,Developer Guide", + "title":"DROP SEQUENCE", + 
"githuburl":"" + }, + { + "uri":"dws_06_0206.html", + "product_code":"dws", + "code":"875", + "des":"DROP SERVER deletes an existing data server.Only the server owner can delete a server.IF EXISTSSends a notice instead of an error if the specified server does not exist.Se", + "doc_type":"devg", + "kw":"DROP SERVER,DDL Syntax,Developer Guide", + "title":"DROP SERVER", + "githuburl":"" + }, + { + "uri":"dws_06_0207.html", + "product_code":"dws", + "code":"876", + "des":"DROP SYNONYM is used to delete a synonym object.Only a synonym owner or a system administrator can run the DROP SYNONYM command.IF EXISTSSends a notice instead of reportin", + "doc_type":"devg", + "kw":"DROP SYNONYM,DDL Syntax,Developer Guide", + "title":"DROP SYNONYM", + "githuburl":"" + }, + { + "uri":"dws_06_0208.html", + "product_code":"dws", + "code":"877", + "des":"DROP TABLE deletes a specified table.Only the table owner, schema owner, and system administrator have the permission to delete a table. To delete all the rows in a table", + "doc_type":"devg", + "kw":"DROP TABLE,DDL Syntax,Developer Guide", + "title":"DROP TABLE", + "githuburl":"" + }, + { + "uri":"dws_06_0210.html", + "product_code":"dws", + "code":"878", + "des":"DROP TEXT SEARCH CONFIGURATION deletes an existing text search configuration.To run the DROP TEXT SEARCH CONFIGURATION command, you must be the owner of the text search c", + "doc_type":"devg", + "kw":"DROP TEXT SEARCH CONFIGURATION,DDL Syntax,Developer Guide", + "title":"DROP TEXT SEARCH CONFIGURATION", + "githuburl":"" + }, + { + "uri":"dws_06_0211.html", + "product_code":"dws", + "code":"879", + "des":"DROP TEXT SEARCH DICTIONARY deletes a full-text retrieval dictionary.DROP is not supported by predefined dictionaries.Only the owner of a dictionary can run DROP on the dict", + "doc_type":"devg", + "kw":"DROP TEXT SEARCH DICTIONARY,DDL Syntax,Developer Guide", + "title":"DROP TEXT SEARCH DICTIONARY", + "githuburl":"" + }, + { + "uri":"dws_06_0212.html", + "product_code":"dws", + "code":"880", + "des":"DROP TRIGGER deletes a trigger.Only the owner of a trigger and system administrators can run the DROP TRIGGER statement.IF EXISTSSends a notice instead of an error if the", + "doc_type":"devg", + "kw":"DROP TRIGGER,DDL Syntax,Developer Guide", + "title":"DROP TRIGGER", + "githuburl":"" + }, + { + "uri":"dws_06_0213.html", + "product_code":"dws", + "code":"881", + "des":"DROP TYPE deletes a user-defined data type. Only the type owner has permission to run this statement.IF EXISTSSends a notice instead of an error if the specified type doe", + "doc_type":"devg", + "kw":"DROP TYPE,DDL Syntax,Developer Guide", + "title":"DROP TYPE", + "githuburl":"" + }, + { + "uri":"dws_06_0214.html", + "product_code":"dws", + "code":"882", + "des":"Deleting a user will also delete the schema having the same name as the user.CASCADE is used to delete objects (excluding databases) that depend on the user. 
CASCADE cann", + "doc_type":"devg", + "kw":"DROP USER,DDL Syntax,Developer Guide", + "title":"DROP USER", + "githuburl":"" + }, + { + "uri":"dws_06_0215.html", + "product_code":"dws", + "code":"883", + "des":"DROP VIEW forcibly deletes an existing view in a database.Only a view owner or a system administrator can run the DROP VIEW command.IF EXISTSSends a notice instead of an erro", + "doc_type":"devg", + "kw":"DROP VIEW,DDL Syntax,Developer Guide", + "title":"DROP VIEW", + "githuburl":"" + }, + { + "uri":"dws_06_0216.html", + "product_code":"dws", + "code":"884", + "des":"FETCH retrieves data using a previously-created cursor.A cursor has an associated position, which is used by FETCH. The cursor position can be before the first row of the", + "doc_type":"devg", + "kw":"FETCH,DDL Syntax,Developer Guide", + "title":"FETCH", + "githuburl":"" + }, + { + "uri":"dws_06_0217.html", + "product_code":"dws", + "code":"885", + "des":"MOVE repositions a cursor without retrieving any data. MOVE works exactly like the FETCH command, except it only repositions the cursor and does not return rows.NoneThe d", + "doc_type":"devg", + "kw":"MOVE,DDL Syntax,Developer Guide", + "title":"MOVE", + "githuburl":"" + }, + { + "uri":"dws_06_0218.html", + "product_code":"dws", + "code":"886", + "des":"REINDEX rebuilds an index using the data stored in the index's table, replacing the old copy of the index.There are several scenarios in which REINDEX can be used:An inde", + "doc_type":"devg", + "kw":"REINDEX,DDL Syntax,Developer Guide", + "title":"REINDEX", + "githuburl":"" + }, + { + "uri":"dws_06_0219.html", + "product_code":"dws", + "code":"887", + "des":"RESET restores run-time parameters to their default values. The default values are parameter default values compiled in the postgresql.conf configuration file.RESET is an", + "doc_type":"devg", + "kw":"RESET,DDL Syntax,Developer Guide", + "title":"RESET", + "githuburl":"" + }, + { + "uri":"dws_06_0220.html", + "product_code":"dws", + "code":"888", + "des":"SET modifies a run-time parameter.Most run-time parameters can be modified by executing SET. Some parameters cannot be modified after a server or session starts.Set the s", + "doc_type":"devg", + "kw":"SET,DDL Syntax,Developer Guide", + "title":"SET", + "githuburl":"" + }, + { + "uri":"dws_06_0221.html", + "product_code":"dws", + "code":"889", + "des":"SET CONSTRAINTS sets the behavior of constraint checking within the current transaction.IMMEDIATE constraints are checked at the end of each statement. 
DEFERRED constrain", + "doc_type":"devg", + "kw":"SET CONSTRAINTS,DDL Syntax,Developer Guide", + "title":"SET CONSTRAINTS", + "githuburl":"" + }, + { + "uri":"dws_06_0222.html", + "product_code":"dws", + "code":"890", + "des":"SET ROLE sets the current user identifier of the current session.Users of the current session must be members of specified rolename, but the system administrator can choo", + "doc_type":"devg", + "kw":"SET ROLE,DDL Syntax,Developer Guide", + "title":"SET ROLE", + "githuburl":"" + }, + { + "uri":"dws_06_0223.html", + "product_code":"dws", + "code":"891", + "des":"SET SESSION AUTHORIZATION sets the session user identifier and the current user identifier of the current SQL session to a specified user.The session identifier can be ch", + "doc_type":"devg", + "kw":"SET SESSION AUTHORIZATION,DDL Syntax,Developer Guide", + "title":"SET SESSION AUTHORIZATION", + "githuburl":"" + }, + { + "uri":"dws_06_0224.html", + "product_code":"dws", + "code":"892", + "des":"SHOW shows the current value of a run-time parameter. You can use the SET statement to set these parameters.Some parameters that can be viewed by SHOW are read-only. You ", + "doc_type":"devg", + "kw":"SHOW,DDL Syntax,Developer Guide", + "title":"SHOW", + "githuburl":"" + }, + { + "uri":"dws_06_0225.html", + "product_code":"dws", + "code":"893", + "des":"TRUNCATE quickly removes all rows from a database table.It has the same effect as an unqualified DELETE on each table, but it is faster since it does not actually scan th", + "doc_type":"devg", + "kw":"TRUNCATE,DDL Syntax,Developer Guide", + "title":"TRUNCATE", + "githuburl":"" + }, + { + "uri":"dws_06_0226.html", + "product_code":"dws", + "code":"894", + "des":"VACUUM reclaims storage space occupied by tables or B-tree indexes. In normal database operation, rows that have been deleted or obsoleted by an update are not physically", + "doc_type":"devg", + "kw":"VACUUM,DDL Syntax,Developer Guide", + "title":"VACUUM", + "githuburl":"" + }, + { + "uri":"dws_06_0227.html", + "product_code":"dws", + "code":"895", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"DML Syntax", + "title":"DML Syntax", + "githuburl":"" + }, + { + "uri":"dws_06_0228.html", + "product_code":"dws", + "code":"896", + "des":"Data Manipulation Language (DML) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data.Inserting data refers t", + "doc_type":"devg", + "kw":"DML Syntax Overview,DML Syntax,Developer Guide", + "title":"DML Syntax Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0229.html", + "product_code":"dws", + "code":"897", + "des":"CALL calls defined functions or stored procedures.NoneschemaSpecifies the name of the schema where a function or stored procedure is located.Specifies the name of the sch", + "doc_type":"devg", + "kw":"CALL,DML Syntax,Developer Guide", + "title":"CALL", + "githuburl":"" + }, + { + "uri":"dws_06_0230.html", + "product_code":"dws", + "code":"898", + "des":"COPY copies data between tables and files.COPY FROM copies data from a file to a table. 
COPY TO copies data from a table to a file.If CNs and DNs are enabled in security ", + "doc_type":"devg", + "kw":"COPY,DML Syntax,Developer Guide", + "title":"COPY", + "githuburl":"" + }, + { + "uri":"dws_06_0231.html", + "product_code":"dws", + "code":"899", + "des":"DELETE deletes rows that satisfy the WHERE clause from the specified table. If the WHERE clause does not exist, all rows in the table will be deleted. The result is a val", + "doc_type":"devg", + "kw":"DELETE,DML Syntax,Developer Guide", + "title":"DELETE", + "githuburl":"" + }, + { + "uri":"dws_06_0232.html", + "product_code":"dws", + "code":"900", + "des":"EXPLAIN shows the execution plan of an SQL statement.The execution plan shows how the tables referenced by the SQL statement will be scanned, for example, by plain sequen", + "doc_type":"devg", + "kw":"EXPLAIN,DML Syntax,Developer Guide", + "title":"EXPLAIN", + "githuburl":"" + }, + { + "uri":"dws_06_0233.html", + "product_code":"dws", + "code":"901", + "des":"You can run the EXPLAIN PLAN statement to save the information about an execution plan to the PLAN_TABLE table. Different from the EXPLAIN statement, EXPLAIN PLAN only st", + "doc_type":"devg", + "kw":"EXPLAIN PLAN,DML Syntax,Developer Guide", + "title":"EXPLAIN PLAN", + "githuburl":"" + }, + { + "uri":"dws_06_0234.html", + "product_code":"dws", + "code":"902", + "des":"LOCK TABLE obtains a table-level lock.GaussDB(DWS) always tries to select the lock mode with minimum constraints when automatically requesting a lock for a command refere", + "doc_type":"devg", + "kw":"LOCK,DML Syntax,Developer Guide", + "title":"LOCK", + "githuburl":"" + }, + { + "uri":"dws_06_0235.html", + "product_code":"dws", + "code":"903", + "des":"The MERGE INTO statement is used to conditionally match data in a target table with that in a source table. If data matches, UPDATE is executed on the target table; if da", + "doc_type":"devg", + "kw":"MERGE INTO,DML Syntax,Developer Guide", + "title":"MERGE INTO", + "githuburl":"" + }, + { + "uri":"dws_06_0275.html", + "product_code":"dws", + "code":"904", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"INSERT and UPSERT", + "title":"INSERT and UPSERT", + "githuburl":"" + }, + { + "uri":"dws_06_0236.html", + "product_code":"dws", + "code":"905", + "des":"INSERT inserts new rows into a table.You must have the INSERT permission on a table in order to insert into it.Use of the RETURNING clause requires the SELECT permission ", + "doc_type":"devg", + "kw":"INSERT,INSERT and UPSERT,Developer Guide", + "title":"INSERT", + "githuburl":"" + }, + { + "uri":"dws_06_0237.html", + "product_code":"dws", + "code":"906", + "des":"UPSERT inserts rows into a table. When a row duplicates an existing primary key or unique key value, the row will be ignored or updated.The UPSERT syntax is supported onl", + "doc_type":"devg", + "kw":"UPSERT,INSERT and UPSERT,Developer Guide", + "title":"UPSERT", + "githuburl":"" + }, + { + "uri":"dws_06_0240.html", + "product_code":"dws", + "code":"907", + "des":"UPDATE updates data in a table. UPDATE changes the values of the specified columns in all rows that satisfy the condition. The WHERE clause clarifies conditions. 
The colu", + "doc_type":"devg", + "kw":"UPDATE,DML Syntax,Developer Guide", + "title":"UPDATE", + "githuburl":"" + }, + { + "uri":"dws_06_0241.html", + "product_code":"dws", + "code":"908", + "des":"VALUES computes a row or a set of rows based on given values. It is most commonly used to generate a constant table within a large command.VALUES lists with large numbers", + "doc_type":"devg", + "kw":"VALUES,DML Syntax,Developer Guide", + "title":"VALUES", + "githuburl":"" + }, + { + "uri":"dws_06_0242.html", + "product_code":"dws", + "code":"909", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"DCL Syntax", + "title":"DCL Syntax", + "githuburl":"" + }, + { + "uri":"dws_06_0243.html", + "product_code":"dws", + "code":"910", + "des":"Data control language (DCL) is used to set or modify database users or role rights.GaussDB(DWS) provides a statement for granting rights to data objects and roles. For de", + "doc_type":"devg", + "kw":"DCL Syntax Overview,DCL Syntax,Developer Guide", + "title":"DCL Syntax Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0244.html", + "product_code":"dws", + "code":"911", + "des":"ALTER DEFAULT PRIVILEGES allows you to set the permissions that will be used for objects to be created. It does not affect permissions assigned to existing objects.To iso", + "doc_type":"devg", + "kw":"ALTER DEFAULT PRIVILEGES,DCL Syntax,Developer Guide", + "title":"ALTER DEFAULT PRIVILEGES", + "githuburl":"" + }, + { + "uri":"dws_06_0245.html", + "product_code":"dws", + "code":"912", + "des":"ANALYZE collects statistics about ordinary tables in a database, and stores the results in the PG_STATISTIC system catalog. The execution plan generator uses these statis", + "doc_type":"devg", + "kw":"ANALYZE | ANALYSE,DCL Syntax,Developer Guide", + "title":"ANALYZE | ANALYSE", + "githuburl":"" + }, + { + "uri":"dws_06_0246.html", + "product_code":"dws", + "code":"913", + "des":"DEALLOCATE deallocates a previously prepared statement. If you do not explicitly deallocate a prepared statement, it is deallocated when the session ends.The PREPARE key ", + "doc_type":"devg", + "kw":"DEALLOCATE,DCL Syntax,Developer Guide", + "title":"DEALLOCATE", + "githuburl":"" + }, + { + "uri":"dws_06_0247.html", + "product_code":"dws", + "code":"914", + "des":"DO executes an anonymous code block.A code block is a function body without parameters that returns void. It is analyzed and executed at the same time.Before using a prog", + "doc_type":"devg", + "kw":"DO,DCL Syntax,Developer Guide", + "title":"DO", + "githuburl":"" + }, + { + "uri":"dws_06_0248.html", + "product_code":"dws", + "code":"915", + "des":"EXECUTE executes a prepared statement. A prepared statement only exists in the lifecycle of a session. Therefore, only prepared statements created using PREPARE earlier i", + "doc_type":"devg", + "kw":"EXECUTE,DCL Syntax,Developer Guide", + "title":"EXECUTE", + "githuburl":"" + }, + { + "uri":"dws_06_0249.html", + "product_code":"dws", + "code":"916", + "des":"EXECUTE DIRECT executes an SQL statement on a specified node. Generally, the cluster automatically allocates an SQL statement to proper nodes. 
EXECUTE DIRECT is mainly us", + "doc_type":"devg", + "kw":"EXECUTE DIRECT,DCL Syntax,Developer Guide", + "title":"EXECUTE DIRECT", + "githuburl":"" + }, + { + "uri":"dws_06_0250.html", + "product_code":"dws", + "code":"917", + "des":"GRANT grants permissions to roles and users.GRANT is used in the following scenarios:Granting system permissions to roles or usersSystem permissions are also called user ", + "doc_type":"devg", + "kw":"GRANT,DCL Syntax,Developer Guide", + "title":"GRANT", + "githuburl":"" + }, + { + "uri":"dws_06_0251.html", + "product_code":"dws", + "code":"918", + "des":"PREPARE creates a prepared statement.A prepared statement is a performance optimizing object on the server. When the PREPARE statement is executed, the specified query is", + "doc_type":"devg", + "kw":"PREPARE,DCL Syntax,Developer Guide", + "title":"PREPARE", + "githuburl":"" + }, + { + "uri":"dws_06_0252.html", + "product_code":"dws", + "code":"919", + "des":"REASSIGN OWNED changes the owner of a database.REASSIGN OWNED requires that the system change owners of all the database objects owned by old_roles to new_role.REASSIGN O", + "doc_type":"devg", + "kw":"REASSIGN OWNED,DCL Syntax,Developer Guide", + "title":"REASSIGN OWNED", + "githuburl":"" + }, + { + "uri":"dws_06_0253.html", + "product_code":"dws", + "code":"920", + "des":"REVOKE revokes rights from one or more roles.If a non-owner user of an object attempts to REVOKE rights on the object, the command is executed based on the following rule", + "doc_type":"devg", + "kw":"REVOKE,DCL Syntax,Developer Guide", + "title":"REVOKE", + "githuburl":"" + }, + { + "uri":"dws_06_0276.html", + "product_code":"dws", + "code":"921", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"DQL Syntax", + "title":"DQL Syntax", + "githuburl":"" + }, + { + "uri":"dws_06_0277.html", + "product_code":"dws", + "code":"922", + "des":"Data Query Language (DQL) can obtain data from tables or views.GaussDB(DWS) provides statements for obtaining data from tables or views. For details, see SELECT.GaussDB(D", + "doc_type":"devg", + "kw":"DQL Syntax Overview,DQL Syntax,Developer Guide", + "title":"DQL Syntax Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0238.html", + "product_code":"dws", + "code":"923", + "des":"SELECT retrieves data from a table or view.Serving as an overlaid filter for a database table, SELECT using SQL keywords retrieves required data from data tables.Using SE", + "doc_type":"devg", + "kw":"SELECT,DQL Syntax,Developer Guide", + "title":"SELECT", + "githuburl":"" + }, + { + "uri":"dws_06_0239.html", + "product_code":"dws", + "code":"924", + "des":"SELECT INTO defines a new table based on a query result and inserts data obtained by the query into the new table.Different from SELECT, data found by SELECT INTO is not returne", + "doc_type":"devg", + "kw":"SELECT INTO,DQL Syntax,Developer Guide", + "title":"SELECT INTO", + "githuburl":"" + }, + { + "uri":"dws_06_0254.html", + "product_code":"dws", + "code":"925", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"TCL Syntax", + "title":"TCL Syntax", + "githuburl":"" + }, + { + "uri":"dws_06_0255.html", + "product_code":"dws", + "code":"926", + "des":"Transaction Control Language (TCL) controls the time and effect of database transactions and monitors the database.GaussDB(DWS) uses the COMMIT or END statement to commit", + "doc_type":"devg", + "kw":"TCL Syntax Overview,TCL Syntax,Developer Guide", + "title":"TCL Syntax Overview", + "githuburl":"" + }, + { + "uri":"dws_06_0256.html", + "product_code":"dws", + "code":"927", + "des":"ABORT rolls back the current transaction and cancels the changes in the transaction.This command is equivalent to ROLLBACK, and is present only for historical reasons. No", + "doc_type":"devg", + "kw":"ABORT,TCL Syntax,Developer Guide", + "title":"ABORT", + "githuburl":"" + }, + { + "uri":"dws_06_0257.html", + "product_code":"dws", + "code":"928", + "des":"BEGIN may be used to initiate an anonymous block or a single transaction. This section describes the syntax of BEGIN used to initiate an anonymous block. For details abou", + "doc_type":"devg", + "kw":"BEGIN,TCL Syntax,Developer Guide", + "title":"BEGIN", + "githuburl":"" + }, + { + "uri":"dws_06_0258.html", + "product_code":"dws", + "code":"929", + "des":"A checkpoint is a point in the transaction log sequence at which all data files have been updated to reflect the information in the log. All data files will be flushed to", + "doc_type":"devg", + "kw":"CHECKPOINT,TCL Syntax,Developer Guide", + "title":"CHECKPOINT", + "githuburl":"" + }, + { + "uri":"dws_06_0259.html", + "product_code":"dws", + "code":"930", + "des":"COMMIT or END commits all operations of a transaction.Only the transaction creators or system administrators can run the COMMIT command. The creation and commit operation", + "doc_type":"devg", + "kw":"COMMIT | END,TCL Syntax,Developer Guide", + "title":"COMMIT | END", + "githuburl":"" + }, + { + "uri":"dws_06_0260.html", + "product_code":"dws", + "code":"931", + "des":"COMMIT PREPARED commits a prepared two-phase transaction.The function is only available in maintenance mode (when GUC parameter xc_maintenance_mode is on). Exercise cauti", + "doc_type":"devg", + "kw":"COMMIT PREPARED,TCL Syntax,Developer Guide", + "title":"COMMIT PREPARED", + "githuburl":"" + }, + { + "uri":"dws_06_0262.html", + "product_code":"dws", + "code":"932", + "des":"PREPARE TRANSACTION prepares the current transaction for two-phase commit.After this command, the transaction is no longer associated with the current session; instead, i", + "doc_type":"devg", + "kw":"PREPARE TRANSACTION,TCL Syntax,Developer Guide", + "title":"PREPARE TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_06_0263.html", + "product_code":"dws", + "code":"933", + "des":"SAVEPOINT establishes a new savepoint within the current transaction.A savepoint is a special mark inside a transaction that rolls back all commands that are executed aft", + "doc_type":"devg", + "kw":"SAVEPOINT,TCL Syntax,Developer Guide", + "title":"SAVEPOINT", + "githuburl":"" + }, + { + "uri":"dws_06_0264.html", + "product_code":"dws", + "code":"934", + "des":"SET TRANSACTION sets the characteristics of the current transaction. It has no effect on any subsequent transactions. 
Available transaction characteristics include the tr", + "doc_type":"devg", + "kw":"SET TRANSACTION,TCL Syntax,Developer Guide", + "title":"SET TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_06_0265.html", + "product_code":"dws", + "code":"935", + "des":"START TRANSACTION starts a transaction. If the isolation level, read/write mode, or deferrable mode is specified, a new transaction will have those characteristics. You c", + "doc_type":"devg", + "kw":"START TRANSACTION,TCL Syntax,Developer Guide", + "title":"START TRANSACTION", + "githuburl":"" + }, + { + "uri":"dws_06_0266.html", + "product_code":"dws", + "code":"936", + "des":"Rolls back the current transaction and backs out all updates in the transaction.ROLLBACK backs out of all changes that a transaction makes to a database if the transactio", + "doc_type":"devg", + "kw":"ROLLBACK,TCL Syntax,Developer Guide", + "title":"ROLLBACK", + "githuburl":"" + }, + { + "uri":"dws_06_0267.html", + "product_code":"dws", + "code":"937", + "des":"RELEASE SAVEPOINT destroys a savepoint previously defined in the current transaction.Destroying a savepoint makes it unavailable as a rollback point, but it has no other ", + "doc_type":"devg", + "kw":"RELEASE SAVEPOINT,TCL Syntax,Developer Guide", + "title":"RELEASE SAVEPOINT", + "githuburl":"" + }, + { + "uri":"dws_06_0268.html", + "product_code":"dws", + "code":"938", + "des":"ROLLBACK PREPARED cancels a transaction ready for two-phase committing.The function is only available in maintenance mode (when GUC parameter xc_maintenance_mode is on). ", + "doc_type":"devg", + "kw":"ROLLBACK PREPARED,TCL Syntax,Developer Guide", + "title":"ROLLBACK PREPARED", + "githuburl":"" + }, + { + "uri":"dws_06_0269.html", + "product_code":"dws", + "code":"939", + "des":"ROLLBACK TO SAVEPOINT rolls back to a savepoint. It implicitly destroys all savepoints that were established after the named savepoint.Rolls back all commands that were e", + "doc_type":"devg", + "kw":"ROLLBACK TO SAVEPOINT,TCL Syntax,Developer Guide", + "title":"ROLLBACK TO SAVEPOINT", + "githuburl":"" + }, + { + "uri":"dws_06_0270.html", + "product_code":"dws", + "code":"940", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"GIN Indexes", + "title":"GIN Indexes", + "githuburl":"" + }, + { + "uri":"dws_06_0271.html", + "product_code":"dws", + "code":"941", + "des":"Generalized Inverted Index (GIN) is designed for handling cases where the items to be indexed are composite values, and the queries to be handled by the index need to sea", + "doc_type":"devg", + "kw":"Introduction,GIN Indexes,Developer Guide", + "title":"Introduction", + "githuburl":"" + }, + { + "uri":"dws_06_0272.html", + "product_code":"dws", + "code":"942", + "des":"The GIN interface has a high level of abstraction, requiring the access method implementer only to implement the semantics of the data type being accessed. 
The GIN layer ", + "doc_type":"devg", + "kw":"Scalability,GIN Indexes,Developer Guide", + "title":"Scalability", + "githuburl":"" + }, + { + "uri":"dws_06_0273.html", + "product_code":"dws", + "code":"943", + "des":"Internally, a GIN index contains a B-tree index constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and w", + "doc_type":"devg", + "kw":"Implementation,GIN Indexes,Developer Guide", + "title":"Implementation", + "githuburl":"" + }, + { + "uri":"dws_06_0274.html", + "product_code":"dws", + "code":"944", + "des":"Create vs. InsertInsertion into a GIN index can be slow due to the likelihood of many keys being inserted for each item. So, for bulk insertions into a table, it is advis", + "doc_type":"devg", + "kw":"GIN Tips and Tricks,GIN Indexes,Developer Guide", + "title":"GIN Tips and Tricks", + "githuburl":"" + }, + { + "uri":"dws_04_3333.html", + "product_code":"dws", + "code":"945", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"devg", + "kw":"Change History,Developer Guide", + "title":"Change History", + "githuburl":"" + } +] \ No newline at end of file diff --git a/docs/dws/dev/CLASS.TXT.json b/docs/dws/dev/CLASS.TXT.json new file mode 100644 index 00000000..0dc84935 --- /dev/null +++ b/docs/dws/dev/CLASS.TXT.json @@ -0,0 +1,8507 @@ +[ + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Developer Guide", + "uri":"dws_04_1000.html", + "doc_type":"devg", + "p_code":"", + "code":"1" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Welcome", + "uri":"dws_04_0001.html", + "doc_type":"devg", + "p_code":"1", + "code":"2" + }, + { + "desc":"This document is intended for database designers, application developers, and database administrators, and provides information required for designing, building, querying", + "product_code":"dws", + "title":"Target Readers", + "uri":"dws_04_0002.html", + "doc_type":"devg", + "p_code":"2", + "code":"3" + }, + { + "desc":"If you are a new GaussDB(DWS) user, you are advised to read the following contents first:Sections describing the features, functions, and application scenarios of GaussDB", + "product_code":"dws", + "title":"Reading Guide", + "uri":"dws_04_0004.html", + "doc_type":"devg", + "p_code":"2", + "code":"4" + }, + { + "desc":"SQL examples in this manual are developed based on the TPC-DS model. 
Before you execute the examples, install the TPC-DS benchmark by following the instructions on the of", + "product_code":"dws", + "title":"Conventions", + "uri":"dws_04_0005.html", + "doc_type":"devg", + "p_code":"2", + "code":"5" + }, + { + "desc":"Complete the following tasks before you perform operations described in this document:Create a GaussDB(DWS) cluster.Install an SQL client.Connect the SQL client to the de", + "product_code":"dws", + "title":"Prerequisites", + "uri":"dws_04_0006.html", + "doc_type":"devg", + "p_code":"2", + "code":"6" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"System Overview", + "uri":"dws_04_0007.html", + "doc_type":"devg", + "p_code":"1", + "code":"7" + }, + { + "desc":"GaussDB(DWS) manages cluster transactions, the basis of HA and failovers. This ensures speedy fault recovery, guarantees the Atomicity, Consistency, Isolation, Durability", + "product_code":"dws", + "title":"Highly Reliable Transaction Processing", + "uri":"dws_04_0011.html", + "doc_type":"devg", + "p_code":"7", + "code":"8" + }, + { + "desc":"The following GaussDB(DWS) features help achieve high query performance.GaussDB(DWS) is an MPP system with the shared-nothing architecture. It consists of multiple indepe", + "product_code":"dws", + "title":"High Query Performance", + "uri":"dws_04_0012.html", + "doc_type":"devg", + "p_code":"7", + "code":"9" + }, + { + "desc":"A database manages data objects and is isolated from other databases. While creating a database, you can specify a tablespace. If you do not specify it, database objects ", + "product_code":"dws", + "title":"Related Concepts", + "uri":"dws_04_0015.html", + "doc_type":"devg", + "p_code":"7", + "code":"10" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Data Migration", + "uri":"dws_04_0985.html", + "doc_type":"devg", + "p_code":"1", + "code":"11" + }, + { + "desc":"GaussDB(DWS) provides flexible methods for importing data. You can import data from different sources to GaussDB(DWS). The features of each method are listed in Table 1. ", + "product_code":"dws", + "title":"Data Migration to GaussDB(DWS)", + "uri":"dws_04_0180.html", + "doc_type":"devg", + "p_code":"11", + "code":"12" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Data Import", + "uri":"dws_04_0179.html", + "doc_type":"devg", + "p_code":"11", + "code":"13" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Importing Data from OBS in Parallel", + "uri":"dws_04_0181.html", + "doc_type":"devg", + "p_code":"13", + "code":"14" + }, + { + "desc":"The object storage service (OBS) is an object-based cloud storage service, featuring data storage of high security, proven reliability, and cost-effectiveness. OBS provid", + "product_code":"dws", + "title":"About Parallel Data Import from OBS", + "uri":"dws_04_0182.html", + "doc_type":"devg", + "p_code":"14", + "code":"15" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Importing CSV/TXT Data from the OBS", + "uri":"dws_04_0154.html", + "doc_type":"devg", + "p_code":"14", + "code":"16" + }, + { + "desc":"In this example, OBS data is imported to GaussDB(DWS) databases. When users who have registered with the cloud platform access OBS using clients, call APIs, or SDKs, acce", + "product_code":"dws", + "title":"Creating Access Keys (AK and SK)", + "uri":"dws_04_0183.html", + "doc_type":"devg", + "p_code":"16", + "code":"17" + }, + { + "desc":"Before importing data from OBS to a cluster, prepare source data files and upload these files to OBS. If the data files have been stored on OBS, you only need to complete", + "product_code":"dws", + "title":"Uploading Data to OBS", + "uri":"dws_04_0184.html", + "doc_type":"devg", + "p_code":"16", + "code":"18" + }, + { + "desc":"format: format of the source data file in the foreign table. OBS foreign tables support CSV and TEXT formats. The default value is TEXT.header: Whether the data file cont", + "product_code":"dws", + "title":"Creating an OBS Foreign Table", + "uri":"dws_04_0185.html", + "doc_type":"devg", + "p_code":"16", + "code":"19" + }, + { + "desc":"Before importing data, you are advised to optimize your design and deployment based on the following excellent practices, helping maximize system resource utilization and", + "product_code":"dws", + "title":"Importing Data", + "uri":"dws_04_0186.html", + "doc_type":"devg", + "p_code":"16", + "code":"20" + }, + { + "desc":"Handle errors that occurred during data import.Errors that occur when data is imported are divided into data format errors and non-data format errors.Data format errorWhe", + "product_code":"dws", + "title":"Handling Import Errors", + "uri":"dws_04_0187.html", + "doc_type":"devg", + "p_code":"16", + "code":"21" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Importing ORC/CarbonData Data from OBS", + "uri":"dws_04_0155.html", + "doc_type":"devg", + "p_code":"14", + "code":"22" + }, + { + "desc":"Before you use the SQL on OBS feature to query OBS data:You have stored the ORC data on OBS.For example, the ORC table has been created when you use the Hive or Spark com", + "product_code":"dws", + "title":"Preparing Data on OBS", + "uri":"dws_04_0243.html", + "doc_type":"devg", + "p_code":"22", + "code":"23" + }, + { + "desc":"This section describes how to create a foreign server that is used to define the information about OBS servers and is invoked by foreign tables. For details about the syn", + "product_code":"dws", + "title":"Creating a Foreign Server", + "uri":"dws_04_0244.html", + "doc_type":"devg", + "p_code":"22", + "code":"24" + }, + { + "desc":"After performing steps in Creating a Foreign Server, create an OBS foreign table in the GaussDB(DWS) database to access the data stored in OBS. An OBS foreign table is re", + "product_code":"dws", + "title":"Creating a Foreign Table", + "uri":"dws_04_0245.html", + "doc_type":"devg", + "p_code":"22", + "code":"25" + }, + { + "desc":"If the data amount is small, you can directly run SELECT to query the foreign table and view the data on OBS.If the query result is the same as the data in Original Data,", + "product_code":"dws", + "title":"Querying Data on OBS Through Foreign Tables", + "uri":"dws_04_0246.html", + "doc_type":"devg", + "p_code":"22", + "code":"26" + }, + { + "desc":"After completing operations in this tutorial, if you no longer need to use the resources created during the operations, you can delete them to avoid resource waste or quo", + "product_code":"dws", + "title":"Deleting Resources", + "uri":"dws_04_0247.html", + "doc_type":"devg", + "p_code":"22", + "code":"27" + }, + { + "desc":"In the big data field, the mainstream file format is ORC, which is supported by GaussDB(DWS). You can use Hive to export data to an ORC file and use a read-only foreign t", + "product_code":"dws", + "title":"Supported Data Types", + "uri":"dws_04_0156.html", + "doc_type":"devg", + "p_code":"22", + "code":"28" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Using GDS to Import Data from a Remote Server", + "uri":"dws_04_0189.html", + "doc_type":"devg", + "p_code":"13", + "code":"29" + }, + { + "desc":"INSERT and COPY statements are serially executed to import a small volume of data. To import a large volume of data to GaussDB(DWS), you can use GDS to import data in par", + "product_code":"dws", + "title":"Importing Data In Parallel Using GDS", + "uri":"dws_04_0190.html", + "doc_type":"devg", + "p_code":"29", + "code":"30" + }, + { + "desc":"Generally, the data to be imported has been uploaded to the data server. In this case, you only need to check the communication between the data server and GaussDB(DWS), ", + "product_code":"dws", + "title":"Preparing Source Data", + "uri":"dws_04_0192.html", + "doc_type":"devg", + "p_code":"29", + "code":"31" + }, + { + "desc":"GaussDB(DWS) uses GDS to allocate the source data for parallel data import. 
Deploy GDS on the data server.If a large volume of data is stored on multiple data servers, in", + "product_code":"dws", + "title":"Installing, Configuring, and Starting GDS", + "uri":"dws_04_0193.html", + "doc_type":"devg", + "p_code":"29", + "code":"32" + }, + { + "desc":"The source data information and GDS access information are configured in a foreign table. Then, GaussDB(DWS) can import data from a data server to a database table based ", + "product_code":"dws", + "title":"Creating a GDS Foreign Table", + "uri":"dws_04_0194.html", + "doc_type":"devg", + "p_code":"29", + "code":"33" + }, + { + "desc":"This section describes how to create tables in GaussDB(DWS) and import data to the tables.Before importing all the data from a table containing over 10 million records, y", + "product_code":"dws", + "title":"Importing Data", + "uri":"dws_04_0195.html", + "doc_type":"devg", + "p_code":"29", + "code":"34" + }, + { + "desc":"Handle errors that occurred during data import.Errors that occur when data is imported are divided into data format errors and non-data format errors.Data format errorWhe", + "product_code":"dws", + "title":"Handling Import Errors", + "uri":"dws_04_0196.html", + "doc_type":"devg", + "p_code":"29", + "code":"35" + }, + { + "desc":"Stop GDS after data is imported successfully.If GDS is started using the gds command, perform the following operations to stop GDS:Query the GDS process ID:ps -ef|grep gd", + "product_code":"dws", + "title":"Stopping GDS", + "uri":"dws_04_0197.html", + "doc_type":"devg", + "p_code":"29", + "code":"36" + }, + { + "desc":"The data servers and the cluster reside on the same intranet. The IP addresses are 192.168.0.90 and 192.168.0.91. Source data files are in CSV format.Create the target ta", + "product_code":"dws", + "title":"Example of Importing Data Using GDS", + "uri":"dws_04_0198.html", + "doc_type":"devg", + "p_code":"29", + "code":"37" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Importing Data from MRS to a Cluster", + "uri":"dws_04_0210.html", + "doc_type":"devg", + "p_code":"13", + "code":"38" + }, + { + "desc":"MRS is a big data cluster running based on the open-source Hadoop ecosystem. It provides the industry's latest cutting-edge storage and analytical capabilities of massive", + "product_code":"dws", + "title":"Overview", + "uri":"dws_04_0066.html", + "doc_type":"devg", + "p_code":"38", + "code":"39" + }, + { + "desc":"Before importing data from MRS to a GaussDB(DWS) cluster, you must have:Created an MRS cluster.Created the Hive/Spark ORC table in the MRS cluster and stored the table da", + "product_code":"dws", + "title":"Preparing Data in an MRS Cluster", + "uri":"dws_04_0212.html", + "doc_type":"devg", + "p_code":"38", + "code":"40" + }, + { + "desc":"In the syntax CREATE FOREIGN TABLE (SQL on Hadoop or OBS) for creating a foreign table, you need to specify a foreign server associated with the MRS data source connectio", + "product_code":"dws", + "title":"Manually Creating a Foreign Server", + "uri":"dws_04_0213.html", + "doc_type":"devg", + "p_code":"38", + "code":"41" + }, + { + "desc":"This section describes how to create a Hadoop foreign table in the GaussDB(DWS) database to access the Hadoop structured data stored on MRS HDFS. 
A Hadoop foreign table i", + "product_code":"dws", + "title":"Creating a Foreign Table", + "uri":"dws_04_0214.html", + "doc_type":"devg", + "p_code":"38", + "code":"42" + }, + { + "desc":"If the data amount is small, you can directly run SELECT to query the foreign table and view the data in the MRS data source.If the query result is the same as the data i", + "product_code":"dws", + "title":"Importing Data", + "uri":"dws_04_0215.html", + "doc_type":"devg", + "p_code":"38", + "code":"43" + }, + { + "desc":"After completing operations in this tutorial, if you no longer need to use the resources created during the operations, you can delete them to avoid resource waste or quo", + "product_code":"dws", + "title":"Deleting Resources", + "uri":"dws_04_0216.html", + "doc_type":"devg", + "p_code":"38", + "code":"44" + }, + { + "desc":"The following error information indicates that GaussDB(DWS) is to read an ORC data file but the actual file is in text format. Therefore, create a table of the Hive ORC t", + "product_code":"dws", + "title":"Error Handling", + "uri":"dws_04_0217.html", + "doc_type":"devg", + "p_code":"38", + "code":"45" + }, + { + "desc":"You can create foreign tables to perform associated queries and import data between clusters.Import data from one GaussDB(DWS) cluster to another.Perform associated queri", + "product_code":"dws", + "title":"Importing Data from One GaussDB(DWS) Cluster to Another", + "uri":"dws_04_0949.html", + "doc_type":"devg", + "p_code":"13", + "code":"46" + }, + { + "desc":"The gsql tool of GaussDB(DWS) provides the \\copy meta-command to import data.For details about the \\copy command, see Table 1.tableSpecifies the name (possibly schema-qua", + "product_code":"dws", + "title":"Using the gsql Meta-Command \\COPY to Import Data", + "uri":"dws_04_0208.html", + "doc_type":"devg", + "p_code":"13", + "code":"47" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Running the COPY FROM STDIN Statement to Import Data", + "uri":"dws_04_0203.html", + "doc_type":"devg", + "p_code":"13", + "code":"48" + }, + { + "desc":"This method is applicable to low-concurrency scenarios where a small volume of data is to be imported.Use either of the following methods to write data to GaussDB(DWS) us", + "product_code":"dws", + "title":"Data Import Using COPY FROM STDIN", + "uri":"dws_04_0204.html", + "doc_type":"devg", + "p_code":"48", + "code":"49" + }, + { + "desc":"CopyManager is an API interface class provided by the JDBC driver in GaussDB(DWS). 
It is used to import data to GaussDB(DWS) in batches.The CopyManager class is in the or", + "product_code":"dws", + "title":"Introduction to the CopyManager Class", + "uri":"dws_04_0205.html", + "doc_type":"devg", + "p_code":"48", + "code":"50" + }, + { + "desc":"When the JAVA language is used for secondary development based on GaussDB(DWS), you can use the CopyManager interface to export data from the database to a local file or ", + "product_code":"dws", + "title":"Example: Importing and Exporting Data Through Local Files", + "uri":"dws_04_0206.html", + "doc_type":"devg", + "p_code":"48", + "code":"51" + }, + { + "desc":"The following example shows how to use CopyManager to migrate data from MySQL to GaussDB(DWS).", + "product_code":"dws", + "title":"Example: Migrating Data from MySQL to GaussDB(DWS)", + "uri":"dws_04_0207.html", + "doc_type":"devg", + "p_code":"48", + "code":"52" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Full Database Migration", + "uri":"dws_04_0986.html", + "doc_type":"devg", + "p_code":"11", + "code":"53" + }, + { + "desc":"You can use CDM to migrate data from other data sources (for example, MySQL) to the databases in clusters on GaussDB(DWS).For details about scenarios where CDM is used to", + "product_code":"dws", + "title":"Using CDM to Migrate Data to GaussDB(DWS)", + "uri":"dws_04_0219.html", + "doc_type":"devg", + "p_code":"53", + "code":"54" + }, + { + "desc":"The DSC is a CLI tool running on the Linux or Windows OS. It is dedicated to providing customers with simple, fast, and reliable application SQL script migration services", + "product_code":"dws", + "title":"Using DSC to Migrate SQL Scripts", + "uri":"dws_01_0127.html", + "doc_type":"devg", + "p_code":"53", + "code":"55" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Metadata Migration", + "uri":"dws_04_0987.html", + "doc_type":"devg", + "p_code":"11", + "code":"56" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Using gs_dump and gs_dumpall to Export Metadata", + "uri":"dws_04_0269.html", + "doc_type":"devg", + "p_code":"56", + "code":"57" + }, + { + "desc":"GaussDB(DWS) provides gs_dump and gs_dumpall to export required database objects and related information. To migrate database information, you can use a tool to import th", + "product_code":"dws", + "title":"Overview", + "uri":"dws_04_0270.html", + "doc_type":"devg", + "p_code":"57", + "code":"58" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Exporting a Single Database", + "uri":"dws_04_0271.html", + "doc_type":"devg", + "p_code":"57", + "code":"59" + }, + { + "desc":"You can use gs_dump to export data and all object definitions of a database from GaussDB(DWS). You can specify the information to be exported as follows:Export full infor", + "product_code":"dws", + "title":"Exporting a Database", + "uri":"dws_04_0272.html", + "doc_type":"devg", + "p_code":"59", + "code":"60" + }, + { + "desc":"You can use gs_dump to export data and all object definitions of a schema from GaussDB(DWS). You can export one or more specified schemas as needed. You can specify the i", + "product_code":"dws", + "title":"Exporting a Schema", + "uri":"dws_04_0273.html", + "doc_type":"devg", + "p_code":"59", + "code":"61" + }, + { + "desc":"You can use gs_dump to export data and all object definitions of a table-level object from GaussDB(DWS). Views, sequences, and foreign tables are special tables. You can ", + "product_code":"dws", + "title":"Exporting a Table", + "uri":"dws_04_0274.html", + "doc_type":"devg", + "p_code":"59", + "code":"62" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Exporting All Databases", + "uri":"dws_04_0275.html", + "doc_type":"devg", + "p_code":"57", + "code":"63" + }, + { + "desc":"You can use gs_dumpall to export full information of all databases in a cluster from GaussDB(DWS), including information about each database and global objects in the clu", + "product_code":"dws", + "title":"Exporting All Databases", + "uri":"dws_04_0276.html", + "doc_type":"devg", + "p_code":"63", + "code":"64" + }, + { + "desc":"You can use gs_dumpall to export global objects from GaussDB(DWS), including database users, user groups, tablespaces, and attributes (for example, global access permissi", + "product_code":"dws", + "title":"Exporting Global Objects", + "uri":"dws_04_0277.html", + "doc_type":"devg", + "p_code":"63", + "code":"65" + }, + { + "desc":"gs_dump and gs_dumpall use -U to specify the user that performs the export. If the specified user does not have the required permission, data cannot be exported. In this ", + "product_code":"dws", + "title":"Data Export By a User Without Required Permissions", + "uri":"dws_04_0278.html", + "doc_type":"devg", + "p_code":"57", + "code":"66" + }, + { + "desc":"gs_restore is an import tool provided by GaussDB(DWS). You can use gs_restore to import the files exported by gs_dump to a database. gs_restore can import the files in .t", + "product_code":"dws", + "title":"Using gs_restore to Import Data", + "uri":"dws_04_0209.html", + "doc_type":"devg", + "p_code":"56", + "code":"67" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Data Export", + "uri":"dws_04_0249.html", + "doc_type":"devg", + "p_code":"11", + "code":"68" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Exporting Data to OBS", + "uri":"dws_04_0250.html", + "doc_type":"devg", + "p_code":"68", + "code":"69" + }, + { + "desc":"GaussDB(DWS) databases allow you to export data in parallel using OBS foreign tables, in which the export mode and the exported data format are specified. Data is exporte", + "product_code":"dws", + "title":"Parallel OBS Data Export", + "uri":"dws_04_0251.html", + "doc_type":"devg", + "p_code":"69", + "code":"70" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Exporting CSV/TXT Data to OBS", + "uri":"dws_04_0157.html", + "doc_type":"devg", + "p_code":"69", + "code":"71" + }, + { + "desc":"Plan the storage location of exported data in OBS.You need to specify the OBS path (to directory) for storing data that you want to export. The exported data can be saved", + "product_code":"dws", + "title":"Planning Data Export", + "uri":"dws_04_0252.html", + "doc_type":"devg", + "p_code":"71", + "code":"72" + }, + { + "desc":"To obtain access keys, log in to the management console, click the username in the upper right corner, and select My Credential from the menu. Then choose Access Keys in ", + "product_code":"dws", + "title":"Creating an OBS Foreign Table", + "uri":"dws_04_0253.html", + "doc_type":"devg", + "p_code":"71", + "code":"73" + }, + { + "desc":"Example 1: Export data from table product_info_output to a data file through the product_info_output_ext foreign table.INSERT INTO product_info_output_ext SELECT * FROM p", + "product_code":"dws", + "title":"Exporting Data", + "uri":"dws_04_0254.html", + "doc_type":"devg", + "p_code":"71", + "code":"74" + }, + { + "desc":"Create two foreign tables and use them to export tables from a database to two buckets in OBS.OBS and the database are in the same region. The example GaussDB(DWS) table ", + "product_code":"dws", + "title":"Examples", + "uri":"dws_04_0255.html", + "doc_type":"devg", + "p_code":"71", + "code":"75" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Exporting ORC Data to OBS", + "uri":"dws_04_0256.html", + "doc_type":"devg", + "p_code":"69", + "code":"76" + }, + { + "desc":"For details about exporting data to OBS, see Planning Data Export.For details about the data types that can be exported to OBS, see Table 2.For details about HDFS data ex", + "product_code":"dws", + "title":"Planning Data Export", + "uri":"dws_04_0258.html", + "doc_type":"devg", + "p_code":"76", + "code":"77" + }, + { + "desc":"For details about creating a foreign server on OBS, see Creating a Foreign Server.For details about creating a foreign server in HDFS, see Manually Creating a Foreign Ser", + "product_code":"dws", + "title":"Creating a Foreign Server", + "uri":"dws_04_0259.html", + "doc_type":"devg", + "p_code":"76", + "code":"78" + }, + { + "desc":"After operations in Creating a Foreign Server are complete, create an OBS/HDFS write-only foreign table in the GaussDB(DWS) database to access data stored in OBS/HDFS. Th", + "product_code":"dws", + "title":"Creating a Foreign Table", + "uri":"dws_04_0260.html", + "doc_type":"devg", + "p_code":"76", + "code":"79" + }, + { + "desc":"Example 1: Export data from table product_info_output to a data file using the product_info_output_ext foreign table.INSERT INTO product_info_output_ext SELECT * FROM pro", + "product_code":"dws", + "title":"Exporting Data", + "uri":"dws_04_0158.html", + "doc_type":"devg", + "p_code":"76", + "code":"80" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Exporting ORC Data to MRS", + "uri":"dws_04_0159.html", + "doc_type":"devg", + "p_code":"68", + "code":"81" + }, + { + "desc":"GaussDB(DWS) allows you to export ORC data to MRS using an HDFS foreign table. You can specify the export mode and export data format in the foreign table. Data is export", + "product_code":"dws", + "title":"Overview", + "uri":"dws_04_0160.html", + "doc_type":"devg", + "p_code":"81", + "code":"82" + }, + { + "desc":"For details about the data types that can be exported to MRS, see Table 2.For details about HDFS data export or MRS configuration, see the MapReduce Service User Guide.", + "product_code":"dws", + "title":"Planning Data Export", + "uri":"dws_04_0161.html", + "doc_type":"devg", + "p_code":"81", + "code":"83" + }, + { + "desc":"For details about creating a foreign server on HDFS, see Manually Creating a Foreign Server.", + "product_code":"dws", + "title":"Creating a Foreign Server", + "uri":"dws_04_0162.html", + "doc_type":"devg", + "p_code":"81", + "code":"84" + }, + { + "desc":"After operations in Creating a Foreign Server are complete, create an HDFS write-only foreign table in the GaussDB(DWS) database to access data stored in HDFS. 
The foreig", + "product_code":"dws", + "title":"Creating a Foreign Table", + "uri":"dws_04_0163.html", + "doc_type":"devg", + "p_code":"81", + "code":"85" + }, + { + "desc":"Example 1: Export data from table product_info_output to a data file using the product_info_output_ext foreign table.INSERT INTO product_info_output_ext SELECT * FROM pro", + "product_code":"dws", + "title":"Exporting Data", + "uri":"dws_04_0164.html", + "doc_type":"devg", + "p_code":"81", + "code":"86" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Using GDS to Export Data to a Remote Server", + "uri":"dws_04_0261.html", + "doc_type":"devg", + "p_code":"68", + "code":"87" + }, + { + "desc":"In high-concurrency scenarios, you can use GDS to export data from a database to a common file system.In the current GDS version, data can be exported from a database to ", + "product_code":"dws", + "title":"Exporting Data In Parallel Using GDS", + "uri":"dws_04_0262.html", + "doc_type":"devg", + "p_code":"87", + "code":"88" + }, + { + "desc":"Before you use GDS to export data from a cluster, prepare data to be exported and plan the export path.Remote modeIf the following information is displayed, the user and ", + "product_code":"dws", + "title":"Planning Data Export", + "uri":"dws_04_0263.html", + "doc_type":"devg", + "p_code":"87", + "code":"89" + }, + { + "desc":"GDS is a data service tool provided by GaussDB(DWS). Using the foreign table mechanism, this tool helps export data at a high speed.For details, see Installing, Configuri", + "product_code":"dws", + "title":"Installing, Configuring, and Starting GDS", + "uri":"dws_04_0264.html", + "doc_type":"devg", + "p_code":"87", + "code":"90" + }, + { + "desc":"Remote modeSet the location parameter to the URL of the directory that stores the data files.You do not need to specify any file.For example:The IP address of the GDS dat", + "product_code":"dws", + "title":"Creating a GDS Foreign Table", + "uri":"dws_04_0265.html", + "doc_type":"devg", + "p_code":"87", + "code":"91" + }, + { + "desc":"Ensure that the IP addresses and ports of servers where CNs and DNs are deployed can connect to those of the GDS server.Create batch processing scripts to export data in ", + "product_code":"dws", + "title":"Exporting Data", + "uri":"dws_04_0266.html", + "doc_type":"devg", + "p_code":"87", + "code":"92" + }, + { + "desc":"GDS is a data service tool provided by GaussDB(DWS). Using the foreign table mechanism, this tool helps export data at a high speed.For details, see Stopping GDS.", + "product_code":"dws", + "title":"Stopping GDS", + "uri":"dws_04_0267.html", + "doc_type":"devg", + "p_code":"87", + "code":"93" + }, + { + "desc":"The data server and the cluster reside on the same intranet, the IP address of the data server is 192.168.0.90, and data source files are in CSV format. In this scenario,", + "product_code":"dws", + "title":"Examples of Exporting Data Using GDS", + "uri":"dws_04_0268.html", + "doc_type":"devg", + "p_code":"87", + "code":"94" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Other Operations", + "uri":"dws_04_0988.html", + "doc_type":"devg", + "p_code":"11", + "code":"95" + }, + { + "desc":"GDS supports concurrent import and export. The gds -t parameter is used to set the size of the thread pool and control the maximum number of concurrent working threads. B", + "product_code":"dws", + "title":"GDS Pipe FAQs", + "uri":"dws_04_0279.html", + "doc_type":"devg", + "p_code":"95", + "code":"96" + }, + { + "desc":"Data skew causes the query performance to deteriorate. Before importing all the data from a table consisting of over 10 million records, you are advised to import some of", + "product_code":"dws", + "title":"Checking for Data Skew", + "uri":"dws_04_0228.html", + "doc_type":"devg", + "p_code":"95", + "code":"97" + }, + { + "desc":"GaussDB(DWS) is compatible with Oracle, Teradata, and MySQL syntax, whose syntax behaviors differ.", + "product_code":"dws", + "title":"Syntax Compatibility Differences Among Oracle, Teradata, and MySQL", + "uri":"dws_04_0042.html", + "doc_type":"devg", + "p_code":"1", + "code":"98" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Database Security Management", + "uri":"dws_04_0043.html", + "doc_type":"devg", + "p_code":"1", + "code":"99" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Managing Users and Their Permissions", + "uri":"dws_04_0053.html", + "doc_type":"devg", + "p_code":"99", + "code":"100" + }, + { + "desc":"A user who creates an object is the owner of this object. By default, Separation of Permissions is disabled after cluster installation. A database system administrator ha", + "product_code":"dws", + "title":"Default Permission Mechanism", + "uri":"dws_04_0054.html", + "doc_type":"devg", + "p_code":"100", + "code":"101" + }, + { + "desc":"A system administrator is an account with the SYSADMIN permission. After a cluster is installed, a system administrator has the permissions of all object owners by defaul", + "product_code":"dws", + "title":"System Administrator", + "uri":"dws_04_0055.html", + "doc_type":"devg", + "p_code":"100", + "code":"102" + }, + { + "desc":"Descriptions in Default Permission Mechanism and System Administrator are about the initial situation after a cluster is created. By default, a system administrator with ", + "product_code":"dws", + "title":"Separation of Permissions", + "uri":"dws_04_0056.html", + "doc_type":"devg", + "p_code":"100", + "code":"103" + }, + { + "desc":"You can use CREATE USER and ALTER USER to create and manage database users, respectively. The database cluster has one or more named databases. Users and roles are shared", + "product_code":"dws", + "title":"Users", + "uri":"dws_04_0057.html", + "doc_type":"devg", + "p_code":"100", + "code":"104" + }, + { + "desc":"A role is a set of permissions. 
After a role is granted to a user through GRANT, the user will have all the permissions of the role. It is recommended that roles be used ", + "product_code":"dws", + "title":"Roles", + "uri":"dws_04_0058.html", + "doc_type":"devg", + "p_code":"100", + "code":"105" + }, + { + "desc":"Schemas function as models. Schema management allows multiple users to use the same database without mutual impacts, to organize database objects as manageable logical gr", + "product_code":"dws", + "title":"Schema", + "uri":"dws_04_0059.html", + "doc_type":"devg", + "p_code":"100", + "code":"106" + }, + { + "desc":"To grant the permission for an object directly to a user, use GRANT.When permissions for a table or view in a schema are granted to a user or role, the USAGE permission o", + "product_code":"dws", + "title":"User Permission Setting", + "uri":"dws_04_0060.html", + "doc_type":"devg", + "p_code":"100", + "code":"107" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Setting Security Policies", + "uri":"dws_04_0063.html", + "doc_type":"devg", + "p_code":"100", + "code":"108" + }, + { + "desc":"For data security purposes, GaussDB(DWS) provides a series of security measures, such as automatically locking and unlocking accounts, manually locking and unlocking abno", + "product_code":"dws", + "title":"Setting Account Security Policies", + "uri":"dws_04_0064.html", + "doc_type":"devg", + "p_code":"108", + "code":"109" + }, + { + "desc":"When creating a user, you need to specify the validity period of the user, including the start time and end time.To enable a user not within the validity period to use it", + "product_code":"dws", + "title":"Setting the Validity Period of an Account", + "uri":"dws_04_0065.html", + "doc_type":"devg", + "p_code":"108", + "code":"110" + }, + { + "desc":"User passwords are stored in the system catalog pg_authid. To prevent password leakage, GaussDB(DWS) encrypts and stores the user passwords.Password complexityThe passwor", + "product_code":"dws", + "title":"Setting a User Password", + "uri":"dws_04_0067.html", + "doc_type":"devg", + "p_code":"108", + "code":"111" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Sensitive Data Management", + "uri":"dws_04_0994.html", + "doc_type":"devg", + "p_code":"99", + "code":"112" + }, + { + "desc":"The row-level access control feature enables database access control to be accurate to each row of data tables. In this way, the same SQL query may return different resul", + "product_code":"dws", + "title":"Row-Level Access Control", + "uri":"dws_04_0061.html", + "doc_type":"devg", + "p_code":"112", + "code":"113" + }, + { + "desc":"GaussDB(DWS) provides the column-level dynamic data masking (DDM) function. 
For sensitive data, such as the ID card number, mobile number, and bank card number, the DDM f", + "product_code":"dws", + "title":"Data Redaction", + "uri":"dws_04_0062.html", + "doc_type":"devg", + "p_code":"112", + "code":"114" + }, + { + "desc":"GaussDB(DWS) supports encryption and decryption of strings using the following functions:gs_encrypt(encryptstr, keystr, cryptotype, cryptomode, hashmethod)Description: En", + "product_code":"dws", + "title":"Using Functions for Encryption and Decryption", + "uri":"dws_04_0995.html", + "doc_type":"devg", + "p_code":"112", + "code":"115" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Development and Design Proposal", + "uri":"dws_04_0074.html", + "doc_type":"devg", + "p_code":"1", + "code":"116" + }, + { + "desc":"This chapter describes the design specifications for database modeling and application development. Modeling compliant with these specifications fits the distributed proc", + "product_code":"dws", + "title":"Development and Design Proposal", + "uri":"dws_04_0075.html", + "doc_type":"devg", + "p_code":"116", + "code":"117" + }, + { + "desc":"The name of a database object must contain 1 to 63 characters, start with a letter or underscore (_), and can contain letters, digits, underscores (_), dollar signs ($), ", + "product_code":"dws", + "title":"Database Object Naming Conventions", + "uri":"dws_04_0076.html", + "doc_type":"devg", + "p_code":"116", + "code":"118" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Database Object Design", + "uri":"dws_04_0077.html", + "doc_type":"devg", + "p_code":"116", + "code":"119" + }, + { + "desc":"In GaussDB(DWS), services can be isolated by databases and schemas. Databases share little resources and cannot directly access each other. Connections to and permissions", + "product_code":"dws", + "title":"Database and Schema Design", + "uri":"dws_04_0078.html", + "doc_type":"devg", + "p_code":"119", + "code":"120" + }, + { + "desc":"GaussDB(DWS) uses a distributed architecture. Data is distributed on DNs. 
Comply with the following principles to properly design a table:[Notice] Evenly distribute data ", + "product_code":"dws", + "title":"Table Design", + "uri":"dws_04_0079.html", + "doc_type":"devg", + "p_code":"119", + "code":"121" + }, + { + "desc":"Comply with the following rules to improve query efficiency when you design columns:[Proposal] Use the most efficient data types allowed.If all of the following number ty", + "product_code":"dws", + "title":"Column Design", + "uri":"dws_04_0080.html", + "doc_type":"devg", + "p_code":"119", + "code":"122" + }, + { + "desc":"[Proposal] If all the column values can be obtained from services, you are not advised to use the DEFAULT constraint, because doing so will generate unexpected results du", + "product_code":"dws", + "title":"Constraint Design", + "uri":"dws_04_0081.html", + "doc_type":"devg", + "p_code":"119", + "code":"123" + }, + { + "desc":"[Proposal] Do not nest views unless they have strong dependency on each other.[Proposal] Try to avoid sort operations in a view definition.[Proposal] Minimize joined colu", + "product_code":"dws", + "title":"View and Joined Table Design", + "uri":"dws_04_0082.html", + "doc_type":"devg", + "p_code":"119", + "code":"124" + }, + { + "desc":"Currently, third-party tools are connected to GaussDB(DWS) through JDBC. This section describes the precautions for configuring the tools.[Notice] When a third-party tool ", + "product_code":"dws", + "title":"JDBC Configuration", + "uri":"dws_04_0083.html", + "doc_type":"devg", + "p_code":"116", + "code":"125" + }, + { + "desc":"[Proposal] In GaussDB(DWS), you are advised to execute DDL operations, such as creating tables or making comments, separately from batch processing jobs to avoid performan", + "product_code":"dws", + "title":"SQL Compilation", + "uri":"dws_04_0084.html", + "doc_type":"devg", + "p_code":"116", + "code":"126" + }, + { + "desc":"[Notice] Java UDFs can perform some Java logic calculation. Do not encapsulate services in Java UDFs.[Notice] Do not connect to a database in any way (for example, by usi", + "product_code":"dws", + "title":"PL/Java Usage", + "uri":"dws_04_0971.html", + "doc_type":"devg", + "p_code":"116", + "code":"127" + }, + { + "desc":"Development shall strictly comply with design documents.Program modules shall be highly cohesive and loosely coupled.Proper, comprehensive troubleshooting measures shall ", + "product_code":"dws", + "title":"PL/pgSQL Usage", + "uri":"dws_04_0972.html", + "doc_type":"devg", + "p_code":"116", + "code":"128" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Guide: JDBC- or ODBC-Based Development", + "uri":"dws_04_0085.html", + "doc_type":"devg", + "p_code":"1", + "code":"129" + }, + { + "desc":"If the connection pool mechanism is used during application development, comply with the following specifications:If GUC parameters are set in the connection, before you ", + "product_code":"dws", + "title":"Development Specifications", + "uri":"dws_04_0086.html", + "doc_type":"devg", + "p_code":"129", + "code":"130" + }, + { + "desc":"For details, see section \"Downloading the JDBC or ODBC Driver\" in the Data Warehouse Service User Guide.", + "product_code":"dws", + "title":"Downloading Drivers", + "uri":"dws_04_0087.html", + "doc_type":"devg", + "p_code":"129", + "code":"131" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"JDBC-Based Development", + "uri":"dws_04_0088.html", + "doc_type":"devg", + "p_code":"129", + "code":"132" + }, + { + "desc":"Obtain the package dws_8.1.x_jdbc_driver.zip from the management console. For details, see Downloading Drivers.The package contains the JDBC driver JAR package: gsjdbc4.jar", + "product_code":"dws", + "title":"JDBC Package and Driver Class", + "uri":"dws_04_0090.html", + "doc_type":"devg", + "p_code":"132", + "code":"133" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Development Process", + "uri":"dws_04_0091.html", + "doc_type":"devg", + "p_code":"132", + "code":"134" + }, + { + "desc":"Load the database driver before creating a database connection.You can load the driver in the following ways:Implicitly loading the driver before creating a connection in", + "product_code":"dws", + "title":"Loading a Driver", + "uri":"dws_04_0092.html", + "doc_type":"devg", + "p_code":"132", + "code":"135" + }, + { + "desc":"After a database is connected, you can execute SQL statements in the database.If you use an open-source Java Database Connectivity (JDBC) driver, ensure that the database", + "product_code":"dws", + "title":"Connecting to a Database", + "uri":"dws_04_0093.html", + "doc_type":"devg", + "p_code":"132", + "code":"136" + }, + { + "desc":"The application operates on data in the database by running SQL statements (without transferring parameters), and you need to perform the following steps:", + "product_code":"dws", + "title":"Executing SQL Statements", + "uri":"dws_04_0095.html", + "doc_type":"devg", + "p_code":"132", + "code":"137" + }, + { + "desc":"Different types of result sets are applicable to different application scenarios. Applications select proper types of result sets based on requirements. 
Before executing ", + "product_code":"dws", + "title":"Processing Data in a Result Set", + "uri":"dws_04_0096.html", + "doc_type":"devg", + "p_code":"132", + "code":"138" + }, + { + "desc":"After you complete required data operations in the database, close the database connection.Call the close method to close the connection, for example, conn.close().", + "product_code":"dws", + "title":"Closing the Connection", + "uri":"dws_04_0097.html", + "doc_type":"devg", + "p_code":"132", + "code":"139" + }, + { + "desc":"Before completing the following example, you need to create a stored procedure.This example illustrates how to develop applications based on the GaussDB(DWS) JDBC interfa", + "product_code":"dws", + "title":"Example: Common Operations", + "uri":"dws_04_0098.html", + "doc_type":"devg", + "p_code":"132", + "code":"140" + }, + { + "desc":"If the primary DN is faulty and cannot be restored within 40s, its standby is automatically promoted to primary to ensure the normal running of the cluster. Jobs running ", + "product_code":"dws", + "title":"Example: Retrying SQL Queries for Applications", + "uri":"dws_04_0099.html", + "doc_type":"devg", + "p_code":"132", + "code":"141" + }, + { + "desc":"When the Java language is used for secondary development based on GaussDB(DWS), you can use the CopyManager interface to export data from the database to a local file or ", + "product_code":"dws", + "title":"Example: Importing and Exporting Data Through Local Files", + "uri":"dws_04_0100.html", + "doc_type":"devg", + "p_code":"132", + "code":"142" + }, + { + "desc":"The following example shows how to use CopyManager to migrate data from MySQL to GaussDB(DWS).", + "product_code":"dws", + "title":"Example: Migrating Data from MySQL to GaussDB(DWS)", + "uri":"dws_04_0101.html", + "doc_type":"devg", + "p_code":"132", + "code":"143" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"JDBC Interface Reference", + "uri":"dws_04_0102.html", + "doc_type":"devg", + "p_code":"132", + "code":"144" + }, + { + "desc":"This section describes java.sql.Connection, the interface for connecting to a database.The AutoCommit mode is used by default within the interface. 
If you disable it runn", + "product_code":"dws", + "title":"java.sql.Connection", + "uri":"dws_04_0103.html", + "doc_type":"devg", + "p_code":"144", + "code":"145" + }, + { + "desc":"This section describes java.sql.CallableStatement, the stored procedure execution interface.The batch operation of statements containing an OUT parameter is not allowed.The ", + "product_code":"dws", + "title":"java.sql.CallableStatement", + "uri":"dws_04_0104.html", + "doc_type":"devg", + "p_code":"144", + "code":"146" + }, + { + "desc":"This section describes java.sql.DatabaseMetaData, the interface for defining database objects.", + "product_code":"dws", + "title":"java.sql.DatabaseMetaData", + "uri":"dws_04_0105.html", + "doc_type":"devg", + "p_code":"144", + "code":"147" + }, + { + "desc":"This section describes java.sql.Driver, the database driver interface.", + "product_code":"dws", + "title":"java.sql.Driver", + "uri":"dws_04_0106.html", + "doc_type":"devg", + "p_code":"144", + "code":"148" + }, + { + "desc":"This section describes java.sql.PreparedStatement, the interface for preparing statements.Execute addBatch() and execute() only after running clearBatch().Batch is not cl", + "product_code":"dws", + "title":"java.sql.PreparedStatement", + "uri":"dws_04_0107.html", + "doc_type":"devg", + "p_code":"144", + "code":"149" + }, + { + "desc":"This section describes java.sql.ResultSet, the interface for execution result sets.One Statement cannot have multiple open ResultSets.The cursor that is used for traversi", + "product_code":"dws", + "title":"java.sql.ResultSet", + "uri":"dws_04_0108.html", + "doc_type":"devg", + "p_code":"144", + "code":"150" + }, + { + "desc":"This section describes java.sql.ResultSetMetaData, which provides details about ResultSet object information.", + "product_code":"dws", + "title":"java.sql.ResultSetMetaData", + "uri":"dws_04_0109.html", + "doc_type":"devg", + "p_code":"144", + "code":"151" + }, + { + "desc":"This section describes java.sql.Statement, the interface for executing SQL statements.Using setFetchSize can reduce the memory occupied by result sets on the client. 
Resu", + "product_code":"dws", + "title":"java.sql.Statement", + "uri":"dws_04_0110.html", + "doc_type":"devg", + "p_code":"144", + "code":"152" + }, + { + "desc":"This section describes javax.sql.ConnectionPoolDataSource, the interface for data source connection pools.", + "product_code":"dws", + "title":"javax.sql.ConnectionPoolDataSource", + "uri":"dws_04_0111.html", + "doc_type":"devg", + "p_code":"144", + "code":"153" + }, + { + "desc":"This section describes javax.sql.DataSource, the interface for data sources.", + "product_code":"dws", + "title":"javax.sql.DataSource", + "uri":"dws_04_0112.html", + "doc_type":"devg", + "p_code":"144", + "code":"154" + }, + { + "desc":"This section describes javax.sql.PooledConnection, the connection interface created by a connection pool.", + "product_code":"dws", + "title":"javax.sql.PooledConnection", + "uri":"dws_04_0113.html", + "doc_type":"devg", + "p_code":"144", + "code":"155" + }, + { + "desc":"This section describes javax.naming.Context, the context interface for connection configuration.", + "product_code":"dws", + "title":"javax.naming.Context", + "uri":"dws_04_0114.html", + "doc_type":"devg", + "p_code":"144", + "code":"156" + }, + { + "desc":"This section describes javax.naming.spi.InitialContextFactory, the initial context factory interface.", + "product_code":"dws", + "title":"javax.naming.spi.InitialContextFactory", + "uri":"dws_04_0115.html", + "doc_type":"devg", + "p_code":"144", + "code":"157" + }, + { + "desc":"CopyManager is an API interface class provided by the JDBC driver in GaussDB(DWS). It is used to import data to GaussDB(DWS) in batches.The CopyManager class is in the or", + "product_code":"dws", + "title":"CopyManager", + "uri":"dws_04_0116.html", + "doc_type":"devg", + "p_code":"144", + "code":"158" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"ODBC-Based Development", + "uri":"dws_04_0117.html", + "doc_type":"devg", + "p_code":"129", + "code":"159" + }, + { + "desc":"Obtain the dws_8.1.x_odbc_driver_for_xxx_xxx.zip package from the release package. In the Linux OS, header files (including sql.h and sqlext.h) and library (libodbc.so) a", + "product_code":"dws", + "title":"ODBC Package and Its Dependent Libraries and Header Files", + "uri":"dws_04_0118.html", + "doc_type":"devg", + "p_code":"159", + "code":"160" + }, + { + "desc":"The ODBC DRIVER (psqlodbcw.so) provided by GaussDB(DWS) can be used after it has been configured in the data source. To configure data sources, users must configure the o", + "product_code":"dws", + "title":"Configuring a Data Source in the Linux OS", + "uri":"dws_04_0119.html", + "doc_type":"devg", + "p_code":"159", + "code":"161" + }, + { + "desc":"Configure the ODBC data source using the ODBC data source manager preinstalled in the Windows OS.Decompress GaussDB-8.1.1-Windows-Odbc.tar.gz and install psqlodbc.msi (fo", + "product_code":"dws", + "title":"Configuring a Data Source in the Windows OS", + "uri":"dws_04_0120.html", + "doc_type":"devg", + "p_code":"159", + "code":"162" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"ODBC Development Example", + "uri":"dws_04_0123.html", + "doc_type":"devg", + "p_code":"159", + "code":"163" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"ODBC Interfaces", + "uri":"dws_04_0124.html", + "doc_type":"devg", + "p_code":"159", + "code":"164" + }, + { + "desc":"In ODBC 3.x, SQLAllocEnv (an ODBC 2.x function) was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.", + "product_code":"dws", + "title":"SQLAllocEnv", + "uri":"dws_04_0125.html", + "doc_type":"devg", + "p_code":"164", + "code":"165" + }, + { + "desc":"In ODBC 3.x, SQLAllocConnect (an ODBC 2.x function) was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.", + "product_code":"dws", + "title":"SQLAllocConnect", + "uri":"dws_04_0126.html", + "doc_type":"devg", + "p_code":"164", + "code":"166" + }, + { + "desc":"SQLAllocHandle allocates environment, connection, or statement handles. This function is a generic function for allocating handles that replaces the deprecated ODBC 2.x f", + "product_code":"dws", + "title":"SQLAllocHandle", + "uri":"dws_04_0127.html", + "doc_type":"devg", + "p_code":"164", + "code":"167" + }, + { + "desc":"In ODBC 3.x, SQLAllocStmt was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.", + "product_code":"dws", + "title":"SQLAllocStmt", + "uri":"dws_04_0128.html", + "doc_type":"devg", + "p_code":"164", + "code":"168" + }, + { + "desc":"SQLBindCol is used to associate (bind) columns in a result set to an application data buffer.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates", + "product_code":"dws", + "title":"SQLBindCol", + "uri":"dws_04_0129.html", + "doc_type":"devg", + "p_code":"164", + "code":"169" + }, + { + "desc":"SQLBindParameter is used to associate (bind) parameter markers in an SQL statement to a buffer.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicat", + "product_code":"dws", + "title":"SQLBindParameter", + "uri":"dws_04_0130.html", + "doc_type":"devg", + "p_code":"164", + "code":"170" + }, + { + "desc":"SQLColAttribute returns the descriptor information about a column in the result set.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some war", + "product_code":"dws", + "title":"SQLColAttribute", + "uri":"dws_04_0131.html", + "doc_type":"devg", + "p_code":"164", + "code":"171" + }, + { + "desc":"SQLConnect establishes a connection between a driver and a data source. 
After the connection, the connection handle can be used to access all information about the data s", + "product_code":"dws", + "title":"SQLConnect", + "uri":"dws_04_0132.html", + "doc_type":"devg", + "p_code":"164", + "code":"172" + }, + { + "desc":"SQLDisconnect closes the connection associated with the database connection handle.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warn", + "product_code":"dws", + "title":"SQLDisconnect", + "uri":"dws_04_0133.html", + "doc_type":"devg", + "p_code":"164", + "code":"173" + }, + { + "desc":"SQLExecDirect executes a prepared SQL statement specified in this parameter. This is the fastest execution method for executing only one SQL statement at a time.SQL_SUCCE", + "product_code":"dws", + "title":"SQLExecDirect", + "uri":"dws_04_0134.html", + "doc_type":"devg", + "p_code":"164", + "code":"174" + }, + { + "desc":"The SQLExecute function executes a prepared SQL statement using SQLPrepare. The statement is executed using the current value of any application variables that were bound", + "product_code":"dws", + "title":"SQLExecute", + "uri":"dws_04_0135.html", + "doc_type":"devg", + "p_code":"164", + "code":"175" + }, + { + "desc":"SQLFetch advances the cursor to the next row of the result set and retrieves any bound columns.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicat", + "product_code":"dws", + "title":"SQLFetch", + "uri":"dws_04_0136.html", + "doc_type":"devg", + "p_code":"164", + "code":"176" + }, + { + "desc":"In ODBC 3.x, SQLFreeStmt (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.", + "product_code":"dws", + "title":"SQLFreeStmt", + "uri":"dws_04_0137.html", + "doc_type":"devg", + "p_code":"164", + "code":"177" + }, + { + "desc":"In ODBC 3.x, SQLFreeConnect (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.", + "product_code":"dws", + "title":"SQLFreeConnect", + "uri":"dws_04_0138.html", + "doc_type":"devg", + "p_code":"164", + "code":"178" + }, + { + "desc":"SQLFreeHandle releases resources associated with a specific environment, connection, or statement handle. It replaces the ODBC 2.x functions: SQLFreeEnv, SQLFreeConnect, ", + "product_code":"dws", + "title":"SQLFreeHandle", + "uri":"dws_04_0139.html", + "doc_type":"devg", + "p_code":"164", + "code":"179" + }, + { + "desc":"In ODBC 3.x, SQLFreeEnv (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.", + "product_code":"dws", + "title":"SQLFreeEnv", + "uri":"dws_04_0140.html", + "doc_type":"devg", + "p_code":"164", + "code":"180" + }, + { + "desc":"SQLPrepare prepares an SQL statement to be executed.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.SQ", + "product_code":"dws", + "title":"SQLPrepare", + "uri":"dws_04_0141.html", + "doc_type":"devg", + "p_code":"164", + "code":"181" + }, + { + "desc":"SQLGetData retrieves data for a single column in the current row of the result set. 
It can be called multiple times to retrieve data of variable length.SQL_SUCCESS indic", + "product_code":"dws", + "title":"SQLGetData", + "uri":"dws_04_0142.html", + "doc_type":"devg", + "p_code":"164", + "code":"182" + }, + { + "desc":"SQLGetDiagRec returns the current values of multiple fields of a diagnostic record that contains error, warning, and status information.SQL_SUCCESS indicates that the cal", + "product_code":"dws", + "title":"SQLGetDiagRec", + "uri":"dws_04_0143.html", + "doc_type":"devg", + "p_code":"164", + "code":"183" + }, + { + "desc":"SQLSetConnectAttr sets connection attributes.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.SQL_ERROR", + "product_code":"dws", + "title":"SQLSetConnectAttr", + "uri":"dws_04_0144.html", + "doc_type":"devg", + "p_code":"164", + "code":"184" + }, + { + "desc":"SQLSetEnvAttr sets environment attributes.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.SQL_ERROR in", + "product_code":"dws", + "title":"SQLSetEnvAttr", + "uri":"dws_04_0145.html", + "doc_type":"devg", + "p_code":"164", + "code":"185" + }, + { + "desc":"SQLSetStmtAttr sets attributes related to a statement.SQL_SUCCESS indicates that the call succeeded.SQL_SUCCESS_WITH_INFO indicates some warning information is displayed.", + "product_code":"dws", + "title":"SQLSetStmtAttr", + "uri":"dws_04_0146.html", + "doc_type":"devg", + "p_code":"164", + "code":"186" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"PostGIS Extension", + "uri":"dws_04_0301.html", + "doc_type":"devg", + "p_code":"1", + "code":"187" + }, + { + "desc":"The third-party software that the PostGIS Extension depends on needs to be installed separately. If you need to use PostGIS, submit a service ticket or contact technical ", + "product_code":"dws", + "title":"PostGIS", + "uri":"dws_04_0302.html", + "doc_type":"devg", + "p_code":"187", + "code":"188" + }, + { + "desc":"The third-party software that the PostGIS Extension depends on needs to be installed separately. If you need to use PostGIS, submit a service ticket or contact technical ", + "product_code":"dws", + "title":"Using PostGIS", + "uri":"dws_04_0304.html", + "doc_type":"devg", + "p_code":"187", + "code":"189" + }, + { + "desc":"In GaussDB(DWS), PostGIS Extension supports the following data types: box2d, box3d, geometry_dump, geometry, geography, and raster.If PostGIS is used by a user other than the creator of t", + "product_code":"dws", + "title":"PostGIS Support and Constraints", + "uri":"dws_04_0305.html", + "doc_type":"devg", + "p_code":"187", + "code":"190" + }, + { + "desc":"This document contains open source software notice for the product. And this document is confidential information of copyright holder. Recipient shall protect it in due c", + "product_code":"dws", + "title":"OPEN SOURCE SOFTWARE NOTICE (For PostGIS)", + "uri":"dws_04_0306.html", + "doc_type":"devg", + "p_code":"187", + "code":"191" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Resource Monitoring", + "uri":"dws_04_0393.html", + "doc_type":"devg", + "p_code":"1", + "code":"192" + }, + { + "desc":"In the multi-tenant management framework, you can query the real-time or historical usage of all user resources (including memory, CPU cores, storage space, temporary spa", + "product_code":"dws", + "title":"User Resource Query", + "uri":"dws_04_0394.html", + "doc_type":"devg", + "p_code":"192", + "code":"193" + }, + { + "desc":"GaussDB(DWS) provides a view for monitoring the memory usage of the entire cluster.Query the pgxc_total_memory_detail view as a user with sysadmin permissions.SELECT * FR", + "product_code":"dws", + "title":"Monitoring Memory Resources", + "uri":"dws_04_0395.html", + "doc_type":"devg", + "p_code":"192", + "code":"194" + }, + { + "desc":"GaussDB(DWS) provides system catalogs for monitoring the resource usage of CNs and DNs (including memory, CPU usage, disk I/O, process physical I/O, and process logical I", + "product_code":"dws", + "title":"Instance Resource Monitoring", + "uri":"dws_04_0396.html", + "doc_type":"devg", + "p_code":"192", + "code":"195" + }, + { + "desc":"You can query real-time Top SQL in real-time resource monitoring views at different levels. The real-time resource monitoring view records the resource usage (including m", + "product_code":"dws", + "title":"Real-time TopSQL", + "uri":"dws_04_0397.html", + "doc_type":"devg", + "p_code":"192", + "code":"196" + }, + { + "desc":"You can query historical Top SQL in historical resource monitoring views. The historical resource monitoring view records the resource usage (of memory, disk, CPU time, a", + "product_code":"dws", + "title":"Historical TopSQL", + "uri":"dws_04_0398.html", + "doc_type":"devg", + "p_code":"192", + "code":"197" + }, + { + "desc":"In this section, TPC-DS sample data is used as an example to describe how to query Real-time TopSQL and Historical TopSQL.To query for historical or archived resource mon", + "product_code":"dws", + "title":"TopSQL Query Example", + "uri":"dws_04_0399.html", + "doc_type":"devg", + "p_code":"192", + "code":"198" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Query Performance Optimization", + "uri":"dws_04_0400.html", + "doc_type":"devg", + "p_code":"1", + "code":"199" + }, + { + "desc":"The aim of SQL optimization is to maximize the utilization of resources, including CPU, memory, disk I/O, and network I/O. To maximize resource utilization is to run SQL ", + "product_code":"dws", + "title":"Overview of Query Performance Optimization", + "uri":"dws_04_0402.html", + "doc_type":"devg", + "p_code":"199", + "code":"200" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Query Analysis", + "uri":"dws_04_0403.html", + "doc_type":"devg", + "p_code":"199", + "code":"201" + }, + { + "desc":"The process from receiving SQL statements to the statement execution by the SQL engine is shown in Figure 1 and Table 1. The texts in red are steps where database adminis", + "product_code":"dws", + "title":"Query Execution Process", + "uri":"dws_04_0409.html", + "doc_type":"devg", + "p_code":"201", + "code":"202" + }, + { + "desc":"The SQL execution plan is a node tree, which displays the detailed procedure when GaussDB(DWS) runs an SQL statement. A database operator indicates one step.You can run the E", + "product_code":"dws", + "title":"Overview of the SQL Execution Plan", + "uri":"dws_04_0410.html", + "doc_type":"devg", + "p_code":"201", + "code":"203" + }, + { + "desc":"As described in Overview of the SQL Execution Plan, EXPLAIN displays the execution plan, but will not actually run SQL statements. EXPLAIN ANALYZE and EXPLAIN PERFORMANCE", + "product_code":"dws", + "title":"Deep Dive on the SQL Execution Plan", + "uri":"dws_04_0411.html", + "doc_type":"devg", + "p_code":"201", + "code":"204" + }, + { + "desc":"This section describes how to query SQL statements whose execution takes a long time, leading to poor system performance.After the query, query statements are returned as", + "product_code":"dws", + "title":"Querying SQL Statements That Affect Performance Most", + "uri":"dws_04_0412.html", + "doc_type":"devg", + "p_code":"201", + "code":"205" + }, + { + "desc":"During database running, query statements are blocked in some service scenarios and run for an excessively long time. In this case, you can forcibly terminate the faulty ", + "product_code":"dws", + "title":"Checking Blocked Statements", + "uri":"dws_04_0413.html", + "doc_type":"devg", + "p_code":"201", + "code":"206" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Query Improvement", + "uri":"dws_04_0430.html", + "doc_type":"devg", + "p_code":"199", + "code":"207" + }, + { + "desc":"You can analyze slow SQL statements to optimize them.", + "product_code":"dws", + "title":"Optimization Process", + "uri":"dws_04_0435.html", + "doc_type":"devg", + "p_code":"207", + "code":"208" + }, + { + "desc":"In a database, statistics indicate the source data of a plan generated by a planner. If no statistics have been collected or the statistics are out of date, the execution plan may seri", + "product_code":"dws", + "title":"Updating Statistics", + "uri":"dws_04_0436.html", + "doc_type":"devg", + "p_code":"207", + "code":"209" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Reviewing and Modifying a Table Definition", + "uri":"dws_04_0437.html", + "doc_type":"devg", + "p_code":"207", + "code":"210" + }, + { + "desc":"In a distributed framework, data is distributed on DNs. Data on one or more DNs is stored on a physical storage device. 
To properly define a table, you must:Evenly distri", + "product_code":"dws", + "title":"Reviewing and Modifying a Table Definition", + "uri":"dws_04_0438.html", + "doc_type":"devg", + "p_code":"210", + "code":"211" + }, + { + "desc":"During database design, some key factors about table design will greatly affect the subsequent query performance of the database. Table design affects data storage as wel", + "product_code":"dws", + "title":"Selecting a Storage Model", + "uri":"dws_04_0439.html", + "doc_type":"devg", + "p_code":"210", + "code":"212" + }, + { + "desc":"In replication mode, full data in a table is copied to each DN in the cluster. This mode is used for tables containing a small volume of data. Full data in a table stored", + "product_code":"dws", + "title":"Selecting a Distribution Mode", + "uri":"dws_04_0440.html", + "doc_type":"devg", + "p_code":"210", + "code":"213" + }, + { + "desc":"The distribution column in a hash table must meet the following requirements, which are ranked by priority in descending order:The value of the distribution column should", + "product_code":"dws", + "title":"Selecting a Distribution Column", + "uri":"dws_04_0441.html", + "doc_type":"devg", + "p_code":"210", + "code":"214" + }, + { + "desc":"Partial Cluster Key is a column-based technology. It can minimize or maximize sparse indexes to quickly filter base tables. Partial cluster key can specify multiple col", + "product_code":"dws", + "title":"Using Partial Clustering", + "uri":"dws_04_0442.html", + "doc_type":"devg", + "p_code":"210", + "code":"215" + }, + { + "desc":"Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The table based on the logic is called a partit", + "product_code":"dws", + "title":"Using Partitioned Tables", + "uri":"dws_04_0443.html", + "doc_type":"devg", + "p_code":"210", + "code":"216" + }, + { + "desc":"Use the following principles to obtain efficient data types:Using the data type that can be efficiently executedGenerally, calculation of integers (including common compa", + "product_code":"dws", + "title":"Selecting a Data Type", + "uri":"dws_04_0444.html", + "doc_type":"devg", + "p_code":"210", + "code":"217" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Typical SQL Optimization Methods", + "uri":"dws_04_0445.html", + "doc_type":"devg", + "p_code":"207", + "code":"218" + }, + { + "desc":"Performance issues may occur when you query data or run the INSERT, DELETE, UPDATE, or CREATE TABLE AS statement. You can query the warning column in the GS_WLM_SESSION_S", + "product_code":"dws", + "title":"SQL Self-Diagnosis", + "uri":"dws_04_0446.html", + "doc_type":"devg", + "p_code":"218", + "code":"219" + }, + { + "desc":"Currently, the GaussDB(DWS) optimizer can use three methods to develop statement execution policies in the distributed framework: generating a statement pushdown plan, a ", + "product_code":"dws", + "title":"Optimizing Statement Pushdown", + "uri":"dws_04_0447.html", + "doc_type":"devg", + "p_code":"218", + "code":"220" + }, + { + "desc":"When an application runs an SQL statement to operate the database, a large number of subqueries are used because they are clearer than table joins. 
Especially in complic", + "product_code":"dws", + "title":"Optimizing Subqueries", + "uri":"dws_04_0448.html", + "doc_type":"devg", + "p_code":"218", + "code":"221" + }, + { + "desc":"GaussDB(DWS) generates optimal execution plans based on the cost estimation. Optimizers need to estimate the number of data rows and the cost based on statistics collecte", + "product_code":"dws", + "title":"Optimizing Statistics", + "uri":"dws_04_0449.html", + "doc_type":"devg", + "p_code":"218", + "code":"222" + }, + { + "desc":"A query statement needs to go through multiple operator procedures to generate the final result. Sometimes, the overall query performance deteriorates due to long executi", + "product_code":"dws", + "title":"Optimizing Operators", + "uri":"dws_04_0450.html", + "doc_type":"devg", + "p_code":"218", + "code":"223" + }, + { + "desc":"Data skew breaks the balance among nodes in the distributed MPP architecture. If the amount of data stored or processed by a node is much greater than that by other nodes", + "product_code":"dws", + "title":"Optimizing Data Skew", + "uri":"dws_04_0451.html", + "doc_type":"devg", + "p_code":"218", + "code":"224" + }, + { + "desc":"Based on the database SQL execution mechanism and a large number of practices, rewriting SQL statements according to certain rules can improve execution efficiency while producing correct results.", + "product_code":"dws", + "title":"Experience in Rewriting SQL Statements", + "uri":"dws_04_0452.html", + "doc_type":"devg", + "p_code":"207", + "code":"225" + }, + { + "desc":"This section describes the key CN parameters that affect GaussDB(DWS) SQL tuning performance. For details about how to configure these parameters, see Configuring GUC Par", + "product_code":"dws", + "title":"Adjusting Key Parameters During SQL Tuning", + "uri":"dws_04_0453.html", + "doc_type":"devg", + "p_code":"207", + "code":"226" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Hint-based Tuning", + "uri":"dws_04_0454.html", + "doc_type":"devg", + "p_code":"207", + "code":"227" + }, + { + "desc":"In plan hints, you can specify a join order, join, stream, and scan operations, the number of rows in a result, and redistribution skew information to tune an execution p", + "product_code":"dws", + "title":"Plan Hint Optimization", + "uri":"dws_04_0455.html", + "doc_type":"devg", + "p_code":"227", + "code":"228" + }, + { + "desc":"These hints specify the join order and outer/inner tables.Specify only the join order.Specify the join order and outer/inner tables. The outer/inner tables are specified", + "product_code":"dws", + "title":"Join Order Hints", + "uri":"dws_04_0456.html", + "doc_type":"devg", + "p_code":"227", + "code":"229" + }, + { + "desc":"Specifies the join method. It can be nested loop join, hash join, or merge join.no indicates that the specified hint will not be used for a join.table_list specifies the ", + "product_code":"dws", + "title":"Join Operation Hints", + "uri":"dws_04_0457.html", + "doc_type":"devg", + "p_code":"227", + "code":"230" + }, + { + "desc":"These hints specify the number of rows in an intermediate result set. 
Both absolute values and relative values are supported.#, +, -, and * are operators used for hinting t", + "product_code":"dws", + "title":"Rows Hints", + "uri":"dws_04_0458.html", + "doc_type":"devg", + "p_code":"227", + "code":"231" + }, + { + "desc":"These hints specify a stream operation, which can be broadcast or redistribute.no indicates that the specified hint will not be used for a join.table_list specifies the t", + "product_code":"dws", + "title":"Stream Operation Hints", + "uri":"dws_04_0459.html", + "doc_type":"devg", + "p_code":"227", + "code":"232" + }, + { + "desc":"These hints specify a scan operation, which can be tablescan, indexscan, or indexonlyscan.no indicates that the specified hint will not be used for a join.table specifies", + "product_code":"dws", + "title":"Scan Operation Hints", + "uri":"dws_04_0460.html", + "doc_type":"devg", + "p_code":"227", + "code":"233" + }, + { + "desc":"These hints specify the name of a sublink block.table indicates the name you have specified for a sublink block.This hint is used by an outer query only when a sublink is", + "product_code":"dws", + "title":"Sublink Name Hints", + "uri":"dws_04_0461.html", + "doc_type":"devg", + "p_code":"227", + "code":"234" + }, + { + "desc":"These hints specify redistribution keys containing skew data and skew values, and are used to optimize redistribution involving Join or HashAgg.Specify single-table skew", + "product_code":"dws", + "title":"Skew Hints", + "uri":"dws_04_0462.html", + "doc_type":"devg", + "p_code":"227", + "code":"235" + }, + { + "desc":"A hint, or a GUC hint, specifies a configuration parameter value when a plan is generated. Currently, only the following parameters are supported:agg_redistribute_enhance", + "product_code":"dws", + "title":"Configuration Parameter Hints", + "uri":"dws_04_0463.html", + "doc_type":"devg", + "p_code":"227", + "code":"236" + }, + { + "desc":"Plan hints change an execution plan. You can run EXPLAIN to view the changes.Hints containing errors are invalid and do not affect statement execution. The errors will be", + "product_code":"dws", + "title":"Hint Errors, Conflicts, and Other Warnings", + "uri":"dws_04_0464.html", + "doc_type":"devg", + "p_code":"227", + "code":"237" + }, + { + "desc":"This section takes the statements in TPC-DS (Q24) as an example to describe how to optimize an execution plan by using hints in 1000X+24DN environments. For example:The o", + "product_code":"dws", + "title":"Plan Hint Cases", + "uri":"dws_04_0465.html", + "doc_type":"devg", + "p_code":"227", + "code":"238" + }, + { + "desc":"To ensure proper database running, after INSERT and DELETE operations, you need to routinely perform VACUUM FULL and ANALYZE as appropriate for customer scenarios and update s", + "product_code":"dws", + "title":"Routinely Maintaining Tables", + "uri":"dws_04_0466.html", + "doc_type":"devg", + "p_code":"207", + "code":"239" + }, + { + "desc":"When data deletion is repeatedly performed in the database, index keys will be deleted from the index page, resulting in index bloat. Recreating an index routinely i", + "product_code":"dws", + "title":"Routinely Recreating an Index", + "uri":"dws_04_0467.html", + "doc_type":"devg", + "p_code":"207", + "code":"240" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Configuring the SMP", + "uri":"dws_04_0468.html", + "doc_type":"devg", + "p_code":"207", + "code":"241" + }, + { + "desc":"The SMP feature improves the performance through operator parallelism and occupies more system resources, including CPU, memory, network, and I/O. Actually, SMP is a meth", + "product_code":"dws", + "title":"Application Scenarios and Restrictions", + "uri":"dws_04_0469.html", + "doc_type":"devg", + "p_code":"241", + "code":"242" + }, + { + "desc":"The SMP architecture trades resources for time. After a plan is parallelized, resource consumption increases, including the CPU, memory, I/O, an", + "product_code":"dws", + "title":"Resource Impact on SMP Performance", + "uri":"dws_04_0470.html", + "doc_type":"devg", + "p_code":"241", + "code":"243" + }, + { + "desc":"Besides resource factors, there are other factors that impact the SMP parallelism performance, such as unevenly distributed data in a partitioned table and system paralle", + "product_code":"dws", + "title":"Other Factors Affecting SMP Performance", + "uri":"dws_04_0471.html", + "doc_type":"devg", + "p_code":"241", + "code":"244" + }, + { + "desc":"Starting from this version, SMP auto adaptation is enabled. For newly deployed clusters, the default value of query_dop is 0, and SMP parameters have been adjusted. To en", + "product_code":"dws", + "title":"Suggestions for SMP Parameter Settings", + "uri":"dws_04_0472.html", + "doc_type":"devg", + "p_code":"241", + "code":"245" + }, + { + "desc":"To manually optimize SMP, you need to be familiar with Suggestions for SMP Parameter Settings. This section describes how to optimize SMP.The CPU, memory, I/O, and networ", + "product_code":"dws", + "title":"SMP Manual Optimization Suggestions", + "uri":"dws_04_0473.html", + "doc_type":"devg", + "p_code":"241", + "code":"246" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Optimization Cases", + "uri":"dws_04_0474.html", + "doc_type":"devg", + "p_code":"199", + "code":"247" + }, + { + "desc":"Tables are defined as follows:The following query is executed:If a is the distribution column of t1 and t2:Then Streaming exists in the execution plan and the data volume", + "product_code":"dws", + "title":"Case: Selecting an Appropriate Distribution Column", + "uri":"dws_04_0475.html", + "doc_type":"devg", + "p_code":"247", + "code":"248" + }, + { + "desc":"Query the information about all personnel in the sales department.The original execution plan is as follows before creating the places.place_id and states.state_id indexe", + "product_code":"dws", + "title":"Case: Creating an Appropriate Index", + "uri":"dws_04_0476.html", + "doc_type":"devg", + "p_code":"247", + "code":"249" + }, + { + "desc":"Figure 1 shows the execution plan.As shown in Figure 1, the sequential scan phase is time consuming.The JOIN performance is poor because a large number of null values exi", + "product_code":"dws", + "title":"Case: Adding NOT NULL for JOIN Columns", + "uri":"dws_04_0477.html", + "doc_type":"devg", + "p_code":"247", + "code":"250" + }, + { + "desc":"In an execution plan, more than 95% of the execution time is spent on window agg performed on the CN. In this case, sum is performed for the two columns separately, and t", + "product_code":"dws", + "title":"Case: Pushing Down Sort Operations to DNs", + "uri":"dws_04_0478.html", + "doc_type":"devg", + "p_code":"247", + "code":"251" + }, + { + "desc":"If bit0 of cost_param is set to 1, an improved mechanism is used for estimating the selection rate of non-equi-joins. This method is more accurate for estimating the sele", + "product_code":"dws", + "title":"Case: Configuring cost_param for Better Query Performance", + "uri":"dws_04_0479.html", + "doc_type":"devg", + "p_code":"247", + "code":"252" + }, + { + "desc":"During a site test, the information is displayed after EXPLAIN ANALYZE is executed:According to the execution information, HashJoin becomes the performance bottleneck of ", + "product_code":"dws", + "title":"Case: Adjusting the Distribution Key", + "uri":"dws_04_0480.html", + "doc_type":"devg", + "p_code":"247", + "code":"253" + }, + { + "desc":"Information on the EXPLAIN PERFORMANCE at a site is as follows: As shown in the red boxes, two performance bottlenecks are scan operations in a table.After further analys", + "product_code":"dws", + "title":"Case: Adjusting the Partial Clustering Key", + "uri":"dws_04_0481.html", + "doc_type":"devg", + "p_code":"247", + "code":"254" + }, + { + "desc":"In the GaussDB(DWS) database, row-store tables use the row execution engine, and column-store tables use the column execution engine. 
If both row-store table and column-s", + "product_code":"dws", + "title":"Case: Adjusting the Table Storage Mode in a Medium Table", + "uri":"dws_04_0482.html", + "doc_type":"devg", + "p_code":"247", + "code":"255" + }, + { + "desc":"During the test at a site, if the following execution plan is performed, the customer expects that the performance can be improved and the result can be returned within 3", + "product_code":"dws", + "title":"Case: Adjusting the Local Clustering Column", + "uri":"dws_04_0483.html", + "doc_type":"devg", + "p_code":"247", + "code":"256" + }, + { + "desc":"In the following simple SQL statements, the performance bottlenecks exist in the scan operation of dwcjk.Obviously, there are date features in the cjrq field of table dat", + "product_code":"dws", + "title":"Case: Reconstructing Partition Tables", + "uri":"dws_04_0484.html", + "doc_type":"devg", + "p_code":"247", + "code":"257" + }, + { + "desc":"The t1 table is defined as follows:Assume that the distribution column of the result set provided by the agg lower-layer operator is setA, and the group by column of the ", + "product_code":"dws", + "title":"Case: Adjusting the GUC Parameter best_agg_plan", + "uri":"dws_04_0485.html", + "doc_type":"devg", + "p_code":"247", + "code":"258" + }, + { + "desc":"The performance of this SQL statement is poor. SubPlan exists in the execution plan as follows:The core of this optimization is to eliminate subqueries. Based on the service scenario anal", + "product_code":"dws", + "title":"Case: Rewriting SQL and Deleting Subqueries (Case 1)", + "uri":"dws_04_0486.html", + "doc_type":"devg", + "p_code":"247", + "code":"259" + }, + { + "desc":"At a site, the customer reported that the following SQL statements had been running for over one day without finishing:The corresponding execution p", + "product_code":"dws", + "title":"Case: Rewriting SQL and Deleting Subqueries (Case 2)", + "uri":"dws_04_0487.html", + "doc_type":"devg", + "p_code":"247", + "code":"260" + }, + { + "desc":"In a test at a site, ddw_f10_op_cust_asset_mon is a partitioned table and the partition key is year_mth whose value is a combined string of month and year values.The foll", + "product_code":"dws", + "title":"Case: Rewriting SQL Statements and Eliminating Prune Interference", + "uri":"dws_04_0488.html", + "doc_type":"devg", + "p_code":"247", + "code":"261" + }, + { + "desc":"in-clause/any-clause is a common SQL statement constraint. Sometimes, the clause following in or any is a constant. For example:orSome special usages are as follows:Where", + "product_code":"dws", + "title":"Case: Rewriting SQL Statements and Deleting in-clause", + "uri":"dws_04_0489.html", + "doc_type":"devg", + "p_code":"247", + "code":"262" + }, + { + "desc":"You can add PARTIAL CLUSTER KEY(column_name[,...]) to the definition of a column-store table to set one or more columns of this table as partial cluster keys. In this way", + "product_code":"dws", + "title":"Case: Setting Partial Cluster Keys", + "uri":"dws_04_0490.html", + "doc_type":"devg", + "p_code":"247", + "code":"263" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"SQL Execution Troubleshooting", + "uri":"dws_04_0491.html", + "doc_type":"devg", + "p_code":"199", + "code":"264" + }, + { + "desc":"A query task that used to take a few milliseconds now requires several seconds, and one that used to take several seconds now requires up to half an hour. ", + "product_code":"dws", + "title":"Low Query Efficiency", + "uri":"dws_04_0492.html", + "doc_type":"devg", + "p_code":"264", + "code":"265" + }, + { + "desc":"DROP TABLE fails to be executed in the following scenarios:A user runs the \\dt+ command using gsql and finds that the table_name table does not exist. When the user runs ", + "product_code":"dws", + "title":"DROP TABLE Fails to Be Executed", + "uri":"dws_04_0494.html", + "doc_type":"devg", + "p_code":"264", + "code":"266" + }, + { + "desc":"Two users log in to the same database human_resource and run the select count(*) from areas statement separately to query the areas table, but obtain different results.Ch", + "product_code":"dws", + "title":"Different Data Is Displayed for the Same Table Queried By Multiple Users", + "uri":"dws_04_0495.html", + "doc_type":"devg", + "p_code":"264", + "code":"267" + }, + { + "desc":"The following error is reported during the integer conversion:Some data types cannot be converted to the target data type.Gradually narrow down the range of SQL statement", + "product_code":"dws", + "title":"An Error Occurs During the Integer Conversion", + "uri":"dws_04_0496.html", + "doc_type":"devg", + "p_code":"264", + "code":"268" + }, + { + "desc":"With automatic retry (referred to as CN retry), GaussDB(DWS) retries an SQL statement when the execution of this statement fails. If an SQL statement sent from the gsql c", + "product_code":"dws", + "title":"Automatic Retry upon SQL Statement Execution Errors", + "uri":"dws_04_0497.html", + "doc_type":"devg", + "p_code":"264", + "code":"269" + }, + { + "desc":"To improve the cluster performance, you can use multiple methods to optimize the database, including hardware configuration, software driver upgrade, and internal paramet", + "product_code":"dws", + "title":"Common Performance Parameter Optimization Design", + "uri":"dws_04_0970.html", + "doc_type":"devg", + "p_code":"199", + "code":"270" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"User-Defined Functions", + "uri":"dws_04_0507.html", + "doc_type":"devg", + "p_code":"1", + "code":"271" + }, + { + "desc":"With the GaussDB(DWS) PL/Java functions, you can choose your favorite Java IDE to write Java methods and install the JAR files containing these methods into the GaussDB(D", + "product_code":"dws", + "title":"PL/Java Functions", + "uri":"dws_04_0509.html", + "doc_type":"devg", + "p_code":"271", + "code":"272" + }, + { + "desc":"PL/pgSQL is similar to PL/SQL of Oracle. 
It is a loadable procedural language.The functions created using PL/pgSQL can be used in any place where you can use built-in fun", + "product_code":"dws", + "title":"PL/pgSQL Functions", + "uri":"dws_04_0511.html", + "doc_type":"devg", + "p_code":"271", + "code":"273" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Stored Procedures", + "uri":"dws_04_0512.html", + "doc_type":"devg", + "p_code":"1", + "code":"274" + }, + { + "desc":"In GaussDB(DWS), business rules and logic are saved as stored procedures.A stored procedure is a combination of SQL, PL/SQL, and Java statements, enabling business rule ", + "product_code":"dws", + "title":"Stored Procedure", + "uri":"dws_04_0513.html", + "doc_type":"devg", + "p_code":"274", + "code":"275" + }, + { + "desc":"A data type refers to a value set and an operation set defined on the value set. A GaussDB(DWS) database consists of tables, each of which is defined by its own columns. ", + "product_code":"dws", + "title":"Data Types", + "uri":"dws_04_0514.html", + "doc_type":"devg", + "p_code":"274", + "code":"276" + }, + { + "desc":"Certain data types in the database support implicit data type conversions, such as assignments and parameters invoked by functions. For other data types, you can use the ", + "product_code":"dws", + "title":"Data Type Conversion", + "uri":"dws_04_0515.html", + "doc_type":"devg", + "p_code":"274", + "code":"277" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Arrays and Records", + "uri":"dws_04_0516.html", + "doc_type":"devg", + "p_code":"274", + "code":"278" + }, + { + "desc":"Before the use of arrays, an array type needs to be defined:Define an array type immediately after the AS keyword in a stored procedure. Run the following statement:TYPE ", + "product_code":"dws", + "title":"Arrays", + "uri":"dws_04_0517.html", + "doc_type":"devg", + "p_code":"278", + "code":"279" + }, + { + "desc":"Perform the following operations to create a record variable:Define a record type and use this type to declare a variable.For the syntax of the record type, see Figure 1.", + "product_code":"dws", + "title":"record", + "uri":"dws_04_0518.html", + "doc_type":"devg", + "p_code":"278", + "code":"280" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Syntax", + "uri":"dws_04_0519.html", + "doc_type":"devg", + "p_code":"274", + "code":"281" + }, + { + "desc":"A PL/SQL block can contain a sub-block which can be placed in any section. The following describes the architecture of a PL/SQL block:DECLARE: declares variables, types, ", + "product_code":"dws", + "title":"Basic Structure", + "uri":"dws_04_0520.html", + "doc_type":"devg", + "p_code":"281", + "code":"282" + }, + { + "desc":"An anonymous block applies to a script that is executed infrequently or a one-off activity. 
An anonymous block is executed in a session and is not stored.Figure 1 shows the synta", + "product_code":"dws", + "title":"Anonymous Block", + "uri":"dws_04_0521.html", + "doc_type":"devg", + "p_code":"281", + "code":"283" + }, + { + "desc":"A subprogram stores stored procedures, functions, operators, and advanced packages. A subprogram created in a database can be called by other programs.", + "product_code":"dws", + "title":"Subprogram", + "uri":"dws_04_0522.html", + "doc_type":"devg", + "p_code":"281", + "code":"284" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Basic Statements", + "uri":"dws_04_0523.html", + "doc_type":"devg", + "p_code":"274", + "code":"285" + }, + { + "desc":"This section describes the declaration of variables in PL/SQL and the scope of these variables in code.For details about the variable declaration syntax, see Figure 1.", + "product_code":"dws", + "title":"Variable Definition Statement", + "uri":"dws_04_0524.html", + "doc_type":"devg", + "p_code":"285", + "code":"286" + }, + { + "desc":"Figure 1 shows the syntax diagram for assigning a value to a variable.The above syntax diagram is explained as follows:variable_name indicates the name of a variable.valu", + "product_code":"dws", + "title":"Assignment Statement", + "uri":"dws_04_0525.html", + "doc_type":"devg", + "p_code":"285", + "code":"287" + }, + { + "desc":"Figure 1 shows the syntax diagram for calling a clause.The above syntax diagram is explained as follows:procedure_name specifies the name of a stored procedure.parameter ", + "product_code":"dws", + "title":"Call Statement", + "uri":"dws_04_0526.html", + "doc_type":"devg", + "p_code":"285", + "code":"288" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Dynamic Statements", + "uri":"dws_04_0527.html", + "doc_type":"devg", + "p_code":"274", + "code":"289" + }, + { + "desc":"You can perform dynamic queries using EXECUTE IMMEDIATE or OPEN FOR in GaussDB(DWS). EXECUTE IMMEDIATE dynamically executes SELECT statements and OPEN FOR combines use of", + "product_code":"dws", + "title":"Executing Dynamic Query Statements", + "uri":"dws_04_0528.html", + "doc_type":"devg", + "p_code":"289", + "code":"290" + }, + { + "desc":"Figure 1 shows the syntax diagram.Figure 2 shows the syntax diagram for using_clause.The above syntax diagram is explained as follows:USING IN bind_argument is used to sp", + "product_code":"dws", + "title":"Executing Dynamic Non-query Statements", + "uri":"dws_04_0529.html", + "doc_type":"devg", + "p_code":"289", + "code":"291" + }, + { + "desc":"This section describes how to dynamically call stored procedures. You must use anonymous statement blocks to package stored procedures or statement blocks and append IN an", + "product_code":"dws", + "title":"Dynamically Calling Stored Procedures", + "uri":"dws_04_0530.html", + "doc_type":"devg", + "p_code":"289", + "code":"292" + }, + { + "desc":"This section describes how to execute anonymous blocks in dynamic statements. 
Append IN and OUT after the EXECUTE IMMEDIATE...USING statement to input and output paramet", + "product_code":"dws", + "title":"Dynamically Calling Anonymous Blocks", + "uri":"dws_04_0531.html", + "doc_type":"devg", + "p_code":"289", + "code":"293" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Control Statements", + "uri":"dws_04_0532.html", + "doc_type":"devg", + "p_code":"274", + "code":"294" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"RETURN Statements", + "uri":"dws_04_0533.html", + "doc_type":"devg", + "p_code":"294", + "code":"295" + }, + { + "desc":"Figure 1 shows the syntax diagram for a return statement.The syntax details are as follows:This statement returns control from a stored procedure or function to a caller.", + "product_code":"dws", + "title":"RETURN", + "uri":"dws_04_0534.html", + "doc_type":"devg", + "p_code":"295", + "code":"296" + }, + { + "desc":"When creating a function, specify SETOF datatype for the return values.return_next_clause::=return_query_clause::=The syntax details are as follows:If a function needs to", + "product_code":"dws", + "title":"RETURN NEXT and RETURN QUERY", + "uri":"dws_04_0535.html", + "doc_type":"devg", + "p_code":"295", + "code":"297" + }, + { + "desc":"Conditional statements are used to decide whether given conditions are met. Operations are executed based on the decisions made.GaussDB(DWS) supports five usages of IF:IF", + "product_code":"dws", + "title":"Conditional Statements", + "uri":"dws_04_0536.html", + "doc_type":"devg", + "p_code":"294", + "code":"298" + }, + { + "desc":"The syntax diagram is as follows.Example:The loop must be used together with EXIT; otherwise, an infinite loop occurs.The syntax diagram is as follows.If the conditional ", + "product_code":"dws", + "title":"Loop Statements", + "uri":"dws_04_0537.html", + "doc_type":"devg", + "p_code":"294", + "code":"299" + }, + { + "desc":"Figure 1 shows the syntax diagram.Figure 2 shows the syntax diagram for when_clause.Parameter description:case_expression: specifies the variable or expression.when_expre", + "product_code":"dws", + "title":"Branch Statements", + "uri":"dws_04_0538.html", + "doc_type":"devg", + "p_code":"294", + "code":"300" + }, + { + "desc":"In PL/SQL programs, NULL statements are used to indicate \"nothing should be done\", acting as placeholders. They give meaning to some statements and improve program reada", + "product_code":"dws", + "title":"NULL Statements", + "uri":"dws_04_0539.html", + "doc_type":"devg", + "p_code":"294", + "code":"301" + }, + { + "desc":"By default, any error occurring in a PL/SQL function aborts execution of the function, and indeed of the surrounding transaction as well. You can trap errors and restore ", + "product_code":"dws", + "title":"Error Trapping Statements", + "uri":"dws_04_0540.html", + "doc_type":"devg", + "p_code":"294", + "code":"302" + }, + { + "desc":"The GOTO statement unconditionally transfers control from the current statement to a labeled statement. 
The GOTO statement changes the execution logic. Therefore, use", + "product_code":"dws", + "title":"GOTO Statements", + "uri":"dws_04_0541.html", + "doc_type":"devg", + "p_code":"294", + "code":"303" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Other Statements", + "uri":"dws_04_0542.html", + "doc_type":"devg", + "p_code":"274", + "code":"304" + }, + { + "desc":"GaussDB(DWS) provides multiple lock modes to control concurrent accesses to table data. These modes are used when Multi-Version Concurrency Control (MVCC) cannot give exp", + "product_code":"dws", + "title":"Lock Operations", + "uri":"dws_04_0543.html", + "doc_type":"devg", + "p_code":"304", + "code":"305" + }, + { + "desc":"GaussDB(DWS) provides cursors as a data buffer for users to store execution results of SQL statements. Each cursor region has a name. Users can use SQL statements to obta", + "product_code":"dws", + "title":"Cursor Operations", + "uri":"dws_04_0544.html", + "doc_type":"devg", + "p_code":"304", + "code":"306" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Cursors", + "uri":"dws_04_0545.html", + "doc_type":"devg", + "p_code":"274", + "code":"307" + }, + { + "desc":"To process SQL statements, the stored procedure process assigns a memory segment to store context association. Cursors are handles or pointers to context areas. With curs", + "product_code":"dws", + "title":"Overview", + "uri":"dws_04_0546.html", + "doc_type":"devg", + "p_code":"307", + "code":"308" + }, + { + "desc":"An explicit cursor is used to process query statements, particularly when the query results contain multiple records.An explicit cursor performs the following six PL/SQL ", + "product_code":"dws", + "title":"Explicit Cursor", + "uri":"dws_04_0547.html", + "doc_type":"devg", + "p_code":"307", + "code":"309" + }, + { + "desc":"The system automatically sets implicit cursors for non-query statements, such as ALTER and DROP, and creates work areas for these statements. These implicit cursors are n", + "product_code":"dws", + "title":"Implicit Cursor", + "uri":"dws_04_0548.html", + "doc_type":"devg", + "p_code":"307", + "code":"310" + }, + { + "desc":"The use of cursors in WHILE and LOOP statements is called a cursor loop. Generally, OPEN, FETCH, and CLOSE statements are needed in cursor loop. The following describes a", + "product_code":"dws", + "title":"Cursor Loop", + "uri":"dws_04_0549.html", + "doc_type":"devg", + "p_code":"307", + "code":"311" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Advanced Packages", + "uri":"dws_04_0550.html", + "doc_type":"devg", + "p_code":"274", + "code":"312" + }, + { + "desc":"Table 1 provides all interfaces supported by the DBMS_LOB package.DBMS_LOB.GETLENGTHSpecifies the length of a LOB type object obtained and returned by the stored procedur", + "product_code":"dws", + "title":"DBMS_LOB", + "uri":"dws_04_0551.html", + "doc_type":"devg", + "p_code":"312", + "code":"313" + }, + { + "desc":"Table 1 provides all interfaces supported by the DBMS_RANDOM package.DBMS_RANDOM.SEEDThe stored procedure SEED is used to set a seed for a random number. The DBMS_RANDOM.", + "product_code":"dws", + "title":"DBMS_RANDOM", + "uri":"dws_04_0552.html", + "doc_type":"devg", + "p_code":"312", + "code":"314" + }, + { + "desc":"Table 1 provides all interfaces supported by the DBMS_OUTPUT package.DBMS_OUTPUT.PUT_LINEThe PUT_LINE procedure writes a row of text carrying a line end symbol in the buf", + "product_code":"dws", + "title":"DBMS_OUTPUT", + "uri":"dws_04_0553.html", + "doc_type":"devg", + "p_code":"312", + "code":"315" + }, + { + "desc":"Table 1 provides all interfaces supported by the UTL_RAW package.The external representation of the RAW type data is hexadecimal and its internal storage form is binary. ", + "product_code":"dws", + "title":"UTL_RAW", + "uri":"dws_04_0554.html", + "doc_type":"devg", + "p_code":"312", + "code":"316" + }, + { + "desc":"Table 1 lists all interfaces supported by the DBMS_JOB package.DBMS_JOB.SUBMITThe stored procedure SUBMIT submits a job provided by the system.A prototype of the DBMS_JOB", + "product_code":"dws", + "title":"DBMS_JOB", + "uri":"dws_04_0555.html", + "doc_type":"devg", + "p_code":"312", + "code":"317" + }, + { + "desc":"Table 1 lists interfaces supported by the DBMS_SQL package.You are advised to use dbms_sql.define_column and dbms_sql.column_value to define columns.If the size of the re", + "product_code":"dws", + "title":"DBMS_SQL", + "uri":"dws_04_0556.html", + "doc_type":"devg", + "p_code":"312", + "code":"318" + }, + { + "desc":"RAISE has the following five syntax formats:Parameter description:The level option is used to specify the error level, that is, DEBUG, LOG, INFO, NOTICE, WARNING, or EXCE", + "product_code":"dws", + "title":"Debugging", + "uri":"dws_04_0558.html", + "doc_type":"devg", + "p_code":"274", + "code":"319" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"System Catalogs and System Views", + "uri":"dws_04_0559.html", + "doc_type":"devg", + "p_code":"1", + "code":"320" + }, + { + "desc":"System catalogs are used by GaussDB(DWS) to store structure metadata. They are a core component of the GaussDB(DWS) database system and provide control information for the d", + "product_code":"dws", + "title":"Overview of System Catalogs and System Views", + "uri":"dws_04_0560.html", + "doc_type":"devg", + "p_code":"320", + "code":"321" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"System Catalogs", + "uri":"dws_04_0561.html", + "doc_type":"devg", + "p_code":"320", + "code":"322" + }, + { + "desc":"GS_OBSSCANINFO defines the OBS runtime information scanned in cluster acceleration scenarios. Each record corresponds to a piece of runtime information of a foreign table", + "product_code":"dws", + "title":"GS_OBSSCANINFO", + "uri":"dws_04_0562.html", + "doc_type":"devg", + "p_code":"322", + "code":"323" + }, + { + "desc":"The GS_WLM_INSTANCE_HISTORY system catalog stores information about resource usage related to CN or DN instances. Each record in the system table indicates the resource u", + "product_code":"dws", + "title":"GS_WLM_INSTANCE_HISTORY", + "uri":"dws_04_0564.html", + "doc_type":"devg", + "p_code":"322", + "code":"324" + }, + { + "desc":"GS_WLM_OPERATOR_INFO records operators of completed jobs. The data is dumped from the kernel to a system catalog.This system catalog's schema is dbms_om.This system catal", + "product_code":"dws", + "title":"GS_WLM_OPERATOR_INFO", + "uri":"dws_04_0565.html", + "doc_type":"devg", + "p_code":"322", + "code":"325" + }, + { + "desc":"GS_WLM_SESSION_INFO records load management information about a completed job executed on all CNs. The data is dumped from the kernel to a system catalog.This system cata", + "product_code":"dws", + "title":"GS_WLM_SESSION_INFO", + "uri":"dws_04_0566.html", + "doc_type":"devg", + "p_code":"322", + "code":"326" + }, + { + "desc":"The GS_WLM_USER_RESOURCE_HISTORY system table stores information about resources used by users and is valid only on CNs. Each record in the system table indicates the res", + "product_code":"dws", + "title":"GS_WLM_USER_RESOURCE_HISTORY", + "uri":"dws_04_0567.html", + "doc_type":"devg", + "p_code":"322", + "code":"327" + }, + { + "desc":"pg_aggregate records information about aggregation functions. Each entry in pg_aggregate is an extension of an entry in pg_proc. The pg_proc entry carries the aggregate's", + "product_code":"dws", + "title":"PG_AGGREGATE", + "uri":"dws_04_0568.html", + "doc_type":"devg", + "p_code":"322", + "code":"328" + }, + { + "desc":"PG_AM records information about index access methods. There is one row for each index access method supported by the system.", + "product_code":"dws", + "title":"PG_AM", + "uri":"dws_04_0569.html", + "doc_type":"devg", + "p_code":"322", + "code":"329" + }, + { + "desc":"PG_AMOP records information about operators associated with access method operator families. There is one row for each operator that is a member of an operator family. A ", + "product_code":"dws", + "title":"PG_AMOP", + "uri":"dws_04_0570.html", + "doc_type":"devg", + "p_code":"322", + "code":"330" + }, + { + "desc":"PG_AMPROC records information about the support procedures associated with the access method operator families. 
There is one row for each support procedure belonging to a", + "product_code":"dws", + "title":"PG_AMPROC", + "uri":"dws_04_0571.html", + "doc_type":"devg", + "p_code":"322", + "code":"331" + }, + { + "desc":"PG_ATTRDEF stores default values of columns.", + "product_code":"dws", + "title":"PG_ATTRDEF", + "uri":"dws_04_0572.html", + "doc_type":"devg", + "p_code":"322", + "code":"332" + }, + { + "desc":"PG_ATTRIBUTE records information about table columns.", + "product_code":"dws", + "title":"PG_ATTRIBUTE", + "uri":"dws_04_0573.html", + "doc_type":"devg", + "p_code":"322", + "code":"333" + }, + { + "desc":"PG_AUTHID records information about the database authentication identifiers (roles). The concept of users is contained in that of roles. A user is actually a role whose r", + "product_code":"dws", + "title":"PG_AUTHID", + "uri":"dws_04_0574.html", + "doc_type":"devg", + "p_code":"322", + "code":"334" + }, + { + "desc":"PG_AUTH_HISTORY records the authentication history of the role. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"PG_AUTH_HISTORY", + "uri":"dws_04_0575.html", + "doc_type":"devg", + "p_code":"322", + "code":"335" + }, + { + "desc":"PG_AUTH_MEMBERS records the membership relations between roles.", + "product_code":"dws", + "title":"PG_AUTH_MEMBERS", + "uri":"dws_04_0576.html", + "doc_type":"devg", + "p_code":"322", + "code":"336" + }, + { + "desc":"PG_CAST records conversion relationships between data types.", + "product_code":"dws", + "title":"PG_CAST", + "uri":"dws_04_0577.html", + "doc_type":"devg", + "p_code":"322", + "code":"337" + }, + { + "desc":"PG_CLASS records database objects and their relations.View the OID and relfilenode of a table.Count row-store tables.Count column-store tables.", + "product_code":"dws", + "title":"PG_CLASS", + "uri":"dws_04_0578.html", + "doc_type":"devg", + "p_code":"322", + "code":"338" + }, + { + "desc":"PG_COLLATION records the available collations, which are essentially mappings from an SQL name to operating system locale categories.", + "product_code":"dws", + "title":"PG_COLLATION", + "uri":"dws_04_0579.html", + "doc_type":"devg", + "p_code":"322", + "code":"339" + }, + { + "desc":"PG_CONSTRAINT records check, primary key, unique, and foreign key constraints on the tables.consrc is not updated when referenced objects change; for example, it will not", + "product_code":"dws", + "title":"PG_CONSTRAINT", + "uri":"dws_04_0580.html", + "doc_type":"devg", + "p_code":"322", + "code":"340" + }, + { + "desc":"PG_CONVERSION records encoding conversion information.", + "product_code":"dws", + "title":"PG_CONVERSION", + "uri":"dws_04_0581.html", + "doc_type":"devg", + "p_code":"322", + "code":"341" + }, + { + "desc":"PG_DATABASE records information about the available databases.", + "product_code":"dws", + "title":"PG_DATABASE", + "uri":"dws_04_0582.html", + "doc_type":"devg", + "p_code":"322", + "code":"342" + }, + { + "desc":"PG_DB_ROLE_SETTING records the default values of configuration items bonded to each role and database when the database is running.", + "product_code":"dws", + "title":"PG_DB_ROLE_SETTING", + "uri":"dws_04_0583.html", + "doc_type":"devg", + "p_code":"322", + "code":"343" + }, + { + "desc":"PG_DEFAULT_ACL records the initial privileges assigned to the newly created objects.Run the following command to view the initial permissions of the new user role1:You ca", + "product_code":"dws", + "title":"PG_DEFAULT_ACL", + "uri":"dws_04_0584.html", + "doc_type":"devg", + 
"p_code":"322", + "code":"344" + }, + { + "desc":"PG_DEPEND records the dependency relationships between database objects. This information allows DROP commands to find which other objects must be dropped by DROP CASCADE", + "product_code":"dws", + "title":"PG_DEPEND", + "uri":"dws_04_0585.html", + "doc_type":"devg", + "p_code":"322", + "code":"345" + }, + { + "desc":"PG_DESCRIPTION records optional descriptions (comments) for each database object. Descriptions of many built-in system objects are provided in the initial contents of PG_", + "product_code":"dws", + "title":"PG_DESCRIPTION", + "uri":"dws_04_0586.html", + "doc_type":"devg", + "p_code":"322", + "code":"346" + }, + { + "desc":"PG_ENUM records entries showing the values and labels for each enum type. The internal representation of a given enum value is actually the OID of its associated row in p", + "product_code":"dws", + "title":"PG_ENUM", + "uri":"dws_04_0588.html", + "doc_type":"devg", + "p_code":"322", + "code":"347" + }, + { + "desc":"PG_EXTENSION records information about the installed extensions. By default, GaussDB(DWS) has 12 extensions, that is, PLPGSQL, DIST_FDW, FILE_FDW, HDFS_FDW, HSTORE, PLDBG", + "product_code":"dws", + "title":"PG_EXTENSION", + "uri":"dws_04_0589.html", + "doc_type":"devg", + "p_code":"322", + "code":"348" + }, + { + "desc":"PG_EXTENSION_DATA_SOURCE records information about external data source. An external data source contains information about an external database, such as its password enc", + "product_code":"dws", + "title":"PG_EXTENSION_DATA_SOURCE", + "uri":"dws_04_0590.html", + "doc_type":"devg", + "p_code":"322", + "code":"349" + }, + { + "desc":"PG_FOREIGN_DATA_WRAPPER records foreign-data wrapper definitions. A foreign-data wrapper is the mechanism by which external data, residing on foreign servers, is accessed", + "product_code":"dws", + "title":"PG_FOREIGN_DATA_WRAPPER", + "uri":"dws_04_0591.html", + "doc_type":"devg", + "p_code":"322", + "code":"350" + }, + { + "desc":"PG_FOREIGN_SERVER records the foreign server definitions. A foreign server describes a source of external data, such as a remote server. Foreign servers are accessed via ", + "product_code":"dws", + "title":"PG_FOREIGN_SERVER", + "uri":"dws_04_0592.html", + "doc_type":"devg", + "p_code":"322", + "code":"351" + }, + { + "desc":"PG_FOREIGN_TABLE records auxiliary information about foreign tables.", + "product_code":"dws", + "title":"PG_FOREIGN_TABLE", + "uri":"dws_04_0593.html", + "doc_type":"devg", + "p_code":"322", + "code":"352" + }, + { + "desc":"PG_INDEX records part of the information about indexes. The rest is mostly in PG_CLASS.", + "product_code":"dws", + "title":"PG_INDEX", + "uri":"dws_04_0594.html", + "doc_type":"devg", + "p_code":"322", + "code":"353" + }, + { + "desc":"PG_INHERITS records information about table inheritance hierarchies. There is one entry for each direct child table in the database. Indirect inheritance can be determine", + "product_code":"dws", + "title":"PG_INHERITS", + "uri":"dws_04_0595.html", + "doc_type":"devg", + "p_code":"322", + "code":"354" + }, + { + "desc":"PG_JOBS records detailed information about jobs created by users. Dedicated threads poll the pg_jobs table and trigger jobs based on scheduled job execution time. This ta", + "product_code":"dws", + "title":"PG_JOBS", + "uri":"dws_04_0596.html", + "doc_type":"devg", + "p_code":"322", + "code":"355" + }, + { + "desc":"PG_LANGUAGE records programming languages. 
You can use these languages and their interfaces to write functions or stored procedures.", + "product_code":"dws", + "title":"PG_LANGUAGE", + "uri":"dws_04_0597.html", + "doc_type":"devg", + "p_code":"322", + "code":"356" + }, + { + "desc":"PG_LARGEOBJECT records the data making up large objects. A large object is identified by an OID assigned when it is created. Each large object is broken into segments or \"", + "product_code":"dws", + "title":"PG_LARGEOBJECT", + "uri":"dws_04_0598.html", + "doc_type":"devg", + "p_code":"322", + "code":"357" + }, + { + "desc":"PG_LARGEOBJECT_METADATA records metadata associated with large objects. The actual large object data is stored in PG_LARGEOBJECT.", + "product_code":"dws", + "title":"PG_LARGEOBJECT_METADATA", + "uri":"dws_04_0599.html", + "doc_type":"devg", + "p_code":"322", + "code":"358" + }, + { + "desc":"PG_NAMESPACE records the namespaces, that is, schema-related information.", + "product_code":"dws", + "title":"PG_NAMESPACE", + "uri":"dws_04_0600.html", + "doc_type":"devg", + "p_code":"322", + "code":"359" + }, + { + "desc":"PG_OBJECT records the creator, creation time, last modification time, and last analyzing time of objects of specified types (types existing in object_type).Only nor", + "product_code":"dws", + "title":"PG_OBJECT", + "uri":"dws_04_0601.html", + "doc_type":"devg", + "p_code":"322", + "code":"360" + }, + { + "desc":"PG_OBSSCANINFO defines the OBS runtime information scanned in cluster acceleration scenarios. Each record corresponds to a piece of runtime information of a foreign table", + "product_code":"dws", + "title":"PG_OBSSCANINFO", + "uri":"dws_04_0602.html", + "doc_type":"devg", + "p_code":"322", + "code":"361" + }, + { + "desc":"PG_OPCLASS defines index access method operator classes.Each operator class defines semantics for index columns of a particular data type and a particular index access me", + "product_code":"dws", + "title":"PG_OPCLASS", + "uri":"dws_04_0603.html", + "doc_type":"devg", + "p_code":"322", + "code":"362" + }, + { + "desc":"PG_OPERATOR records information about operators.", + "product_code":"dws", + "title":"PG_OPERATOR", + "uri":"dws_04_0604.html", + "doc_type":"devg", + "p_code":"322", + "code":"363" + }, + { + "desc":"PG_OPFAMILY defines operator families.Each operator family is a collection of operators and associated support routines that implement the semantics specified for a parti", + "product_code":"dws", + "title":"PG_OPFAMILY", + "uri":"dws_04_0605.html", + "doc_type":"devg", + "p_code":"322", + "code":"364" + }, + { + "desc":"PG_PARTITION records all partitioned tables, table partitions, toast tables on table partitions, and index partitions in the database. Partitioned index information is no", + "product_code":"dws", + "title":"PG_PARTITION", + "uri":"dws_04_0606.html", + "doc_type":"devg", + "p_code":"322", + "code":"365" + }, + { + "desc":"PG_PLTEMPLATE records template information for procedural languages.", + "product_code":"dws", + "title":"PG_PLTEMPLATE", + "uri":"dws_04_0607.html", + "doc_type":"devg", + "p_code":"322", + "code":"366" + }, + { + "desc":"PG_PROC records information about functions or procedures.Query the OID of a specified function. 
For example, obtain the OID 1295 of the justify_days function.Query wheth", + "product_code":"dws", + "title":"PG_PROC", + "uri":"dws_04_0608.html", + "doc_type":"devg", + "p_code":"322", + "code":"367" + }, + { + "desc":"PG_RANGE records information about range types.This is in addition to the types' entries in PG_TYPE.rngsubopc (plus rngcollation, if the element type is collatable) deter", + "product_code":"dws", + "title":"PG_RANGE", + "uri":"dws_04_0609.html", + "doc_type":"devg", + "p_code":"322", + "code":"368" + }, + { + "desc":"PG_REDACTION_COLUMN records the information about the redacted columns.", + "product_code":"dws", + "title":"PG_REDACTION_COLUMN", + "uri":"dws_04_0610.html", + "doc_type":"devg", + "p_code":"322", + "code":"369" + }, + { + "desc":"PG_REDACTION_POLICY records information about the object to be redacted.", + "product_code":"dws", + "title":"PG_REDACTION_POLICY", + "uri":"dws_04_0611.html", + "doc_type":"devg", + "p_code":"322", + "code":"370" + }, + { + "desc":"PG_RLSPOLICY displays the information about row-level access control policies.", + "product_code":"dws", + "title":"PG_RLSPOLICY", + "uri":"dws_04_0612.html", + "doc_type":"devg", + "p_code":"322", + "code":"371" + }, + { + "desc":"PG_RESOURCE_POOL records the information about database resource pool.", + "product_code":"dws", + "title":"PG_RESOURCE_POOL", + "uri":"dws_04_0613.html", + "doc_type":"devg", + "p_code":"322", + "code":"372" + }, + { + "desc":"PG_REWRITE records rewrite rules defined for tables and views.", + "product_code":"dws", + "title":"PG_REWRITE", + "uri":"dws_04_0614.html", + "doc_type":"devg", + "p_code":"322", + "code":"373" + }, + { + "desc":"PG_SECLABEL records security labels on database objects.See also PG_SHSECLABEL, which performs a similar function for security labels of database objects that are shared ", + "product_code":"dws", + "title":"PG_SECLABEL", + "uri":"dws_04_0615.html", + "doc_type":"devg", + "p_code":"322", + "code":"374" + }, + { + "desc":"PG_SHDEPEND records the dependency relationships between database objects and shared objects, such as roles. This information allows GaussDB(DWS) to ensure that those obj", + "product_code":"dws", + "title":"PG_SHDEPEND", + "uri":"dws_04_0616.html", + "doc_type":"devg", + "p_code":"322", + "code":"375" + }, + { + "desc":"PG_SHDESCRIPTION records optional comments for shared database objects. Descriptions can be manipulated with the COMMENT command and viewed with psql's \\d commands.See al", + "product_code":"dws", + "title":"PG_SHDESCRIPTION", + "uri":"dws_04_0617.html", + "doc_type":"devg", + "p_code":"322", + "code":"376" + }, + { + "desc":"PG_SHSECLABEL records security labels on shared database objects. Security labels can be manipulated with the SECURITY LABEL command.For an easier way to view security la", + "product_code":"dws", + "title":"PG_SHSECLABEL", + "uri":"dws_04_0618.html", + "doc_type":"devg", + "p_code":"322", + "code":"377" + }, + { + "desc":"PG_STATISTIC records statistics about tables and index columns in a database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"PG_STATISTIC", + "uri":"dws_04_0619.html", + "doc_type":"devg", + "p_code":"322", + "code":"378" + }, + { + "desc":"PG_STATISTIC_EXT records the extended statistics of tables in a database, such as statistics of multiple columns. Statistics of expressions will be supported later. 
You c", + "product_code":"dws", + "title":"PG_STATISTIC_EXT", + "uri":"dws_04_0620.html", + "doc_type":"devg", + "p_code":"322", + "code":"379" + }, + { + "desc":"PG_SYNONYM records the mapping between synonym object names and other database object names.", + "product_code":"dws", + "title":"PG_SYNONYM", + "uri":"dws_04_0621.html", + "doc_type":"devg", + "p_code":"322", + "code":"380" + }, + { + "desc":"PG_TABLESPACE records tablespace information.", + "product_code":"dws", + "title":"PG_TABLESPACE", + "uri":"dws_04_0622.html", + "doc_type":"devg", + "p_code":"322", + "code":"381" + }, + { + "desc":"PG_TRIGGER records the trigger information.", + "product_code":"dws", + "title":"PG_TRIGGER", + "uri":"dws_04_0623.html", + "doc_type":"devg", + "p_code":"322", + "code":"382" + }, + { + "desc":"PG_TS_CONFIG records entries representing text search configurations. A configuration specifies a particular text search parser and a list of dictionaries to use for each", + "product_code":"dws", + "title":"PG_TS_CONFIG", + "uri":"dws_04_0624.html", + "doc_type":"devg", + "p_code":"322", + "code":"383" + }, + { + "desc":"PG_TS_CONFIG_MAP records entries showing which text search dictionaries should be consulted, and in what order, for each output token type of each text search configurati", + "product_code":"dws", + "title":"PG_TS_CONFIG_MAP", + "uri":"dws_04_0625.html", + "doc_type":"devg", + "p_code":"322", + "code":"384" + }, + { + "desc":"PG_TS_DICT records entries that define text search dictionaries. A dictionary depends on a text search template, which specifies all the implementation functions needed. ", + "product_code":"dws", + "title":"PG_TS_DICT", + "uri":"dws_04_0626.html", + "doc_type":"devg", + "p_code":"322", + "code":"385" + }, + { + "desc":"PG_TS_PARSER records entries defining text search parsers. A parser splits input text into lexemes and assigns a token type to each lexeme. Since a parser must be impleme", + "product_code":"dws", + "title":"PG_TS_PARSER", + "uri":"dws_04_0627.html", + "doc_type":"devg", + "p_code":"322", + "code":"386" + }, + { + "desc":"PG_TS_TEMPLATE records entries defining text search templates. A template provides a framework for text search dictionaries. Since a template must be implemented by C fun", + "product_code":"dws", + "title":"PG_TS_TEMPLATE", + "uri":"dws_04_0628.html", + "doc_type":"devg", + "p_code":"322", + "code":"387" + }, + { + "desc":"PG_TYPE records the information about data types.", + "product_code":"dws", + "title":"PG_TYPE", + "uri":"dws_04_0629.html", + "doc_type":"devg", + "p_code":"322", + "code":"388" + }, + { + "desc":"PG_USER_MAPPING records the mappings from local users to remote.It is accessible only to users with system administrator rights. You can use view PG_USER_MAPPINGS to quer", + "product_code":"dws", + "title":"PG_USER_MAPPING", + "uri":"dws_04_0630.html", + "doc_type":"devg", + "p_code":"322", + "code":"389" + }, + { + "desc":"PG_USER_STATUS records the states of users that access the database. 
It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"PG_USER_STATUS", + "uri":"dws_04_0631.html", + "doc_type":"devg", + "p_code":"322", + "code":"390" + }, + { + "desc":"PG_WORKLOAD_ACTION records information about query_band.", + "product_code":"dws", + "title":"PG_WORKLOAD_ACTION", + "uri":"dws_04_0632.html", + "doc_type":"devg", + "p_code":"322", + "code":"391" + }, + { + "desc":"PGXC_CLASS records the replicated or distributed information for each table.", + "product_code":"dws", + "title":"PGXC_CLASS", + "uri":"dws_04_0633.html", + "doc_type":"devg", + "p_code":"322", + "code":"392" + }, + { + "desc":"PGXC_GROUP records information about node groups.", + "product_code":"dws", + "title":"PGXC_GROUP", + "uri":"dws_04_0634.html", + "doc_type":"devg", + "p_code":"322", + "code":"393" + }, + { + "desc":"PGXC_NODE records information about cluster nodes.Query the CN and DN information of the cluster:", + "product_code":"dws", + "title":"PGXC_NODE", + "uri":"dws_04_0635.html", + "doc_type":"devg", + "p_code":"322", + "code":"394" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"System Views", + "uri":"dws_04_0639.html", + "doc_type":"devg", + "p_code":"320", + "code":"395" + }, + { + "desc":"ALL_ALL_TABLES displays the tables or views accessible to the current user.", + "product_code":"dws", + "title":"ALL_ALL_TABLES", + "uri":"dws_04_0640.html", + "doc_type":"devg", + "p_code":"395", + "code":"396" + }, + { + "desc":"ALL_CONSTRAINTS displays information about constraints accessible to the current user.", + "product_code":"dws", + "title":"ALL_CONSTRAINTS", + "uri":"dws_04_0641.html", + "doc_type":"devg", + "p_code":"395", + "code":"397" + }, + { + "desc":"ALL_CONS_COLUMNS displays information about constraint columns accessible to the current user.", + "product_code":"dws", + "title":"ALL_CONS_COLUMNS", + "uri":"dws_04_0642.html", + "doc_type":"devg", + "p_code":"395", + "code":"398" + }, + { + "desc":"ALL_COL_COMMENTS displays the comment information about table columns accessible to the current user.", + "product_code":"dws", + "title":"ALL_COL_COMMENTS", + "uri":"dws_04_0643.html", + "doc_type":"devg", + "p_code":"395", + "code":"399" + }, + { + "desc":"ALL_DEPENDENCIES displays dependencies between functions and advanced packages accessible to the current user.Currently in GaussDB(DWS), this table is empty without any r", + "product_code":"dws", + "title":"ALL_DEPENDENCIES", + "uri":"dws_04_0644.html", + "doc_type":"devg", + "p_code":"395", + "code":"400" + }, + { + "desc":"ALL_IND_COLUMNS displays all index columns accessible to the current user.", + "product_code":"dws", + "title":"ALL_IND_COLUMNS", + "uri":"dws_04_0645.html", + "doc_type":"devg", + "p_code":"395", + "code":"401" + }, + { + "desc":"ALL_IND_EXPRESSIONS displays information about the expression indexes accessible to the current user.", + "product_code":"dws", + "title":"ALL_IND_EXPRESSIONS", + "uri":"dws_04_0646.html", + "doc_type":"devg", + "p_code":"395", + "code":"402" + }, + { + "desc":"ALL_INDEXES displays information about indexes accessible to the current user.", + "product_code":"dws", + "title":"ALL_INDEXES", + "uri":"dws_04_0647.html", + "doc_type":"devg", + "p_code":"395", + "code":"403" + }, + { + 
"desc":"ALL_OBJECTS displays all database objects accessible to the current user.For details about the value ranges of last_ddl_time and last_ddl_time, see PG_OBJECT.", + "product_code":"dws", + "title":"ALL_OBJECTS", + "uri":"dws_04_0648.html", + "doc_type":"devg", + "p_code":"395", + "code":"404" + }, + { + "desc":"ALL_PROCEDURES displays information about all stored procedures or functions accessible to the current user.", + "product_code":"dws", + "title":"ALL_PROCEDURES", + "uri":"dws_04_0649.html", + "doc_type":"devg", + "p_code":"395", + "code":"405" + }, + { + "desc":"ALL_SEQUENCES displays all sequences accessible to the current user.", + "product_code":"dws", + "title":"ALL_SEQUENCES", + "uri":"dws_04_0650.html", + "doc_type":"devg", + "p_code":"395", + "code":"406" + }, + { + "desc":"ALL_SOURCE displays information about stored procedures or functions accessible to the current user, and provides the columns defined by the stored procedures and functio", + "product_code":"dws", + "title":"ALL_SOURCE", + "uri":"dws_04_0651.html", + "doc_type":"devg", + "p_code":"395", + "code":"407" + }, + { + "desc":"ALL_SYNONYMS displays all synonyms accessible to the current user.", + "product_code":"dws", + "title":"ALL_SYNONYMS", + "uri":"dws_04_0652.html", + "doc_type":"devg", + "p_code":"395", + "code":"408" + }, + { + "desc":"ALL_TAB_COLUMNS displays description information about columns of the tables accessible to the current user.", + "product_code":"dws", + "title":"ALL_TAB_COLUMNS", + "uri":"dws_04_0653.html", + "doc_type":"devg", + "p_code":"395", + "code":"409" + }, + { + "desc":"ALL_TAB_COMMENTS displays comments about all tables and views accessible to the current user.", + "product_code":"dws", + "title":"ALL_TAB_COMMENTS", + "uri":"dws_04_0654.html", + "doc_type":"devg", + "p_code":"395", + "code":"410" + }, + { + "desc":"ALL_TABLES displays all the tables accessible to the current user.", + "product_code":"dws", + "title":"ALL_TABLES", + "uri":"dws_04_0655.html", + "doc_type":"devg", + "p_code":"395", + "code":"411" + }, + { + "desc":"ALL_USERS displays all users of the database visible to the current user, however, it does not describe the users.", + "product_code":"dws", + "title":"ALL_USERS", + "uri":"dws_04_0656.html", + "doc_type":"devg", + "p_code":"395", + "code":"412" + }, + { + "desc":"ALL_VIEWS displays the description about all views accessible to the current user.", + "product_code":"dws", + "title":"ALL_VIEWS", + "uri":"dws_04_0657.html", + "doc_type":"devg", + "p_code":"395", + "code":"413" + }, + { + "desc":"DBA_DATA_FILES displays the description of database files. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_DATA_FILES", + "uri":"dws_04_0658.html", + "doc_type":"devg", + "p_code":"395", + "code":"414" + }, + { + "desc":"DBA_USERS displays all user names in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_USERS", + "uri":"dws_04_0659.html", + "doc_type":"devg", + "p_code":"395", + "code":"415" + }, + { + "desc":"DBA_COL_COMMENTS displays information about table colum comments in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_COL_COMMENTS", + "uri":"dws_04_0660.html", + "doc_type":"devg", + "p_code":"395", + "code":"416" + }, + { + "desc":"DBA_CONSTRAINTS displays information about table constraints in database. 
It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_CONSTRAINTS", + "uri":"dws_04_0661.html", + "doc_type":"devg", + "p_code":"395", + "code":"417" + }, + { + "desc":"DBA_CONS_COLUMNS displays information about constraint columns in database tables. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_CONS_COLUMNS", + "uri":"dws_04_0662.html", + "doc_type":"devg", + "p_code":"395", + "code":"418" + }, + { + "desc":"DBA_IND_COLUMNS displays column information about all indexes in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_IND_COLUMNS", + "uri":"dws_04_0663.html", + "doc_type":"devg", + "p_code":"395", + "code":"419" + }, + { + "desc":"DBA_IND_EXPRESSIONS displays the information about expression indexes in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_IND_EXPRESSIONS", + "uri":"dws_04_0664.html", + "doc_type":"devg", + "p_code":"395", + "code":"420" + }, + { + "desc":"DBA_IND_PARTITIONS displays information about all index partitions in the database. Each index partition of a partitioned table in the database, if present, has a row of ", + "product_code":"dws", + "title":"DBA_IND_PARTITIONS", + "uri":"dws_04_0665.html", + "doc_type":"devg", + "p_code":"395", + "code":"421" + }, + { + "desc":"DBA_INDEXES displays all indexes in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_INDEXES", + "uri":"dws_04_0666.html", + "doc_type":"devg", + "p_code":"395", + "code":"422" + }, + { + "desc":"DBA_OBJECTS displays all database objects in the database. It is accessible only to users with system administrator rights.For details about the value ranges of last_ddl_", + "product_code":"dws", + "title":"DBA_OBJECTS", + "uri":"dws_04_0667.html", + "doc_type":"devg", + "p_code":"395", + "code":"423" + }, + { + "desc":"DBA_PART_INDEXES displays information about all partitioned table indexes in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_PART_INDEXES", + "uri":"dws_04_0668.html", + "doc_type":"devg", + "p_code":"395", + "code":"424" + }, + { + "desc":"DBA_PART_TABLES displays information about all partitioned tables in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_PART_TABLES", + "uri":"dws_04_0669.html", + "doc_type":"devg", + "p_code":"395", + "code":"425" + }, + { + "desc":"DBA_PROCEDURES displays information about all stored procedures and functions in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_PROCEDURES", + "uri":"dws_04_0670.html", + "doc_type":"devg", + "p_code":"395", + "code":"426" + }, + { + "desc":"DBA_SEQUENCES displays information about all sequences in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_SEQUENCES", + "uri":"dws_04_0671.html", + "doc_type":"devg", + "p_code":"395", + "code":"427" + }, + { + "desc":"DBA_SOURCE displays all stored procedures or functions in the database, and it provides the columns defined by the stored procedures or functions. 
It is accessible only t", + "product_code":"dws", + "title":"DBA_SOURCE", + "uri":"dws_04_0672.html", + "doc_type":"devg", + "p_code":"395", + "code":"428" + }, + { + "desc":"DBA_SYNONYMS displays all synonyms in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_SYNONYMS", + "uri":"dws_04_0673.html", + "doc_type":"devg", + "p_code":"395", + "code":"429" + }, + { + "desc":"DBA_TAB_COLUMNS displays the columns of tables. Each column of a table in the database has a row in DBA_TAB_COLUMNS. It is accessible only to users with system administra", + "product_code":"dws", + "title":"DBA_TAB_COLUMNS", + "uri":"dws_04_0674.html", + "doc_type":"devg", + "p_code":"395", + "code":"430" + }, + { + "desc":"DBA_TAB_COMMENTS displays comments about all tables and views in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_TAB_COMMENTS", + "uri":"dws_04_0675.html", + "doc_type":"devg", + "p_code":"395", + "code":"431" + }, + { + "desc":"DBA_TAB_PARTITIONS displays information about all partitions in the database.", + "product_code":"dws", + "title":"DBA_TAB_PARTITIONS", + "uri":"dws_04_0676.html", + "doc_type":"devg", + "p_code":"395", + "code":"432" + }, + { + "desc":"DBA_TABLES displays all tables in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_TABLES", + "uri":"dws_04_0677.html", + "doc_type":"devg", + "p_code":"395", + "code":"433" + }, + { + "desc":"DBA_TABLESPACES displays information about available tablespaces. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_TABLESPACES", + "uri":"dws_04_0678.html", + "doc_type":"devg", + "p_code":"395", + "code":"434" + }, + { + "desc":"DBA_TRIGGERS displays information about triggers in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_TRIGGERS", + "uri":"dws_04_0679.html", + "doc_type":"devg", + "p_code":"395", + "code":"435" + }, + { + "desc":"DBA_VIEWS displays views in the database. It is accessible only to users with system administrator rights.", + "product_code":"dws", + "title":"DBA_VIEWS", + "uri":"dws_04_0680.html", + "doc_type":"devg", + "p_code":"395", + "code":"436" + }, + { + "desc":"DUAL is automatically created by the database based on the data dictionary. It has only one text column in only one row for storing expression calculation results. It is ", + "product_code":"dws", + "title":"DUAL", + "uri":"dws_04_0681.html", + "doc_type":"devg", + "p_code":"395", + "code":"437" + }, + { + "desc":"GLOBAL_REDO_STAT displays the total statistics of XLOG redo operations on all nodes in a cluster. Except the avgiotim column (indicating the average redo write time of al", + "product_code":"dws", + "title":"GLOBAL_REDO_STAT", + "uri":"dws_04_0682.html", + "doc_type":"devg", + "p_code":"395", + "code":"438" + }, + { + "desc":"GLOBAL_REL_IOSTAT displays the total disk I/O statistics of all nodes in a cluster. 
The name of each column in this view is the same as that in the GS_REL_IOSTAT view, bu", + "product_code":"dws", + "title":"GLOBAL_REL_IOSTAT", + "uri":"dws_04_0683.html", + "doc_type":"devg", + "p_code":"395", + "code":"439" + }, + { + "desc":"GLOBAL_STAT_DATABASE displays the status and statistics of databases on all nodes in a cluster.When you query the GLOBAL_STAT_DATABASE view on a CN, the respective values", + "product_code":"dws", + "title":"GLOBAL_STAT_DATABASE", + "uri":"dws_04_0684.html", + "doc_type":"devg", + "p_code":"395", + "code":"440" + }, + { + "desc":"GLOBAL_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in all workload Cgroups in a cluster, including the number of SELECT, UPDATE, INSER", + "product_code":"dws", + "title":"GLOBAL_WORKLOAD_SQL_COUNT", + "uri":"dws_04_0685.html", + "doc_type":"devg", + "p_code":"395", + "code":"441" + }, + { + "desc":"GLOBAL_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in all workload Cgroups in a cluster, including the maximum, minimum, average, ", + "product_code":"dws", + "title":"GLOBAL_WORKLOAD_SQL_ELAPSE_TIME", + "uri":"dws_04_0686.html", + "doc_type":"devg", + "p_code":"395", + "code":"442" + }, + { + "desc":"GLOBAL_WORKLOAD_TRANSACTION provides the total transaction information about workload Cgroups on all CNs in the cluster. This view is accessible only to users with system", + "product_code":"dws", + "title":"GLOBAL_WORKLOAD_TRANSACTION", + "uri":"dws_04_0687.html", + "doc_type":"devg", + "p_code":"395", + "code":"443" + }, + { + "desc":"GS_ALL_CONTROL_GROUP_INFO displays all Cgroup information in a database.", + "product_code":"dws", + "title":"GS_ALL_CONTROL_GROUP_INFO", + "uri":"dws_04_0688.html", + "doc_type":"devg", + "p_code":"395", + "code":"444" + }, + { + "desc":"GS_CLUSTER_RESOURCE_INFO displays a DN resource summary.", + "product_code":"dws", + "title":"GS_CLUSTER_RESOURCE_INFO", + "uri":"dws_04_0689.html", + "doc_type":"devg", + "p_code":"395", + "code":"445" + }, + { + "desc":"The database parses each received SQL text string and generates an internal parsing tree. The database traverses the parsing tree and ignores constant values in the parsi", + "product_code":"dws", + "title":"GS_INSTR_UNIQUE_SQL", + "uri":"dws_04_0690.html", + "doc_type":"devg", + "p_code":"395", + "code":"446" + }, + { + "desc":"GS_REL_IOSTAT displays disk I/O statistics on the current node. In the current version, only one page is read or written in each read or write operation. Therefore, the n", + "product_code":"dws", + "title":"GS_REL_IOSTAT", + "uri":"dws_04_0691.html", + "doc_type":"devg", + "p_code":"395", + "code":"447" + }, + { + "desc":"The GS_NODE_STAT_RESET_TIME view provides the reset time of statistics on the current node and returns the timestamp with the time zone. 
For details, see the get_node_sta", + "product_code":"dws", + "title":"GS_NODE_STAT_RESET_TIME", + "uri":"dws_04_0692.html", + "doc_type":"devg", + "p_code":"395", + "code":"448" + }, + { + "desc":"GS_SESSION_CPU_STATISTICS displays load management information about CPU usage of ongoing complex jobs executed by the current user.", + "product_code":"dws", + "title":"GS_SESSION_CPU_STATISTICS", + "uri":"dws_04_0693.html", + "doc_type":"devg", + "p_code":"395", + "code":"449" + }, + { + "desc":"GS_SESSION_MEMORY_STATISTICS displays load management information about memory usage of ongoing complex jobs executed by the current user.", + "product_code":"dws", + "title":"GS_SESSION_MEMORY_STATISTICS", + "uri":"dws_04_0694.html", + "doc_type":"devg", + "p_code":"395", + "code":"450" + }, + { + "desc":"GS_SQL_COUNT displays statistics about the five types of statements (SELECT, INSERT, UPDATE, DELETE, and MERGE INTO) executed on the current node of the database, includi", + "product_code":"dws", + "title":"GS_SQL_COUNT", + "uri":"dws_04_0695.html", + "doc_type":"devg", + "p_code":"395", + "code":"451" + }, + { + "desc":"GS_WAIT_EVENTS displays statistics about waiting status and events on the current node.The values of statistical columns in this view are accumulated only when the enable", + "product_code":"dws", + "title":"GS_WAIT_EVENTS", + "uri":"dws_04_0696.html", + "doc_type":"devg", + "p_code":"395", + "code":"452" + }, + { + "desc":"This view displays the execution information about operators in the query statements that have been executed on the current CN. The information comes from the system cata", + "product_code":"dws", + "title":"GS_WLM_OPERATOR_INFO", + "uri":"dws_04_0701.html", + "doc_type":"devg", + "p_code":"395", + "code":"453" + }, + { + "desc":"This view displays the records of operators in jobs that have been executed by the current user on the current CN.This view is used by Database Manager to query data from", + "product_code":"dws", + "title":"GS_WLM_OPERATOR_HISTORY", + "uri":"dws_04_0702.html", + "doc_type":"devg", + "p_code":"395", + "code":"454" + }, + { + "desc":"GS_WLM_OPERATOR_STATISTICS displays the operators of the jobs that are being executed by the current user.", + "product_code":"dws", + "title":"GS_WLM_OPERATOR_STATISTICS", + "uri":"dws_04_0703.html", + "doc_type":"devg", + "p_code":"395", + "code":"455" + }, + { + "desc":"This view displays the execution information about the query statements that have been executed on the current CN. The information comes from the system catalog dbms_om. ", + "product_code":"dws", + "title":"GS_WLM_SESSION_INFO", + "uri":"dws_04_0704.html", + "doc_type":"devg", + "p_code":"395", + "code":"456" + }, + { + "desc":"GS_WLM_SESSION_HISTORY displays load management information about a completed job executed by the current user on the current CN. 
This view is used by Database Manager to", + "product_code":"dws", + "title":"GS_WLM_SESSION_HISTORY", + "uri":"dws_04_0705.html", + "doc_type":"devg", + "p_code":"395", + "code":"457" + }, + { + "desc":"GS_WLM_SESSION_STATISTICS displays load management information about jobs being executed by the current user on the current CN.", + "product_code":"dws", + "title":"GS_WLM_SESSION_STATISTICS", + "uri":"dws_04_0706.html", + "doc_type":"devg", + "p_code":"395", + "code":"458" + }, + { + "desc":"GS_WLM_SQL_ALLOW displays the configured resource management SQL whitelist, including the default SQL whitelist and the SQL whitelist configured using the GUC parameter w", + "product_code":"dws", + "title":"GS_WLM_SQL_ALLOW", + "uri":"dws_04_0708.html", + "doc_type":"devg", + "p_code":"395", + "code":"459" + }, + { + "desc":"GS_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in workload Cgroups on the current node, including the number of SELECT, UPDATE, INSERT", + "product_code":"dws", + "title":"GS_WORKLOAD_SQL_COUNT", + "uri":"dws_04_0709.html", + "doc_type":"devg", + "p_code":"395", + "code":"460" + }, + { + "desc":"GS_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in workload Cgroups on the current node, including the maximum, minimum, average, a", + "product_code":"dws", + "title":"GS_WORKLOAD_SQL_ELAPSE_TIME", + "uri":"dws_04_0710.html", + "doc_type":"devg", + "p_code":"395", + "code":"461" + }, + { + "desc":"GS_WORKLOAD_TRANSACTION provides transaction information about workload Cgroups on a single CN. The database records the number of times that each workload Cgroup commits", + "product_code":"dws", + "title":"GS_WORKLOAD_TRANSACTION", + "uri":"dws_04_0711.html", + "doc_type":"devg", + "p_code":"395", + "code":"462" + }, + { + "desc":"GS_STAT_DB_CU displays CU hits in a database and in each node in a cluster. You can clear it using gs_stat_reset().", + "product_code":"dws", + "title":"GS_STAT_DB_CU", + "uri":"dws_04_0712.html", + "doc_type":"devg", + "p_code":"395", + "code":"463" + }, + { + "desc":"GS_STAT_SESSION_CU displays the CU hit rate of running sessions on each node in a cluster. This data about a session is cleared when you exit this session or restart the ", + "product_code":"dws", + "title":"GS_STAT_SESSION_CU", + "uri":"dws_04_0713.html", + "doc_type":"devg", + "p_code":"395", + "code":"464" + }, + { + "desc":"GS_TOTAL_NODEGROUP_MEMORY_DETAIL displays statistics about memory usage of the logical cluster that the current database belongs to in the unit of MB.", + "product_code":"dws", + "title":"GS_TOTAL_NODEGROUP_MEMORY_DETAIL", + "uri":"dws_04_0714.html", + "doc_type":"devg", + "p_code":"395", + "code":"465" + }, + { + "desc":"GS_USER_TRANSACTION provides transaction information about users on a single CN. The database records the number of times that each user commits and rolls back transactio", + "product_code":"dws", + "title":"GS_USER_TRANSACTION", + "uri":"dws_04_0715.html", + "doc_type":"devg", + "p_code":"395", + "code":"466" + }, + { + "desc":"GS_VIEW_DEPENDENCY allows you to query the direct dependencies of all views visible to the current user.", + "product_code":"dws", + "title":"GS_VIEW_DEPENDENCY", + "uri":"dws_04_0716.html", + "doc_type":"devg", + "p_code":"395", + "code":"467" + }, + { + "desc":"GS_VIEW_DEPENDENCY_PATH allows you to query the direct dependencies of all views visible to the current user. 
If the base table on which the view depends exists and the d", + "product_code":"dws", + "title":"GS_VIEW_DEPENDENCY_PATH", + "uri":"dws_04_0948.html", + "doc_type":"devg", + "p_code":"395", + "code":"468" + }, + { + "desc":"GS_VIEW_INVALID queries all unavailable views visible to the current user. If the base table, function, or synonym that the view depends on is abnormal, the validtype col", + "product_code":"dws", + "title":"GS_VIEW_INVALID", + "uri":"dws_04_0717.html", + "doc_type":"devg", + "p_code":"395", + "code":"469" + }, + { + "desc":"MPP_TABLES displays information about tables in PGXC_CLASS.", + "product_code":"dws", + "title":"MPP_TABLES", + "uri":"dws_04_0998.html", + "doc_type":"devg", + "p_code":"395", + "code":"470" + }, + { + "desc":"PG_AVAILABLE_EXTENSION_VERSIONS displays the extension versions of certain database features.", + "product_code":"dws", + "title":"PG_AVAILABLE_EXTENSION_VERSIONS", + "uri":"dws_04_0718.html", + "doc_type":"devg", + "p_code":"395", + "code":"471" + }, + { + "desc":"PG_AVAILABLE_EXTENSIONS displays the extended information about certain database features.", + "product_code":"dws", + "title":"PG_AVAILABLE_EXTENSIONS", + "uri":"dws_04_0719.html", + "doc_type":"devg", + "p_code":"395", + "code":"472" + }, + { + "desc":"On any normal node in a cluster, PG_BULKLOAD_STATISTICS displays the execution status of the import and export services. Each import or export service corresponds to a re", + "product_code":"dws", + "title":"PG_BULKLOAD_STATISTICS", + "uri":"dws_04_0720.html", + "doc_type":"devg", + "p_code":"395", + "code":"473" + }, + { + "desc":"PG_COMM_CLIENT_INFO stores the client connection information of a single node. (You can query this view on a DN to view the information about the connection between the C", + "product_code":"dws", + "title":"PG_COMM_CLIENT_INFO", + "uri":"dws_04_0721.html", + "doc_type":"devg", + "p_code":"395", + "code":"474" + }, + { + "desc":"PG_COMM_DELAY displays the communication library delay status for a single DN.", + "product_code":"dws", + "title":"PG_COMM_DELAY", + "uri":"dws_04_0722.html", + "doc_type":"devg", + "p_code":"395", + "code":"475" + }, + { + "desc":"PG_COMM_STATUS displays the communication library status for a single DN.", + "product_code":"dws", + "title":"PG_COMM_STATUS", + "uri":"dws_04_0723.html", + "doc_type":"devg", + "p_code":"395", + "code":"476" + }, + { + "desc":"PG_COMM_RECV_STREAM displays the receiving stream status of all the communication libraries for a single DN.", + "product_code":"dws", + "title":"PG_COMM_RECV_STREAM", + "uri":"dws_04_0724.html", + "doc_type":"devg", + "p_code":"395", + "code":"477" + }, + { + "desc":"PG_COMM_SEND_STREAM displays the sending stream status of all the communication libraries for a single DN.", + "product_code":"dws", + "title":"PG_COMM_SEND_STREAM", + "uri":"dws_04_0725.html", + "doc_type":"devg", + "p_code":"395", + "code":"478" + }, + { + "desc":"PG_CONTROL_GROUP_CONFIG displays the Cgroup configuration information in the system.", + "product_code":"dws", + "title":"PG_CONTROL_GROUP_CONFIG", + "uri":"dws_04_0726.html", + "doc_type":"devg", + "p_code":"395", + "code":"479" + }, + { + "desc":"PG_CURSORS displays the cursors that are currently available.", + "product_code":"dws", + "title":"PG_CURSORS", + "uri":"dws_04_0727.html", + "doc_type":"devg", + "p_code":"395", + "code":"480" + }, + { + "desc":"PG_EXT_STATS displays extension statistics stored in the PG_STATISTIC_EXT table. 
Extension statistics are statistics on multiple columns.", + "product_code":"dws", + "title":"PG_EXT_STATS", + "uri":"dws_04_0728.html", + "doc_type":"devg", + "p_code":"395", + "code":"481" + }, + { + "desc":"PG_GET_INVALID_BACKENDS displays the information about backend threads on the CN that are connected to the current standby DN.", + "product_code":"dws", + "title":"PG_GET_INVALID_BACKENDS", + "uri":"dws_04_0729.html", + "doc_type":"devg", + "p_code":"395", + "code":"482" + }, + { + "desc":"PG_GET_SENDERS_CATCHUP_TIME displays the catchup information of the currently active primary/standby instance sending thread on a single DN.", + "product_code":"dws", + "title":"PG_GET_SENDERS_CATCHUP_TIME", + "uri":"dws_04_0730.html", + "doc_type":"devg", + "p_code":"395", + "code":"483" + }, + { + "desc":"PG_GROUP displays the database role authentication and the relationship between roles.", + "product_code":"dws", + "title":"PG_GROUP", + "uri":"dws_04_0731.html", + "doc_type":"devg", + "p_code":"395", + "code":"484" + }, + { + "desc":"PG_INDEXES displays access to useful information about each index in the database.", + "product_code":"dws", + "title":"PG_INDEXES", + "uri":"dws_04_0732.html", + "doc_type":"devg", + "p_code":"395", + "code":"485" + }, + { + "desc":"The PG_JOB view replaces the PG_JOB system catalog in earlier versions and provides forward compatibility with earlier versions. The original PG_JOB system catalog is cha", + "product_code":"dws", + "title":"PG_JOB", + "uri":"dws_04_0733.html", + "doc_type":"devg", + "p_code":"395", + "code":"486" + }, + { + "desc":"The PG_JOB_PROC view replaces the PG_JOB_PROC system catalog in earlier versions and provides forward compatibility with earlier versions. The original PG_JOB_PROC and PG", + "product_code":"dws", + "title":"PG_JOB_PROC", + "uri":"dws_04_0734.html", + "doc_type":"devg", + "p_code":"395", + "code":"487" + }, + { + "desc":"PG_JOB_SINGLE displays job information about the current node.", + "product_code":"dws", + "title":"PG_JOB_SINGLE", + "uri":"dws_04_0735.html", + "doc_type":"devg", + "p_code":"395", + "code":"488" + }, + { + "desc":"PG_LIFECYCLE_DATA_DISTRIBUTE displays the distribution of cold and hot data in a multi-temperature table of OBS.", + "product_code":"dws", + "title":"PG_LIFECYCLE_DATA_DISTRIBUTE", + "uri":"dws_04_0736.html", + "doc_type":"devg", + "p_code":"395", + "code":"489" + }, + { + "desc":"PG_LOCKS displays information about the locks held by open transactions.", + "product_code":"dws", + "title":"PG_LOCKS", + "uri":"dws_04_0737.html", + "doc_type":"devg", + "p_code":"395", + "code":"490" + }, + { + "desc":"PG_NODE_ENV displays the environmental variable information about the current node.", + "product_code":"dws", + "title":"PG_NODE_ENV", + "uri":"dws_04_0738.html", + "doc_type":"devg", + "p_code":"395", + "code":"491" + }, + { + "desc":"PG_OS_THREADS displays the status information about all the threads under the current node.", + "product_code":"dws", + "title":"PG_OS_THREADS", + "uri":"dws_04_0739.html", + "doc_type":"devg", + "p_code":"395", + "code":"492" + }, + { + "desc":"PG_POOLER_STATUS displays the cache connection status in the pooler. 
PG_POOLER_STATUS can be queried only on the CN, and displays the connection cache information about the po", + "product_code":"dws", + "title":"PG_POOLER_STATUS", + "uri":"dws_04_0740.html", + "doc_type":"devg", + "p_code":"395", + "code":"493" + }, + { + "desc":"PG_PREPARED_STATEMENTS displays all prepared statements that are available in the current session.", + "product_code":"dws", + "title":"PG_PREPARED_STATEMENTS", + "uri":"dws_04_0741.html", + "doc_type":"devg", + "p_code":"395", + "code":"494" + }, + { + "desc":"PG_PREPARED_XACTS displays information about transactions that are currently prepared for two-phase commit.", + "product_code":"dws", + "title":"PG_PREPARED_XACTS", + "uri":"dws_04_0742.html", + "doc_type":"devg", + "p_code":"395", + "code":"495" + }, + { + "desc":"PG_QUERYBAND_ACTION displays information about the object associated with query_band and the query_band query order.", + "product_code":"dws", + "title":"PG_QUERYBAND_ACTION", + "uri":"dws_04_0743.html", + "doc_type":"devg", + "p_code":"395", + "code":"496" + }, + { + "desc":"PG_REPLICATION_SLOTS displays the replication node information.", + "product_code":"dws", + "title":"PG_REPLICATION_SLOTS", + "uri":"dws_04_0744.html", + "doc_type":"devg", + "p_code":"395", + "code":"497" + }, + { + "desc":"PG_ROLES displays information about database roles.", + "product_code":"dws", + "title":"PG_ROLES", + "uri":"dws_04_0745.html", + "doc_type":"devg", + "p_code":"395", + "code":"498" + }, + { + "desc":"PG_RULES displays information about rewrite rules.", + "product_code":"dws", + "title":"PG_RULES", + "uri":"dws_04_0746.html", + "doc_type":"devg", + "p_code":"395", + "code":"499" + }, + { + "desc":"PG_RUNNING_XACTS displays the running transaction information on the current node.", + "product_code":"dws", + "title":"PG_RUNNING_XACTS", + "uri":"dws_04_0747.html", + "doc_type":"devg", + "p_code":"395", + "code":"500" + }, + { + "desc":"PG_SECLABELS displays information about security labels.", + "product_code":"dws", + "title":"PG_SECLABELS", + "uri":"dws_04_0748.html", + "doc_type":"devg", + "p_code":"395", + "code":"501" + }, + { + "desc":"PG_SESSION_WLMSTAT displays the corresponding load management information about the task currently executed by the user.", + "product_code":"dws", + "title":"PG_SESSION_WLMSTAT", + "uri":"dws_04_0749.html", + "doc_type":"devg", + "p_code":"395", + "code":"502" + }, + { + "desc":"PG_SESSION_IOSTAT displays the I/O load management information about the task currently executed by the user. IOPS is counted by ones for column storage and by thousands f", + "product_code":"dws", + "title":"PG_SESSION_IOSTAT", + "uri":"dws_04_0750.html", + "doc_type":"devg", + "p_code":"395", + "code":"503" + }, + { + "desc":"PG_SETTINGS displays information about parameters of the running database.", + "product_code":"dws", + "title":"PG_SETTINGS", + "uri":"dws_04_0751.html", + "doc_type":"devg", + "p_code":"395", + "code":"504" + }, + { + "desc":"PG_SHADOW displays properties of all roles that are marked as rolcanlogin in PG_AUTHID. The name stems from the fact that this table should not be readable by the public s", + "product_code":"dws", + "title":"PG_SHADOW", + "uri":"dws_04_0752.html", + "doc_type":"devg", + "p_code":"395", + "code":"505" + }, + { + "desc":"PG_SHARED_MEMORY_DETAIL displays usage information about all the shared memory contexts.", + "product_code":"dws", + "title":"PG_SHARED_MEMORY_DETAIL", + "uri":"dws_04_0753.html", + "doc_type":"devg", + "p_code":"395", + "code":"506" + }, + { + 
"desc":"PG_STATS displays the single-column statistics stored in the pg_statistic table.", + "product_code":"dws", + "title":"PG_STATS", + "uri":"dws_04_0754.html", + "doc_type":"devg", + "p_code":"395", + "code":"507" + }, + { + "desc":"PG_STAT_ACTIVITY displays information about the current user's queries.", + "product_code":"dws", + "title":"PG_STAT_ACTIVITY", + "uri":"dws_04_0755.html", + "doc_type":"devg", + "p_code":"395", + "code":"508" + }, + { + "desc":"PG_STAT_ALL_INDEXES displays access informaton about all indexes in the database, with information about each index displayed in a row.Indexes can be used via either simp", + "product_code":"dws", + "title":"PG_STAT_ALL_INDEXES", + "uri":"dws_04_0757.html", + "doc_type":"devg", + "p_code":"395", + "code":"509" + }, + { + "desc":"PG_STAT_ALL_TABLES displays access information about all rows in all tables (including TOAST tables) in the database.", + "product_code":"dws", + "title":"PG_STAT_ALL_TABLES", + "uri":"dws_04_0758.html", + "doc_type":"devg", + "p_code":"395", + "code":"510" + }, + { + "desc":"PG_STAT_BAD_BLOCK displays statistics about page or CU verification failures after a node is started.", + "product_code":"dws", + "title":"PG_STAT_BAD_BLOCK", + "uri":"dws_04_0759.html", + "doc_type":"devg", + "p_code":"395", + "code":"511" + }, + { + "desc":"PG_STAT_BGWRITER displays statistics about the background writer process's activity.", + "product_code":"dws", + "title":"PG_STAT_BGWRITER", + "uri":"dws_04_0760.html", + "doc_type":"devg", + "p_code":"395", + "code":"512" + }, + { + "desc":"PG_STAT_DATABASE displays the status and statistics of each database on the current node.", + "product_code":"dws", + "title":"PG_STAT_DATABASE", + "uri":"dws_04_0761.html", + "doc_type":"devg", + "p_code":"395", + "code":"513" + }, + { + "desc":"PG_STAT_DATABASE_CONFLICTS displays statistics about database conflicts.", + "product_code":"dws", + "title":"PG_STAT_DATABASE_CONFLICTS", + "uri":"dws_04_0762.html", + "doc_type":"devg", + "p_code":"395", + "code":"514" + }, + { + "desc":"PG_STAT_GET_MEM_MBYTES_RESERVED displays the current activity information of a thread stored in memory. You need to specify the thread ID (pid in PG_STAT_ACTIVITY) for qu", + "product_code":"dws", + "title":"PG_STAT_GET_MEM_MBYTES_RESERVED", + "uri":"dws_04_0763.html", + "doc_type":"devg", + "p_code":"395", + "code":"515" + }, + { + "desc":"PG_STAT_USER_FUNCTIONS displays user-defined function status information in the namespace. 
(The language of the function is a non-internal language.)", + "product_code":"dws", + "title":"PG_STAT_USER_FUNCTIONS", + "uri":"dws_04_0764.html", + "doc_type":"devg", + "p_code":"395", + "code":"516" + }, + { + "desc":"PG_STAT_USER_INDEXES displays information about the index status of user-defined ordinary tables and TOAST tables.", + "product_code":"dws", + "title":"PG_STAT_USER_INDEXES", + "uri":"dws_04_0765.html", + "doc_type":"devg", + "p_code":"395", + "code":"517" + }, + { + "desc":"PG_STAT_USER_TABLES displays status information about user-defined ordinary tables and TOAST tables in all namespaces.", + "product_code":"dws", + "title":"PG_STAT_USER_TABLES", + "uri":"dws_04_0766.html", + "doc_type":"devg", + "p_code":"395", + "code":"518" + }, + { + "desc":"PG_STAT_REPLICATION displays information about log synchronization status, such as the locations of the sender sending logs and the receiver receiving logs.", + "product_code":"dws", + "title":"PG_STAT_REPLICATION", + "uri":"dws_04_0767.html", + "doc_type":"devg", + "p_code":"395", + "code":"519" + }, + { + "desc":"PG_STAT_SYS_INDEXES displays the index status information about all the system catalogs in the pg_catalog and information_schema schemas.", + "product_code":"dws", + "title":"PG_STAT_SYS_INDEXES", + "uri":"dws_04_0768.html", + "doc_type":"devg", + "p_code":"395", + "code":"520" + }, + { + "desc":"PG_STAT_SYS_TABLES displays the statistics about the system catalogs of all the namespaces in pg_catalog and information_schema schemas.", + "product_code":"dws", + "title":"PG_STAT_SYS_TABLES", + "uri":"dws_04_0769.html", + "doc_type":"devg", + "p_code":"395", + "code":"521" + }, + { + "desc":"PG_STAT_XACT_ALL_TABLES displays the transaction status information about all ordinary tables and TOAST tables in the namespaces.", + "product_code":"dws", + "title":"PG_STAT_XACT_ALL_TABLES", + "uri":"dws_04_0770.html", + "doc_type":"devg", + "p_code":"395", + "code":"522" + }, + { + "desc":"PG_STAT_XACT_SYS_TABLES displays the transaction status information of the system catalog in the namespace.", + "product_code":"dws", + "title":"PG_STAT_XACT_SYS_TABLES", + "uri":"dws_04_0771.html", + "doc_type":"devg", + "p_code":"395", + "code":"523" + }, + { + "desc":"PG_STAT_XACT_USER_FUNCTIONS displays statistics about function executions, with statistics about each execution displayed in a row.", + "product_code":"dws", + "title":"PG_STAT_XACT_USER_FUNCTIONS", + "uri":"dws_04_0772.html", + "doc_type":"devg", + "p_code":"395", + "code":"524" + }, + { + "desc":"PG_STAT_XACT_USER_TABLES displays the transaction status information of the user table in the namespace.", + "product_code":"dws", + "title":"PG_STAT_XACT_USER_TABLES", + "uri":"dws_04_0773.html", + "doc_type":"devg", + "p_code":"395", + "code":"525" + }, + { + "desc":"PG_STATIO_ALL_INDEXES contains one row for each index in the current database, showing I/O statistics about accesses to that specific index.", + "product_code":"dws", + "title":"PG_STATIO_ALL_INDEXES", + "uri":"dws_04_0774.html", + "doc_type":"devg", + "p_code":"395", + "code":"526" + }, + { + "desc":"PG_STATIO_ALL_SEQUENCES contains one row for each sequence in the current database, showing I/O statistics about accesses to that specific sequence.", + "product_code":"dws", + "title":"PG_STATIO_ALL_SEQUENCES", + "uri":"dws_04_0775.html", + "doc_type":"devg", + "p_code":"395", + "code":"527" + }, + { + "desc":"PG_STATIO_ALL_TABLES contains one row for each table in the current database (including TOAST tables), showing I/O 
statistics about accesses to that specific table.", + "product_code":"dws", + "title":"PG_STATIO_ALL_TABLES", + "uri":"dws_04_0776.html", + "doc_type":"devg", + "p_code":"395", + "code":"528" + }, + { + "desc":"PG_STATIO_SYS_INDEXES displays the I/O status information about all system catalog indexes in the namespace.", + "product_code":"dws", + "title":"PG_STATIO_SYS_INDEXES", + "uri":"dws_04_0777.html", + "doc_type":"devg", + "p_code":"395", + "code":"529" + }, + { + "desc":"PG_STATIO_SYS_SEQUENCES displays the I/O status information about all the system sequences in the namespace.", + "product_code":"dws", + "title":"PG_STATIO_SYS_SEQUENCES", + "uri":"dws_04_0778.html", + "doc_type":"devg", + "p_code":"395", + "code":"530" + }, + { + "desc":"PG_STATIO_SYS_TABLES displays the I/O status information about all the system catalogs in the namespace.", + "product_code":"dws", + "title":"PG_STATIO_SYS_TABLES", + "uri":"dws_04_0779.html", + "doc_type":"devg", + "p_code":"395", + "code":"531" + }, + { + "desc":"PG_STATIO_USER_INDEXES displays the I/O status information about all the user relationship table indexes in the namespace.", + "product_code":"dws", + "title":"PG_STATIO_USER_INDEXES", + "uri":"dws_04_0780.html", + "doc_type":"devg", + "p_code":"395", + "code":"532" + }, + { + "desc":"PG_STATIO_USER_SEQUENCES displays the I/O status information about all the user relation table sequences in the namespace.", + "product_code":"dws", + "title":"PG_STATIO_USER_SEQUENCES", + "uri":"dws_04_0781.html", + "doc_type":"devg", + "p_code":"395", + "code":"533" + }, + { + "desc":"PG_STATIO_USER_TABLES displays the I/O status information about all the user relation tables in the namespace.", + "product_code":"dws", + "title":"PG_STATIO_USER_TABLES", + "uri":"dws_04_0782.html", + "doc_type":"devg", + "p_code":"395", + "code":"534" + }, + { + "desc":"PG_THREAD_WAIT_STATUS allows you to check the block waiting status of the backend thread and auxiliary thread of the current instance. The waiting statuses in the wait_s", + "product_code":"dws", + "title":"PG_THREAD_WAIT_STATUS", + "uri":"dws_04_0783.html", + "doc_type":"devg", + "p_code":"395", + "code":"535" + }, + { + "desc":"PG_TABLES displays access to useful information about each table in the database.", + "product_code":"dws", + "title":"PG_TABLES", + "uri":"dws_04_0784.html", + "doc_type":"devg", + "p_code":"395", + "code":"536" + }, + { + "desc":"PG_TDE_INFO displays the encryption information about the current cluster. Check whether the current cluster is encrypted, and check the encryption algorithm (if any) used", + "product_code":"dws", + "title":"PG_TDE_INFO", + "uri":"dws_04_0785.html", + "doc_type":"devg", + "p_code":"395", + "code":"537" + }, + { + "desc":"PG_TIMEZONE_ABBREVS displays all time zone abbreviations that can be recognized by the input routines.", + "product_code":"dws", + "title":"PG_TIMEZONE_ABBREVS", + "uri":"dws_04_0786.html", + "doc_type":"devg", + "p_code":"395", + "code":"538" + }, + { + "desc":"PG_TIMEZONE_NAMES displays all time zone names that can be recognized by SET TIMEZONE, along with their associated abbreviations, UTC offsets, and daylight saving time st", + "product_code":"dws", + "title":"PG_TIMEZONE_NAMES", + "uri":"dws_04_0787.html", + "doc_type":"devg", + "p_code":"395", + "code":"539" + }, + { + "desc":"PG_TOTAL_MEMORY_DETAIL displays the memory usage of a certain node in the database.", + "product_code":"dws", + "title":"PG_TOTAL_MEMORY_DETAIL", + "uri":"dws_04_0788.html", + "doc_type":"devg", + "p_code":"395", + "code":"540" + 
}, + { + "desc":"PG_TOTAL_SCHEMA_INFO displays the storage usage of all schemas in each database. This view is valid only if use_workload_manager is set to on.", + "product_code":"dws", + "title":"PG_TOTAL_SCHEMA_INFO", + "uri":"dws_04_0789.html", + "doc_type":"devg", + "p_code":"395", + "code":"541" + }, + { + "desc":"PG_TOTAL_USER_RESOURCE_INFO displays the resource usage of all users. Only administrators can query this view. This view is valid only if use_workload_manager is set to o", + "product_code":"dws", + "title":"PG_TOTAL_USER_RESOURCE_INFO", + "uri":"dws_04_0790.html", + "doc_type":"devg", + "p_code":"395", + "code":"542" + }, + { + "desc":"PG_USER displays information about users who can access the database.", + "product_code":"dws", + "title":"PG_USER", + "uri":"dws_04_0791.html", + "doc_type":"devg", + "p_code":"395", + "code":"543" + }, + { + "desc":"PG_USER_MAPPINGS displays information about user mappings.This is essentially a publicly readable view of PG_USER_MAPPING that leaves out the options column if the user h", + "product_code":"dws", + "title":"PG_USER_MAPPINGS", + "uri":"dws_04_0792.html", + "doc_type":"devg", + "p_code":"395", + "code":"544" + }, + { + "desc":"PG_VIEWS displays basic information about each view in the database.", + "product_code":"dws", + "title":"PG_VIEWS", + "uri":"dws_04_0793.html", + "doc_type":"devg", + "p_code":"395", + "code":"545" + }, + { + "desc":"PG_WLM_STATISTICS displays information about workload management after the task is complete or the exception has been handled.", + "product_code":"dws", + "title":"PG_WLM_STATISTICS", + "uri":"dws_04_0794.html", + "doc_type":"devg", + "p_code":"395", + "code":"546" + }, + { + "desc":"PGXC_BULKLOAD_PROGRESS displays the progress of the service import. Only GDS common files can be imported. This view is accessible only to users with system administrator", + "product_code":"dws", + "title":"PGXC_BULKLOAD_PROGRESS", + "uri":"dws_04_0795.html", + "doc_type":"devg", + "p_code":"395", + "code":"547" + }, + { + "desc":"PGXC_BULKLOAD_STATISTICS displays real-time statistics about service execution, such as GDS, COPY, and \\COPY, on a CN. This view summarizes the real-time execution status", + "product_code":"dws", + "title":"PGXC_BULKLOAD_STATISTICS", + "uri":"dws_04_0796.html", + "doc_type":"devg", + "p_code":"395", + "code":"548" + }, + { + "desc":"PGXC_COMM_CLIENT_INFO stores the client connection information of all nodes. 
(You can query this view on a DN to view the information about the connection between the CN ", + "product_code":"dws", + "title":"PGXC_COMM_CLIENT_INFO", + "uri":"dws_04_0797.html", + "doc_type":"devg", + "p_code":"395", + "code":"549" + }, + { + "desc":"PGXC_COMM_DELAY displays the communication library delay status for all the DNs.", + "product_code":"dws", + "title":"PGXC_COMM_DELAY", + "uri":"dws_04_0798.html", + "doc_type":"devg", + "p_code":"395", + "code":"550" + }, + { + "desc":"PGXC_COMM_RECV_STREAM displays the receiving stream status of the communication libraries for all the DNs.", + "product_code":"dws", + "title":"PGXC_COMM_RECV_STREAM", + "uri":"dws_04_0799.html", + "doc_type":"devg", + "p_code":"395", + "code":"551" + }, + { + "desc":"PGXC_COMM_SEND_STREAM displays the sending stream status of the communication libraries for all the DNs.", + "product_code":"dws", + "title":"PGXC_COMM_SEND_STREAM", + "uri":"dws_04_0800.html", + "doc_type":"devg", + "p_code":"395", + "code":"552" + }, + { + "desc":"PGXC_COMM_STATUS displays the communication library status for all the DNs.", + "product_code":"dws", + "title":"PGXC_COMM_STATUS", + "uri":"dws_04_0801.html", + "doc_type":"devg", + "p_code":"395", + "code":"553" + }, + { + "desc":"PGXC_DEADLOCK displays lock wait information generated due to distributed deadlocks. Currently, PGXC_DEADLOCK collects only lock wait information about locks whose locktyp", + "product_code":"dws", + "title":"PGXC_DEADLOCK", + "uri":"dws_04_0802.html", + "doc_type":"devg", + "p_code":"395", + "code":"554" + }, + { + "desc":"PGXC_GET_STAT_ALL_TABLES displays information about insertion, update, and deletion operations on tables and the dirty page rate of tables. Before running VACUUM FULL to a", + "product_code":"dws", + "title":"PGXC_GET_STAT_ALL_TABLES", + "uri":"dws_04_0803.html", + "doc_type":"devg", + "p_code":"395", + "code":"555" + }, + { + "desc":"PGXC_GET_STAT_ALL_PARTITIONS displays information about insertion, update, and deletion operations on partitions of partitioned tables and the dirty page rate of tables. T", + "product_code":"dws", + "title":"PGXC_GET_STAT_ALL_PARTITIONS", + "uri":"dws_04_0804.html", + "doc_type":"devg", + "p_code":"395", + "code":"556" + }, + { + "desc":"PGXC_GET_TABLE_SKEWNESS displays the data skew on tables in the current database.", + "product_code":"dws", + "title":"PGXC_GET_TABLE_SKEWNESS", + "uri":"dws_04_0805.html", + "doc_type":"devg", + "p_code":"395", + "code":"557" + }, + { + "desc":"PGXC_GTM_SNAPSHOT_STATUS displays transaction information on the current GTM.", + "product_code":"dws", + "title":"PGXC_GTM_SNAPSHOT_STATUS", + "uri":"dws_04_0806.html", + "doc_type":"devg", + "p_code":"395", + "code":"558" + }, + { + "desc":"PGXC_INSTANCE_TIME displays the running time of processes on each node in the cluster and the time consumed in each execution phase. Except the node_name column, the othe", + "product_code":"dws", + "title":"PGXC_INSTANCE_TIME", + "uri":"dws_04_0807.html", + "doc_type":"devg", + "p_code":"395", + "code":"559" + }, + { + "desc":"PGXC_INSTR_UNIQUE_SQL displays the complete Unique SQL statistics of all CN nodes in the cluster. Only the system administrator can access this view. 
For details about the", + "product_code":"dws", + "title":"PGXC_INSTR_UNIQUE_SQL", + "uri":"dws_04_0808.html", + "doc_type":"devg", + "p_code":"395", + "code":"560" + }, + { + "desc":"PGXC_LOCK_CONFLICTS displays information about conflicting locks in the cluster.When a lock is waiting for another lock or another lock is waiting for this one, a lock co", + "product_code":"dws", + "title":"PGXC_LOCK_CONFLICTS", + "uri":"dws_04_0809.html", + "doc_type":"devg", + "p_code":"395", + "code":"561" + }, + { + "desc":"PGXC_NODE_ENV displays the environmental variables information about all nodes in a cluster.", + "product_code":"dws", + "title":"PGXC_NODE_ENV", + "uri":"dws_04_0810.html", + "doc_type":"devg", + "p_code":"395", + "code":"562" + }, + { + "desc":"PGXC_NODE_STAT_RESET_TIME displays the time when statistics of each node in the cluster are reset. All columns except node_name are the same as those in the GS_NODE_STAT_", + "product_code":"dws", + "title":"PGXC_NODE_STAT_RESET_TIME", + "uri":"dws_04_0811.html", + "doc_type":"devg", + "p_code":"395", + "code":"563" + }, + { + "desc":"PGXC_OS_RUN_INFO displays the OS running status of each node in the cluster. All columns except node_name are the same as those in the PV_OS_RUN_INFO view. This view is a", + "product_code":"dws", + "title":"PGXC_OS_RUN_INFO", + "uri":"dws_04_0812.html", + "doc_type":"devg", + "p_code":"395", + "code":"564" + }, + { + "desc":"PGXC_OS_THREADS displays thread status information under all normal nodes in the current cluster.", + "product_code":"dws", + "title":"PGXC_OS_THREADS", + "uri":"dws_04_0813.html", + "doc_type":"devg", + "p_code":"395", + "code":"565" + }, + { + "desc":"PGXC_PREPARED_XACTS displays the two-phase transactions in the prepared phase.", + "product_code":"dws", + "title":"PGXC_PREPARED_XACTS", + "uri":"dws_04_0814.html", + "doc_type":"devg", + "p_code":"395", + "code":"566" + }, + { + "desc":"PGXC_REDO_STAT displays statistics on redoing Xlogs of each node in the cluster. All columns except node_name are the same as those in the PV_REDO_STAT view. This view is", + "product_code":"dws", + "title":"PGXC_REDO_STAT", + "uri":"dws_04_0815.html", + "doc_type":"devg", + "p_code":"395", + "code":"567" + }, + { + "desc":"PGXC_REL_IOSTAT displays statistics on disk read and write of each node in the cluster. All columns except node_name are the same as those in the GS_REL_IOSTAT view. This", + "product_code":"dws", + "title":"PGXC_REL_IOSTAT", + "uri":"dws_04_0816.html", + "doc_type":"devg", + "p_code":"395", + "code":"568" + }, + { + "desc":"PGXC_REPLICATION_SLOTS displays the replication information of DNs in the cluster. All columns except node_name are the same as those in the PG_REPLICATION_SLOTS view. Th", + "product_code":"dws", + "title":"PGXC_REPLICATION_SLOTS", + "uri":"dws_04_0817.html", + "doc_type":"devg", + "p_code":"395", + "code":"569" + }, + { + "desc":"PGXC_RUNNING_XACTS displays information about running transactions on each node in the cluster. The content is the same as that displayed in PG_RUNNING_XACTS.", + "product_code":"dws", + "title":"PGXC_RUNNING_XACTS", + "uri":"dws_04_0818.html", + "doc_type":"devg", + "p_code":"395", + "code":"570" + }, + { + "desc":"PGXC_SETTINGS displays the database running status of each node in the cluster. All columns except node_name are the same as those in the PG_SETTINGS view. 
This view is a", + "product_code":"dws", + "title":"PGXC_SETTINGS", + "uri":"dws_04_0819.html", + "doc_type":"devg", + "p_code":"395", + "code":"571" + }, + { + "desc":"PGXC_STAT_ACTIVITY displays information about the query performed by the current user on all the CNs in the current cluster.Run the following command to view blocked quer", + "product_code":"dws", + "title":"PGXC_STAT_ACTIVITY", + "uri":"dws_04_0820.html", + "doc_type":"devg", + "p_code":"395", + "code":"572" + }, + { + "desc":"PGXC_STAT_BAD_BLOCK displays statistics about page or CU verification failures after all nodes in a cluster are started.", + "product_code":"dws", + "title":"PGXC_STAT_BAD_BLOCK", + "uri":"dws_04_0821.html", + "doc_type":"devg", + "p_code":"395", + "code":"573" + }, + { + "desc":"PGXC_STAT_BGWRITER displays statistics on the background writer of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_BGWRITER vi", + "product_code":"dws", + "title":"PGXC_STAT_BGWRITER", + "uri":"dws_04_0822.html", + "doc_type":"devg", + "p_code":"395", + "code":"574" + }, + { + "desc":"PGXC_STAT_DATABASE displays the database status and statistics of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_DATABASE vie", + "product_code":"dws", + "title":"PGXC_STAT_DATABASE", + "uri":"dws_04_0823.html", + "doc_type":"devg", + "p_code":"395", + "code":"575" + }, + { + "desc":"PGXC_STAT_REPLICATION displays the log synchronization status of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_REPLICATION v", + "product_code":"dws", + "title":"PGXC_STAT_REPLICATION", + "uri":"dws_04_0824.html", + "doc_type":"devg", + "p_code":"395", + "code":"576" + }, + { + "desc":"PGXC_SQL_COUNT displays the node-level and user-level statistics for the SQL statements of SELECT, INSERT, UPDATE, DELETE, and MERGE INTO and DDL, DML, and DCL statements", + "product_code":"dws", + "title":"PGXC_SQL_COUNT", + "uri":"dws_04_0825.html", + "doc_type":"devg", + "p_code":"395", + "code":"577" + }, + { + "desc":"PGXC_THREAD_WAIT_STATUS displays all the call layer hierarchy relationship between threads of the SQL statements on all the nodes in a cluster, and the waiting status of ", + "product_code":"dws", + "title":"PGXC_THREAD_WAIT_STATUS", + "uri":"dws_04_0826.html", + "doc_type":"devg", + "p_code":"395", + "code":"578" + }, + { + "desc":"PGXC_TOTAL_MEMORY_DETAIL displays the memory usage in the cluster.", + "product_code":"dws", + "title":"PGXC_TOTAL_MEMORY_DETAIL", + "uri":"dws_04_0827.html", + "doc_type":"devg", + "p_code":"395", + "code":"579" + }, + { + "desc":"PGXC_TOTAL_SCHEMA_INFO displays the schema space information of all instances in the cluster, providing visibility into the schema space usage of each instance. This view", + "product_code":"dws", + "title":"PGXC_TOTAL_SCHEMA_INFO", + "uri":"dws_04_0828.html", + "doc_type":"devg", + "p_code":"395", + "code":"580" + }, + { + "desc":"PGXC_TOTAL_SCHEMA_INFO_ANALYZE displays the overall schema space information of the cluster, including the total cluster space, average space of instances, skew ratio, ma", + "product_code":"dws", + "title":"PGXC_TOTAL_SCHEMA_INFO_ANALYZE", + "uri":"dws_04_0829.html", + "doc_type":"devg", + "p_code":"395", + "code":"581" + }, + { + "desc":"PGXC_USER_TRANSACTION provides transaction information about users on all CNs. It is accessible only to users with system administrator rights. 
This view is valid only wh", + "product_code":"dws", + "title":"PGXC_USER_TRANSACTION", + "uri":"dws_04_0830.html", + "doc_type":"devg", + "p_code":"395", + "code":"582" + }, + { + "desc":"PGXC_VARIABLE_INFO displays information about transaction IDs and OIDs of all nodes in a cluster.", + "product_code":"dws", + "title":"PGXC_VARIABLE_INFO", + "uri":"dws_04_0831.html", + "doc_type":"devg", + "p_code":"395", + "code":"583" + }, + { + "desc":"PGXC_WAIT_EVENTS displays statistics on the waiting status and events of each node in the cluster. The content is the same as that displayed in GS_WAIT_EVENTS. This view ", + "product_code":"dws", + "title":"PGXC_WAIT_EVENTS", + "uri":"dws_04_0832.html", + "doc_type":"devg", + "p_code":"395", + "code":"584" + }, + { + "desc":"PGXC_WLM_OPERATOR_HISTORYdisplays the operator information of completed jobs executed on all CNs. This view is used by Database Manager to query data from a database. Dat", + "product_code":"dws", + "title":"PGXC_WLM_OPERATOR_HISTORY", + "uri":"dws_04_0836.html", + "doc_type":"devg", + "p_code":"395", + "code":"585" + }, + { + "desc":"PGXC_WLM_OPERATOR_INFO displays the operator information of completed jobs executed on CNs. The data in this view is obtained from GS_WLM_OPERATOR_INFO.This view is acces", + "product_code":"dws", + "title":"PGXC_WLM_OPERATOR_INFO", + "uri":"dws_04_0837.html", + "doc_type":"devg", + "p_code":"395", + "code":"586" + }, + { + "desc":"PGXC_WLM_OPERATOR_STATISTICS displays the operator information of jobs being executed on CNs.This view is accessible only to users with system administrators rights. For ", + "product_code":"dws", + "title":"PGXC_WLM_OPERATOR_STATISTICS", + "uri":"dws_04_0838.html", + "doc_type":"devg", + "p_code":"395", + "code":"587" + }, + { + "desc":"PGXC_WLM_SESSION_INFO displays load management information for completed jobs executed on all CNs. The data in this view is obtained from GS_WLM_SESSION_INFO.This view is", + "product_code":"dws", + "title":"PGXC_WLM_SESSION_INFO", + "uri":"dws_04_0839.html", + "doc_type":"devg", + "p_code":"395", + "code":"588" + }, + { + "desc":"PGXC_WLM_SESSION_HISTORY displays load management information for completed jobs executed on all CNs. This view is used by Data Manager to query data from a database. Dat", + "product_code":"dws", + "title":"PGXC_WLM_SESSION_HISTORY", + "uri":"dws_04_0840.html", + "doc_type":"devg", + "p_code":"395", + "code":"589" + }, + { + "desc":"PGXC_WLM_SESSION_STATISTICS displays load management information about jobs that are being executed on CNs.This view is accessible only to users with system administrator", + "product_code":"dws", + "title":"PGXC_WLM_SESSION_STATISTICS", + "uri":"dws_04_0841.html", + "doc_type":"devg", + "p_code":"395", + "code":"590" + }, + { + "desc":"PGXC_WLM_WORKLOAD_RECORDS displays the status of job executed by the current user on CNs. It is accessible only to users with system administrator rights. 
This view is av", + "product_code":"dws", + "title":"PGXC_WLM_WORKLOAD_RECORDS", + "uri":"dws_04_0842.html", + "doc_type":"devg", + "p_code":"395", + "code":"591" + }, + { + "desc":"PGXC_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in workload Cgroups on all CNs in a cluster, including the number of SELECT, UPDATE, ", + "product_code":"dws", + "title":"PGXC_WORKLOAD_SQL_COUNT", + "uri":"dws_04_0843.html", + "doc_type":"devg", + "p_code":"395", + "code":"592" + }, + { + "desc":"PGXC_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in workload Cgroups on all CNs in a cluster, including the maximum, minimum, aver", + "product_code":"dws", + "title":"PGXC_WORKLOAD_SQL_ELAPSE_TIME", + "uri":"dws_04_0844.html", + "doc_type":"devg", + "p_code":"395", + "code":"593" + }, + { + "desc":"PGXC_WORKLOAD_TRANSACTION provides transaction information about workload Cgroups on all CNs. It is accessible only to users with system administrator rights. This view i", + "product_code":"dws", + "title":"PGXC_WORKLOAD_TRANSACTION", + "uri":"dws_04_0845.html", + "doc_type":"devg", + "p_code":"395", + "code":"594" + }, + { + "desc":"PLAN_TABLE displays the plan information collected by EXPLAIN PLAN. Plan information is in a session-level life cycle. After the session exits, the data will be deleted. ", + "product_code":"dws", + "title":"PLAN_TABLE", + "uri":"dws_04_0846.html", + "doc_type":"devg", + "p_code":"395", + "code":"595" + }, + { + "desc":"PLAN_TABLE_DATA displays the plan information collected by EXPLAIN PLAN. Different from the PLAN_TABLE view, the system catalog PLAN_TABLE_DATA stores the plan informatio", + "product_code":"dws", + "title":"PLAN_TABLE_DATA", + "uri":"dws_04_0847.html", + "doc_type":"devg", + "p_code":"395", + "code":"596" + }, + { + "desc":"By collecting statistics about the data file I/Os, PV_FILE_STAT displays the I/O performance of the data to detect the performance problems, such as abnormal I/O operatio", + "product_code":"dws", + "title":"PV_FILE_STAT", + "uri":"dws_04_0848.html", + "doc_type":"devg", + "p_code":"395", + "code":"597" + }, + { + "desc":"PV_INSTANCE_TIME collects statistics on the running time of processes and the time consumed in each execution phase, in microseconds.PV_INSTANCE_TIME records time consump", + "product_code":"dws", + "title":"PV_INSTANCE_TIME", + "uri":"dws_04_0849.html", + "doc_type":"devg", + "p_code":"395", + "code":"598" + }, + { + "desc":"PV_OS_RUN_INFO displays the running status of the current operating system.", + "product_code":"dws", + "title":"PV_OS_RUN_INFO", + "uri":"dws_04_0850.html", + "doc_type":"devg", + "p_code":"395", + "code":"599" + }, + { + "desc":"PV_SESSION_MEMORY displays statistics about memory usage at the session level in the unit of MB, including all the memory allocated to Postgres and Stream threads on DNs ", + "product_code":"dws", + "title":"PV_SESSION_MEMORY", + "uri":"dws_04_0851.html", + "doc_type":"devg", + "p_code":"395", + "code":"600" + }, + { + "desc":"PV_SESSION_MEMORY_DETAIL displays statistics about thread memory usage by memory context.The memory context TempSmallContextGroup collects information about all memory co", + "product_code":"dws", + "title":"PV_SESSION_MEMORY_DETAIL", + "uri":"dws_04_0852.html", + "doc_type":"devg", + "p_code":"395", + "code":"601" + }, + { + "desc":"PV_SESSION_STAT displays session state statistics based on session threads or the AutoVacuum thread.", + "product_code":"dws", + "title":"PV_SESSION_STAT", + 
"uri":"dws_04_0853.html", + "doc_type":"devg", + "p_code":"395", + "code":"602" + }, + { + "desc":"PV_SESSION_TIME displays statistics about the running time of session threads and time consumed in each execution phase, in microseconds.", + "product_code":"dws", + "title":"PV_SESSION_TIME", + "uri":"dws_04_0854.html", + "doc_type":"devg", + "p_code":"395", + "code":"603" + }, + { + "desc":"PV_TOTAL_MEMORY_DETAIL displays statistics about memory usage of the current database node in the unit of MB.", + "product_code":"dws", + "title":"PV_TOTAL_MEMORY_DETAIL", + "uri":"dws_04_0855.html", + "doc_type":"devg", + "p_code":"395", + "code":"604" + }, + { + "desc":"PV_REDO_STAT displays statistics on redoing Xlogs on the current node.", + "product_code":"dws", + "title":"PV_REDO_STAT", + "uri":"dws_04_0856.html", + "doc_type":"devg", + "p_code":"395", + "code":"605" + }, + { + "desc":"REDACTION_COLUMNS displays information about all redaction columns in the current database.", + "product_code":"dws", + "title":"REDACTION_COLUMNS", + "uri":"dws_04_0857.html", + "doc_type":"devg", + "p_code":"395", + "code":"606" + }, + { + "desc":"REDACTION_POLICIES displays information about all redaction objects in the current database.", + "product_code":"dws", + "title":"REDACTION_POLICIES", + "uri":"dws_04_0858.html", + "doc_type":"devg", + "p_code":"395", + "code":"607" + }, + { + "desc":"USER_COL_COMMENTS displays the column comments of the table accessible to the current user.", + "product_code":"dws", + "title":"USER_COL_COMMENTS", + "uri":"dws_04_0859.html", + "doc_type":"devg", + "p_code":"395", + "code":"608" + }, + { + "desc":"USER_CONSTRAINTS displays the table constraint information accessible to the current user.", + "product_code":"dws", + "title":"USER_CONSTRAINTS", + "uri":"dws_04_0860.html", + "doc_type":"devg", + "p_code":"395", + "code":"609" + }, + { + "desc":"USER_CONSTRAINTS displays the information about constraint columns of the tables accessible to the current user.", + "product_code":"dws", + "title":"USER_CONS_COLUMNS", + "uri":"dws_04_0861.html", + "doc_type":"devg", + "p_code":"395", + "code":"610" + }, + { + "desc":"USER_INDEXES displays index information in the current schema.", + "product_code":"dws", + "title":"USER_INDEXES", + "uri":"dws_04_0862.html", + "doc_type":"devg", + "p_code":"395", + "code":"611" + }, + { + "desc":"USER_IND_COLUMNS displays column information about all indexes accessible to the current user.", + "product_code":"dws", + "title":"USER_IND_COLUMNS", + "uri":"dws_04_0863.html", + "doc_type":"devg", + "p_code":"395", + "code":"612" + }, + { + "desc":"USER_IND_EXPRESSIONSdisplays information about the function-based expression index accessible to the current user.", + "product_code":"dws", + "title":"USER_IND_EXPRESSIONS", + "uri":"dws_04_0864.html", + "doc_type":"devg", + "p_code":"395", + "code":"613" + }, + { + "desc":"USER_IND_PARTITIONS displays information about index partitions accessible to the current user.", + "product_code":"dws", + "title":"USER_IND_PARTITIONS", + "uri":"dws_04_0865.html", + "doc_type":"devg", + "p_code":"395", + "code":"614" + }, + { + "desc":"USER_JOBS displays all jobs owned by the user.", + "product_code":"dws", + "title":"USER_JOBS", + "uri":"dws_04_0866.html", + "doc_type":"devg", + "p_code":"395", + "code":"615" + }, + { + "desc":"USER_OBJECTS displays all database objects accessible to the current user.For details about the value ranges of last_ddl_time and last_ddl_time, see PG_OBJECT.", + "product_code":"dws", + 
"title":"USER_OBJECTS", + "uri":"dws_04_0867.html", + "doc_type":"devg", + "p_code":"395", + "code":"616" + }, + { + "desc":"USER_PART_INDEXES displays information about partitioned table indexes accessible to the current user.", + "product_code":"dws", + "title":"USER_PART_INDEXES", + "uri":"dws_04_0868.html", + "doc_type":"devg", + "p_code":"395", + "code":"617" + }, + { + "desc":"USER_PART_TABLES displays information about partitioned tables accessible to the current user.", + "product_code":"dws", + "title":"USER_PART_TABLES", + "uri":"dws_04_0869.html", + "doc_type":"devg", + "p_code":"395", + "code":"618" + }, + { + "desc":"USER_PROCEDURES displays information about all stored procedures and functions in the current schema.", + "product_code":"dws", + "title":"USER_PROCEDURES", + "uri":"dws_04_0870.html", + "doc_type":"devg", + "p_code":"395", + "code":"619" + }, + { + "desc":"USER_SEQUENCES displays sequence information in the current schema.", + "product_code":"dws", + "title":"USER_SEQUENCES", + "uri":"dws_04_0871.html", + "doc_type":"devg", + "p_code":"395", + "code":"620" + }, + { + "desc":"USER_SOURCE displays information about stored procedures or functions in this mode, and provides the columns defined by the stored procedures or the functions.", + "product_code":"dws", + "title":"USER_SOURCE", + "uri":"dws_04_0872.html", + "doc_type":"devg", + "p_code":"395", + "code":"621" + }, + { + "desc":"USER_SYNONYMS displays synonyms accessible to the current user.", + "product_code":"dws", + "title":"USER_SYNONYMS", + "uri":"dws_04_0873.html", + "doc_type":"devg", + "p_code":"395", + "code":"622" + }, + { + "desc":"USER_TAB_COLUMNS displays information about table columns accessible to the current user.", + "product_code":"dws", + "title":"USER_TAB_COLUMNS", + "uri":"dws_04_0874.html", + "doc_type":"devg", + "p_code":"395", + "code":"623" + }, + { + "desc":"USER_TAB_COMMENTS displays comments about all tables and views accessible to the current user.", + "product_code":"dws", + "title":"USER_TAB_COMMENTS", + "uri":"dws_04_0875.html", + "doc_type":"devg", + "p_code":"395", + "code":"624" + }, + { + "desc":"USER_TAB_PARTITIONS displays all table partitions accessible to the current user. 
Each partition of a partitioned table accessible to the current user has a piece of reco", + "product_code":"dws", + "title":"USER_TAB_PARTITIONS", + "uri":"dws_04_0876.html", + "doc_type":"devg", + "p_code":"395", + "code":"625" + }, + { + "desc":"USER_TABLES displays table information in the current schema.", + "product_code":"dws", + "title":"USER_TABLES", + "uri":"dws_04_0877.html", + "doc_type":"devg", + "p_code":"395", + "code":"626" + }, + { + "desc":"USER_TRIGGERS displays the information about triggers accessible to the current user.", + "product_code":"dws", + "title":"USER_TRIGGERS", + "uri":"dws_04_0878.html", + "doc_type":"devg", + "p_code":"395", + "code":"627" + }, + { + "desc":"USER_VIEWS displays information about all views in the current schema.", + "product_code":"dws", + "title":"USER_VIEWS", + "uri":"dws_04_0879.html", + "doc_type":"devg", + "p_code":"395", + "code":"628" + }, + { + "desc":"V$SESSION displays all session information about the current session.", + "product_code":"dws", + "title":"V$SESSION", + "uri":"dws_04_0880.html", + "doc_type":"devg", + "p_code":"395", + "code":"629" + }, + { + "desc":"V$SESSION_LONGOPS displays the progress of ongoing operations.", + "product_code":"dws", + "title":"V$SESSION_LONGOPS", + "uri":"dws_04_0881.html", + "doc_type":"devg", + "p_code":"395", + "code":"630" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"GUC Parameters", + "uri":"dws_04_0883.html", + "doc_type":"devg", + "p_code":"1", + "code":"631" + }, + { + "desc":"GaussDB(DWS) GUC parameters can control database system behaviors. You can check and adjust the GUC parameters based on your business scenario and data volume. After a clu", + "product_code":"dws", + "title":"Viewing GUC Parameters", + "uri":"dws_04_0884.html", + "doc_type":"devg", + "p_code":"631", + "code":"632" + }, + { + "desc":"To ensure the optimal performance of GaussDB(DWS), you can adjust the GUC parameters in the database. The GUC parameters of GaussDB(DWS) are classified into the following ", + "product_code":"dws", + "title":"Configuring GUC Parameters", + "uri":"dws_04_0885.html", + "doc_type":"devg", + "p_code":"631", + "code":"633" + }, + { + "desc":"The database provides many operation parameters. Configuration of these parameters affects the behavior of the database system. Before modifying these parameters, learn t", + "product_code":"dws", + "title":"GUC Parameter Usage", + "uri":"dws_04_0886.html", + "doc_type":"devg", + "p_code":"631", + "code":"634" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Connection and Authentication", + "uri":"dws_04_0888.html", + "doc_type":"devg", + "p_code":"631", + "code":"635" + }, + { + "desc":"This section describes parameters related to the connection mode between the client and server. Parameter description: Specifies the maximum number of allowed parallel con", + "product_code":"dws", + "title":"Connection Settings", + "uri":"dws_04_0889.html", + "doc_type":"devg", + "p_code":"635", + "code":"636" + }, + { + "desc":"This section describes parameters about how to securely authenticate the client and server. Parameter description: Specifies the longest duration to wait before the client", + "product_code":"dws", + "title":"Security and Authentication (postgresql.conf)", + "uri":"dws_04_0890.html", + "doc_type":"devg", + "p_code":"635", + "code":"637" + }, + { + "desc":"This section describes parameter settings and value ranges for communication libraries. Parameter description: Specifies whether the communication library uses the TCP or ", + "product_code":"dws", + "title":"Communication Library Parameters", + "uri":"dws_04_0891.html", + "doc_type":"devg", + "p_code":"635", + "code":"638" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Resource Consumption", + "uri":"dws_04_0892.html", + "doc_type":"devg", + "p_code":"631", + "code":"639" + }, + { + "desc":"This section describes memory parameters. Parameters described in this section take effect only after the database service restarts. Parameter description: Specifies whethe", + "product_code":"dws", + "title":"Memory", + "uri":"dws_04_0893.html", + "doc_type":"devg", + "p_code":"639", + "code":"640" + }, + { + "desc":"This section describes parameters related to statement disk space control, which are used to limit the disk space usage of statements. Parameter description: Specifies the", + "product_code":"dws", + "title":"Statement Disk Space Control", + "uri":"dws_04_0894.html", + "doc_type":"devg", + "p_code":"639", + "code":"641" + }, + { + "desc":"This section describes kernel resource parameters. Whether these parameters take effect depends on OS settings. Parameter description: Specifies the maximum number of simu", + "product_code":"dws", + "title":"Kernel Resources", + "uri":"dws_04_0895.html", + "doc_type":"devg", + "p_code":"639", + "code":"642" + }, + { + "desc":"This feature allows administrators to reduce the I/O impact of the VACUUM and ANALYZE statements on concurrent database activities. It is often more important to prevent ", + "product_code":"dws", + "title":"Cost-based Vacuum Delay", + "uri":"dws_04_0896.html", + "doc_type":"devg", + "p_code":"639", + "code":"643" + }, + { + "desc":"Parameter description: Specifies whether O&M personnel are allowed to generate some ADIO logs to locate ADIO issues. This parameter is used only by developers. 
Common use", + "product_code":"dws", + "title":"Asynchronous I/O Operations", + "uri":"dws_04_0898.html", + "doc_type":"devg", + "p_code":"639", + "code":"644" + }, + { + "desc":"GaussDB(DWS) provides a parallel data import function that enables a large amount of data to be imported in a fast and efficient manner. This section describes parameters", + "product_code":"dws", + "title":"Parallel Data Import", + "uri":"dws_04_0899.html", + "doc_type":"devg", + "p_code":"631", + "code":"645" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Write Ahead Logs", + "uri":"dws_04_0900.html", + "doc_type":"devg", + "p_code":"631", + "code":"646" + }, + { + "desc":"Parameter description: Specifies the level of the information that is written to WALs.Type: POSTMASTERValue range: enumerated valuesminimalAdvantages: Certain bulk operat", + "product_code":"dws", + "title":"Settings", + "uri":"dws_04_0901.html", + "doc_type":"devg", + "p_code":"646", + "code":"647" + }, + { + "desc":"Parameter description: Specifies the minimum number of WAL segment files in the period specified by checkpoint_timeout. The size of each log file is 16 MB.Type: SIGHUPVal", + "product_code":"dws", + "title":"Checkpoints", + "uri":"dws_04_0902.html", + "doc_type":"devg", + "p_code":"646", + "code":"648" + }, + { + "desc":"Parameter description: When archive_mode is enabled, completed WAL segments are sent to archive storage by setting archive_command.Type: SIGHUPValue range: Booleanon: The", + "product_code":"dws", + "title":"Archiving", + "uri":"dws_04_0903.html", + "doc_type":"devg", + "p_code":"646", + "code":"649" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"HA Replication", + "uri":"dws_04_0904.html", + "doc_type":"devg", + "p_code":"631", + "code":"650" + }, + { + "desc":"Parameter description: Specifies the number of Xlog file segments. Specifies the minimum number of transaction log files stored in the pg_xlog directory. The standby serv", + "product_code":"dws", + "title":"Sending Server", + "uri":"dws_04_0905.html", + "doc_type":"devg", + "p_code":"650", + "code":"651" + }, + { + "desc":"Parameter description: Specifies the number of transactions by which VACUUM will defer the cleanup of invalid row-store table records, so that VACUUM and VACUUM FULL do n", + "product_code":"dws", + "title":"Primary Server", + "uri":"dws_04_0906.html", + "doc_type":"devg", + "p_code":"650", + "code":"652" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Query Planning", + "uri":"dws_04_0908.html", + "doc_type":"devg", + "p_code":"631", + "code":"653" + }, + { + "desc":"These configuration parameters provide a crude method of influencing the query plans chosen by the query optimizer. 
If the default plan chosen by the optimizer for a part", + "product_code":"dws", + "title":"Optimizer Method Configuration", + "uri":"dws_04_0909.html", + "doc_type":"devg", + "p_code":"653", + "code":"654" + }, + { + "desc":"This section describes the optimizer cost constants. The cost variables described in this section are measured on an arbitrary scale. Only their relative values matter, t", + "product_code":"dws", + "title":"Optimizer Cost Constants", + "uri":"dws_04_0910.html", + "doc_type":"devg", + "p_code":"653", + "code":"655" + }, + { + "desc":"This section describes parameters related to genetic query optimizer. The genetic query optimizer (GEQO) is an algorithm that plans queries by using heuristic searching. ", + "product_code":"dws", + "title":"Genetic Query Optimizer", + "uri":"dws_04_0911.html", + "doc_type":"devg", + "p_code":"653", + "code":"656" + }, + { + "desc":"Parameter description: Specifies the default statistics target for table columns without a column-specific target set via ALTER TABLE SET STATISTICS. If this parameter is", + "product_code":"dws", + "title":"Other Optimizer Options", + "uri":"dws_04_0912.html", + "doc_type":"devg", + "p_code":"653", + "code":"657" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Error Reporting and Logging", + "uri":"dws_04_0913.html", + "doc_type":"devg", + "p_code":"631", + "code":"658" + }, + { + "desc":"Parameter description: Specifies the writing mode of the log files when logging_collector is set to on. Type: SIGHUP. Value range: Boolean. on indicates that GaussDB(DWS) over", + "product_code":"dws", + "title":"Logging Destination", + "uri":"dws_04_0914.html", + "doc_type":"devg", + "p_code":"658", + "code":"659" + }, + { + "desc":"Parameter description: Specifies which level of messages are sent to the client. Each level covers all the levels following it. The lower the level is, the fewer messages", + "product_code":"dws", + "title":"Logging Time", + "uri":"dws_04_0915.html", + "doc_type":"devg", + "p_code":"658", + "code":"660" + }, + { + "desc":"Parameter description: Specifies whether to print parsing tree results. Type: SIGHUP. Value range: Boolean. on indicates the printing result function is enabled. off indicates ", + "product_code":"dws", + "title":"Logging Content", + "uri":"dws_04_0916.html", + "doc_type":"devg", + "p_code":"658", + "code":"661" + }, + { + "desc":"During cluster running, error scenarios can be detected in a timely manner to inform users as soon as possible. Parameter description: Enables the alarm detection thread t", + "product_code":"dws", + "title":"Alarm Detection", + "uri":"dws_04_0918.html", + "doc_type":"devg", + "p_code":"631", + "code":"662" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Statistics During the Database Running", + "uri":"dws_04_0919.html", + "doc_type":"devg", + "p_code":"631", + "code":"663" + }, + { + "desc":"The query and index statistics collector is used to collect statistics during database running. 
The statistics include the times of inserting and updating a table and an ", + "product_code":"dws", + "title":"Query and Index Statistics Collector", + "uri":"dws_04_0920.html", + "doc_type":"devg", + "p_code":"663", + "code":"664" + }, + { + "desc":"During the running of the database, the lock access, disk I/O operation, and invalid message process are involved. All these operations are the bottleneck of the database", + "product_code":"dws", + "title":"Performance Statistics", + "uri":"dws_04_0921.html", + "doc_type":"devg", + "p_code":"663", + "code":"665" + }, + { + "desc":"If database resource usage is not controlled, concurrent tasks easily preempt resources. As a result, the OS will be overloaded and cannot respond to user tasks; or even ", + "product_code":"dws", + "title":"Workload Management", + "uri":"dws_04_0922.html", + "doc_type":"devg", + "p_code":"631", + "code":"666" + }, + { + "desc":"The automatic cleanup process (autovacuum) in the system automatically runs the VACUUM and ANALYZE commands to recycle the record space marked by the deleted status and u", + "product_code":"dws", + "title":"Automatic Cleanup", + "uri":"dws_04_0923.html", + "doc_type":"devg", + "p_code":"631", + "code":"667" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Default Settings of Client Connection", + "uri":"dws_04_0924.html", + "doc_type":"devg", + "p_code":"631", + "code":"668" + }, + { + "desc":"This section describes related default parameters involved in the execution of SQL statements. Parameter description: Specifies the order in which schemas are searched whe", + "product_code":"dws", + "title":"Statement Behavior", + "uri":"dws_04_0925.html", + "doc_type":"devg", + "p_code":"668", + "code":"669" + }, + { + "desc":"This section describes parameters related to the time format setting. Parameter description: Specifies the display format for date and time values, as well as the rules fo", + "product_code":"dws", + "title":"Zone and Formatting", + "uri":"dws_04_0926.html", + "doc_type":"devg", + "p_code":"668", + "code":"670" + }, + { + "desc":"This section describes the default database loading parameters of the database system. Parameter description: Specifies the path for saving the shared database files that ", + "product_code":"dws", + "title":"Other Default Parameters", + "uri":"dws_04_0927.html", + "doc_type":"devg", + "p_code":"668", + "code":"671" + }, + { + "desc":"In GaussDB(DWS), a deadlock may occur when concurrently executed transactions compete for resources. This section describes parameters used for managing transaction lock ", + "product_code":"dws", + "title":"Lock Management", + "uri":"dws_04_0928.html", + "doc_type":"devg", + "p_code":"631", + "code":"672" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Version and Platform Compatibility", + "uri":"dws_04_0929.html", + "doc_type":"devg", + "p_code":"631", + "code":"673" + }, + { + "desc":"This section describes the parameter control of the downward compatibility and external compatibility features of GaussDB(DWS). Backward compatibility of the database sys", + "product_code":"dws", + "title":"Compatibility with Earlier Versions", + "uri":"dws_04_0930.html", + "doc_type":"devg", + "p_code":"673", + "code":"674" + }, + { + "desc":"Many platforms use the database system. External compatibility of the database system provides a lot of convenience for platforms. Parameter description: Determines whethe", + "product_code":"dws", + "title":"Platform and Client Compatibility", + "uri":"dws_04_0931.html", + "doc_type":"devg", + "p_code":"673", + "code":"675" + }, + { + "desc":"This section describes parameters used for controlling the methods that the server processes an error occurring in the database system. Parameter description: Specifies wh", + "product_code":"dws", + "title":"Fault Tolerance", + "uri":"dws_04_0932.html", + "doc_type":"devg", + "p_code":"631", + "code":"676" + }, + { + "desc":"When a connection pool is used to access the database, database connections are established and then stored in the memory as objects during system running. When you need ", + "product_code":"dws", + "title":"Connection Pool Parameters", + "uri":"dws_04_0933.html", + "doc_type":"devg", + "p_code":"631", + "code":"677" + }, + { + "desc":"This section describes the settings and value ranges of cluster transaction parameters. Parameter description: Specifies the isolation level of the current transaction. Typ", + "product_code":"dws", + "title":"Cluster Transaction Parameters", + "uri":"dws_04_0934.html", + "doc_type":"devg", + "p_code":"631", + "code":"678" + }, + { + "desc":"Parameter description: Specifies whether to enable the lightweight column-store update. Type: USERSET. Value range: Boolean. on indicates that the lightweight column-store upd", + "product_code":"dws", + "title":"Developer Operations", + "uri":"dws_04_0936.html", + "doc_type":"devg", + "p_code":"631", + "code":"679" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Auditing", + "uri":"dws_04_0937.html", + "doc_type":"devg", + "p_code":"631", + "code":"680" + }, + { + "desc":"Parameter description: Specifies whether to enable or disable the audit process. After the audit process is enabled, the auditing information written by the background pr", + "product_code":"dws", + "title":"Audit Switch", + "uri":"dws_04_0938.html", + "doc_type":"devg", + "p_code":"680", + "code":"681" + }, + { + "desc":"Parameter description: Specifies whether to audit successful operations in GaussDB(DWS). Set this parameter as required. Type: SIGHUP. Value range: a string. none: indicates t", + "product_code":"dws", + "title":"Operation Audit", + "uri":"dws_04_0940.html", + "doc_type":"devg", + "p_code":"680", + "code":"682" + }, + { + "desc":"The automatic rollback transaction can be monitored and its statement problems can be located by setting the transaction timeout warning. 
In addition, the statements with",
"product_code":"dws",
"title":"Transaction Monitoring",
"uri":"dws_04_0941.html",
"doc_type":"devg",
"p_code":"631",
"code":"683"
},
{
"desc":"Parameter description: If an SQL statement involves tables belonging to different groups, you can enable this parameter to push the execution plan of the statement to imp",
"product_code":"dws",
"title":"Miscellaneous Parameters",
"uri":"dws_04_0945.html",
"doc_type":"devg",
"p_code":"631",
"code":"684"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"Glossary",
"uri":"dws_04_0946.html",
"doc_type":"devg",
"p_code":"1",
"code":"685"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"SQL Syntax Reference",
"uri":"dws_04_2000.html",
"doc_type":"devg",
"p_code":"",
"code":"686"
},
{
"desc":"SQL is a standard computer language used to control access to databases and manage data in databases.SQL provides different statements to enable you to:Query data.Ins",
"product_code":"dws",
"title":"GaussDB(DWS) SQL",
"uri":"dws_06_0001.html",
"doc_type":"devg",
"p_code":"686",
"code":"687"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"Differences Between GaussDB(DWS) and PostgreSQL",
"uri":"dws_06_0002.html",
"doc_type":"devg",
"p_code":"686",
"code":"688"
},
{
"desc":"GaussDB(DWS) gsql differs from PostgreSQL psql in that the former has made the following changes to enhance security:User passwords cannot be set by running the \\password",
"product_code":"dws",
"title":"GaussDB(DWS) gsql, PostgreSQL psql, and libpq",
"uri":"dws_06_0003.html",
"doc_type":"devg",
"p_code":"688",
"code":"689"
},
{
"desc":"For details about the data types supported by GaussDB(DWS), see Data Types.The following PostgreSQL data type is not supported:Lines, a geometric typepg_node_tree",
"product_code":"dws",
"title":"Data Type Differences",
"uri":"dws_06_0004.html",
"doc_type":"devg",
"p_code":"688",
"code":"690"
},
{
"desc":"For details about the functions supported by GaussDB(DWS), see Functions and Operators.The following PostgreSQL functions are not supported:Enum support functionsAccess p",
"product_code":"dws",
"title":"Function Differences",
"uri":"dws_06_0005.html",
"doc_type":"devg",
"p_code":"688",
"code":"691"
},
{
"desc":"Table inheritanceTable creation features:Use REFERENCES reftable [ (refcolumn) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] t",
"product_code":"dws",
"title":"PostgreSQL Features Unsupported by GaussDB(DWS)",
"uri":"dws_06_0006.html",
"doc_type":"devg",
"p_code":"688",
"code":"692"
},
{
"desc":"SQL contains reserved and non-reserved words. 
Standards require that reserved keywords not be used as other identifiers. Non-reserved keywords have special meanings o", + "product_code":"dws", + "title":"Keyword", + "uri":"dws_06_0007.html", + "doc_type":"devg", + "p_code":"686", + "code":"693" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Data Types", + "uri":"dws_06_0008.html", + "doc_type":"devg", + "p_code":"686", + "code":"694" + }, + { + "desc":"Numeric types consist of two-, four-, and eight-byte integers, four- and eight-byte floating-point numbers, and selectable-precision decimals.For details about numeric op", + "product_code":"dws", + "title":"Numeric Types", + "uri":"dws_06_0009.html", + "doc_type":"devg", + "p_code":"694", + "code":"695" + }, + { + "desc":"The money type stores a currency amount with fixed fractional precision. The range shown in Table 1 assumes there are two fractional digits. Input is accepted in a variet", + "product_code":"dws", + "title":"Monetary Types", + "uri":"dws_06_0010.html", + "doc_type":"devg", + "p_code":"694", + "code":"696" + }, + { + "desc":"Valid literal values for the \"true\" state are:TRUE, 't', 'true', 'y', 'yes', '1'Valid literal values for the \"false\" state include:FALSE, 'f', 'false', 'n', 'no', '0'TRUE", + "product_code":"dws", + "title":"Boolean Type", + "uri":"dws_06_0011.html", + "doc_type":"devg", + "p_code":"694", + "code":"697" + }, + { + "desc":"Table 1 lists the character types that can be used in GaussDB(DWS). For string operators and related built-in functions, see Character Processing Functions and Operators.", + "product_code":"dws", + "title":"Character Types", + "uri":"dws_06_0012.html", + "doc_type":"devg", + "p_code":"694", + "code":"698" + }, + { + "desc":"Table 1 lists the binary data types that can be used in GaussDB(DWS).In addition to the size limitation on each column, the total size of each tuple is 8203 bytes less th", + "product_code":"dws", + "title":"Binary Data Types", + "uri":"dws_06_0013.html", + "doc_type":"devg", + "p_code":"694", + "code":"699" + }, + { + "desc":"Table 1 lists date and time types supported by GaussDB(DWS). For the operators and built-in functions of the types, see Date and Time Processing Functions and Operators.I", + "product_code":"dws", + "title":"Date/Time Types", + "uri":"dws_06_0014.html", + "doc_type":"devg", + "p_code":"694", + "code":"700" + }, + { + "desc":"Table 1 lists the geometric types that can be used in GaussDB(DWS). The most fundamental type, the point, forms the basis for all of the other types.A rich set of functio", + "product_code":"dws", + "title":"Geometric Types", + "uri":"dws_06_0015.html", + "doc_type":"devg", + "p_code":"694", + "code":"701" + }, + { + "desc":"GaussDB(DWS) offers data types to store IPv4, IPv6, and MAC addresses.It is better to use network address types instead of plaintext types to store IPv4, IPv6, and MAC ad", + "product_code":"dws", + "title":"Network Address Types", + "uri":"dws_06_0016.html", + "doc_type":"devg", + "p_code":"694", + "code":"702" + }, + { + "desc":"Bit strings are strings of 1's and 0's. 
They can be used to store bit masks.GaussDB(DWS) supports two SQL bit types: bit(n) and bit varying(n), where n is a positive inte",
"product_code":"dws",
"title":"Bit String Types",
"uri":"dws_06_0017.html",
"doc_type":"devg",
"p_code":"694",
"code":"703"
},
{
"desc":"GaussDB(DWS) offers two data types that are designed to support full text search. The tsvector type represents a document in a form optimized for text search. The tsquery",
"product_code":"dws",
"title":"Text Search Types",
"uri":"dws_06_0018.html",
"doc_type":"devg",
"p_code":"694",
"code":"704"
},
{
"desc":"The data type UUID stores Universally Unique Identifiers (UUID) as defined by RFC 4122, ISO/IEC 9834-8:2005, and related standards. This identifier is a 128-bit quantity ",
"product_code":"dws",
"title":"UUID Type",
"uri":"dws_06_0019.html",
"doc_type":"devg",
"p_code":"694",
"code":"705"
},
{
"desc":"JSON data types are for storing JavaScript Object Notation (JSON) data. Such data can also be stored as TEXT, but the JSON data type has the advantage of checking that ea",
"product_code":"dws",
"title":"JSON Types",
"uri":"dws_06_0020.html",
"doc_type":"devg",
"p_code":"694",
"code":"706"
},
{
"desc":"HyperLogLog (HLL) is an approximation algorithm for efficiently counting the number of distinct values in a data set. It features faster computing and lower space usage. ",
"product_code":"dws",
"title":"HLL Data Types",
"uri":"dws_06_0021.html",
"doc_type":"devg",
"p_code":"694",
"code":"707"
},
{
"desc":"Object identifiers (OIDs) are used internally by GaussDB(DWS) as primary keys for various system catalogs. OIDs are not added to user-created tables by the system. The OI",
"product_code":"dws",
"title":"Object Identifier Types",
"uri":"dws_06_0022.html",
"doc_type":"devg",
"p_code":"694",
"code":"708"
},
{
"desc":"GaussDB(DWS) has a number of special-purpose entries that are collectively called pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to ",
"product_code":"dws",
"title":"Pseudo-Types",
"uri":"dws_06_0023.html",
"doc_type":"devg",
"p_code":"694",
"code":"709"
},
{
"desc":"Table 1 lists the data types supported by column-store tables.",
"product_code":"dws",
"title":"Data Types Supported by Column-Store Tables",
"uri":"dws_06_0024.html",
"doc_type":"devg",
"p_code":"694",
"code":"710"
},
{
"desc":"The XML data type stores Extensible Markup Language (XML) formatted data. Such data can also be stored as text, but the advantage of the XML data type is that it checks wheth",
"product_code":"dws",
"title":"XML",
"uri":"dws_06_0025.html",
"doc_type":"devg",
"p_code":"694",
"code":"711"
},
{
"desc":"Table 1 lists the constants and macros that can be used in GaussDB(DWS).",
"product_code":"dws",
"title":"Constant and Macro",
"uri":"dws_06_0026.html",
"doc_type":"devg",
"p_code":"686",
"code":"712"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"Functions and Operators",
"uri":"dws_06_0027.html",
"doc_type":"devg",
"p_code":"686",
"code":"713"
},
{
"desc":"The usual logical operators include AND, OR, and NOT. 
SQL uses a three-valued logical system with true, false, and null, which represents \"unknown\". Their priorities are ", + "product_code":"dws", + "title":"Logical Operators", + "uri":"dws_06_0028.html", + "doc_type":"devg", + "p_code":"713", + "code":"714" + }, + { + "desc":"Comparison operators are available for all data types and return Boolean values.All comparison operators are binary operators. Only data types that are the same or can be", + "product_code":"dws", + "title":"Comparison Operators", + "uri":"dws_06_0029.html", + "doc_type":"devg", + "p_code":"713", + "code":"715" + }, + { + "desc":"String functions and operators provided by GaussDB(DWS) are for concatenating strings with each other, concatenating strings with non-strings, and matching the patterns o", + "product_code":"dws", + "title":"Character Processing Functions and Operators", + "uri":"dws_06_0030.html", + "doc_type":"devg", + "p_code":"713", + "code":"716" + }, + { + "desc":"SQL defines some string functions that use keywords, rather than commas, to separate arguments.octet_length(string)Description: Number of bytes in binary stringReturn typ", + "product_code":"dws", + "title":"Binary String Functions and Operators", + "uri":"dws_06_0031.html", + "doc_type":"devg", + "p_code":"713", + "code":"717" + }, + { + "desc":"Aside from the usual comparison operators, the following operators can be used. Bit string operands of &, |, and # must be of equal length. When bit shifting, the origina", + "product_code":"dws", + "title":"Bit String Functions and Operators", + "uri":"dws_06_0032.html", + "doc_type":"devg", + "p_code":"713", + "code":"718" + }, + { + "desc":"There are three separate approaches to pattern matching provided by the database: the traditional SQL LIKE operator, the more recent SIMILAR TO operator, and POSIX-style ", + "product_code":"dws", + "title":"Pattern Matching Operators", + "uri":"dws_06_0033.html", + "doc_type":"devg", + "p_code":"713", + "code":"719" + }, + { + "desc":"+Description: AdditionFor example:SELECT 2+3 AS RESULT;\n result \n--------\n 5\n(1 row)Description: AdditionFor example:-Description: SubtractionFor example:SELECT 2-3 ", + "product_code":"dws", + "title":"Mathematical Functions and Operators", + "uri":"dws_06_0034.html", + "doc_type":"devg", + "p_code":"713", + "code":"720" + }, + { + "desc":"When the user uses date/time operators, explicit type prefixes are modified for corresponding operands to ensure that the operands parsed by the database are consistent w", + "product_code":"dws", + "title":"Date and Time Processing Functions and Operators", + "uri":"dws_06_0035.html", + "doc_type":"devg", + "p_code":"713", + "code":"721" + }, + { + "desc":"cast(x as y)Description: Converts x into the type specified by y.For example:SELECT cast('22-oct-1997' as timestamp);\n timestamp \n---------------------\n 1997-10", + "product_code":"dws", + "title":"Type Conversion Functions", + "uri":"dws_06_0036.html", + "doc_type":"devg", + "p_code":"713", + "code":"722" + }, + { + "desc":"+Description: TranslationFor example:SELECT box '((0,0),(1,1))' + point '(2.0,0)' AS RESULT;\n result \n-------------\n (3,1),(2,0)\n(1 row)Description: TranslationFor e", + "product_code":"dws", + "title":"Geometric Functions and Operators", + "uri":"dws_06_0037.html", + "doc_type":"devg", + "p_code":"713", + "code":"723" + }, + { + "desc":"The operators <<, <<=, >>, and >>= test for subnet inclusion. 
They consider only the network parts of the two addresses (ignoring any host part) and determine whether one",
"product_code":"dws",
"title":"Network Address Functions and Operators",
"uri":"dws_06_0038.html",
"doc_type":"devg",
"p_code":"713",
"code":"724"
},
{
"desc":"@@Description: Specifies whether the tsvector-typed words match the tsquery-typed words.For example:SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') AS ",
"product_code":"dws",
"title":"Text Search Functions and Operators",
"uri":"dws_06_0039.html",
"doc_type":"devg",
"p_code":"713",
"code":"725"
},
{
"desc":"UUID functions are used to generate UUID data (see UUID Type).uuid_generate_v1()Description: Generates a UUID sequence number.Return type: UUIDExample:SELECT uuid_generat",
"product_code":"dws",
"title":"UUID Functions",
"uri":"dws_06_0040.html",
"doc_type":"devg",
"p_code":"713",
"code":"726"
},
{
"desc":"JSON functions are used to generate JSON data (see JSON Types).array_to_json(anyarray [, pretty_bool])Description: Returns the array as JSON. A multi-dimensional array be",
"product_code":"dws",
"title":"JSON Functions",
"uri":"dws_06_0041.html",
"doc_type":"devg",
"p_code":"713",
"code":"727"
},
{
"desc":"hll_hash_boolean(bool)Description: Hashes data of the bool type.Return type: hll_hashvalFor example:SELECT hll_hash_boolean(FALSE);\n hll_hash_boolean \n----------------",
"product_code":"dws",
"title":"HLL Functions and Operators",
"uri":"dws_06_0042.html",
"doc_type":"devg",
"p_code":"713",
"code":"728"
},
{
"desc":"The sequence functions provide simple, multiuser-safe methods for obtaining sequence values from sequence objects.The hybrid data warehouse (s",
"product_code":"dws",
"title":"SEQUENCE Functions",
"uri":"dws_06_0043.html",
"doc_type":"devg",
"p_code":"713",
"code":"729"
},
{
"desc":"=Description: Specifies whether two arrays are equal.For example:SELECT ARRAY[1.1,2.1,3.1]::int[] = ARRAY[1,2,3] AS RESULT ;\n result \n--------\n t\n(1 row)Description: Spec",
"product_code":"dws",
"title":"Array Functions and Operators",
"uri":"dws_06_0044.html",
"doc_type":"devg",
"p_code":"713",
"code":"730"
},
{
"desc":"=Description: EqualsFor example:SELECT int4range(1,5) = '[1,4]'::int4range AS RESULT;\n result\n--------\n t\n(1 row)Description: EqualsFor example:<>Description: Does not eq",
"product_code":"dws",
"title":"Range Functions and Operators",
"uri":"dws_06_0045.html",
"doc_type":"devg",
"p_code":"713",
"code":"731"
},
{
"desc":"sum(expression)Description: Sum of expression across all input valuesReturn type:Generally, same as the argument data type. In the following cases, type conversion occurs",
"product_code":"dws",
"title":"Aggregate Functions",
"uri":"dws_06_0046.html",
"doc_type":"devg",
"p_code":"713",
"code":"732"
},
{
"desc":"Regular aggregate functions return a single value calculated from values in a row, or group all rows into a single output row. Window functions perform a calculation acro",
"product_code":"dws",
"title":"Window Functions",
"uri":"dws_06_0047.html",
"doc_type":"devg",
"p_code":"713",
"code":"733"
},
{
"desc":"gs_password_deadline()Description: Indicates the number of remaining days before the password of the current user expires. 
After the password expires, the system prompts ", + "product_code":"dws", + "title":"Security Functions", + "uri":"dws_06_0048.html", + "doc_type":"devg", + "p_code":"713", + "code":"734" + }, + { + "desc":"generate_series(start, stop)Description: Generates a series of values, from start to stop with a step size of one.Parameter type: int, bigint, or numericReturn type: seto", + "product_code":"dws", + "title":"Set Returning Functions", + "uri":"dws_06_0049.html", + "doc_type":"devg", + "p_code":"713", + "code":"735" + }, + { + "desc":"coalesce(expr1, expr2, ..., exprn)Description: Returns the first argument that is not NULL in the argument list.COALESCE(expr1, expr2) is equivalent to CASE WHEN expr1 IS", + "product_code":"dws", + "title":"Conditional Expression Functions", + "uri":"dws_06_0050.html", + "doc_type":"devg", + "p_code":"713", + "code":"736" + }, + { + "desc":"current_catalogDescription: Name of the current database (called \"catalog\" in the SQL standard)Return type: nameFor example:SELECT current_catalog;\n current_database\n----", + "product_code":"dws", + "title":"System Information Functions", + "uri":"dws_06_0051.html", + "doc_type":"devg", + "p_code":"713", + "code":"737" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"System Administration Functions", + "uri":"dws_06_0052.html", + "doc_type":"devg", + "p_code":"713", + "code":"738" + }, + { + "desc":"Configuration setting functions are used for querying and modifying configuration parameters during running.current_setting(setting_name)Description: Specifies the curren", + "product_code":"dws", + "title":"Configuration Settings Functions", + "uri":"dws_06_0053.html", + "doc_type":"devg", + "p_code":"738", + "code":"739" + }, + { + "desc":"Universal file access functions provide local access interfaces for files on a database server. Only files in the database cluster directory and the log_directory directo", + "product_code":"dws", + "title":"Universal File Access Functions", + "uri":"dws_06_0054.html", + "doc_type":"devg", + "p_code":"738", + "code":"740" + }, + { + "desc":"Server signaling functions send control signals to other server processes. 
Only system administrators can use these functions.pg_cancel_backend(pid int)Description: Cance", + "product_code":"dws", + "title":"Server Signaling Functions", + "uri":"dws_06_0055.html", + "doc_type":"devg", + "p_code":"738", + "code":"741" + }, + { + "desc":"Backup control functions help online backup.pg_create_restore_point(name text)Description: Creates a named point for performing the restore operation (restricted to syste", + "product_code":"dws", + "title":"Backup and Restoration Control Functions", + "uri":"dws_06_0056.html", + "doc_type":"devg", + "p_code":"738", + "code":"742" + }, + { + "desc":"Snapshot synchronization functions save the current snapshot and return its identifier.pg_export_snapshot()Description: Saves the current snapshot and returns its identif", + "product_code":"dws", + "title":"Snapshot Synchronization Functions", + "uri":"dws_06_0057.html", + "doc_type":"devg", + "p_code":"738", + "code":"743" + }, + { + "desc":"Database object size functions calculate the actual disk space used by database objects.pg_column_size(any)Description: Specifies the number of bytes used to store a part", + "product_code":"dws", + "title":"Database Object Functions", + "uri":"dws_06_0058.html", + "doc_type":"devg", + "p_code":"738", + "code":"744" + }, + { + "desc":"Advisory lock functions manage advisory locks. These functions are only for internal use currently.pg_advisory_lock(key bigint)Description: Obtains an exclusive session-l", + "product_code":"dws", + "title":"Advisory Lock Functions", + "uri":"dws_06_0059.html", + "doc_type":"devg", + "p_code":"738", + "code":"745" + }, + { + "desc":"pg_get_residualfiles()Description: Obtains all residual file records of the current node. This function is an instance-level function and is irrelevant to the current dat", + "product_code":"dws", + "title":"Residual File Management Functions", + "uri":"dws_06_0060.html", + "doc_type":"devg", + "p_code":"738", + "code":"746" + }, + { + "desc":"A replication function synchronizes logs and data between instances. It is a statistics or operation method provided by the system to implement HA.Replication functions e", + "product_code":"dws", + "title":"Replication Functions", + "uri":"dws_06_0061.html", + "doc_type":"devg", + "p_code":"738", + "code":"747" + }, + { + "desc":"pgxc_pool_check()Description: Checks whether the connection data buffered in the pool is consistent with pgxc_node.Return type: booleanDescription: Checks whether the con", + "product_code":"dws", + "title":"Other Functions", + "uri":"dws_06_0062.html", + "doc_type":"devg", + "p_code":"738", + "code":"748" + }, + { + "desc":"This section describes the functions of the resource management module.gs_wlm_readjust_user_space(oid)Description: This function calibrates the permanent storage space of", + "product_code":"dws", + "title":"Resource Management Functions", + "uri":"dws_06_0063.html", + "doc_type":"devg", + "p_code":"738", + "code":"749" + }, + { + "desc":"Data redaction functions are used to mask and protect sensitive data. 
Generally, you are advised to bind these functions to the columns to be redacted based on the data r", + "product_code":"dws", + "title":"Data Redaction Functions", + "uri":"dws_06_0064.html", + "doc_type":"devg", + "p_code":"713", + "code":"750" + }, + { + "desc":"Statistics information functions are divided into the following two categories: functions that access databases, using the OID of each table or index in a database to mar", + "product_code":"dws", + "title":"Statistics Information Functions", + "uri":"dws_06_0065.html", + "doc_type":"devg", + "p_code":"713", + "code":"751" + }, + { + "desc":"pg_get_triggerdef(oid)Description: Obtains the definition information of a trigger.Parameter: OID of the trigger to be queriedReturn type: textExample:select pg_get_trigg", + "product_code":"dws", + "title":"Trigger Functions", + "uri":"dws_06_0066.html", + "doc_type":"devg", + "p_code":"713", + "code":"752" + }, + { + "desc":"XMLPARSE ( { DOCUMENT | CONTENT } value)Description: Generates an XML value from character data.Return type: XMLExample:XMLSERIALIZE ( { DOCUMENT | CONTENT } value AS typ", + "product_code":"dws", + "title":"XML Functions", + "uri":"dws_06_0067.html", + "doc_type":"devg", + "p_code":"713", + "code":"753" + }, + { + "desc":"The pv_memory_profiling(type int) and environment variable MALLOC_CONF are used by GaussDB(DWS) to control the enabling and disabling of the memory allocation call stack ", + "product_code":"dws", + "title":"Call Stack Recording Functions", + "uri":"dws_06_0068.html", + "doc_type":"devg", + "p_code":"713", + "code":"754" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Expressions", + "uri":"dws_06_0069.html", + "doc_type":"devg", + "p_code":"686", + "code":"755" + }, + { + "desc":"Logical Operators lists the operators and calculation rules of logical expressions.Comparison Operators lists the common comparative operators.In addition to comparative ", + "product_code":"dws", + "title":"Simple Expressions", + "uri":"dws_06_0070.html", + "doc_type":"devg", + "p_code":"755", + "code":"756" + }, + { + "desc":"Data that meets the requirements specified by conditional expressions are filtered during SQL statement execution.Conditional expressions include the following types:CASE", + "product_code":"dws", + "title":"Conditional Expressions", + "uri":"dws_06_0071.html", + "doc_type":"devg", + "p_code":"755", + "code":"757" + }, + { + "desc":"Subquery expressions include the following types:EXISTS/NOT EXISTSFigure 1 shows the syntax of an EXISTS/NOT EXISTS expression.EXISTS/NOT EXISTS::=The parameter of an EXI", + "product_code":"dws", + "title":"Subquery Expressions", + "uri":"dws_06_0072.html", + "doc_type":"devg", + "p_code":"755", + "code":"758" + }, + { + "desc":"expressionIN(value [, ...])The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list.", + "product_code":"dws", + "title":"Array Expressions", + "uri":"dws_06_0073.html", + "doc_type":"devg", + "p_code":"755", + "code":"759" + }, + { + "desc":"Syntax:row_constructor operator row_constructorBoth sides of the row expression are row constructors. 
The values of both rows must have the same number of fields and they", + "product_code":"dws", + "title":"Row Expressions", + "uri":"dws_06_0074.html", + "doc_type":"devg", + "p_code":"755", + "code":"760" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Type Conversion", + "uri":"dws_06_0075.html", + "doc_type":"devg", + "p_code":"686", + "code":"761" + }, + { + "desc":"SQL is a typed language. That is, every data item has an associated data type which determines its behavior and allowed usage. GaussDB(DWS) has an extensible type system ", + "product_code":"dws", + "title":"Overview", + "uri":"dws_06_0076.html", + "doc_type":"devg", + "p_code":"761", + "code":"762" + }, + { + "desc":"Select the operators to be considered from the pg_operator system catalog. Considered operators are those with the matching name and argument count. If the search path fi", + "product_code":"dws", + "title":"Operators", + "uri":"dws_06_0077.html", + "doc_type":"devg", + "p_code":"761", + "code":"763" + }, + { + "desc":"Select the functions to be considered from the pg_proc system catalog. If a non-schema-qualified function name was used, the functions in the current search path are cons", + "product_code":"dws", + "title":"Functions", + "uri":"dws_06_0078.html", + "doc_type":"devg", + "p_code":"761", + "code":"764" + }, + { + "desc":"Search for an exact match with the target column.Try to convert the expression to the target type. This will succeed if there is a registered cast between the two types. ", + "product_code":"dws", + "title":"Value Storage", + "uri":"dws_06_0079.html", + "doc_type":"devg", + "p_code":"761", + "code":"765" + }, + { + "desc":"SQL UNION constructs must match up possibly dissimilar types to become a single result set. Since all query results from a SELECT UNION statement must appear in a single ", + "product_code":"dws", + "title":"UNION, CASE, and Related Constructs", + "uri":"dws_06_0080.html", + "doc_type":"devg", + "p_code":"761", + "code":"766" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Full Text Search", + "uri":"dws_06_0081.html", + "doc_type":"devg", + "p_code":"686", + "code":"767" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Introduction", + "uri":"dws_06_0082.html", + "doc_type":"devg", + "p_code":"767", + "code":"768" + }, + { + "desc":"Textual search operators have been used in databases for years. GaussDB(DWS) has ~, ~*, LIKE, and ILIKE operators for textual data types, but they lack many essential pro", + "product_code":"dws", + "title":"Full-Text Retrieval", + "uri":"dws_06_0083.html", + "doc_type":"devg", + "p_code":"768", + "code":"769" + }, + { + "desc":"A document is the unit of searching in a full text search system; for example, a magazine article or email message. 
The text search engine must be able to parse documents",
"product_code":"dws",
"title":"What Is a Document?",
"uri":"dws_06_0084.html",
"doc_type":"devg",
"p_code":"768",
"code":"770"
},
{
"desc":"Full text search in GaussDB(DWS) is based on the match operator @@, which returns true if a tsvector (document) matches a tsquery (query). It does not matter which data t",
"product_code":"dws",
"title":"Basic Text Matching",
"uri":"dws_06_0085.html",
"doc_type":"devg",
"p_code":"768",
"code":"771"
},
{
"desc":"Full text search functionality includes the ability to do many more things: skip indexing certain words (stop words), process synonyms, and use sophisticated parsing, for",
"product_code":"dws",
"title":"Configurations",
"uri":"dws_06_0086.html",
"doc_type":"devg",
"p_code":"768",
"code":"772"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"Table and index",
"uri":"dws_06_0087.html",
"doc_type":"devg",
"p_code":"767",
"code":"773"
},
{
"desc":"It is possible to do a full text search without an index.A simple query to print each row that contains the word science in its body column is as follows:DROP SCHEMA IF E",
"product_code":"dws",
"title":"Searching a Table",
"uri":"dws_06_0088.html",
"doc_type":"devg",
"p_code":"773",
"code":"774"
},
{
"desc":"You can create a GIN index to speed up text searches:The to_tsvector() function accepts one or two arguments.If the one-argument version of the index is used, the system wi",
"product_code":"dws",
"title":"Creating an Index",
"uri":"dws_06_0089.html",
"doc_type":"devg",
"p_code":"773",
"code":"775"
},
{
"desc":"The following is an example of using an index. Run the following statements in a database that uses the UTF-8 or GBK encoding:In this example, table1 has two GIN indexes ",
"product_code":"dws",
"title":"Constraints on Index Use",
"uri":"dws_06_0090.html",
"doc_type":"devg",
"p_code":"773",
"code":"776"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"Controlling Text Search",
"uri":"dws_06_0091.html",
"doc_type":"devg",
"p_code":"767",
"code":"777"
},
{
"desc":"GaussDB(DWS) provides the function to_tsvector for converting a document to the tsvector data type.to_tsvector parses a textual document into tokens, reduces the tokens to le",
"product_code":"dws",
"title":"Parsing Documents",
"uri":"dws_06_0092.html",
"doc_type":"devg",
"p_code":"777",
"code":"778"
},
{
"desc":"GaussDB(DWS) provides functions to_tsquery and plainto_tsquery for converting a query to the tsquery data type. to_tsquery offers access to more features than plainto_tsq",
"product_code":"dws",
"title":"Parsing Queries",
"uri":"dws_06_0093.html",
"doc_type":"devg",
"p_code":"777",
"code":"779"
},
{
"desc":"Ranking attempts to measure how relevant documents are to a particular query, so that when there are many matches the most relevant ones can be shown first. 
GaussDB(DWS) ", + "product_code":"dws", + "title":"Ranking Search Results", + "uri":"dws_06_0094.html", + "doc_type":"devg", + "p_code":"777", + "code":"780" + }, + { + "desc":"To present search results it is ideal to show a part of each document and how it is related to the query. Usually, search engines show fragments of the document with mark", + "product_code":"dws", + "title":"Highlighting Results", + "uri":"dws_06_0095.html", + "doc_type":"devg", + "p_code":"777", + "code":"781" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Additional Features", + "uri":"dws_06_0096.html", + "doc_type":"devg", + "p_code":"767", + "code":"782" + }, + { + "desc":"GaussDB(DWS) provides functions and operators that can be used to manipulate documents that are already in tsvector type.tsvector || tsvectorThe tsvector concatenation op", + "product_code":"dws", + "title":"Manipulating tsvector", + "uri":"dws_06_0097.html", + "doc_type":"devg", + "p_code":"782", + "code":"783" + }, + { + "desc":"GaussDB(DWS) provides functions and operators that can be used to manipulate queries that are already in tsquery type.tsquery && tsqueryReturns the AND-combination of the", + "product_code":"dws", + "title":"Manipulating Queries", + "uri":"dws_06_0098.html", + "doc_type":"devg", + "p_code":"782", + "code":"784" + }, + { + "desc":"The ts_rewrite family of functions searches a given tsquery for occurrences of a target subquery, and replace each occurrence with a substitute subquery. In essence this ", + "product_code":"dws", + "title":"Rewriting Queries", + "uri":"dws_06_0099.html", + "doc_type":"devg", + "p_code":"782", + "code":"785" + }, + { + "desc":"The function ts_stat is useful for checking your configuration and for finding stop-word candidates.sqlquery is a text value containing an SQL query which must return a s", + "product_code":"dws", + "title":"Gathering Document Statistics", + "uri":"dws_06_0100.html", + "doc_type":"devg", + "p_code":"782", + "code":"786" + }, + { + "desc":"Text search parsers are responsible for splitting raw document text into tokens and identifying each token's type, where the set of types is defined by the parser itself.", + "product_code":"dws", + "title":"Parsers", + "uri":"dws_06_0101.html", + "doc_type":"devg", + "p_code":"767", + "code":"787" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Dictionaries", + "uri":"dws_06_0102.html", + "doc_type":"devg", + "p_code":"767", + "code":"788" + }, + { + "desc":"A dictionary is used to define stop words, that is, words to be ignored in full-text retrieval.A dictionary can also be used to normalize words so that different derived ", + "product_code":"dws", + "title":"Overview", + "uri":"dws_06_0103.html", + "doc_type":"devg", + "p_code":"788", + "code":"789" + }, + { + "desc":"Stop words are words that are very common, appear in almost every document, and have no discrimination value. 
Therefore, they can be ignored in the context of full text s",
"product_code":"dws",
"title":"Stop Words",
"uri":"dws_06_0104.html",
"doc_type":"devg",
"p_code":"788",
"code":"790"
},
{
"desc":"A Simple dictionary operates by converting the input token to lower case and checking it against a list of stop words. If the token is found in the list, an empty array w",
"product_code":"dws",
"title":"Simple Dictionary",
"uri":"dws_06_0105.html",
"doc_type":"devg",
"p_code":"788",
"code":"791"
},
{
"desc":"A synonym dictionary is used to define, identify, and convert synonyms of tokens. Phrases are not supported (use the thesaurus dictionary in Thesaurus Dictionary).A synon",
"product_code":"dws",
"title":"Synonym Dictionary",
"uri":"dws_06_0106.html",
"doc_type":"devg",
"p_code":"788",
"code":"792"
},
{
"desc":"A thesaurus dictionary (sometimes abbreviated as TZ) is a collection of words that include relationships between words and phrases, such as broader terms (BT), narrower t",
"product_code":"dws",
"title":"Thesaurus Dictionary",
"uri":"dws_06_0107.html",
"doc_type":"devg",
"p_code":"788",
"code":"793"
},
{
"desc":"The Ispell dictionary template supports morphological dictionaries, which can normalize many different linguistic forms of a word into the same lexeme. For example, an En",
"product_code":"dws",
"title":"Ispell Dictionary",
"uri":"dws_06_0108.html",
"doc_type":"devg",
"p_code":"788",
"code":"794"
},
{
"desc":"A Snowball dictionary is based on a project by Martin Porter and is used for stem analysis, providing stemming algorithms for many languages. GaussDB(DWS) provides predef",
"product_code":"dws",
"title":"Snowball Dictionary",
"uri":"dws_06_0109.html",
"doc_type":"devg",
"p_code":"788",
"code":"795"
},
{
"desc":"A text search configuration specifies the following components required for converting a document into a tsvector:A parser, which decomposes a text into tokens.Dictionary list, c",
"product_code":"dws",
"title":"Configuration Examples",
"uri":"dws_06_0110.html",
"doc_type":"devg",
"p_code":"767",
"code":"796"
},
{
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Testing and Debugging Text Search", + "uri":"dws_06_0111.html", + "doc_type":"devg", + "p_code":"767", + "code":"797" + }, + { + "desc":"The function ts_debug allows easy testing of a text search configuration.ts_debug displays information about every token of document as produced by the parser and process", + "product_code":"dws", + "title":"Testing a Configuration", + "uri":"dws_06_0112.html", + "doc_type":"devg", + "p_code":"797", + "code":"798" + }, + { + "desc":"The ts_parse function allows direct testing of a text search parser.ts_parse parses the given document and returns a series of records, one for each token produced by par", + "product_code":"dws", + "title":"Testing a Parser", + "uri":"dws_06_0113.html", + "doc_type":"devg", + "p_code":"797", + "code":"799" + }, + { + "desc":"The ts_lexize function facilitates dictionary testing.ts_lexize(dict regdictionary, token text) returns text[] ts_lexize returns an array of lexemes if the input token is", + "product_code":"dws", + "title":"Testing a Dictionary", + "uri":"dws_06_0114.html", + "doc_type":"devg", + "p_code":"797", + "code":"800" + }, + { + "desc":"The current limitations of GaussDB(DWS)'s full text search are:The length of each lexeme must be less than 2 KB.The length of a tsvector (lexemes + positions) must be les", + "product_code":"dws", + "title":"Limitations", + "uri":"dws_06_0115.html", + "doc_type":"devg", + "p_code":"767", + "code":"801" + }, + { + "desc":"GaussDB(DWS) runs SQL statements to perform different system operations, such as setting variables, displaying the execution plan, and collecting garbage data.For details", + "product_code":"dws", + "title":"System Operation", + "uri":"dws_06_0116.html", + "doc_type":"devg", + "p_code":"686", + "code":"802" + }, + { + "desc":"A transaction is a user-defined sequence of database operations, which form an integral unit of work.GaussDB(DWS) starts a transaction using START TRANSACTION and BEGIN. ", + "product_code":"dws", + "title":"Controlling Transactions", + "uri":"dws_06_0117.html", + "doc_type":"devg", + "p_code":"686", + "code":"803" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.",
"product_code":"dws",
"title":"DDL Syntax",
"uri":"dws_06_0118.html",
"doc_type":"devg",
"p_code":"686",
"code":"804"
},
{
"desc":"Data definition language (DDL) is used to define or modify an object in a database, such as a table, index, or view.GaussDB(DWS) does not support DDL if its CN is unavail",
"product_code":"dws",
"title":"DDL Syntax Overview",
"uri":"dws_06_0119.html",
"doc_type":"devg",
"p_code":"804",
"code":"805"
},
{
"desc":"This command is used to modify the attributes of a database, including the database name, owner, maximum number of connections, and object isolation attribute.Only the ow",
"product_code":"dws",
"title":"ALTER DATABASE",
"uri":"dws_06_0120.html",
"doc_type":"devg",
"p_code":"804",
"code":"806"
},
{
"desc":"ALTER FOREIGN TABLE modifies a foreign table.NoneSet the attributes of a foreign table.ALTER FOREIGN TABLE [ IF EXISTS ] table_name\n    OPTIONS ( {[ ADD | SET | DROP ] o",
"product_code":"dws",
"title":"ALTER FOREIGN TABLE (for GDS)",
"uri":"dws_06_0123.html",
"doc_type":"devg",
"p_code":"804",
"code":"807"
},
{
"desc":"ALTER FOREIGN TABLE modifies an HDFS or OBS foreign table.NoneSet a foreign table's attributes.ALTER FOREIGN TABLE [ IF EXISTS ] table_name\n    OPTIONS ( {[ ADD | SET | ",
"product_code":"dws",
"title":"ALTER FOREIGN TABLE (for HDFS or OBS)",
"uri":"dws_06_0124.html",
"doc_type":"devg",
"p_code":"804",
"code":"808"
},
{
"desc":"ALTER FUNCTION modifies the attributes of a customized function.Only the owner of a function or a system administrator can run this statement. If a function involves oper",
"product_code":"dws",
"title":"ALTER FUNCTION",
"uri":"dws_06_0126.html",
"doc_type":"devg",
"p_code":"804",
"code":"809"
},
{
"desc":"ALTER GROUP modifies the attributes of a user group.ALTER GROUP is an alias for ALTER ROLE; it is not a standard SQL command and is not recommended. Users can use ALTER ",
"product_code":"dws",
"title":"ALTER GROUP",
"uri":"dws_06_0127.html",
"doc_type":"devg",
"p_code":"804",
"code":"810"
},
{
"desc":"ALTER INDEX modifies the definition of an existing index.There are several sub-forms:IF EXISTSIf the specified index does not exist, a notice instead of an error is sent.",
"product_code":"dws",
"title":"ALTER INDEX",
"uri":"dws_06_0128.html",
"doc_type":"devg",
"p_code":"804",
"code":"811"
},
{
"desc":"ALTER LARGE OBJECT modifies the definition of a large object. 
It can only assign a new owner to a large object.Only the administrator or the owner of the to-be-modified l",
"product_code":"dws",
"title":"ALTER LARGE OBJECT",
"uri":"dws_06_0129.html",
"doc_type":"devg",
"p_code":"804",
"code":"812"
},
{
"desc":"ALTER REDACTION POLICY modifies a data redaction policy applied to a specified table.Only the owner of the table to which the redaction policy is applied has the permissi",
"product_code":"dws",
"title":"ALTER REDACTION POLICY",
"uri":"dws_06_0132.html",
"doc_type":"devg",
"p_code":"804",
"code":"813"
},
{
"desc":"ALTER RESOURCE POOL changes the Cgroup of a resource pool.Users having the ALTER permission can modify resource pools.pool_nameSpecifies the name of the resource pool.The",
"product_code":"dws",
"title":"ALTER RESOURCE POOL",
"uri":"dws_06_0133.html",
"doc_type":"devg",
"p_code":"804",
"code":"814"
},
{
"desc":"ALTER ROLE changes the attributes of a role.NoneModifying the Rights of a RoleALTER ROLE role_name [ [ WITH ] option [ ... ] ];The option clause for granting rights is as",
"product_code":"dws",
"title":"ALTER ROLE",
"uri":"dws_06_0134.html",
"doc_type":"devg",
"p_code":"804",
"code":"815"
},
{
"desc":"ALTER ROW LEVEL SECURITY POLICY modifies an existing row-level access control policy, including the policy name and the users and expressions affected by the policy.Only ",
"product_code":"dws",
"title":"ALTER ROW LEVEL SECURITY POLICY",
"uri":"dws_06_0135.html",
"doc_type":"devg",
"p_code":"804",
"code":"816"
},
{
"desc":"ALTER SCHEMA changes the attributes of a schema.Only the owner of a schema or a system administrator can run this statement.Rename a schema.ALTER SCHEMA schema_name \n   ",
"product_code":"dws",
"title":"ALTER SCHEMA",
"uri":"dws_06_0136.html",
"doc_type":"devg",
"p_code":"804",
"code":"817"
},
{
"desc":"ALTER SEQUENCE modifies the parameters of an existing sequence.You must be the owner of the sequence to use ALTER SEQUENCE.In the current version, you can modify only the",
"product_code":"dws",
"title":"ALTER SEQUENCE",
"uri":"dws_06_0137.html",
"doc_type":"devg",
"p_code":"804",
"code":"818"
},
{
"desc":"ALTER SERVER adds, modifies, or deletes the parameters of an existing server. You can query existing servers from the pg_foreign_server system catalog.Only the owner of a",
"product_code":"dws",
"title":"ALTER SERVER",
"uri":"dws_06_0138.html",
"doc_type":"devg",
"p_code":"804",
"code":"819"
},
{
"desc":"ALTER SESSION defines or modifies the conditions or parameters that affect the current session. 
Modified session parameters are kept until the current session is disconne",
"product_code":"dws",
"title":"ALTER SESSION",
"uri":"dws_06_0139.html",
"doc_type":"devg",
"p_code":"804",
"code":"820"
},
{
"desc":"ALTER SYNONYM is used to modify the attribute of a synonym.Only the synonym owner can be changed.Only the system administrator and the synonym owner have the permission to",
"product_code":"dws",
"title":"ALTER SYNONYM",
"uri":"dws_06_0140.html",
"doc_type":"devg",
"p_code":"804",
"code":"821"
},
{
"desc":"ALTER SYSTEM KILL SESSION ends a session.Nonesession_sid, serialSpecifies SID and SERIAL of a session (see examples for format).Value range: The SIDs and SERIALs of all s",
"product_code":"dws",
"title":"ALTER SYSTEM KILL SESSION",
"uri":"dws_06_0141.html",
"doc_type":"devg",
"p_code":"804",
"code":"822"
},
{
"desc":"ALTER TABLE is used to modify tables, including modifying table definitions, renaming tables, renaming specified columns in tables, renaming table constraints, setting ta",
"product_code":"dws",
"title":"ALTER TABLE",
"uri":"dws_06_0142.html",
"doc_type":"devg",
"p_code":"804",
"code":"823"
},
{
"desc":"ALTER TABLE PARTITION modifies table partitioning, including adding, deleting, splitting, merging partitions, and modifying partition attributes.The name of the added par",
"product_code":"dws",
"title":"ALTER TABLE PARTITION",
"uri":"dws_06_0143.html",
"doc_type":"devg",
"p_code":"804",
"code":"824"
},
{
"desc":"ALTER TEXT SEARCH CONFIGURATION modifies the definition of a text search configuration. You can modify its mappings from token types to dictionaries, change the configura",
"product_code":"dws",
"title":"ALTER TEXT SEARCH CONFIGURATION",
"uri":"dws_06_0145.html",
"doc_type":"devg",
"p_code":"804",
"code":"825"
},
{
"desc":"ALTER TEXT SEARCH DICTIONARY modifies the definition of a full-text retrieval dictionary, including its parameters, name, owner, and schema.ALTER is not supported by pred",
"product_code":"dws",
"title":"ALTER TEXT SEARCH DICTIONARY",
"uri":"dws_06_0146.html",
"doc_type":"devg",
"p_code":"804",
"code":"826"
},
{
"desc":"ALTER TRIGGER modifies the definition of a trigger.Only the owner of a table where a trigger is created and system administrators can run the ALTER TRIGGER statement.trig",
"product_code":"dws",
"title":"ALTER TRIGGER",
"uri":"dws_06_0147.html",
"doc_type":"devg",
"p_code":"804",
"code":"827"
},
{
"desc":"ALTER TYPE modifies the definition of a type.Modify a type.ALTER TYPE name action [, ... ]\nALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER }\nALTER TYPE",
"product_code":"dws",
"title":"ALTER TYPE",
"uri":"dws_06_0148.html",
"doc_type":"devg",
"p_code":"804",
"code":"828"
},
{
"desc":"ALTER USER modifies the attributes of a database user.Session parameters modified by ALTER USER apply to a specified user and take effect in the next session.Modify user ",
"product_code":"dws",
"title":"ALTER USER",
"uri":"dws_06_0149.html",
"doc_type":"devg",
"p_code":"804",
"code":"829"
},
{
"desc":"ALTER VIEW modifies all auxiliary attributes of a view. 
(To modify the query definition of a view, use CREATE OR REPLACE VIEW.)Only the view owner can modify a view by ru", + "product_code":"dws", + "title":"ALTER VIEW", + "uri":"dws_06_0150.html", + "doc_type":"devg", + "p_code":"804", + "code":"830" + }, + { + "desc":"CLEAN CONNECTION clears database connections when a database is abnormal. You may use this statement to delete a specific user's connections to a specified database.NoneC", + "product_code":"dws", + "title":"CLEAN CONNECTION", + "uri":"dws_06_0151.html", + "doc_type":"devg", + "p_code":"804", + "code":"831" + }, + { + "desc":"CLOSE frees the resources associated with an open cursor.After a cursor is closed, no subsequent operations are allowed on it.A cursor should be closed when it is no long", + "product_code":"dws", + "title":"CLOSE", + "uri":"dws_06_0152.html", + "doc_type":"devg", + "p_code":"804", + "code":"832" + }, + { + "desc":"Cluster a table according to an index.CLUSTER instructs GaussDB(DWS) to cluster the table specified by table_name based on the index specified by index_name. The index mu", + "product_code":"dws", + "title":"CLUSTER", + "uri":"dws_06_0153.html", + "doc_type":"devg", + "p_code":"804", + "code":"833" + }, + { + "desc":"COMMENT defines or changes the comment of an object.Only one comment string is stored for each object. To modify a comment, issue a new COMMENT command for the same objec", + "product_code":"dws", + "title":"COMMENT", + "uri":"dws_06_0154.html", + "doc_type":"devg", + "p_code":"804", + "code":"834" + }, + { + "desc":"Creates a barrier for cluster nodes. The barrier can be used for data restoration.Before creating a barrier, ensure that gtm_backup_barrier and enable_cbm_tracking are se", + "product_code":"dws", + "title":"CREATE BARRIER", + "uri":"dws_06_0155.html", + "doc_type":"devg", + "p_code":"804", + "code":"835" + }, + { + "desc":"CREATE DATABASE creates a database. By default, the new database will be created by cloning the standard system database template1. A different template can be specified ", + "product_code":"dws", + "title":"CREATE DATABASE", + "uri":"dws_06_0156.html", + "doc_type":"devg", + "p_code":"804", + "code":"836" + }, + { + "desc":"CREATE FOREIGN TABLE creates a GDS foreign table.CREATE FOREIGN TABLE creates a GDS foreign table in the current database for concurrent data import and export. The GDS f", + "product_code":"dws", + "title":"CREATE FOREIGN TABLE (for GDS Import and Export)", + "uri":"dws_06_0159.html", + "doc_type":"devg", + "p_code":"804", + "code":"837" + }, + { + "desc":"CREATE FOREIGN TABLE creates an HDFS or OBS foreign table in the current database to access or export structured data stored on HDFS or OBS. You can also export data in O", + "product_code":"dws", + "title":"CREATE FOREIGN TABLE (SQL on OBS or Hadoop)", + "uri":"dws_06_0161.html", + "doc_type":"devg", + "p_code":"804", + "code":"838" + }, + { + "desc":"CREATE FOREIGN TABLE creates a foreign table in the current database for parallel data import and export of OBS data. 
The server used is gsmpp_server, which is created by",
"product_code":"dws",
"title":"CREATE FOREIGN TABLE (for OBS Import and Export)",
"uri":"dws_06_0160.html",
"doc_type":"devg",
"p_code":"804",
"code":"839"
},
{
"desc":"CREATE FUNCTION creates a function.The precision values (if any) of the parameters or return values of a function are not checked.When creating a function, you are advise",
"product_code":"dws",
"title":"CREATE FUNCTION",
"uri":"dws_06_0163.html",
"doc_type":"devg",
"p_code":"804",
"code":"840"
},
{
"desc":"CREATE GROUP creates a user group.CREATE GROUP is an alias for CREATE ROLE; it is not a standard SQL command and is not recommended. Users can use CREATE ROLE directly.T",
"product_code":"dws",
"title":"CREATE GROUP",
"uri":"dws_06_0164.html",
"doc_type":"devg",
"p_code":"804",
"code":"841"
},
{
"desc":"CREATE INDEX defines a new index.Indexes are primarily used to enhance database performance (though inappropriate use can result in slower database performance). You ",
"product_code":"dws",
"title":"CREATE INDEX",
"uri":"dws_06_0165.html",
"doc_type":"devg",
"p_code":"804",
"code":"842"
},
{
"desc":"CREATE REDACTION POLICY creates a data redaction policy for a table.Only the table owner has the permission to create a data redaction policy.You can create data redactio",
"product_code":"dws",
"title":"CREATE REDACTION POLICY",
"uri":"dws_06_0168.html",
"doc_type":"devg",
"p_code":"804",
"code":"843"
},
{
"desc":"CREATE ROW LEVEL SECURITY POLICY creates a row-level access control policy for a table.The policy takes effect only after row-level access control is enabled (by running ",
"product_code":"dws",
"title":"CREATE ROW LEVEL SECURITY POLICY",
"uri":"dws_06_0169.html",
"doc_type":"devg",
"p_code":"804",
"code":"844"
},
{
"desc":"CREATE PROCEDURE creates a stored procedure.The precision values (if any) of the parameters or return values of a stored procedure are not checked.When creating a stored ",
"product_code":"dws",
"title":"CREATE PROCEDURE",
"uri":"dws_06_0170.html",
"doc_type":"devg",
"p_code":"804",
"code":"845"
},
{
"desc":"CREATE RESOURCE POOL creates a resource pool and specifies the Cgroup for the resource pool.As long as the current user has the CREATE permission, they can create a resource po",
"product_code":"dws",
"title":"CREATE RESOURCE POOL",
"uri":"dws_06_0171.html",
"doc_type":"devg",
"p_code":"804",
"code":"846"
},
{
"desc":"Create a role.A role is an entity that has its own database objects and permissions. In different environments, a role can be considered a user, a group, or both.CREATE ROLE ",
"product_code":"dws",
"title":"CREATE ROLE",
"uri":"dws_06_0172.html",
"doc_type":"devg",
"p_code":"804",
"code":"847"
},
{
"desc":"CREATE SCHEMA creates a schema.Named objects are accessed either by \"qualifying\" their names with the schema name as a prefix, or by setting a search path that includes t",
"product_code":"dws",
"title":"CREATE SCHEMA",
"uri":"dws_06_0173.html",
"doc_type":"devg",
"p_code":"804",
"code":"848"
},
{
"desc":"CREATE SEQUENCE adds a sequence to the current database. 
The owner of a sequence is the user who creates the sequence.A sequence is a special table that stores arithmetic", + "product_code":"dws", + "title":"CREATE SEQUENCE", + "uri":"dws_06_0174.html", + "doc_type":"devg", + "p_code":"804", + "code":"849" + }, + { + "desc":"CREATE SERVER creates an external server.An external server stores information of HDFS clusters, OBS servers, DLI connections, or other homogeneous clusters.By default, o", + "product_code":"dws", + "title":"CREATE SERVER", + "uri":"dws_06_0175.html", + "doc_type":"devg", + "p_code":"804", + "code":"850" + }, + { + "desc":"CREATE SYNONYM is used to create a synonym object. A synonym is an alias of a database object and is used to record the mapping between database object names. You can use", + "product_code":"dws", + "title":"CREATE SYNONYM", + "uri":"dws_06_0176.html", + "doc_type":"devg", + "p_code":"804", + "code":"851" + }, + { + "desc":"CREATE TABLE creates a table in the current database. The table will be owned by the user who created it.For details about the data types supported by column-store tables", + "product_code":"dws", + "title":"CREATE TABLE", + "uri":"dws_06_0177.html", + "doc_type":"devg", + "p_code":"804", + "code":"852" + }, + { + "desc":"CREATE TABLE AS creates a table based on the results of a query.It creates a table and fills it with data obtained using SELECT. The table columns have the names and data", + "product_code":"dws", + "title":"CREATE TABLE AS", + "uri":"dws_06_0178.html", + "doc_type":"devg", + "p_code":"804", + "code":"853" + }, + { + "desc":"CREATE TABLE PARTITION creates a partitioned table. Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific sche", + "product_code":"dws", + "title":"CREATE TABLE PARTITION", + "uri":"dws_06_0179.html", + "doc_type":"devg", + "p_code":"804", + "code":"854" + }, + { + "desc":"CREATE TEXT SEARCH CONFIGURATION creates a text search configuration. A text search configuration specifies a text search parser that can divide a string into tokens, plu", + "product_code":"dws", + "title":"CREATE TEXT SEARCH CONFIGURATION", + "uri":"dws_06_0182.html", + "doc_type":"devg", + "p_code":"804", + "code":"855" + }, + { + "desc":"CREATE TEXT SEARCH DICTIONARY creates a full-text search dictionary. A dictionary is used to identify and process specified words during full-text search.Dictionaries are", + "product_code":"dws", + "title":"CREATE TEXT SEARCH DICTIONARY", + "uri":"dws_06_0183.html", + "doc_type":"devg", + "p_code":"804", + "code":"856" + }, + { + "desc":"CREATE TRIGGER creates a trigger. The trigger will be associated with a specified table or view, and will execute a specified function when certain events occur.Currently", + "product_code":"dws", + "title":"CREATE TRIGGER", + "uri":"dws_06_0184.html", + "doc_type":"devg", + "p_code":"804", + "code":"857" + }, + { + "desc":"CREATE TYPE defines a new data type in the current database. The user who defines a new data type becomes its owner. 
Types are designed only for row-store tables.Four typ", + "product_code":"dws", + "title":"CREATE TYPE", + "uri":"dws_06_0185.html", + "doc_type":"devg", + "p_code":"804", + "code":"858" + }, + { + "desc":"CREATE USER creates a user.A user created using the CREATE USER statement has the LOGIN permission by default.A schema named after the user is automatically created in th", + "product_code":"dws", + "title":"CREATE USER", + "uri":"dws_06_0186.html", + "doc_type":"devg", + "p_code":"804", + "code":"859" + }, + { + "desc":"CREATE VIEW creates a view. A view is a virtual table, not a base table. A database only stores the definition of a view and does not store its data. The data is still st", + "product_code":"dws", + "title":"CREATE VIEW", + "uri":"dws_06_0187.html", + "doc_type":"devg", + "p_code":"804", + "code":"860" + }, + { + "desc":"CURSOR defines a cursor. This command retrieves few rows of data in a query.To process SQL statements, the stored procedure process assigns a memory segment to store cont", + "product_code":"dws", + "title":"CURSOR", + "uri":"dws_06_0188.html", + "doc_type":"devg", + "p_code":"804", + "code":"861" + }, + { + "desc":"DROP DATABASE deletes a database.Only the owner of a database or a system administrator has the permission to run the DROP DATABASE command.DROP DATABASE does not take ef", + "product_code":"dws", + "title":"DROP DATABASE", + "uri":"dws_06_0189.html", + "doc_type":"devg", + "p_code":"804", + "code":"862" + }, + { + "desc":"DROP FOREIGN TABLE deletes a specified foreign table.DROP FOREIGN TABLE forcibly deletes a specified table. After a table is deleted, any indexes that exist for the table", + "product_code":"dws", + "title":"DROP FOREIGN TABLE", + "uri":"dws_06_0192.html", + "doc_type":"devg", + "p_code":"804", + "code":"863" + }, + { + "desc":"DROP FUNCTION deletes an existing function.If a function involves operations on temporary tables, the function cannot be deleted by running DROP FUNCTION.IF EXISTSSends a", + "product_code":"dws", + "title":"DROP FUNCTION", + "uri":"dws_06_0193.html", + "doc_type":"devg", + "p_code":"804", + "code":"864" + }, + { + "desc":"DROP GROUP deletes a user group.DROP GROUP is the alias for DROP ROLE.DROP GROUP is the internal interface encapsulated in the gs_om tool. 
You are not advised to use this", + "product_code":"dws", + "title":"DROP GROUP", + "uri":"dws_06_0194.html", + "doc_type":"devg", + "p_code":"804", + "code":"865" + }, + { + "desc":"DROP INDEX deletes an index.Only the owner of an index or a system administrator can run DROP INDEX command.IF EXISTSSends a notice instead of an error if the specified i", + "product_code":"dws", + "title":"DROP INDEX", + "uri":"dws_06_0195.html", + "doc_type":"devg", + "p_code":"804", + "code":"866" + }, + { + "desc":"DROP OWNED deletes the database objects of a database role.The role's permissions on all the database objects in the current database and shared objects (databases and ta", + "product_code":"dws", + "title":"DROP OWNED", + "uri":"dws_06_0198.html", + "doc_type":"devg", + "p_code":"804", + "code":"867" + }, + { + "desc":"DROP REDACTION POLICY deletes a data redaction policy applied to a specified table.Only the table owner has the permission to delete a data redaction policy.IF EXISTSSend", + "product_code":"dws", + "title":"DROP REDACTION POLICY", + "uri":"dws_06_0199.html", + "doc_type":"devg", + "p_code":"804", + "code":"868" + }, + { + "desc":"DROP ROW LEVEL SECURITY POLICY deletes a row-level access control policy from a table.Only the table owner or administrators can delete a row-level access control policy ", + "product_code":"dws", + "title":"DROP ROW LEVEL SECURITY POLICY", + "uri":"dws_06_0200.html", + "doc_type":"devg", + "p_code":"804", + "code":"869" + }, + { + "desc":"DROP PROCEDURE deletes an existing stored procedure.None.IF EXISTSSends a notice instead of an error if the stored procedure does not exist.Sends a notice instead of an e", + "product_code":"dws", + "title":"DROP PROCEDURE", + "uri":"dws_06_0201.html", + "doc_type":"devg", + "p_code":"804", + "code":"870" + }, + { + "desc":"DROP RESOURCE POOL deletes a resource pool.The resource pool cannot be deleted if it is associated with a role.The user must have the DROP permission in order to delete a", + "product_code":"dws", + "title":"DROP RESOURCE POOL", + "uri":"dws_06_0202.html", + "doc_type":"devg", + "p_code":"804", + "code":"871" + }, + { + "desc":"DROP ROLE deletes a specified role.If a \"role is being used by other users\" error is displayed when you run DROP ROLE, it might be that threads cannot respond to signals ", + "product_code":"dws", + "title":"DROP ROLE", + "uri":"dws_06_0203.html", + "doc_type":"devg", + "p_code":"804", + "code":"872" + }, + { + "desc":"DROP SCHEMA deletes a schema in a database.Only a schema owner or a system administrator can run the DROP SCHEMA command.IF EXISTSSends a notice instead of an error if th", + "product_code":"dws", + "title":"DROP SCHEMA", + "uri":"dws_06_0204.html", + "doc_type":"devg", + "p_code":"804", + "code":"873" + }, + { + "desc":"DROP SEQUENCE deletes a sequence from the current database.Only a sequence owner or a system administrator can delete a sequence.IF EXISTSSends a notice instead of an err", + "product_code":"dws", + "title":"DROP SEQUENCE", + "uri":"dws_06_0205.html", + "doc_type":"devg", + "p_code":"804", + "code":"874" + }, + { + "desc":"DROP SERVER deletes an existing data server.Only the server owner can delete a server.IF EXISTSSends a notice instead of an error if the specified table does not exist.Se", + "product_code":"dws", + "title":"DROP SERVER", + "uri":"dws_06_0206.html", + "doc_type":"devg", + "p_code":"804", + "code":"875" + }, + { + "desc":"DROP SYNONYM is used to delete a synonym object.Only a synonym owner or a system administrator can run 
the DROP SYNONYM command.IF EXISTSSend a notice instead of reportin", + "product_code":"dws", + "title":"DROP SYNONYM", + "uri":"dws_06_0207.html", + "doc_type":"devg", + "p_code":"804", + "code":"876" + }, + { + "desc":"DROP TABLE deletes a specified table.Only the table owner, schema owner, and system administrator have the permission to delete a table. To delete all the rows in a table", + "product_code":"dws", + "title":"DROP TABLE", + "uri":"dws_06_0208.html", + "doc_type":"devg", + "p_code":"804", + "code":"877" + }, + { + "desc":"DROP TEXT SEARCH CONFIGURATION deletes an existing text search configuration.To run the DROP TEXT SEARCH CONFIGURATION command, you must be the owner of the text search c", + "product_code":"dws", + "title":"DROP TEXT SEARCH CONFIGURATION", + "uri":"dws_06_0210.html", + "doc_type":"devg", + "p_code":"804", + "code":"878" + }, + { + "desc":"DROPTEXT SEARCHDICTIONARY deletes a full-text retrieval dictionary.DROP is not supported by predefined dictionaries.Only the owner of a dictionary can do DROP to the dict", + "product_code":"dws", + "title":"DROP TEXT SEARCH DICTIONARY", + "uri":"dws_06_0211.html", + "doc_type":"devg", + "p_code":"804", + "code":"879" + }, + { + "desc":"DROP TRIGGER deletes a trigger.Only the owner of a trigger and system administrators can run the DROP TRIGGER statement.IF EXISTSSends a notice instead of an error if the", + "product_code":"dws", + "title":"DROP TRIGGER", + "uri":"dws_06_0212.html", + "doc_type":"devg", + "p_code":"804", + "code":"880" + }, + { + "desc":"DROP TYPE deletes a user-defined data type. Only the type owner has permission to run this statement.IF EXISTSSends a notice instead of an error if the specified type doe", + "product_code":"dws", + "title":"DROP TYPE", + "uri":"dws_06_0213.html", + "doc_type":"devg", + "p_code":"804", + "code":"881" + }, + { + "desc":"Deleting a user will also delete the schema having the same name as the user.CASCADE is used to delete objects (excluding databases) that depend on the user. CASCADE cann", + "product_code":"dws", + "title":"DROP USER", + "uri":"dws_06_0214.html", + "doc_type":"devg", + "p_code":"804", + "code":"882" + }, + { + "desc":"DROP VIEW forcibly deletes an existing view in a database.Only a view owner or a system administrator can run DROP VIEW command.IF EXISTSSends a notice instead of an erro", + "product_code":"dws", + "title":"DROP VIEW", + "uri":"dws_06_0215.html", + "doc_type":"devg", + "p_code":"804", + "code":"883" + }, + { + "desc":"FETCH retrieves data using a previously-created cursor.A cursor has an associated position, which is used by FETCH. The cursor position can be before the first row of the", + "product_code":"dws", + "title":"FETCH", + "uri":"dws_06_0216.html", + "doc_type":"devg", + "p_code":"804", + "code":"884" + }, + { + "desc":"MOVE repositions a cursor without retrieving any data. MOVE works exactly like the FETCH command, except it only repositions the cursor and does not return rows.NoneThe d", + "product_code":"dws", + "title":"MOVE", + "uri":"dws_06_0217.html", + "doc_type":"devg", + "p_code":"804", + "code":"885" + }, + { + "desc":"REINDEX rebuilds an index using the data stored in the index's table, replacing the old copy of the index.There are several scenarios in which REINDEX can be used:An inde", + "product_code":"dws", + "title":"REINDEX", + "uri":"dws_06_0218.html", + "doc_type":"devg", + "p_code":"804", + "code":"886" + }, + { + "desc":"RESET restores run-time parameters to their default values. 
The default values are parameter default values complied in the postgresql.conf configuration file.RESET is an", + "product_code":"dws", + "title":"RESET", + "uri":"dws_06_0219.html", + "doc_type":"devg", + "p_code":"804", + "code":"887" + }, + { + "desc":"SET modifies a run-time parameter.Most run-time parameters can be modified by executing SET. Some parameters cannot be modified after a server or session starts.Set the s", + "product_code":"dws", + "title":"SET", + "uri":"dws_06_0220.html", + "doc_type":"devg", + "p_code":"804", + "code":"888" + }, + { + "desc":"SET CONSTRAINTS sets the behavior of constraint checking within the current transaction.IMMEDIATE constraints are checked at the end of each statement. DEFERRED constrain", + "product_code":"dws", + "title":"SET CONSTRAINTS", + "uri":"dws_06_0221.html", + "doc_type":"devg", + "p_code":"804", + "code":"889" + }, + { + "desc":"SET ROLE sets the current user identifier of the current session.Users of the current session must be members of specified rolename, but the system administrator can choo", + "product_code":"dws", + "title":"SET ROLE", + "uri":"dws_06_0222.html", + "doc_type":"devg", + "p_code":"804", + "code":"890" + }, + { + "desc":"SET SESSION AUTHORIZATION sets the session user identifier and the current user identifier of the current SQL session to a specified user.The session identifier can be ch", + "product_code":"dws", + "title":"SET SESSION AUTHORIZATION", + "uri":"dws_06_0223.html", + "doc_type":"devg", + "p_code":"804", + "code":"891" + }, + { + "desc":"SHOW shows the current value of a run-time parameter. You can use the SET statement to set these parameters.Some parameters that can be viewed by SHOW are read-only. You ", + "product_code":"dws", + "title":"SHOW", + "uri":"dws_06_0224.html", + "doc_type":"devg", + "p_code":"804", + "code":"892" + }, + { + "desc":"TRUNCATE quickly removes all rows from a database table.It has the same effect as an unqualified DELETE on each table, but it is faster since it does not actually scan th", + "product_code":"dws", + "title":"TRUNCATE", + "uri":"dws_06_0225.html", + "doc_type":"devg", + "p_code":"804", + "code":"893" + }, + { + "desc":"VACUUM reclaims storage space occupied by tables or B-tree indexes. In normal database operation, rows that have been deleted or obsoleted by an update are not physically", + "product_code":"dws", + "title":"VACUUM", + "uri":"dws_06_0226.html", + "doc_type":"devg", + "p_code":"804", + "code":"894" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"DML Syntax", + "uri":"dws_06_0227.html", + "doc_type":"devg", + "p_code":"686", + "code":"895" + }, + { + "desc":"Data Manipulation Language (DML) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data.Inserting data refers t", + "product_code":"dws", + "title":"DML Syntax Overview", + "uri":"dws_06_0228.html", + "doc_type":"devg", + "p_code":"895", + "code":"896" + }, + { + "desc":"CALL calls defined functions or stored procedures.NoneschemaSpecifies the name of the schema where a function or stored procedure is located.Specifies the name of the sch", + "product_code":"dws", + "title":"CALL", + "uri":"dws_06_0229.html", + "doc_type":"devg", + "p_code":"895", + "code":"897" + }, + { + "desc":"COPY copies data between tables and files.COPY FROM copies data from a file to a table. COPY TO copies data from a table to a file.If CNs and DNs are enabled in security ", + "product_code":"dws", + "title":"COPY", + "uri":"dws_06_0230.html", + "doc_type":"devg", + "p_code":"895", + "code":"898" + }, + { + "desc":"DELETE deletes rows that satisfy the WHERE clause from the specified table. If the WHERE clause does not exist, all rows in the table will be deleted. The result is a val", + "product_code":"dws", + "title":"DELETE", + "uri":"dws_06_0231.html", + "doc_type":"devg", + "p_code":"895", + "code":"899" + }, + { + "desc":"EXPLAIN shows the execution plan of an SQL statement.The execution plan shows how the tables referenced by the SQL statement will be scanned, for example, by plain sequen", + "product_code":"dws", + "title":"EXPLAIN", + "uri":"dws_06_0232.html", + "doc_type":"devg", + "p_code":"895", + "code":"900" + }, + { + "desc":"You can run the EXPLAIN PLAN statement to save the information about an execution plan to the PLAN_TABLE table. Different from the EXPLAIN statement, EXPLAIN PLAN only st", + "product_code":"dws", + "title":"EXPLAIN PLAN", + "uri":"dws_06_0233.html", + "doc_type":"devg", + "p_code":"895", + "code":"901" + }, + { + "desc":"LOCK TABLE obtains a table-level lock.GaussDB(DWS) always tries to select the lock mode with minimum constraints when automatically requesting a lock for a command refere", + "product_code":"dws", + "title":"LOCK", + "uri":"dws_06_0234.html", + "doc_type":"devg", + "p_code":"895", + "code":"902" + }, + { + "desc":"The MERGE INTO statement is used to conditionally match data in a target table with that in a source table. If data matches, UPDATE is executed on the target table; if da", + "product_code":"dws", + "title":"MERGE INTO", + "uri":"dws_06_0235.html", + "doc_type":"devg", + "p_code":"895", + "code":"903" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"INSERT and UPSERT", + "uri":"dws_06_0275.html", + "doc_type":"devg", + "p_code":"895", + "code":"904" + }, + { + "desc":"INSERT inserts new rows into a table.You must have the INSERT permission on a table in order to insert into it.Use of the RETURNING clause requires the SELECT permission ", + "product_code":"dws", + "title":"INSERT", + "uri":"dws_06_0236.html", + "doc_type":"devg", + "p_code":"904", + "code":"905" + }, + { + "desc":"UPSERT inserts rows into a table. When a row duplicates an existing primary key or unique key value, the row will be ignored or updated.The UPSERT syntax is supported onl", + "product_code":"dws", + "title":"UPSERT", + "uri":"dws_06_0237.html", + "doc_type":"devg", + "p_code":"904", + "code":"906" + }, + { + "desc":"UPDATE updates data in a table. UPDATE changes the values of the specified columns in all rows that satisfy the condition. The WHERE clause clarifies conditions. The colu", + "product_code":"dws", + "title":"UPDATE", + "uri":"dws_06_0240.html", + "doc_type":"devg", + "p_code":"895", + "code":"907" + }, + { + "desc":"VALUES computes a row or a set of rows based on given values. It is most commonly used to generate a constant table within a large command.VALUES lists with large numbers", + "product_code":"dws", + "title":"VALUES", + "uri":"dws_06_0241.html", + "doc_type":"devg", + "p_code":"895", + "code":"908" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"DCL Syntax", + "uri":"dws_06_0242.html", + "doc_type":"devg", + "p_code":"686", + "code":"909" + }, + { + "desc":"Data control language (DCL) is used to set or modify database users or role rights.GaussDB(DWS) provides a statement for granting rights to data objects and roles. For de", + "product_code":"dws", + "title":"DCL Syntax Overview", + "uri":"dws_06_0243.html", + "doc_type":"devg", + "p_code":"909", + "code":"910" + }, + { + "desc":"ALTER DEFAULT PRIVILEGES allows you to set the permissions that will be used for objects to be created. It does not affect permissions assigned to existing objects.To iso", + "product_code":"dws", + "title":"ALTER DEFAULT PRIVILEGES", + "uri":"dws_06_0244.html", + "doc_type":"devg", + "p_code":"909", + "code":"911" + }, + { + "desc":"ANALYZE collects statistics about ordinary tables in a database, and stores the results in the PG_STATISTIC system catalog. The execution plan generator uses these statis", + "product_code":"dws", + "title":"ANALYZE | ANALYSE", + "uri":"dws_06_0245.html", + "doc_type":"devg", + "p_code":"909", + "code":"912" + }, + { + "desc":"DEALLOCATE deallocates a previously prepared statement. If you do not explicitly deallocate a prepared statement, it is deallocated when the session ends.The PREPARE key ", + "product_code":"dws", + "title":"DEALLOCATE", + "uri":"dws_06_0246.html", + "doc_type":"devg", + "p_code":"909", + "code":"913" + }, + { + "desc":"DO executes an anonymous code block.A code block is a function body without parameters that returns void. 
It is analyzed and executed at the same time.Before using a prog", + "product_code":"dws", + "title":"DO", + "uri":"dws_06_0247.html", + "doc_type":"devg", + "p_code":"909", + "code":"914" + }, + { + "desc":"EXECUTE executes a prepared statement. A prepared statement only exists in the lifecycle of a session. Therefore, only prepared statements created using PREPARE earlier i", + "product_code":"dws", + "title":"EXECUTE", + "uri":"dws_06_0248.html", + "doc_type":"devg", + "p_code":"909", + "code":"915" + }, + { + "desc":"EXECUTE DIRECT executes an SQL statement on a specified node. Generally, the cluster automatically allocates an SQL statement to proper nodes. EXECUTE DIRECT is mainly us", + "product_code":"dws", + "title":"EXECUTE DIRECT", + "uri":"dws_06_0249.html", + "doc_type":"devg", + "p_code":"909", + "code":"916" + }, + { + "desc":"GRANT grants permissions to roles and users.GRANT is used in the following scenarios:Granting system permissions to roles or usersSystem permissions are also called user ", + "product_code":"dws", + "title":"GRANT", + "uri":"dws_06_0250.html", + "doc_type":"devg", + "p_code":"909", + "code":"917" + }, + { + "desc":"PREPARE creates a prepared statement.A prepared statement is a performance optimizing object on the server. When the PREPARE statement is executed, the specified query is", + "product_code":"dws", + "title":"PREPARE", + "uri":"dws_06_0251.html", + "doc_type":"devg", + "p_code":"909", + "code":"918" + }, + { + "desc":"REASSIGN OWNED changes the owner of a database.REASSIGN OWNED requires that the system change owners of all the database objects owned by old_roles to new_role.REASSIGN O", + "product_code":"dws", + "title":"REASSIGN OWNED", + "uri":"dws_06_0252.html", + "doc_type":"devg", + "p_code":"909", + "code":"919" + }, + { + "desc":"REVOKE revokes rights from one or more roles.If a non-owner user of an object attempts to REVOKE rights on the object, the command is executed based on the following rule", + "product_code":"dws", + "title":"REVOKE", + "uri":"dws_06_0253.html", + "doc_type":"devg", + "p_code":"909", + "code":"920" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"DQL Syntax", + "uri":"dws_06_0276.html", + "doc_type":"devg", + "p_code":"686", + "code":"921" + }, + { + "desc":"Data Query Language (DQL) can obtain data from tables or views.GaussDB(DWS) provides statements for obtaining data from tables or views. 
For details, see SELECT.GaussDB(D", + "product_code":"dws", + "title":"DQL Syntax Overview", + "uri":"dws_06_0277.html", + "doc_type":"devg", + "p_code":"921", + "code":"922" + }, + { + "desc":"SELECT retrieves data from a table or view.Serving as an overlaid filter for a database table, SELECT using SQL keywords retrieves required data from data tables.Using SE", + "product_code":"dws", + "title":"SELECT", + "uri":"dws_06_0238.html", + "doc_type":"devg", + "p_code":"921", + "code":"923" + }, + { + "desc":"SELECT INTO defines a new table based on a query result and insert data obtained by query to the new table.Different from SELECT, data found by SELECT INTO is not returne", + "product_code":"dws", + "title":"SELECT INTO", + "uri":"dws_06_0239.html", + "doc_type":"devg", + "p_code":"921", + "code":"924" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"TCL Syntax", + "uri":"dws_06_0254.html", + "doc_type":"devg", + "p_code":"686", + "code":"925" + }, + { + "desc":"Transaction Control Language (TCL) controls the time and effect of database transactions and monitors the database.GaussDB(DWS) uses the COMMIT or END statement to commit", + "product_code":"dws", + "title":"TCL Syntax Overview", + "uri":"dws_06_0255.html", + "doc_type":"devg", + "p_code":"925", + "code":"926" + }, + { + "desc":"ABORT rolls back the current transaction and cancels the changes in the transaction.This command is equivalent to ROLLBACK, and is present only for historical reasons. No", + "product_code":"dws", + "title":"ABORT", + "uri":"dws_06_0256.html", + "doc_type":"devg", + "p_code":"925", + "code":"927" + }, + { + "desc":"BEGIN may be used to initiate an anonymous block or a single transaction. This section describes the syntax of BEGIN used to initiate an anonymous block. For details abou", + "product_code":"dws", + "title":"BEGIN", + "uri":"dws_06_0257.html", + "doc_type":"devg", + "p_code":"925", + "code":"928" + }, + { + "desc":"A checkpoint is a point in the transaction log sequence at which all data files have been updated to reflect the information in the log. All data files will be flushed to", + "product_code":"dws", + "title":"CHECKPOINT", + "uri":"dws_06_0258.html", + "doc_type":"devg", + "p_code":"925", + "code":"929" + }, + { + "desc":"COMMIT or END commits all operations of a transaction.Only the transaction creators or system administrators can run the COMMIT command. The creation and commit operation", + "product_code":"dws", + "title":"COMMIT | END", + "uri":"dws_06_0259.html", + "doc_type":"devg", + "p_code":"925", + "code":"930" + }, + { + "desc":"COMMIT PREPARED commits a prepared two-phase transaction.The function is only available in maintenance mode (when GUC parameter xc_maintenance_mode is on). 
Exercise cauti", + "product_code":"dws", + "title":"COMMIT PREPARED", + "uri":"dws_06_0260.html", + "doc_type":"devg", + "p_code":"925", + "code":"931" + }, + { + "desc":"PREPARE TRANSACTION prepares the current transaction for two-phase commit.After this command, the transaction is no longer associated with the current session; instead, i", + "product_code":"dws", + "title":"PREPARE TRANSACTION", + "uri":"dws_06_0262.html", + "doc_type":"devg", + "p_code":"925", + "code":"932" + }, + { + "desc":"SAVEPOINT establishes a new savepoint within the current transaction.A savepoint is a special mark inside a transaction that rolls back all commands that are executed aft", + "product_code":"dws", + "title":"SAVEPOINT", + "uri":"dws_06_0263.html", + "doc_type":"devg", + "p_code":"925", + "code":"933" + }, + { + "desc":"SET TRANSACTION sets the characteristics of the current transaction. It has no effect on any subsequent transactions. Available transaction characteristics include the tr", + "product_code":"dws", + "title":"SET TRANSACTION", + "uri":"dws_06_0264.html", + "doc_type":"devg", + "p_code":"925", + "code":"934" + }, + { + "desc":"START TRANSACTION starts a transaction. If the isolation level, read/write mode, or deferrable mode is specified, a new transaction will have those characteristics. You c", + "product_code":"dws", + "title":"START TRANSACTION", + "uri":"dws_06_0265.html", + "doc_type":"devg", + "p_code":"925", + "code":"935" + }, + { + "desc":"Rolls back the current transaction and backs out all updates in the transaction.ROLLBACK backs out of all changes that a transaction makes to a database if the transactio", + "product_code":"dws", + "title":"ROLLBACK", + "uri":"dws_06_0266.html", + "doc_type":"devg", + "p_code":"925", + "code":"936" + }, + { + "desc":"RELEASE SAVEPOINT destroys a savepoint previously defined in the current transaction.Destroying a savepoint makes it unavailable as a rollback point, but it has no other ", + "product_code":"dws", + "title":"RELEASE SAVEPOINT", + "uri":"dws_06_0267.html", + "doc_type":"devg", + "p_code":"925", + "code":"937" + }, + { + "desc":"ROLLBACK PREPARED cancels a transaction ready for two-phase committing.The function is only available in maintenance mode (when GUC parameter xc_maintenance_mode is on). ", + "product_code":"dws", + "title":"ROLLBACK PREPARED", + "uri":"dws_06_0268.html", + "doc_type":"devg", + "p_code":"925", + "code":"938" + }, + { + "desc":"ROLLBACK TO SAVEPOINT rolls back to a savepoint. It implicitly destroys all savepoints that were established after the named savepoint.Rolls back all commands that were e", + "product_code":"dws", + "title":"ROLLBACK TO SAVEPOINT", + "uri":"dws_06_0269.html", + "doc_type":"devg", + "p_code":"925", + "code":"939" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"GIN Indexes", + "uri":"dws_06_0270.html", + "doc_type":"devg", + "p_code":"686", + "code":"940" + }, + { + "desc":"Generalized Inverted Index (GIN) is designed for handling cases where the items to be indexed are composite values, and the queries to be handled by the index need to sea", + "product_code":"dws", + "title":"Introduction", + "uri":"dws_06_0271.html", + "doc_type":"devg", + "p_code":"940", + "code":"941" + }, + { + "desc":"The GIN interface has a high level of abstraction, requiring the access method implementer only to implement the semantics of the data type being accessed. The GIN layer ", + "product_code":"dws", + "title":"Scalability", + "uri":"dws_06_0272.html", + "doc_type":"devg", + "p_code":"940", + "code":"942" + }, + { + "desc":"Internally, a GIN index contains a B-tree index constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and w", + "product_code":"dws", + "title":"Implementation", + "uri":"dws_06_0273.html", + "doc_type":"devg", + "p_code":"940", + "code":"943" + }, + { + "desc":"Create vs. InsertInsertion into a GIN index can be slow due to the likelihood of many keys being inserted for each item. So, for bulk insertions into a table, it is advis", + "product_code":"dws", + "title":"GIN Tips and Tricks", + "uri":"dws_06_0274.html", + "doc_type":"devg", + "p_code":"940", + "code":"944" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dws", + "title":"Change History", + "uri":"dws_04_3333.html", + "doc_type":"devg", + "p_code":"", + "code":"945" + } +] \ No newline at end of file diff --git a/docs/dws/dev/PARAMETERS.txt b/docs/dws/dev/PARAMETERS.txt new file mode 100644 index 00000000..6da8d5f0 --- /dev/null +++ b/docs/dws/dev/PARAMETERS.txt @@ -0,0 +1,3 @@ +version="" +language="en-us" +type="" \ No newline at end of file diff --git a/docs/dws/dev/dws_01_0127.html b/docs/dws/dev/dws_01_0127.html new file mode 100644 index 00000000..9681f16c --- /dev/null +++ b/docs/dws/dev/dws_01_0127.html @@ -0,0 +1,20 @@ + + +

Using DSC to Migrate SQL Scripts

+

DSC is a CLI tool that runs on Linux or Windows and provides simple, fast, and reliable migration of application SQL scripts. It parses the SQL scripts of source database applications using built-in syntax migration logic and converts them into SQL scripts that can run on GaussDB(DWS) databases. DSC does not need to connect to a database: it migrates scripts offline, without interrupting services. In GaussDB(DWS), you can then run the migrated SQL scripts to rebuild the database, easily moving offline databases to the cloud.

+

DSC can migrate SQL scripts from Teradata, Oracle, Netezza, MySQL, and DB2 databases.

+
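For example, DSC rewrites source-specific syntax into GaussDB(DWS) SQL. The following before/after pair is an illustrative sketch only (the table follows the TPC-DS naming used elsewhere in this guide; actual DSC output may differ in formatting):

-- Teradata source script (DSC input): SEL is Teradata shorthand for SELECT
SEL ss_item_sk, COUNT(*) FROM store_sales GROUP BY 1;

-- Converted script (DSC output), runnable on GaussDB(DWS)
SELECT ss_item_sk, COUNT(*) FROM store_sales GROUP BY 1;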

Downloading the DSC SQL Migration Tool

  1. Log in to the GaussDB(DWS) management console.
  2. In the navigation tree on the left, click Connection Management.
  3. In the Download Client and Driver area, click here to download the DSC migration tool.

    If you have clusters of different versions, a dialog box prompts you to select a cluster version and download the matching client. To check a cluster's version, click the cluster name in the cluster list on the Cluster Management page and then click the Basic Information tab.

    +
    Figure 1 Downloading the tool
    +

    +

  4. After downloading the DSC tool to the local PC, use WinSCP to upload it to a Linux host.

    The user who uploads the tool must have the full control permission on the target directory of the Linux host.

    +

+
+

Operation Guide for the DSC SQL Syntax Migration Tool

For details, see "DSC - SQL Syntax Migration Tool" in the Data Warehouse Service Tool Guide.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0001.html b/docs/dws/dev/dws_04_0001.html new file mode 100644 index 00000000..1a251b98 --- /dev/null +++ b/docs/dws/dev/dws_04_0001.html @@ -0,0 +1,21 @@ + + +

Welcome

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0002.html b/docs/dws/dev/dws_04_0002.html new file mode 100644 index 00000000..f4c59221 --- /dev/null +++ b/docs/dws/dev/dws_04_0002.html @@ -0,0 +1,20 @@ + + +

Target Readers

+

This document is intended for database designers, application developers, and database administrators, and provides the information required for designing, building, querying, and maintaining data warehouses.

+

As a database administrator or application developer, you need to be familiar with:

+ +

Statement

The GaussDB(DWS) documentation aims to provide guidance from the perspective of commercial use, application scenarios, and task completion. Even so, the document may still reference PostgreSQL content, and the following PostgreSQL copyright applies to such content:

+

Postgres-XC is Copyright © 1996-2013 by the PostgreSQL Global Development Group.

+

PostgreSQL is Copyright © 1996-2013 by the PostgreSQL Global Development Group.

+

Postgres95 is Copyright © 1994-5 by the Regents of the University of California.

+

IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+

THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS-IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0004.html b/docs/dws/dev/dws_04_0004.html new file mode 100644 index 00000000..07e319a3 --- /dev/null +++ b/docs/dws/dev/dws_04_0004.html @@ -0,0 +1,57 @@ + + +

Reading Guide

+

If you are a new GaussDB(DWS) user, you are advised to read the following contents first:

+ +

If you are migrating, or plan to migrate, applications from another data warehouse to GaussDB(DWS), you might want to know how GaussDB(DWS) differs from it.

+

The following table points you to useful information for GaussDB(DWS) database application development.

+ +
+ + + + + + + + + + + + + + + + + + + + + + +

If you want to...

+

Suggested Reading

+

Quickly get started with GaussDB(DWS).

+

Deploy a cluster, connect to the database, and perform some queries by following the instructions provided in "Getting Started" in the Data Warehouse Service (DWS) User Guide.

+

When you are ready to build a database, load data into tables, and write queries against the data in the warehouse, return to this Data Warehouse Service Database Developer Guide.

+

Understand the internal architecture of a GaussDB(DWS) data warehouse.

+

To know more about GaussDB(DWS), go to the GaussDB(DWS) home page.

+

Learn how to design tables for excellent performance.

+

Development and Design Proposal introduces the design specifications to follow when developing database applications. Modeling that complies with these specifications fits the distributed processing architecture of GaussDB(DWS) and yields efficient SQL code.

+

To optimize how services run, refer to Query Performance Optimization. Successful performance optimization depends more on database administrators' experience and judgment than on instructions and explanations; even so, Query Performance Optimization systematically presents optimization methods for application developers and new GaussDB(DWS) database administrators.

+

Load data.

+

Data Import describes how to import data to GaussDB(DWS).

+

Manage users, groups, and database security.

+

Database Security Management covers database security topics.

+

Monitor and optimize system performance.

+

System Catalogs and System Views describes the system catalogs and views that you can query to check the database status and monitor query content and progress.

+

You can learn how to check the system running status and monitoring metrics on the GaussDB(DWS) console by referring to "Monitoring Clusters" in the Data Warehouse Service (DWS) User Guide.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0005.html b/docs/dws/dev/dws_04_0005.html new file mode 100644 index 00000000..2d775112 --- /dev/null +++ b/docs/dws/dev/dws_04_0005.html @@ -0,0 +1,94 @@ + + +

Conventions

+

Example Conventions

+
+ + + + + + + + + + +

Example

+

Description

+

dbadmin

+

Indicates the administrator user, appointed when the cluster is created, who operates and maintains GaussDB(DWS).

+

8000

+

Indicates the port number on which GaussDB(DWS) listens for connection requests from clients.

+
+
+

SQL examples in this manual are developed based on the TPC-DS model. Before you execute the examples, install the TPC-DS benchmark by following the instructions on the official website https://www.tpc.org/tpcds/.

+
+

SQL Syntax Text Conventions

To better understand the syntax usage, you can refer to the SQL syntax text conventions described as follows:

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Format

+

Description

+

Uppercase characters

+

Indicates that keywords must be in uppercase.

+

Lowercase characters

+

Indicates that parameters must be in lowercase.

+

[ ]

+

Indicates that the items in brackets [] are optional.

+

...

+

Indicates that preceding elements can appear repeatedly.

+

[ x | y | ... ]

+

Indicates that one item is selected from two or more options or no item is selected.

+

{ x | y | ... }

+

Indicates that one item is selected from two or more options.

+

[x | y | ... ] [ ... ]

+

Indicates that multiple parameters or no parameter can be selected. If multiple parameters are selected, separate them with spaces.

+

[ x | y | ... ] [ ,... ]

+

Indicates that multiple parameters or no parameter can be selected. If multiple parameters are selected, separate them with commas (,).

+

{ x | y | ... } [ ... ]

+

Indicates that at least one parameter can be selected. If multiple parameters are selected, separate them with spaces.

+

{ x | y | ... } [ ,... ]

+

Indicates that at least one parameter can be selected. If multiple parameters are selected, separate them with commas (,).

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0006.html b/docs/dws/dev/dws_04_0006.html new file mode 100644 index 00000000..547a9cc5 --- /dev/null +++ b/docs/dws/dev/dws_04_0006.html @@ -0,0 +1,13 @@ + + +

Prerequisites

+

Complete the following tasks before you perform operations described in this document:

+ +

For details about the preceding tasks, see "Getting Started" in the Data Warehouse Service (DWS) User Guide.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0007.html b/docs/dws/dev/dws_04_0007.html new file mode 100644 index 00000000..61c48b17 --- /dev/null +++ b/docs/dws/dev/dws_04_0007.html @@ -0,0 +1,20 @@ + + +

System Overview

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0011.html b/docs/dws/dev/dws_04_0011.html new file mode 100644 index 00000000..b91e6713 --- /dev/null +++ b/docs/dws/dev/dws_04_0011.html @@ -0,0 +1,16 @@ + + +

Highly Reliable Transaction Processing

+

GaussDB(DWS) manages cluster transactions, which are the basis of HA and failovers. This ensures speedy fault recovery, guarantees the atomicity, consistency, isolation, and durability (ACID) properties of transactions before and after a recovery, and enables concurrency control.

+

Fault Rectification

+

GaussDB(DWS) provides an HA mechanism to reduce the service interruption time when a cluster is faulty. It keeps key user programs continuously providing external services, minimizing the impact of hardware, software, and human faults on services and ensuring service continuity.

+ +

Transaction Management

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0012.html b/docs/dws/dev/dws_04_0012.html new file mode 100644 index 00000000..68717d7c --- /dev/null +++ b/docs/dws/dev/dws_04_0012.html @@ -0,0 +1,20 @@ + + +

High Query Performance

+

The following GaussDB(DWS) features help achieve high query performance.

+

Fully Parallel Query

GaussDB(DWS) is an MPP system with the shared-nothing architecture. It consists of multiple independent logical nodes that do not share system resources, such as the CPU, memory, and storage units. In such a system architecture, service data is separately stored on numerous nodes. Data analysis tasks are executed in parallel on the nodes where data is stored. The massively parallel data processing significantly improves response speed.

+

In addition, GaussDB(DWS) improves data query performance by executing operators in parallel, executing instructions in registers in parallel, and using LLVM to dynamically compile logical conditions and prune redundant ones.

+
+

Hybrid Row-Column Storage

GaussDB(DWS) supports both the row and column storage models. You can choose a row- or column-store table as needed.

+

The hybrid row-column storage engine offers a higher data compression ratio and better index performance (column storage), together with better point update and point query performance (row storage).

+
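For example, the storage model is chosen per table at creation time. A minimal sketch (the table names and columns here are illustrative):

CREATE TABLE sales_fact (item_sk int, amount numeric) WITH (ORIENTATION = COLUMN);
CREATE TABLE order_detail (order_sk int, status varchar(16)) WITH (ORIENTATION = ROW);

Column storage suits append-mostly analytic scans over a few columns; row storage suits frequent point queries and updates.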
+

Data Compression in Column Storage

You can compress old, inactive data to free up space, reducing procurement and O&M costs.

+

In GaussDB(DWS), data can be compressed using the Delta Value Encoding, Dictionary, RLE, LZ4, and ZLIB algorithms. The system automatically selects a compression algorithm based on data characteristics. The average compression ratio is 7:1. Compressed data can be directly accessed and is transparent to services, greatly reducing the preparation time before accessing historical data.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0015.html b/docs/dws/dev/dws_04_0015.html new file mode 100644 index 00000000..084c2b82 --- /dev/null +++ b/docs/dws/dev/dws_04_0015.html @@ -0,0 +1,22 @@ + + +

Related Concepts

+

Database

A database manages data objects and is isolated from other databases. When creating a database, you can specify a tablespace. If you do not, database objects are saved to the PG_DEFAULT tablespace by default. Objects managed by a database can be distributed across multiple tablespaces.

+
+

Instance

In GaussDB(DWS), an instance is a group of database processes running in memory. An instance can manage one or more databases, which form a cluster. A cluster is an area on the storage disk, initialized during installation, that consists of a single directory: the data directory, created by initdb, which stores all data. In theory, one server can start multiple instances on different ports, but GaussDB(DWS) manages only one instance at a time. Starting and stopping an instance depend on its data directory. For compatibility purposes, the concept of an instance name may be introduced.

+
+

Tablespaces

In GaussDB(DWS), a tablespace is a directory that stores the physical files of the databases it contains. Multiple tablespaces can coexist. Tablespaces physically isolate files, which are managed by the file system.

+
+

Schema

GaussDB(DWS) schemas logically separate databases. All database objects are created under certain schemas. In GaussDB(DWS), schemas and users are loosely bound. When you create a user, a schema with the same name as the user will be created automatically. You can also create a schema or specify another schema.

+
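For example, a minimal sketch of creating a schema and putting it on the search path (the names are illustrative):

CREATE SCHEMA sales_schema;
SET search_path TO sales_schema, public;
CREATE TABLE sales_schema.daily_report (report_date date);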
+

User and Role

GaussDB(DWS) uses users and roles to control the access to databases. A role can be a database user or a group of database users, depending on role settings. In GaussDB(DWS), the difference between roles and users is that a role does not have the LOGIN permission by default. In GaussDB(DWS), one user can have only one role, but you can put a user's role under a parent role to grant multiple permissions to the user.

+
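For example, a minimal sketch of using a role as a permission container for a user (the names are illustrative; the password placeholder follows the convention used elsewhere in this guide):

CREATE ROLE analyst WITH PASSWORD 'password';
CREATE USER alice WITH PASSWORD 'password';
GRANT analyst TO alice;  -- alice gains the permissions granted to analyst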
+

Transaction Management

In GaussDB(DWS), transactions are managed by multi-version concurrency control (MVCC) and two-phase locking (2PL), which enables smooth data reads and writes. MVCC saves historical version data together with the current tuple version, and the VACUUM process, rather than rollback segments, routinely deletes historical version data. Unless you are tuning performance, you do not need to pay attention to the VACUUM process. Transactions are automatically committed in GaussDB(DWS).

+
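For example, a minimal maintenance sketch (store_sales follows the TPC-DS naming used in this guide): VACUUM reclaims the storage held by historical row versions, and ANALYZE refreshes optimizer statistics:

VACUUM store_sales;
ANALYZE store_sales;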
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0042.html b/docs/dws/dev/dws_04_0042.html new file mode 100644 index 00000000..faa3efb3 --- /dev/null +++ b/docs/dws/dev/dws_04_0042.html @@ -0,0 +1,246 @@ + + +

Syntax Compatibility Differences Among Oracle, Teradata, and MySQL

+

GaussDB(DWS) is compatible with Oracle, Teradata, and MySQL syntax, but the syntax behavior differs among these compatibility modes.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Compatibility differences

Compatibility Item

+

Oracle

+

Teradata

+

MySQL

+

Empty string

+

An empty string is treated as NULL.

+

An empty string is distinguished from NULL.

+

An empty string is distinguished from NULL.

+

Conversion of an empty string to a number

+

NULL

+

0

+

0

+

Automatic truncation of overlong characters

+

Not supported

+

Supported (set GUC parameter td_compatible_truncation to ON)

+

Not supported

+

NULL concatenation

+

Returns a non-NULL object after combining a non-NULL object with NULL.

+

For example, 'abc'||NULL returns 'abc'.

+

To be compatible with the Teradata behavior, add the strict_text_concat_td option to the GUC parameter behavior_compat_options; concatenating with NULL then returns NULL.

+

For example, 'abc'||NULL returns NULL.

+

Compatible with MySQL behavior: concatenating with NULL returns NULL.

+

For example, 'abc'||NULL returns NULL.

+

Concatenation of the char(n) type

+

Removes spaces and placeholders on the right when the char(n) type is concatenated.

+

For example, cast('a' as char(3))||'b' returns 'ab'.

+

If the bpchar_text_without_rtrim option is added to the GUC parameter behavior_compat_options, concatenating the char(n) type retains trailing spaces and pads the string to the specified length n.

+

Currently, ignoring spaces at the end of a string for comparison is not supported. If the concatenated string contains spaces at the end, the comparison is space-sensitive.

+

For example, cast('a' as char(3))||'b' returns 'a b'.

+

Removes spaces and placeholders on the right.

+

concat(str1,str2)

+

Returns the concatenation of all non-NULL strings.

+

Returns the concatenation of all non-NULL strings.

+

If an input parameter is NULL, NULL is returned.

+

left and right processing of negative values

+

left returns all characters except the last |n| characters; right returns all characters except the first |n| characters.

+

left returns all characters except the last |n| characters; right returns all characters except the first |n| characters.

+

Returns an empty string.

+

lpad(string text, length int [, fill text])

+

rpad(string text, length int [, fill text])

+

Pads the string to the specified length with the fill characters (spaces by default): lpad pads on the left and rpad on the right. If the string is already longer than length, it is truncated (on the right). If fill is an empty string or length is a negative number, null is returned.

+

If fill is an empty string and the string length is less than the specified length, the original string is returned. If length is a negative number, an empty string is returned.

+

If fill is an empty string and the string length is less than the specified length, an empty string is returned. If length is a negative number, null is returned.

+

log(x)

+

Returns the logarithm with 10 as the base.

+

Returns the logarithm with 10 as the base.

+

Returns the natural logarithm.

+

mod(x, 0)

+

Returns x if the divisor is 0.

+

Returns x if the divisor is 0.

+

Reports an error if the divisor is 0.

+

Data type DATE

+

Converts the DATE data type to the TIMESTAMP data type which stores year, month, day, hour, minute, and second values.

+

Stores year, month, and day values.

+

Stores year, month, and day values.

+

to_char(date)

+

The maximum value of the input parameter can only be the maximum value of the timestamp type. The maximum value of the date type is not supported. The return value is of the timestamp type.

+

The maximum value of the input parameter can only be the maximum value of the timestamp type. The maximum value of the date type is not supported. The return value is of the date type in YYYY/MM/DD format. (The GUC parameter convert_empty_str_to_null_td is enabled.)

+

The maximum value of the input parameter can only be the maximum value of the timestamp type. The maximum value of the date type is not supported. The return value is of the date type.

+

to_date, to_timestamp, and to_number processing of empty strings

+

Returns NULL.

+

Returns NULL. (The convert_empty_str_to_null_td parameter is enabled.)

+

to_date and to_timestamp return NULL. If the parameter passed to to_number is an empty string, 0 is returned.

+

Return value types of last_day and next_day

+

Returns values of the timestamp type.

+

Returns values of the timestamp type.

+

Returns values of the date type.

+

Return value type of add_months

+

Returns values of the timestamp type.

+

Returns values of the timestamp type.

+

If the input parameter is of the date type, the return value is of the date type.

+

If the input parameter is of the timestamp type, the return value is of the timestamp type.

+

If the input parameter is of the timestamptz type, the return value is of the timestamptz type.

+

CURRENT_TIME

+

CURRENT_TIME(p)

+

Obtains the time of the current transaction. The return value type is timetz.

+

Obtains the time of the current transaction. The return value type is timetz.

+

Obtains the execution time of the current statement. The return value type is time.

+

CURRENT_TIMESTAMP

+

CURRENT_TIMESTAMP(p)

+

Obtains the execution time of the current statement. The return value type is timestamptz.

+

Obtains the execution time of the current statement. The return value type is timestamptz.

+

Obtains the execution time of the current statement. The return value type is timestamp.

+

LOCALTIME

+

LOCALTIME(p)

+

Obtains the time of the current transaction. The return value type is time.

+

Obtains the time of the current transaction. The return value type is time.

+

Obtains the execution time of the current statement. The return value type is time.

+

LOCALTIMESTAMP

+

LOCALTIMESTAMP(p)

+

Obtains the time of the current transaction. The return value type is timestamp.

+

Obtains the time of the current transaction. The return value type is timestamp.

+

Obtains the execution time of the current statement. The return value type is timestamp.

+

SYSDATE

+

SYSDATE(p)

+

Obtains the execution time of the current statement. The return value type is timestamp(0).

+

Obtains the execution time of the current statement. The return value type is timestamp(0).

+

Obtains the current system time. The return value type is timestamp(0).

+

NOW()

+

Obtains the time of the current transaction. The return value type is timestamptz.

+

Obtains the time of the current transaction. The return value type is timestamptz.

+

Obtains the statement execution time. The return value type is timestamptz.

+

Operator ^

+

Performs exponentiation.

+

Performs exponentiation.

+

Performs the exclusive OR operation.

+

Different input parameter types of CASE, COALESCE, IF, and IFNULL expressions

+

Reports an error.

+

Compatible with Teradata behavior: supports type conversion between numbers and strings. For example, if the input parameters for COALESCE are of the INT and VARCHAR types, they are resolved as the VARCHAR type.

+

Compatible with MySQL behavior: supports type conversion between strings and other types. For example, if the input parameters for COALESCE are of the DATE, INT, and VARCHAR types, they are resolved as the VARCHAR type.

+
+
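Many of the Teradata- and MySQL-compatible behaviors above are switched through the GUC options named in the table. A minimal session-level sketch using one option from the table (note that SET replaces the whole option list for the session):

SET behavior_compat_options = 'strict_text_concat_td';
SELECT 'abc' || NULL;  -- now returns NULL, matching the Teradata behavior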
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0043.html b/docs/dws/dev/dws_04_0043.html new file mode 100644 index 00000000..7b520a0c --- /dev/null +++ b/docs/dws/dev/dws_04_0043.html @@ -0,0 +1,18 @@ + + +

Database Security Management

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0053.html b/docs/dws/dev/dws_04_0053.html new file mode 100644 index 00000000..f682daaa --- /dev/null +++ b/docs/dws/dev/dws_04_0053.html @@ -0,0 +1,29 @@ + + +

Managing Users and Their Permissions

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0054.html b/docs/dws/dev/dws_04_0054.html new file mode 100644 index 00000000..d0a13347 --- /dev/null +++ b/docs/dws/dev/dws_04_0054.html @@ -0,0 +1,16 @@ + + +

Default Permission Mechanism

+

A user who creates an object is the owner of this object. By default, Separation of Permissions is disabled after cluster installation. A database system administrator has the same permissions as object owners. After an object is created, only the object owner or system administrator can query, modify, and delete the object, and grant permissions for the object to other users through GRANT by default.

+

To enable another user to use the object, grant required permissions to the user or the role that contains the user.

+

GaussDB(DWS) supports the following permissions: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, CREATE, CONNECT, EXECUTE, USAGE, and ANALYZE|ANALYSE. Permission types are associated with object types. For permission details, see GRANT.

+

To remove permissions, use REVOKE. Object owner permissions such as ALTER, DROP, GRANT, and REVOKE are implicit and cannot be granted or revoked. That is, you have the implicit permissions for an object if you are the owner of the object. Object owners can remove their own common permissions, for example, making tables read-only to themselves or others.

+
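For example, a minimal sketch of granting and then revoking a permission (store_sales follows the TPC-DS naming used in this guide; the user joe is illustrative):

GRANT SELECT ON TABLE store_sales TO joe;
REVOKE SELECT ON TABLE store_sales FROM joe;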

Each system catalog or view is visible either to system administrators only or to all users. System catalogs and views that require system administrator permissions can be queried only by system administrators. For details, see System Catalogs and System Views.

+

The database provides the object isolation feature. If this feature is enabled, users can view only the objects (tables, views, columns, and functions) that they have the permission to access. System administrators are not affected by this feature. For details, see ALTER DATABASE.

+
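A sketch of switching on object isolation for a database, assuming the ENABLE PRIVATE OBJECT clause described under ALTER DATABASE (the database name is illustrative):

ALTER DATABASE sales_db ENABLE PRIVATE OBJECT;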
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0055.html b/docs/dws/dev/dws_04_0055.html new file mode 100644 index 00000000..c5b21c9f --- /dev/null +++ b/docs/dws/dev/dws_04_0055.html @@ -0,0 +1,23 @@ + + +

System Administrator

+

A system administrator is an account with the SYSADMIN permission. After a cluster is installed, a system administrator has the permissions of all object owners by default.

+

The user dbadmin, appointed when the GaussDB(DWS) cluster is created, is a system administrator.

+

To create a database administrator, connect to the database as an administrator and run the CREATE USER or ALTER USER statement with SYSADMIN specified.

+
1
CREATE USER sysadmin WITH SYSADMIN password 'password';
+
+ +
+

Alternatively, you can run the following statement:

+
1
ALTER USER joe SYSADMIN;
+
+ +
+

The user specified in the ALTER USER statement must already exist.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0056.html b/docs/dws/dev/dws_04_0056.html new file mode 100644 index 00000000..ef665f3d --- /dev/null +++ b/docs/dws/dev/dws_04_0056.html @@ -0,0 +1,155 @@ + + +

Separation of Permissions

+

Descriptions in Default Permission Mechanism and System Administrator are about the initial situation after a cluster is created. By default, a system administrator with the SYSADMIN attribute has the highest-level permissions.

+

To avoid risks caused by centralized permissions, you can enable the separation of permissions to delegate system administrator permissions to security administrators and audit administrators.

+

After the separation of permissions is enabled, a system administrator does not have the CREATEROLE attribute (security administrator) or the AUDITADMIN attribute (audit administrator). That is, the system administrator cannot create roles or users and cannot view or maintain database audit logs. For details about the CREATEROLE and AUDITADMIN attributes, see CREATE ROLE.

+
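For example, a minimal sketch of creating the delegated administrators using the CREATEROLE and AUDITADMIN attributes described in CREATE ROLE (the names are illustrative):

CREATE USER secadmin WITH CREATEROLE PASSWORD 'password';
CREATE USER auditor WITH AUDITADMIN PASSWORD 'password';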

After the separation of permissions is enabled, system administrators have permissions only for the objects they own.

+

For details, see Separating Rights of Roles.

+

For details about permission changes before and after enabling the separation of permissions, see Table 1 and Table 2.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Default user permissions

Object

+

System Administrator

+

Security Administrator

+

Audit Administrator

+

Common User

+

Tablespace

+

Can create, modify, delete, access, and allocate tablespaces.

+

Cannot create, modify, delete, or allocate tablespaces; authorization is required to access tablespaces.

+

Table

+

Has permissions for all tables.

+

Has permissions for its own tables, but does not have permissions for other users' tables.

+

Index

+

Can create indexes on all tables.

+

Can create indexes on their own tables.

+

Schema

+

Has permissions for all schemas.

+

Has all permissions for its own schemas, but does not have permissions for other users' schemas.

+

Function

+

Has permissions for all functions.

+

Has permissions for its own functions, has the call permission for other users' functions in the public schema, but does not have permissions for other users' functions in other schemas.

+

Customized view

+

Has permissions for all views.

+

Has permissions for its own views, but does not have permissions for other users' views.

+

System catalog and system view

+

Has permissions for querying all system catalogs and views.

+

Has permissions for querying only some system catalogs and views. For details, see System Catalogs and System Views.

+
+
Table 2 Changes in permissions after the separation of permissions

Object | System Administrator | Security Administrator / Audit Administrator | Common User
------ | -------------------- | -------------------------------------------- | -----------
Tablespace | No change | No change | No change
Table | Permissions reduced: has all permissions for its own tables, but does not have permissions for other users' tables in their schemas. | No change | No change
Index | Permissions reduced: can create indexes on its own tables. | No change | No change
Schema | Permissions reduced: has all permissions for its own schemas, but does not have permissions for other users' schemas. | No change | No change
Function | Permissions reduced: has all permissions for its own functions, but does not have permissions for other users' functions in their schemas. | No change | No change
Customized view | Permissions reduced: has all permissions for its own views and other users' views in the public schema, but does not have permissions for other users' views in their schemas. | No change | No change
System catalog and system view | No change | No change | Has no permission for viewing any system catalogs or views.
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0057.html b/docs/dws/dev/dws_04_0057.html new file mode 100644 index 00000000..d0e680cc --- /dev/null +++ b/docs/dws/dev/dws_04_0057.html @@ -0,0 +1,36 @@ + + +

Users

+

You can use CREATE USER and ALTER USER to create and manage database users, respectively. The database cluster has one or more named databases. Users and roles are shared within the entire cluster, but their data is not shared. That is, a user can connect to any database, but after the connection succeeds, the user can access only the database declared in the connection request.

+

In non-separation-of-duty scenarios, a GaussDB(DWS) user account can be created and deleted only by a system administrator or a security administrator with the CREATEROLE attribute. In separation-of-duty scenarios, a user account can be created only by a security administrator.

+

When a user logs in, GaussDB(DWS) authenticates the user. A user can own databases and database objects (such as tables), and grant permissions of these objects to other users and roles. In addition to system administrators, users with the CREATEDB attribute can create databases and grant permissions to these databases.
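For example, the following sketch (hypothetical user name joe2) creates a user who can log in and create databases:

CREATE USER joe2 WITH CREATEDB PASSWORD 'password';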

+

Adding, Modifying, and Deleting Users

+
+

Private Users

If multiple service departments use different database user accounts to perform service operations, and a database maintenance department at the same level uses database administrator accounts for maintenance operations, the service departments may require that database administrators be able to manage (DROP, ALTER, and TRUNCATE) their data without being able to access (INSERT, DELETE, UPDATE, SELECT, and COPY) it unless specifically authorized. That is, the management permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users.

+

In Separation of Permissions mode, a database administrator does not have permissions for the tables in schemas of other users. In this case, database administrators have neither management permissions nor access permissions, which does not meet the requirements of the service departments mentioned above. Therefore, GaussDB(DWS) provides private users to solve the problem. That is, create private users with the INDEPENDENT attribute in non-separation-of-duties mode.

+
1
CREATE USER user_independent WITH INDEPENDENT IDENTIFIED BY "password";
+
+ +
+

Database administrators can manage (DROP, ALTER, and TRUNCATE) objects of private users but cannot access (INSERT, DELETE, SELECT, UPDATE, COPY, GRANT, REVOKE, and ALTER OWNER) the objects before being authorized.
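A sketch of such authorization, assuming the private user owns a hypothetical table tb1 in its own schema:

SET ROLE user_independent PASSWORD 'password';
GRANT SELECT ON TABLE user_independent.tb1 TO dbadmin;   -- now dbadmin can read this table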

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0058.html b/docs/dws/dev/dws_04_0058.html new file mode 100644 index 00000000..01883662 --- /dev/null +++ b/docs/dws/dev/dws_04_0058.html @@ -0,0 +1,20 @@ + + +

Roles

+

A role is a set of permissions. After a role is granted to a user through GRANT, the user will have all the permissions of the role. It is recommended that roles be used to efficiently grant permissions. For example, you can create different roles of design, development, and maintenance personnel, grant the roles to users, and then grant specific data permissions required by different users. When permissions are granted or revoked at the role level, these changes take effect on all members of the role.
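A sketch of this workflow, with hypothetical names dev_role, project_tbl, and joe:

CREATE ROLE dev_role PASSWORD 'password';
GRANT SELECT, INSERT ON TABLE project_tbl TO dev_role;   -- attach permissions to the role
GRANT dev_role TO joe;                                   -- joe inherits the role's permissions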

+

GaussDB(DWS) provides an implicitly defined group PUBLIC that contains all roles. By default, all new users and roles have the permissions of PUBLIC. For details about the default permissions of PUBLIC, see GRANT. To revoke permissions of PUBLIC from a user or role, or re-grant these permissions to them, add the PUBLIC keyword in the REVOKE or GRANT statement.

+

To view all roles, query the system catalog PG_ROLES.

+
1
SELECT * FROM PG_ROLES;
+
+ +
+

Adding, Modifying, and Deleting Roles

In non-separation-of-duty scenarios, a role can be created, modified, and deleted only by a system administrator or a user with the CREATEROLE attribute. In separation-of-duty scenarios, a role can be created, modified, and deleted only by a user with the CREATEROLE attribute.
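The basic lifecycle statements are sketched below (manager and lead are hypothetical role names):

CREATE ROLE manager PASSWORD 'password';
ALTER ROLE manager RENAME TO lead;
DROP ROLE lead;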

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0059.html b/docs/dws/dev/dws_04_0059.html new file mode 100644 index 00000000..afbf34f7 --- /dev/null +++ b/docs/dws/dev/dws_04_0059.html @@ -0,0 +1,52 @@ + + +

Schema

+

Schemas function as namespaces. Schema management allows multiple users to use the same database without mutual impact, organizes database objects into manageable logical groups, and allows third-party applications to be added to separate schemas without causing naming conflicts.

+

Each database has one or more schemas. Each schema contains tables and other types of objects. When a database is created, a schema named public is created by default, and all users have permissions for this schema. You can group database objects by schema. A schema is similar to an OS directory but cannot be nested.

+

The same database object name can be used in different schemas of the same database without causing conflicts. For example, both a_schema and b_schema can contain a table named mytable. Users with required permissions can access objects across multiple schemas of the same database.
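For example (a sketch with hypothetical names):

CREATE SCHEMA a_schema;
CREATE SCHEMA b_schema;
CREATE TABLE a_schema.mytable(id int);   -- both schemas can hold a table named mytable
CREATE TABLE b_schema.mytable(id int);
SELECT * FROM b_schema.mytable;          -- access objects across schemas by qualified name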

+

If a user is created, a schema named after the user will also be created in the current database.

+

Database objects are generally created in the first schema in a database search path. For details about the first schema and how to change the schema order, see Search Path.

+

Creating, Modifying, and Deleting Schemas

+
+

Search Path

A search path is defined in the search_path parameter. The parameter value is a list of schema names separated by commas (,). If no target schema is specified during object creation, the object will be added to the first schema listed in the search path. If there are objects with the same name across different schemas and no schema is specified for an object query, the object will be returned from the first schema containing the object in the search path.
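A short sketch of inspecting and changing the search path for the current session (schema names are hypothetical):

SHOW search_path;                                -- typically "$user",public
SET search_path TO b_schema, a_schema, public;   -- new objects now go to b_schema by default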

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0060.html b/docs/dws/dev/dws_04_0060.html new file mode 100644 index 00000000..e6c6ffb2 --- /dev/null +++ b/docs/dws/dev/dws_04_0060.html @@ -0,0 +1,38 @@ + + +

User Permission Setting

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0061.html b/docs/dws/dev/dws_04_0061.html new file mode 100644 index 00000000..28f1ee91 --- /dev/null +++ b/docs/dws/dev/dws_04_0061.html @@ -0,0 +1,182 @@ + + +

Row-Level Access Control

+

The row-level access control feature enables database access control to be accurate to each row of data tables. In this way, the same SQL query may return different results for different users.

+

You can create a row-level access control policy for a data table. The policy defines an expression that takes effect only for specific database users and SQL operations. When a database user accesses the data table, if an SQL statement matches the table's row-level access control policies, the expressions that meet the specified condition are combined using AND or OR according to the policy attribute type (PERMISSIVE | RESTRICTIVE) and applied to the execution plan in the query optimization phase.

+

Row-level access control is used to control the visibility of row-level data in tables. By predefining filters for data tables, the expressions that meet the specified condition can be applied to execution plans in the query optimization phase, which will affect the final execution result. Currently, the SQL statements that can be affected include SELECT, UPDATE, and DELETE.

+

Scenario 1: A table summarizes the data of different users. Users can view only their own data.

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
-- Create users alice, bob, and peter.
+CREATE ROLE alice PASSWORD 'password';
+CREATE ROLE bob PASSWORD 'password';
+CREATE ROLE peter PASSWORD 'password';
+
+-- Create the public.all_data table that contains user information.
+CREATE TABLE public.all_data(id int, role varchar(100), data varchar(100));
+
+-- Insert data into the data table.
+INSERT INTO all_data VALUES(1, 'alice', 'alice data');
+INSERT INTO all_data VALUES(2, 'bob', 'bob data');
+INSERT INTO all_data VALUES(3, 'peter', 'peter data');
+
+-- Grant the read permission for the all_data table to users alice, bob, and peter.
+GRANT SELECT ON all_data TO alice, bob, peter;
+
+-- Enable row-level access control.
+ALTER TABLE all_data ENABLE ROW LEVEL SECURITY;
+
+-- Create a row-level access control policy to specify that the current user can view only their own data.
+CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER);
+
+-- View table details.
+\d+ all_data
+                               Table "public.all_data"
+ Column |          Type          | Modifiers | Storage  | Stats target | Description
+--------+------------------------+-----------+----------+--------------+-------------
+ id     | integer                |           | plain    |              |
+ role   | character varying(100) |           | extended |              |
+ data   | character varying(100) |           | extended |              |
+Row Level Security Policies:
+    POLICY "all_data_rls"
+      USING (((role)::name = "current_user"()))
+Has OIDs: no
+Distribute By: HASH(id)
+Location Nodes: ALL DATANODES
+Options: orientation=row, compression=no, enable_rowsecurity=true
+
+-- Switch to user alice and run SELECT * FROM all_data.
+SET ROLE alice PASSWORD 'password';
+SELECT * FROM all_data;
+ id | role  |    data
+----+-------+------------
+  1 | alice | alice data
+(1 row)
+
+EXPLAIN(COSTS OFF) SELECT * FROM all_data;
+                           QUERY PLAN
+----------------------------------------------------------------
+  id |          operation
+ ----+------------------------------
+   1 | ->  Streaming (type: GATHER)
+   2 |    ->  Seq Scan on all_data
+
+         Predicate Information (identified by plan id)
+ --------------------------------------------------------------
+   2 --Seq Scan on all_data
+         Filter: ((role)::name = 'alice'::name)
+ Notice: This query is influenced by row level security feature
+(10 rows)
+
+-- Switch to user peter and run SELECT * FROM all_data.
+SET ROLE peter PASSWORD 'password';
+SELECT * FROM all_data;
+ id | role  |    data
+----+-------+------------
+  3 | peter | peter data
+(1 row)
+
+EXPLAIN(COSTS OFF) SELECT * FROM all_data;
+                           QUERY PLAN
+----------------------------------------------------------------
+  id |          operation
+ ----+------------------------------
+   1 | ->  Streaming (type: GATHER)
+   2 |    ->  Seq Scan on all_data
+
+         Predicate Information (identified by plan id)
+ --------------------------------------------------------------
+   2 --Seq Scan on all_data
+         Filter: ((role)::name = 'peter'::name)
+ Notice: This query is influenced by row level security feature
+(10 rows)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0062.html b/docs/dws/dev/dws_04_0062.html new file mode 100644 index 00000000..7eb887e5 --- /dev/null +++ b/docs/dws/dev/dws_04_0062.html @@ -0,0 +1,114 @@ + + +

Data Redaction

+

GaussDB(DWS) provides the column-level dynamic data masking (DDM) function. For sensitive data, such as the ID card number, mobile number, and bank card number, the DDM function is used to redact the original data to protect data security and user privacy.

+ +
  • Generally, you can execute a SELECT statement to view the data redaction result. If the statement has the following features, sensitive data may be deliberately obtained. In this case, an error will be reported during statement execution.
    • The GROUP BY clause references a Target Entry that contains redacted columns as the target column.
    • The DISTINCT clause is executed on the output redacted columns.
    • The statement contains CTE.
    • Set operations are involved.
    • The target columns of a subquery are not redacted columns of the base table, but are the expressions or function calls for the redacted columns of the base table.
    +
  • You can use COPY TO or GDS to export the redacted data. As the redacted data is irreversible, any secondary operation on the redacted data is meaningless.
  • The target columns of UPDATE, MERGE INTO, and DELETE statements cannot contain redacted columns.
  • The UPSERT statement allows you to update data using EXCLUDED. If data in the base table is updated by referencing redacted columns, the data may be modified by mistake. As a result, an error will be reported during the execution.
+
+

Examples

The following uses the employee table emp, administrator alice, and common users matu and july as examples to describe the data redaction process. The user alice is the owner of the emp table. The emp table contains private data such as the employee name, mobile number, email address, bank card number, and salary.

+
  1. Create users alice, matu, and july:
    CREATE ROLE alice PASSWORD 'password';
    +CREATE ROLE matu PASSWORD 'password';
    +CREATE ROLE july PASSWORD 'password';
    +
  2. Create the emp table as user alice, and insert three employee records into the table.
    CREATE TABLE emp(id int, name varchar(20), phone_no varchar(11), card_no number, card_string varchar(19), email text, salary numeric(100, 4), birthday date);
    +
    +INSERT INTO emp VALUES(1, 'anny', '13420002340', 1234123412341234, '1234-1234-1234-1234', 'smithWu@163.com', 10000.00, '1999-10-02');
    +INSERT INTO emp VALUES(2, 'bob', '18299023211', 3456345634563456, '3456-3456-3456-3456', '66allen_mm@qq.com', 9999.99, '1989-12-12');
    +INSERT INTO emp VALUES(3, 'cici', '15512231233', NULL, NULL, 'jonesishere@sina.com', NULL, '1992-11-06');
    +
  3. User alice grants the emp table read permission to users matu and july.
    GRANT SELECT ON emp TO matu, july;
    +
  4. Only user alice can view all employee information. Users matu and july cannot view bank card numbers and salary data of the employees. Create a redaction policy for the emp table and bind the redaction function to card_no, card_string, and salary columns, respectively.
    CREATE REDACTION POLICY mask_emp ON emp WHEN (current_user IN ('matu', 'july'))
    +ADD COLUMN card_no WITH mask_full(card_no),
    +ADD COLUMN card_string WITH mask_partial(card_string, 'VVVVFVVVVFVVVVFVVVV','VVVV-VVVV-VVVV-VVVV','#',1,12),
    +ADD COLUMN salary WITH mask_partial(salary, '9', 1, length(salary) - 2);
    +
  5. Switch to users matu and july and view the emp table, respectively.
    SET ROLE matu PASSWORD 'password';
    +SELECT * FROM emp;
    + id | name |  phone_no   | card_no |     card_string     |        email         |   salary   |      birthday       
    +----+------+-------------+---------+---------------------+----------------------+------------+---------------------
    +  1 | anny | 13420002340 |       0 | ####-####-####-1234 | smithWu@163.com      | 99999.9990 | 1999-10-02 00:00:00
    +  2 | bob  | 18299023211 |       0 | ####-####-####-3456 | 66allen_mm@qq.com    |  9999.9990 | 1989-12-12 00:00:00
    +  3 | cici | 15512231233 |         |                     | jonesishere@sina.com |            | 1992-11-06 00:00:00
    +(3 rows)
    +
    +SET ROLE july PASSWORD 'password';
    +SELECT * FROM emp;
    + id | name |  phone_no   | card_no |     card_string     |        email         |   salary   |      birthday       
    +----+------+-------------+---------+---------------------+----------------------+------------+---------------------
    +  1 | anny | 13420002340 |       0 | ####-####-####-1234 | smithWu@163.com      | 99999.9990 | 1999-10-02 00:00:00
    +  2 | bob  | 18299023211 |       0 | ####-####-####-3456 | 66allen_mm@qq.com    |  9999.9990 | 1989-12-12 00:00:00
    +  3 | cici | 15512231233 |         |                     | jonesishere@sina.com |            | 1992-11-06 00:00:00
    +(3 rows)
    +
  6. User matu also has the permission for viewing all employee information, but user july does not. Modify the effective scope of the redaction policy.
    ALTER REDACTION POLICY mask_emp ON emp WHEN(current_user = 'july');
    +
  7. Switch to users matu and july and view the emp table again, respectively.
    SET ROLE matu PASSWORD 'password';
    +SELECT * FROM emp;
    + id | name |  phone_no   |     card_no      |     card_string     |        email         |   salary   |      birthday       
    +----+------+-------------+------------------+---------------------+----------------------+------------+---------------------
    +  1 | anny | 13420002340 | 1234123412341234 | 1234-1234-1234-1234 | smithWu@163.com      | 10000.0000 | 1999-10-02 00:00:00
    +  2 | bob  | 18299023211 | 3456345634563456 | 3456-3456-3456-3456 | 66allen_mm@qq.com    |  9999.9900 | 1989-12-12 00:00:00
    +  3 | cici | 15512231233 |                  |                     | jonesishere@sina.com |            | 1992-11-06 00:00:00
    +(3 rows)
    +
    +SET ROLE july PASSWORD 'password';
    +SELECT * FROM emp;
    + id | name |  phone_no   | card_no |     card_string     |        email         |   salary   |      birthday       
    +----+------+-------------+---------+---------------------+----------------------+------------+---------------------
    +  1 | anny | 13420002340 |       0 | ####-####-####-1234 | smithWu@163.com      | 99999.9990 | 1999-10-02 00:00:00
    +  2 | bob  | 18299023211 |       0 | ####-####-####-3456 | 66allen_mm@qq.com    |  9999.9990 | 1989-12-12 00:00:00
    +  3 | cici | 15512231233 |         |                     | jonesishere@sina.com |            | 1992-11-06 00:00:00
    +(3 rows)
    +
  8. The information in the phone_no, email, and birthday columns is private data. Update the redaction policy mask_emp and add three redacted columns.
    ALTER REDACTION POLICY mask_emp ON emp ADD COLUMN phone_no WITH mask_partial(phone_no, '*', 4);
    +ALTER REDACTION POLICY mask_emp ON emp ADD COLUMN email WITH mask_partial(email, '*', 1, position('@' in email));
    +ALTER REDACTION POLICY mask_emp ON emp ADD COLUMN birthday WITH mask_full(birthday);
    +
  9. Switch to user july and view the emp table data.
    SET ROLE july PASSWORD 'password';
    +SELECT * FROM emp;
    + id | name |  phone_no   | card_no |     card_string     |        email         |   salary   |      birthday       
    +----+------+-------------+---------+---------------------+----------------------+------------+---------------------
    +  1 | anny | 134******** |       0 | ####-####-####-1234 | ********163.com      | 99999.9990 | 1970-01-01 00:00:00
    +  2 | bob  | 182******** |       0 | ####-####-####-3456 | ***********qq.com    |  9999.9990 | 1970-01-01 00:00:00
    +  3 | cici | 155******** |         |                     | ************sina.com |            | 1970-01-01 00:00:00
    +(3 rows)
    +
  10. Query redaction_policies and redaction_columns to view details about the current redaction policy mask_emp.
    SELECT * FROM redaction_policies;
    + object_schema | object_owner | object_name | policy_name |            expression             | enable | policy_description 
    +---------------+--------------+-------------+-------------+-----------------------------------+--------+--------------------
    + public        | alice        | emp         | mask_emp    | ("current_user"() = 'july'::name) | t      | 
    +(1 row)
    +
    +SELECT object_name, column_name, function_info FROM redaction_columns;
    + object_name | column_name |                                             function_info                                             
    +-------------+-------------+-------------------------------------------------------------------------------------------------------
    + emp         | card_no     | mask_full(card_no)
    + emp         | card_string | mask_partial(card_string, 'VVVVFVVVVFVVVVFVVVV'::text, 'VVVV-VVVV-VVVV-VVVV'::text, '#'::text, 1, 12)
    + emp         | email       | mask_partial(email, '*'::text, 1, "position"(email, '@'::text))
    + emp         | salary      | mask_partial(salary, '9'::text, 1, (length((salary)::text) - 2))
    + emp         | birthday    | mask_full(birthday)
    + emp         | phone_no    | mask_partial(phone_no, '*'::text, 4)
    +(6 rows)
    +
  11. Add the salary_info column. To replace the salary information in text format with *.*, you can create a user-defined redaction function. In this step, the redaction function mask_regexp_salary is defined as a simple SQL function; to create the redaction column, you simply need to customize the function name and parameter list. For details, see User-Defined Functions.
    ALTER TABLE emp ADD COLUMN salary_info TEXT;
    +UPDATE emp SET salary_info = salary::text;
    +
    +CREATE FUNCTION mask_regexp_salary(salary_info text) RETURNS text AS
    +$$
    + SELECT regexp_replace($1, '[0-9]+','*','g');
    +$$
    +LANGUAGE SQL
    +STRICT SHIPPABLE;
    +
    +ALTER REDACTION POLICY mask_emp ON emp ADD COLUMN salary_info WITH mask_regexp_salary(salary_info);
    +
    +SET ROLE july PASSWORD 'password';
    +SELECT id, name, salary_info FROM emp;
    + id | name | salary_info 
    +----+------+-------------
    +  1 | anny | *.*
    +  2 | bob  | *.*
    +  3 | cici | 
    +(3 rows)
    +
  12. If there is no need to set a redaction policy for the emp table, delete the redaction policy mask_emp.
    DROP REDACTION POLICY mask_emp ON emp;
    +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0063.html b/docs/dws/dev/dws_04_0063.html new file mode 100644 index 00000000..7bc8e0e7 --- /dev/null +++ b/docs/dws/dev/dws_04_0063.html @@ -0,0 +1,19 @@ + + +

Setting Security Policies

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0064.html b/docs/dws/dev/dws_04_0064.html new file mode 100644 index 00000000..39a31ca3 --- /dev/null +++ b/docs/dws/dev/dws_04_0064.html @@ -0,0 +1,35 @@ + + +

Setting Account Security Policies

+

Background

For data security purposes, GaussDB(DWS) provides a series of security measures, such as automatically locking and unlocking accounts, manually locking and unlocking abnormal accounts, and deleting accounts that are no longer used.

+
+

Automatically Locking and Unlocking Accounts

+
+

Manually Locking and Unlocking Accounts

If administrators detect an abnormal account that may be stolen or illegally accesses the database, they can manually lock the account.

+

The administrator can also manually unlock the account if the account becomes normal again.

+

For details about how to create a user, see Users. To manually lock and unlock user joe, run commands in the following format:
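The statements below sketch that format, assuming the ALTER USER ... ACCOUNT LOCK/UNLOCK syntax:

ALTER USER joe ACCOUNT LOCK;     -- manually lock the account
ALTER USER joe ACCOUNT UNLOCK;   -- manually unlock it once the account is confirmed normal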

+ +
+

Deleting Accounts that Are No Longer Used

An administrator can delete an account that is no longer used. This operation cannot be rolled back.

+

When an account to be deleted is in the active state, it is deleted after the session is disconnected.

+

For example, if you want to delete account joe, run the command in the following format:

+
1
DROP USER joe CASCADE;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0065.html b/docs/dws/dev/dws_04_0065.html new file mode 100644 index 00000000..9e4f6ac8 --- /dev/null +++ b/docs/dws/dev/dws_04_0065.html @@ -0,0 +1,25 @@ + + +

Setting the Validity Period of an Account

+

Precautions

When creating a user, you need to specify the validity period of the user, including the start time and end time.

+

To enable a user not within the validity period to use its account, set a new validity period.

+
+

Procedure

  1. Run the following command to create a user and specify the start time and end time.

    1
    CREATE USER joe WITH PASSWORD 'password' VALID BEGIN '2015-10-10 08:00:00' VALID UNTIL '2016-10-10 08:00:00';
    +
    + +
    +

  2. If the user is not within the specified validity period, run the following command to set the start time and end time of a new validity period.

    1
    ALTER USER joe WITH VALID BEGIN '2016-11-10 08:00:00' VALID UNTIL '2017-11-10 08:00:00';
    +
    + +
    +

+

If VALID BEGIN is not specified in the CREATE ROLE or ALTER ROLE statement, the start time of the validity period is not limited. If VALID UNTIL is not specified, the end time of the validity period is not limited. If both of the parameters are not specified, the user is always valid.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0066.html b/docs/dws/dev/dws_04_0066.html new file mode 100644 index 00000000..afd91c73 --- /dev/null +++ b/docs/dws/dev/dws_04_0066.html @@ -0,0 +1,20 @@ + + +

Overview

+

MRS is a big data cluster running based on the open-source Hadoop ecosystem. It provides cutting-edge storage and analytical capabilities for massive volumes of data, satisfying your data storage and processing requirements. For details, see the MapReduce Service User Guide.

+

You can use Hive/Spark (analysis cluster of MRS) to store massive volumes of service data. Hive/Spark data files are stored on HDFS. On GaussDB(DWS), you can connect a GaussDB(DWS) cluster to an MRS cluster, read data from HDFS files, and write the data to GaussDB(DWS) when the clusters are on the same network.

+

Ensure that MRS can communicate with DWS:

+

Scenario 1: If MRS and DWS are in the same region and VPC, they can communicate with each other by default.

+

Scenario 2: If MRS and DWS are in the same region but in different VPCs, you need to create a VPC peering connection. For details, see "VPC Peering Connection Overview" in Virtual Private Cloud User Guide.

+

Scenario 3: If MRS and DWS are not in the same region, use Cloud Connect (CC) to create network connections. For details, see the user guide of the corresponding service.

+

Scenario 4: If MRS is deployed on-premises, you need to use Direct Connect (DC) or Virtual Private Network (VPN) to create network connections. For details, see the user guide of the corresponding service.

+
+

Importing Data from MRS to a GaussDB(DWS) Cluster

  1. Preparing Data in an MRS Cluster
  2. (Optional) Manually Creating a Foreign Server
  3. Creating a Foreign Table
  4. Importing Data
  5. Deleting Resources
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0067.html b/docs/dws/dev/dws_04_0067.html new file mode 100644 index 00000000..70892859 --- /dev/null +++ b/docs/dws/dev/dws_04_0067.html @@ -0,0 +1,202 @@ + + +

Setting a User Password

+

User passwords are stored in the system catalog pg_authid. To prevent password leakage, GaussDB(DWS) encrypts and stores the user passwords.

+ + +
Table 1 Special characters

The following special characters are supported: ~ ! @ # $ % ^ & * ( ) - _ = + \ | [ { } ] ; : , < . > / ?
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0074.html b/docs/dws/dev/dws_04_0074.html new file mode 100644 index 00000000..6cdc50b3 --- /dev/null +++ b/docs/dws/dev/dws_04_0074.html @@ -0,0 +1,27 @@ + + +

Development and Design Proposal

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0075.html b/docs/dws/dev/dws_04_0075.html new file mode 100644 index 00000000..ea886ac8 --- /dev/null +++ b/docs/dws/dev/dws_04_0075.html @@ -0,0 +1,13 @@ + + +

Development and Design Proposal

+

This chapter describes the design specifications for database modeling and application development. Modeling compliant with these specifications fits the distributed processing architecture of GaussDB(DWS) and provides efficient SQL code.

+

The meaning of "Proposal" and "Notice" in this chapter is as follows:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0076.html b/docs/dws/dev/dws_04_0076.html new file mode 100644 index 00000000..b491d1ad --- /dev/null +++ b/docs/dws/dev/dws_04_0076.html @@ -0,0 +1,16 @@ + + +

Database Object Naming Conventions

+

The name of a database object must contain 1 to 63 characters, start with a letter or underscore (_), and can contain letters, digits, underscores (_), dollar signs ($), and number signs (#).

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0077.html b/docs/dws/dev/dws_04_0077.html new file mode 100644 index 00000000..ed25866a --- /dev/null +++ b/docs/dws/dev/dws_04_0077.html @@ -0,0 +1,23 @@ + + +

Database Object Design

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0078.html b/docs/dws/dev/dws_04_0078.html new file mode 100644 index 00000000..fb33fc70 --- /dev/null +++ b/docs/dws/dev/dws_04_0078.html @@ -0,0 +1,16 @@ + + +

Database and Schema Design

+

In GaussDB(DWS), services can be isolated by databases and schemas. Databases share little resources and cannot directly access each other. Connections to and permissions on them are also isolated. Schemas share more resources than databases do. User permissions on schemas and subordinate objects can be controlled using the GRANT and REVOKE syntax.
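For example, schema-level access can be sketched as follows (app_schema and app_user are hypothetical names):

GRANT USAGE ON SCHEMA app_schema TO app_user;                  -- allow the user to reference objects in the schema
GRANT SELECT ON ALL TABLES IN SCHEMA app_schema TO app_user;   -- read access to existing tables
REVOKE CREATE ON SCHEMA app_schema FROM app_user;              -- forbid creating new objects there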

+ +

Database Design Suggestions

+
+

Schema Design Suggestions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0079.html b/docs/dws/dev/dws_04_0079.html new file mode 100644 index 00000000..de397c74 --- /dev/null +++ b/docs/dws/dev/dws_04_0079.html @@ -0,0 +1,111 @@ + + +

Table Design

+

GaussDB(DWS) uses a distributed architecture. Data is distributed on DNs. Comply with the following principles to properly design a table:

+ +

Selecting a Storage Mode

[Proposal] Selecting a storage mode is the first step in defining a table. The storage mode mainly depends on the customer's service type. For details, see Table 1.

+ +
+ + + + + + + + + + +
Table 1 Table storage modes and scenarios

Storage Mode

+

Application Scenarios

+

Row storage

+
  • Point queries (simple index-based queries that only return a few records)
  • Scenarios requiring frequent addition, deletion, and modification
+

Column storage

+
  • Statistical analysis queries (requiring a large number of association and grouping operations)
  • Ad hoc queries (using uncertain query conditions and unable to utilize indexes to scan row-store tables)
+
+
+
+

Selecting a Distribution Mode

[Proposal] Comply with the following rules to distribute table data. +
+ + + + + + + + + + + + + +
Table 2 Table distribution modes and scenarios

Distribution Mode

+

Description

+

Application Scenarios

+

Hash

+

Table data is distributed on all DNs in a cluster by hash.

+

Fact tables containing a large amount of data

+

Replication

+

Full data in a table is stored on every DN in a cluster.

+

Dimension tables and fact tables containing a small amount of data

+
+
+
+
+

Selecting a Partitioning Mode

Comply with the following rules to partition a table containing a large amount of data:

+ +

The example of a partitioned table definition is as follows:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
CREATE TABLE staffS_p1
+(
+  staff_ID       NUMBER(6) not null,
+  FIRST_NAME     VARCHAR2(20),
+  LAST_NAME      VARCHAR2(25),
+  EMAIL          VARCHAR2(25),
+  PHONE_NUMBER   VARCHAR2(20),
+  HIRE_DATE      DATE,
+  employment_ID  VARCHAR2(10),
+  SALARY         NUMBER(8,2),
+  COMMISSION_PCT NUMBER(4,2),
+  MANAGER_ID     NUMBER(6),
+  section_ID     NUMBER(4)
+)
+PARTITION BY RANGE (HIRE_DATE)
+( 
+   PARTITION HIRE_19950501 VALUES LESS THAN ('1995-05-01 00:00:00'),
+   PARTITION HIRE_19950502 VALUES LESS THAN ('1995-05-02 00:00:00'),
+   PARTITION HIRE_maxvalue VALUES LESS THAN (MAXVALUE)
+);
+
+ +
+
+

Selecting a Distribution Key

Selecting a distribution key is important for a hash table. An improper distribution key may cause data skew. As a result, the I/O load is heavy on several DNs, affecting the overall query performance. After you select a distribution policy for a hash table, check for data skew to ensure that data is evenly distributed. Comply with the following rules to select a distribution key:
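Before the rules, a sketch of the overall idea (table and column names are hypothetical; table_skewness() is assumed to be the DWS helper for checking distribution):

CREATE TABLE sales_fact
(
  order_id bigint,
  cust_id  int,
  amount   numeric(12,2)
)
DISTRIBUTE BY HASH(order_id);          -- distribute on a high-cardinality column
SELECT table_skewness('sales_fact');   -- check row distribution across DNs after loading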

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0080.html b/docs/dws/dev/dws_04_0080.html new file mode 100644 index 00000000..25ce142d --- /dev/null +++ b/docs/dws/dev/dws_04_0080.html @@ -0,0 +1,92 @@ + + +

Column Design

+

Selecting a Data Type

Comply with the following rules to improve query efficiency when you design columns:

+ +
+

Common String Types

Every column requires a data type suitable for its data characteristics. The following table lists common string types in GaussDB(DWS).

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Common string types

Parameter

+

Description

+

Max. Storage Capacity

+

CHAR(n)

+

Fixed-length string, where n indicates the stored bytes. If the length of an input string is smaller than n, the string is automatically padded to n bytes using NULL characters.

+

10 MB

+

CHARACTER(n)

+

Fixed-length string, where n indicates the stored bytes. If the length of an input string is smaller than n, the string is automatically padded to n bytes using NULL characters.

+

10 MB

+

NCHAR(n)

+

Fixed-length string, where n indicates the stored bytes. If the length of an input string is smaller than n, the string is automatically padded to n bytes using NULL characters.

+

10 MB

+

BPCHAR(n)

+

Fixed-length string, where n indicates the stored bytes. If the length of an input string is smaller than n, the string is automatically padded to n bytes using NULL characters.

+

10 MB

+

VARCHAR(n)

+

Variable-length string, where n indicates the maximum number of bytes that can be stored.

+

10 MB

+

CHARACTER VARYING(n)

+

Variable-length string, where n indicates the maximum number of bytes that can be stored. This data type and VARCHAR(n) are different representations of the same data type.

+

10 MB

+

VARCHAR2(n)

+

Variable-length string, where n indicates the maximum number of bytes that can be stored. This data type is added to be compatible with the Oracle database, and its behavior is the same as that of VARCHAR(n).

+

10 MB

+

NVARCHAR2(n)

+

Variable-length string, where n indicates the maximum number of bytes that can be stored.

+

10 MB

+

TEXT

+

Variable-length string, with a maximum length of 1 GB minus 8203 bytes.

+

1 GB minus 8203 bytes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0081.html b/docs/dws/dev/dws_04_0081.html new file mode 100644 index 00000000..969e7d95 --- /dev/null +++ b/docs/dws/dev/dws_04_0081.html @@ -0,0 +1,21 @@ + + +

Constraint Design

+

DEFAULT and NULL Constraints

+
+

Partial Cluster Key

A partial cluster key (PCK) is a local clustering technology used for column-store tables. After creating a PCK, you can quickly filter and scan fact tables using min or max sparse indexes in GaussDB(DWS). Comply with the following rules to create a PCK:

+ +
+

Unique Constraint

+
+

Primary Key Constraint

+
+

Check Constraint

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0082.html b/docs/dws/dev/dws_04_0082.html new file mode 100644 index 00000000..8322f025 --- /dev/null +++ b/docs/dws/dev/dws_04_0082.html @@ -0,0 +1,14 @@ + + +

View and Joined Table Design

+

View Design

+
+

Joined Table Design

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0083.html b/docs/dws/dev/dws_04_0083.html new file mode 100644 index 00000000..1b4497c1 --- /dev/null +++ b/docs/dws/dev/dws_04_0083.html @@ -0,0 +1,35 @@ + + +

JDBC Configuration

+

Currently, third-party tools are connected to GaussDB(DWS) through JDBC. This section describes the precautions for configuring the tools.

+

Connection Parameters

+
+

fetchsize

[Notice] To use fetchsize in applications, disable the autocommit switch. Enabling the autocommit switch makes the fetchsize configuration invalid.
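A minimal Java sketch, assuming an open Connection conn and the table customer_t1:

conn.setAutoCommit(false);   // fetchsize is ignored while autocommit is enabled
Statement stmt = conn.createStatement();
stmt.setFetchSize(1000);     // fetch 1000 rows per round trip instead of the whole result
ResultSet rs = stmt.executeQuery("SELECT * FROM customer_t1");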

+
+

autocommit

[Proposal] It is recommended that you enable the autocommit switch in the code for connecting to GaussDB(DWS) through JDBC. If autocommit must be disabled to improve performance or for other purposes, applications need to ensure their transactions are committed, for example, by explicitly committing transactions after executing service SQL statements. In particular, ensure that all transactions are committed before the client exits.
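A sketch of manual transaction handling, assuming an open Connection conn and Statement stmt on the customer_t1 table:

conn.setAutoCommit(false);
try {
    stmt.executeUpdate("INSERT INTO customer_t1 VALUES (1, 'a')");
    conn.commit();     // explicitly commit the service SQL
} catch (SQLException e) {
    conn.rollback();   // roll back so no open transaction is left behind
    throw e;
}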

+
+

Connection Releasing

[Proposal] You are advised to use connection pools to limit the number of connections from applications. Do not connect to a database every time you run an SQL statement.

+

[Proposal] After an application completes its tasks, disconnect its connection to GaussDB(DWS) to release occupied resources. You are advised to set the session timeout interval in the task.

+

[Proposal] Reset the session environment before releasing connections to the JDBC connection tool. Otherwise, historical session information may cause object conflicts.

+ +
+

CopyManager

[Proposal] In the scenario where the ETL tool is not used and real-time data import is required, it is recommended that you use the CopyManger interface driven by the GaussDB(DWS) JDBC to import data in batches during application development. For details about how to use CopyManager, see CopyManager.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0084.html b/docs/dws/dev/dws_04_0084.html new file mode 100644 index 00000000..ba246f73 --- /dev/null +++ b/docs/dws/dev/dws_04_0084.html @@ -0,0 +1,74 @@ + + +

SQL Compilation

+

DDL

+
+

Data Loading and Uninstalling

+
+

Type conversion

+
+

Query Operation

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0085.html b/docs/dws/dev/dws_04_0085.html new file mode 100644 index 00000000..c67ca82a --- /dev/null +++ b/docs/dws/dev/dws_04_0085.html @@ -0,0 +1,21 @@ + + +

Guide: JDBC- or ODBC-Based Development

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0086.html b/docs/dws/dev/dws_04_0086.html new file mode 100644 index 00000000..a0bb5ed5 --- /dev/null +++ b/docs/dws/dev/dws_04_0086.html @@ -0,0 +1,13 @@ + + +

Development Specifications

+

If the connection pool mechanism is used during application development, comply with the following specifications:

+ +

If you do not do so, the session status of connections will remain in the connection pool, affecting subsequent operations that reuse those connections.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0087.html b/docs/dws/dev/dws_04_0087.html new file mode 100644 index 00000000..84ca25e5 --- /dev/null +++ b/docs/dws/dev/dws_04_0087.html @@ -0,0 +1,11 @@ + + +

Downloading Drivers

+

For details, see section "Downloading the JDBC or ODBC Driver" in the Data Warehouse Service User Guide.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0088.html b/docs/dws/dev/dws_04_0088.html new file mode 100644 index 00000000..fd2817ab --- /dev/null +++ b/docs/dws/dev/dws_04_0088.html @@ -0,0 +1,38 @@ + + +

JDBC-Based Development

+

Java Database Connectivity (JDBC) is a Java API for executing SQL statements, providing a unified access interface for different relational databases, based on which applications process data. GaussDB(DWS) supports JDBC 4.0 and requires JDK 1.6 or later for code compiling. It does not support JDBC-ODBC Bridge.

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0090.html b/docs/dws/dev/dws_04_0090.html new file mode 100644 index 00000000..03564848 --- /dev/null +++ b/docs/dws/dev/dws_04_0090.html @@ -0,0 +1,16 @@ + + +

JDBC Package and Driver Class

+

JDBC Package

Obtain the package dws_8.1.x_jdbc_driver.zip from the management console. For details, see Downloading Drivers.

+

Compressed in it is the JDBC driver JAR package:

+

gsjdbc4.jar: Driver package compatible with PostgreSQL. The class name and class structure in the driver are the same as those in the PostgreSQL driver. All the applications running on PostgreSQL can be smoothly transferred to the current system.

+
+

Driver Class

Before creating a database connection, you need to load the database driver class org.postgresql.Driver (decompressed from gsjdbc4.jar).

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0091.html b/docs/dws/dev/dws_04_0091.html new file mode 100644 index 00000000..ed5f2881 --- /dev/null +++ b/docs/dws/dev/dws_04_0091.html @@ -0,0 +1,11 @@ + + +

Development Process

+
Figure 1 JDBC-based application development process
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0092.html b/docs/dws/dev/dws_04_0092.html new file mode 100644 index 00000000..15e76e3d --- /dev/null +++ b/docs/dws/dev/dws_04_0092.html @@ -0,0 +1,13 @@ + + +

Loading a Driver

+

Load the database driver before creating a database connection.

+

You can load the driver in the following ways:
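Two common approaches, sketched for the PostgreSQL-compatible driver class in gsjdbc4.jar:

// Approach 1: load the driver class explicitly before requesting a connection.
Class.forName("org.postgresql.Driver");

// Approach 2: name the driver when starting the JVM, so no explicit load is needed.
// java -Djdbc.drivers=org.postgresql.Driver MyApp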

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0093.html b/docs/dws/dev/dws_04_0093.html new file mode 100644 index 00000000..bad9b7d9 --- /dev/null +++ b/docs/dws/dev/dws_04_0093.html @@ -0,0 +1,138 @@ + + +

Connecting to a Database

+

After a database is connected, you can execute SQL statements in the database.

+

If you use an open-source Java Database Connectivity (JDBC) driver, ensure that the database parameter password_encryption_type is set to 1. If the value is not 1, the connection may fail. A typical error message is "none of the server's SASL authentication mechanisms are supported." To avoid such problems, perform the following operations:

+
  1. Set password_encryption_type to 1. For details, see "Modifying Database Parameters" in User Guide.
  2. Create a new database user for connection or reset the password of the existing database user.
    • If you use an administrator account, reset the password. For details, see "Password Reset" in User Guide.
    • If you are a common user, use another client tool (such as Data Studio) to connect to the database and run the ALTER USER statement to change your password.
    +
  3. Connect to the database.
+

+

Here are the reasons why you need to perform these operations:

+
  • MD5 algorithms may be vulnerable to collision attacks and cannot be used for password verification. Currently, GaussDB(DWS) uses the default security design. By default, MD5 password verification is disabled, but MD5 is required by the open-source libpq communication protocol of PostgreSQL. For connectivity purposes, you need to adjust the cryptographic algorithm parameter password_encryption_type and enable the MD5 algorithm.
  • The database stores the hash digest of passwords instead of password text. During password verification, the system compares the hash digest with the password digest sent from the client (salt operations are involved). If you change your cryptographic algorithm policy, the database cannot generate a new MD5 hash digest for your existing password. For connectivity purposes, you must manually change your password or create a new user. The new password will be encrypted using the hash algorithm and stored for authentication in the next connection.
+
+

Function Prototype

JDBC provides the following three database connection methods:

+ +
+

Parameter

+
+ + + + + + + + + + + + + + + + +
Table 1 Database connection parameters

Parameter

+

Description

+

url

+

gsjdbc4.jar database connection descriptor. The descriptor format can be:

+
  • jdbc:postgresql:database
  • jdbc:postgresql://host/database
  • jdbc:postgresql://host:port/database
  • jdbc:postgresql://host:port[,host:port][...]/database
+
NOTE:

If gsjdbc200.jar is used, replace jdbc:postgresql with jdbc:gaussdb.

+
  • database: indicates the name of the database to be connected.
  • host: indicates the name or IP address of the database server.

    Specify the IP address for connecting to the GaussDB(DWS) cluster on GaussDB(DWS) management console. If the connected host and the GaussDB(DWS) cluster are in the same network, select the private IP address. Otherwise, select the public IP address.

    +

    For security purposes, the CN forbids access from other nodes in the cluster without authentication. To access the CN from inside the cluster, deploy the JDBC program on the host where the CN is located and set host to 127.0.0.1. Otherwise, the error message FATAL: Forbid remote connection with trust method! may be displayed.

    +

    It is recommended that the service system be deployed outside the cluster. Otherwise, the database performance may be affected.

    +
  • port: indicates the port number of a database server. By default, the database on port 8000 of the local host is connected.
  • Multiple IP addresses and ports can be configured. JDBC balances load by random access and failover, and will automatically ignore unreachable IP addresses.

    IP addresses are separated using commas. Example: jdbc:postgresql://10.10.0.13:8000,10.10.0.14:8000/database

    +
+
+

info

+

Database connection properties. Common properties include:

+
  • user: string type. It indicates the database user who creates the connection.
  • password: string type. It indicates the password of a database user.
  • ssl: boolean type. It indicates whether the Secure Socket Layer (SSL) is used.
  • loggerLevel: string type. It indicates the amount of information that the driver logs and prints to the LogStream or LogWriter specified in the DriverManager. Currently, OFF, DEBUG, and TRACE are supported. DEBUG indicates that only logs of the DEBUG or higher level are printed, generating little log information. TRACE indicates that logs of the DEBUG and TRACE levels are printed, generating detailed log information. The default value is OFF, indicating that no information will be logged.
  • prepareThreshold: integer type. It indicates the number of PreparedStatement executions required before requests are converted to prepared statements in servers. The default value is 5.
  • batchMode: boolean type. It indicates whether to connect the database in batch mode.
  • fetchsize: integer type. It indicates the default fetch size for statements in the created connection.
  • ApplicationName: string type. It indicates an application name. The default value is PostgreSQL JDBC Driver.
  • allowReadOnly: boolean type. It indicates whether to enable the read-only mode for connection. The default value is false. If the value is not changed to true, the execution of connection.setReadOnly does not take effect.
  • blobMode: string type. It is used to set the setBinaryStream method to assign values to different data types. The value on indicates that values are assigned to the BLOB data type and off indicates that values are assigned to the BYTEA data type. The default value is on.
  • connectionExtraInfo: boolean type. It indicates whether the JDBC driver reports the driver deployment path and process owner to the database.
    NOTE:

    The value can be true or false. The default value is false. If connectionExtraInfo is set to true, the JDBC driver reports the driver deployment path and process owner to the database and displays the information in the connection_info parameter (see connection_info). In this case, you can query the information from PG_STAT_ACTIVITY or PGXC_STAT_ACTIVITY.

    +
    +
+

user

+

Indicates a database user.

+

password

+

Indicates the password of a database user.

+
+
+
+

Examples

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
//gsjdbc4.jar is used as an example.
+//The following code encapsulates database connection operations into an interface. The database can then be connected using an authorized username and password.
+
+public static Connection GetConnection(String username, String passwd)
+    {
+        //Set the driver class.
+        String driver = "org.postgresql.Driver";
+        //Set the database connection descriptor.
+        String sourceURL = "jdbc:postgresql://10.10.0.13:8000/postgres?currentSchema=test";
+        Connection conn = null;
+        
+        try
+        {
+            //Load the driver.
+            Class.forName(driver);
+        }
+        catch( Exception e )
+        {
+            e.printStackTrace();
+            return null;
+        }
+        
+        try
+        {
+             //Create a connection.
+            conn = DriverManager.getConnection(sourceURL, username, passwd);
+            System.out.println("Connection succeed!");
+        }
+        catch(Exception e)
+        {
+            e.printStackTrace();
+            return null;
+        }
+        
+        return conn;
+    };
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0095.html b/docs/dws/dev/dws_04_0095.html new file mode 100644 index 00000000..61a328cc --- /dev/null +++ b/docs/dws/dev/dws_04_0095.html @@ -0,0 +1,131 @@ + + +

Executing SQL Statements

+

Executing an Ordinary SQL Statement

The application operates on data in the database by running ordinary SQL statements (no parameters need to be passed). Perform the following steps:

+
  1. Create a statement object by triggering the createStatement method in Connection.

    1
    Statement stmt = con.createStatement();
    +
    + +
    +

  2. Execute the SQL statement by triggering the executeUpdate method in Statement.

    1
    int rc = stmt.executeUpdate("CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));");
    +
    + +
    +

    If an execution request (not in a transaction block) received in the database contains multiple statements, the request is packed into a transaction. VACUUM is not supported in a transaction block. If one of the statements fails, the entire request will be rolled back.

    +
    +

  3. Close the statement object.

    1
    stmt.close();
    +
    + +
    +

+
+

Executing a Prepared SQL Statement

A prepared statement is compiled and optimized once and can then be executed repeatedly with different parameters. Because the statement is precompiled, execution efficiency is greatly improved. If you need to execute a statement multiple times, use a precompiled statement. Perform the following procedure:

+
  1. Create a prepared statement object by calling the prepareStatement method in Connection.

    1
    PreparedStatement pstmt = con.prepareStatement("UPDATE customer_t1 SET c_customer_name = ? WHERE c_customer_sk = 1");
    +
    + +
    +

  2. Set parameters by triggering the setShort method in PreparedStatement.

    1
    pstmt.setShort(1, (short)2);
    +
    + +
    +

  3. Execute the precompiled SQL statement by triggering the executeUpdate method in PreparedStatement.

    1
    int rowcount = pstmt.executeUpdate();
    +
    + +
    +

  4. Close the precompiled statement object by calling the close method in PreparedStatement.

    1
    pstmt.close();
    +
    + +
    +

+
+

Calling a Stored Procedure

Perform the following steps to call existing stored procedures through the JDBC interface in GaussDB(DWS):

+
  1. Create a call statement object by calling the prepareCall method in Connection.

    1
    CallableStatement cstmt = myConn.prepareCall("{? = CALL TESTPROC(?,?,?)}");
    +
    + +
    +

  2. Set parameters by calling the setInt method in CallableStatement.

    1
    +2
    +3
    cstmt.setInt(2, 50); 
    +cstmt.setInt(1, 20);
    +cstmt.setInt(3, 90);
    +
    + +
    +

  3. Register with an output parameter by calling the registerOutParameter method in CallableStatement.

    1
    cstmt.registerOutParameter(4, Types.INTEGER);  //Register an OUT parameter as an integer.
    +
    + +
    +

  4. Call the stored procedure by calling the execute method in CallableStatement.

    1
    cstmt.execute();
    +
    + +
    +

  5. Obtain the output parameter by calling the getInt method in CallableStatement.

    1
    int out = cstmt.getInt(4);  //Obtain the OUT parameter.
    +
    + +
    +

    For example:

    +
     1
    + 2
    + 3
    + 4
    + 5
    + 6
    + 7
    + 8
    + 9
    +10
    +11
    +12
    //The following stored procedure has been created with the OUT parameter:
    +create or replace procedure testproc 
    +(
    +    psv_in1 in integer,
    +    psv_in2 in integer,
    +    psv_inout in out integer
    +)
    +as
    +begin
    +    psv_inout := psv_in1 + psv_in2 + psv_inout;
    +end;
    +/
    +
    + +
    +

  6. Close the call statement by calling the close method in CallableStatement.

    1
    cstmt.close();
    +
    + +
    +
    • Many database classes such as Connection, Statement, and ResultSet have a close() method. Close these objects after you finish using them. Closing a Connection closes all its related Statements, and closing a Statement closes its ResultSet.
    • Some JDBC drivers support named parameters, which can be used to set parameters by name rather than sequence. If a parameter has a default value, you do not need to specify any parameter value but can use the default value directly. Even though the parameter sequence changes during a stored procedure, the application does not need to be modified. Currently, the GaussDB(DWS) JDBC driver does not support this method.
    • GaussDB(DWS) does not support functions containing OUT parameters, or default values of stored procedures and function parameters.
    +
    +

+
  • If JDBC is used to call a stored procedure whose returned value is a cursor, the returned cursor cannot be used.
  • A stored procedure and an SQL statement must be executed separately.
+
+
+

Batch Processing

When a prepared statement batch processes multiple pieces of similar data, the database creates only one execution plan. This improves the compilation and optimization efficiency. Perform the following procedure:

+
  1. Create a prepared statement object by calling the prepareStatement method in Connection.

    1
    PreparedStatement pstmt = con.prepareStatement("INSERT INTO customer_t1 VALUES (?)");
    +
    + +
    +

  2. Call the setShort parameter for each piece of data, and call addBatch to confirm that the setting is complete.

    1
    +2
    pstmt.setShort(1, (short)2);
    +pstmt.addBatch();
    +
    + +
    +

  3. Execute batch processing by calling the executeBatch method in PreparedStatement.

    1
    int[] rowcount = pstmt.executeBatch();
    +
    + +
    +

  4. Close the precompiled statement object by calling the close method in PreparedStatement.

    pstmt.close();
    +
    + +
    +

    Do not interrupt an ongoing batch processing action; otherwise, database performance will deteriorate. Disable autocommit during batch processing and commit manually every few rows. The statement for disabling autocommit is conn.setAutoCommit(false). A minimal sketch follows.
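
    The following is a minimal sketch of batch inserts with periodic manual commits. It assumes conn is an open Connection and customer_t1 is the single-column table used in step 1; the 1,000-row commit interval is an arbitrary choice for illustration.

    conn.setAutoCommit(false);
    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?)");
    for (int i = 0; i < 10000; i++) {
        pstmt.setShort(1, (short) i);
        pstmt.addBatch();
        if ((i + 1) % 1000 == 0) {
            // Flush the batch and commit every 1,000 rows.
            pstmt.executeBatch();
            conn.commit();
        }
    }
    pstmt.executeBatch();   // Flush the remaining rows.
    conn.commit();
    pstmt.close();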

    +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0096.html b/docs/dws/dev/dws_04_0096.html new file mode 100644 index 00000000..c5a71175 --- /dev/null +++ b/docs/dws/dev/dws_04_0096.html @@ -0,0 +1,185 @@ + + +

Processing Data in a Result Set

+

Setting a Result Set Type

Different types of result sets are applicable to different application scenarios. Applications select proper types of result sets based on requirements. Before executing an SQL statement, you must create a statement object. Some methods of creating statement objects can set the type of a result set. Table 1 lists result set parameters. The related Connection methods are as follows:

+
//Create a Statement object. This object will generate a ResultSet object with a specified type and concurrency.
+createStatement(int resultSetType, int resultSetConcurrency);
+
+//Create a PreparedStatement object. This object will generate a ResultSet object with a specified type and concurrency.
+prepareStatement(String sql, int resultSetType, int resultSetConcurrency);
+
+//Create a CallableStatement object. This object will generate a ResultSet object with a specified type and concurrency.
+prepareCall(String sql, int resultSetType, int resultSetConcurrency);
+
+ +
+ +
+ + + + + + + + + + +
Table 1 Result set types

Parameter

+

Description

+

resultSetType

+

Indicates the type of a result set. There are three types of result sets:

+
  • ResultSet.TYPE_FORWARD_ONLY: The ResultSet object can only be navigated forward. It is the default value.
  • ResultSet.TYPE_SCROLL_SENSITIVE: You can view the modified result by scrolling to the modified row.
  • ResultSet.TYPE_SCROLL_INSENSITIVE: The ResultSet object is insensitive to changes in the underlying data source.
+
NOTE:

After a result set has obtained data from the database, the result set is insensitive to data changes made by other transactions, even if the result set type is ResultSet.TYPE_SCROLL_SENSITIVE. To obtain up-to-date data of the record pointed by the cursor from the database, call the refreshRow() method in a ResultSet object.

+
+

resultSetConcurrency

+

Indicates the concurrency type of a result set. There are two types of concurrency.

+
  • ResultSet.CONCUR_READ_ONLY: read-only result set. The data in the result set cannot be updated through the ResultSet interface.
  • ResultSet.CONCUR_UPDATABLE: updatable result set. The data in a result set object can be updated if the result set is scrollable. A sketch of setting these parameters follows the table.
+
+
+
+
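
A minimal sketch of requesting a scrollable, insensitive, read-only result set (assuming conn is an open Connection and customer_t1 is an existing table):

Statement stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
ResultSet rs = stmt.executeQuery("SELECT * FROM customer_t1");
rs.last();                                        // Scrolling works because the result set is scrollable.
System.out.println("row count: " + rs.getRow());  // getRow() on the last row gives the row count.
rs.close();
stmt.close();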

Positioning a Cursor in a Result Set

ResultSet objects include a cursor pointing to the current data row. The cursor is initially positioned before the first row. The next method moves the cursor to the next row from its current position. When a ResultSet object has no next row, a call to the next method returns false; therefore, this method is commonly used in a while loop for result set iteration. For scrollable result sets, the JDBC driver provides more cursor positioning methods, which allow positioning the cursor at a specified row. Table 2 lists these methods; a short sketch follows the table.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Methods for positioning a cursor in a result set

Method

+

Description

+

next()

+

Moves cursor to the next row from its current position.

+

previous()

+

Moves cursor to the previous row from its current position.

+

beforeFirst()

+

Places cursor before the first row.

+

afterLast()

+

Places cursor after the last row.

+

first()

+

Places cursor to the first row.

+

last()

+

Places cursor to the last row.

+

absolute(int)

+

Places cursor to a specified row.

+

relative(int)

+

Moves cursor forward or backward a specified number of rows.

+
+
+
+
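
A minimal sketch of the positioning methods above (assuming rs is a scrollable result set with at least three rows):

rs.absolute(3);          // Jump to the third row.
rs.relative(-2);         // Move back two rows, to the first row.
rs.afterLast();          // Place the cursor after the last row.
while (rs.previous()) {  // Iterate backward through the result set.
    System.out.println("col1 = " + rs.getInt(1));
}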

Obtaining the Cursor Position from a Result Set

For a scrollable result set, the positioning methods described above change the cursor position. The JDBC driver also provides methods to obtain the current cursor position in a result set. Table 3 lists these methods; a sketch follows the table.

+ +
+ + + + + + + + + + + + + + + + + + + +
Table 3 Method for obtaining the cursor position in a result set

Method

+

Description

+

isFirst()

+

Checks whether the cursor is in the first row.

+

isLast()

+

Checks whether the cursor is in the last row.

+

isBeforeFirst()

+

Checks whether the cursor is before the first row.

+

isAfterLast()

+

Checks whether the cursor is after the last row.

+

getRow()

+

Gets the current row number of the cursor.

+
+
+
+
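
A minimal sketch of the position-checking methods above (assuming rs is a scrollable result set):

rs.first();
System.out.println("is first: " + rs.isFirst());   // true right after first().
rs.last();
System.out.println("row number: " + rs.getRow());  // Row number of the last row.
System.out.println("is last: " + rs.isLast());     // true right after last().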

Obtaining Data from a Result Set

ResultSet objects provide a variety of methods to obtain data from a result set. Table 4 lists the common methods for obtaining data; a sketch follows the table. For details about other methods, see the official JDK documentation.

+ +
+ + + + + + + + + + + + + + + + + + + + + + +
Table 4 Common methods for obtaining data from a result set

Method

+

Description

+

int getInt(int columnIndex)

+

Retrieves the value of the column designated by a column index in the current row as an int.

+

int getInt(String columnLabel)

+

Retrieves the value of the column designated by a column label in the current row as an int.

+

String getString(int columnIndex)

+

Retrieves the value of the column designated by a column index in the current row as a String.

+

String getString(String columnLabel)

+

Retrieves the value of the column designated by a column label in the current row as a String.

+

Date getDate(int columnIndex)

+

Retrieves the value of the column designated by a column index in the current row as a Date.

+

Date getDate(String columnLabel)

+

Retrieves the value of the column designated by a column name in the current row as a Date.

+
+
+
+
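
A minimal sketch of reading column values by index and by label (the column names are illustrative; they match the customer_t1 table used in Example: Common Operations):

while (rs.next()) {
    int key = rs.getInt(1);                          // By column index.
    String name = rs.getString("c_customer_name");   // By column label.
    System.out.println(key + ": " + name);
}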
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0097.html b/docs/dws/dev/dws_04_0097.html new file mode 100644 index 00000000..8e8a82a7 --- /dev/null +++ b/docs/dws/dev/dws_04_0097.html @@ -0,0 +1,12 @@ + + +

Closing the Connection

+

After you complete required data operations in the database, close the database connection.

+

Call the close method to close the connection, for example, conn.close(). A short sketch follows.
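
A minimal sketch that closes the connection in a finally block, so the connection is released even if an earlier operation throws an exception (url, user, and password are placeholders):

Connection conn = null;
try {
    conn = DriverManager.getConnection(url, user, password);
    // ... use the connection ...
} finally {
    if (conn != null) {
        try {
            conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}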

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0098.html b/docs/dws/dev/dws_04_0098.html new file mode 100644 index 00000000..80a90310 --- /dev/null +++ b/docs/dws/dev/dws_04_0098.html @@ -0,0 +1,428 @@ + + +

Example: Common Operations

+

Example 1

Before completing the following example, you need to create a stored procedure.

+
create or replace procedure testproc 
+(
+    psv_in1 in integer,
+    psv_in2 in integer,
+    psv_inout in out integer
+)
+as
+begin
+    psv_inout := psv_in1 + psv_in2 + psv_inout;
+end;
+/
+
+ +
+

This example illustrates how to develop applications based on the GaussDB(DWS) JDBC interface.

+
//DBtest.java
+//gsjdbc4.jar is used as an example.
+// This example illustrates the main processes of JDBC-based development, covering database connection creation, table creation, and data insertion.
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.CallableStatement;
+import java.sql.Types;
+
+public class DBTest {
+
+  //Establish a connection to the database.
+  public static Connection GetConnection(String username, String passwd) {
+    String driver = "org.postgresql.Driver";
+    String sourceURL = "jdbc:postgresql://localhost:8000/gaussdb";
+    Connection conn = null;
+    try {
+      //Load the database driver.
+      Class.forName(driver).newInstance();
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    try {
+      //Establish a connection to the database.
+      conn = DriverManager.getConnection(sourceURL, username, passwd);
+      System.out.println("Connection succeed!");
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    return conn;
+  };
+
+  //Run an ordinary SQL statement. Create a customer_t1 table.
+  public static void CreateTable(Connection conn) {
+    Statement stmt = null;
+    try {
+      stmt = conn.createStatement();
+
+      //Run an ordinary SQL statement.
+      int rc = stmt
+          .executeUpdate("CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));");
+
+      stmt.close();
+    } catch (SQLException e) {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e1) {
+          e1.printStackTrace();
+        }
+      }
+      e.printStackTrace();
+    }
+  }
+
+  //Run the preprocessing statement to insert data in batches.
+  public static void BatchInsertData(Connection conn) {
+    PreparedStatement pst = null;
+
+    try {
+      //Generate a prepared statement.
+      pst = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?,?)");
+      for (int i = 0; i < 3; i++) {
+        //Add parameters.
+        pst.setInt(1, i);
+        pst.setString(2, "data " + i);
+        pst.addBatch();
+      }
+      //Run batch processing.
+      pst.executeBatch();
+      pst.close();
+    } catch (SQLException e) {
+      if (pst != null) {
+        try {
+          pst.close();
+        } catch (SQLException e1) {
+        e1.printStackTrace();
+        }
+      }
+      e.printStackTrace();
+    }
+  }
+
+  //Run the precompilation statement to update data.
+  public static void ExecPreparedSQL(Connection conn) {
+    PreparedStatement pstmt = null;
+    try {
+      pstmt = conn
+          .prepareStatement("UPDATE customer_t1 SET c_customer_name = ? WHERE c_customer_sk = 1");
+      pstmt.setString(1, "new Data");
+      int rowcount = pstmt.executeUpdate();
+      pstmt.close();
+    } catch (SQLException e) {
+      if (pstmt != null) {
+        try {
+          pstmt.close();
+        } catch (SQLException e1) {
+          e1.printStackTrace();
+        }
+      }
+      e.printStackTrace();
+    }
+  }
+
+
+//Run a stored procedure.
+  public static void ExecCallableSQL(Connection conn) {
+    CallableStatement cstmt = null;
+    try {
+      
+      cstmt=conn.prepareCall("{? = CALL TESTPROC(?,?,?)}");
+      cstmt.setInt(2, 50); 
+      cstmt.setInt(1, 20);
+      cstmt.setInt(3, 90);
+       cstmt.registerOutParameter(4, Types.INTEGER);  //Register an OUT parameter as an integer.
+      cstmt.execute();
+      int out = cstmt.getInt(4);  //Obtain the out parameter value.
+      System.out.println("The CallableStatment TESTPROC returns:"+out);
+      cstmt.close();
+    } catch (SQLException e) {
+      if (cstmt != null) {
+        try {
+          cstmt.close();
+        } catch (SQLException e1) {
+          e1.printStackTrace();
+        }
+      }
+      e.printStackTrace();
+    }
+  }
+  
+
+  /**
+   * Main process. Call static methods one by one.
+   * @param args
+  */
+  public static void main(String[] args) {
+    //Establish a connection to the database.
+    Connection conn = GetConnection("tester", "password");
+
+    //Create a table.
+    CreateTable(conn);
+
+    //Insert data in batches.
+    BatchInsertData(conn);
+
+  //Run the precompilation statement to update data.
+    ExecPreparedSQL(conn);
+
+    //Run a stored procedure.
+    ExecCallableSQL(conn);
+
+    //Close the connection to the database.
+    try {
+      conn.close();
+    } catch (SQLException e) {
+      e.printStackTrace();
+    }
+
+  }
+
+}
+
+ +
+
+

Example 2: High Client Memory Usage

In this example, setFetchSize adjusts the memory usage of the client by using a database cursor to obtain server data in batches. This may increase the number of network interactions and reduce performance.

+

The cursor is valid within a transaction. Therefore, you need to disable the autocommit function.

+
// Disable the autocommit function.
+conn.setAutoCommit(false);
+Statement st = conn.createStatement();
+
+// Open the cursor and obtain 50 lines of data each time.
+st.setFetchSize(50);
+ResultSet rs = st.executeQuery("SELECT * FROM mytable");
+while (rs.next())
+{
+    System.out.print("a row was returned.");
+}
+rs.close();
+
+// Disable the server cursor.
+st.setFetchSize(0);
+rs = st.executeQuery("SELECT * FROM mytable");
+while (rs.next())
+{
+    System.out.print("many rows were returned.");
+}
+rs.close();
+
+// Close the statement.
+st.close();
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0099.html b/docs/dws/dev/dws_04_0099.html new file mode 100644 index 00000000..885c4775 --- /dev/null +++ b/docs/dws/dev/dws_04_0099.html @@ -0,0 +1,409 @@ + + +

Example: Retrying SQL Queries for Applications

+

If the primary DN is faulty and cannot be restored within 40s, its standby is automatically promoted to primary to ensure the normal running of the cluster. Jobs running during the failover will fail, and jobs started after the failover will not be affected. To protect upper-layer services from being affected by the failover, refer to the following example to construct an SQL retry mechanism at the service layer.

+
//gsjdbc4.jar is used as an example.
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+/**
+ * 
+ *
+ */
+
+class ExitHandler extends Thread {
+    private Statement cancel_stmt = null;
+
+    public ExitHandler(Statement stmt) {
+        super("Exit Handler");
+        this.cancel_stmt = stmt;
+    }
+    public void run() {
+        System.out.println("exit handle");
+        try {
+            this.cancel_stmt.cancel();
+        } catch (SQLException e) {
+            System.out.println("cancel query failed.");
+            e.printStackTrace();
+        }
+    }
+}
+
+public class SQLRetry {
+  //Establish a connection to the database.
+   public static Connection GetConnection(String username, String passwd) {
+     String driver = "org.postgresql.Driver";
+     String sourceURL = "jdbc:postgresql://10.131.72.136:8000/gaussdb";
+     Connection conn = null;
+     try {
+      //Load the database driver.
+       Class.forName(driver).newInstance();
+     } catch (Exception e) {
+       e.printStackTrace();
+       return null;
+     }
+
+     try {
+      //Establish a connection to the database.
+       conn = DriverManager.getConnection(sourceURL, username, passwd);
+       System.out.println("Connection succeed!");
+     } catch (Exception e) {
+       e.printStackTrace();
+       return null;
+     }
+
+     return conn;
+}
+ 
+  //Run an ordinary SQL statement. Create a jdbc_test1 table.
+   public static void CreateTable(Connection conn) {
+     Statement stmt = null;
+     try {
+       stmt = conn.createStatement();
+
+       // add ctrl+c handler
+       Runtime.getRuntime().addShutdownHook(new ExitHandler(stmt));
+
+      // Run an ordinary SQL statement.
+       int rc2 = stmt
+          .executeUpdate("DROP TABLE if exists jdbc_test1;");
+
+       int rc1 = stmt
+          .executeUpdate("CREATE TABLE jdbc_test1(col1 INTEGER, col2 VARCHAR(10));");
+
+       stmt.close();
+     } catch (SQLException e) {
+       if (stmt != null) {
+         try {
+           stmt.close();
+         } catch (SQLException e1) {
+           e1.printStackTrace();
+         }
+       }
+       e.printStackTrace();
+     }
+   }
+
+  //Run the preprocessing statement to insert data in batches.
+   public static void BatchInsertData(Connection conn) {
+     PreparedStatement pst = null;
+
+     try {
+      //Generate a prepared statement.
+       pst = conn.prepareStatement("INSERT INTO jdbc_test1 VALUES (?,?)");
+       for (int i = 0; i < 100; i++) {
+        //Add parameters.
+         pst.setInt(1, i);
+         pst.setString(2, "data " + i);
+         pst.addBatch();
+       }
+      //Perform batch processing.
+       pst.executeBatch();
+       pst.close();
+     } catch (SQLException e) {
+       if (pst != null) {
+         try {
+           pst.close();
+         } catch (SQLException e1) {
+         e1.printStackTrace();
+         }
+       }
+       e.printStackTrace();
+     }
+   }
+ 
+  //Run the precompilation statement to update data.
+   private static boolean QueryRedo(Connection conn){
+     PreparedStatement pstmt = null;
+     boolean retValue = false;
+     try {
+       pstmt = conn
+           .prepareStatement("SELECT col1 FROM jdbc_test1 WHERE col2 = ?");
+ 
+           pstmt.setString(1, "data 10");
+           ResultSet rs = pstmt.executeQuery();
+
+           while (rs.next()) {
+               System.out.println("col1 = " + rs.getString("col1"));
+           }
+           rs.close();
+ 
+       pstmt.close();
+        retValue = true;
+      } catch (SQLException e) {
+       System.out.println("catch...... retValue " + retValue);
+       if (pstmt != null) {
+         try {
+          pstmt.close();
+        } catch (SQLException e1) {
+          e1.printStackTrace();
+         }
+       }
+       e.printStackTrace();
+     }
+ 
+      System.out.println("finesh......"); 
+     return retValue;
+   }
+
+//Run a query statement and retry upon a failure. The number of retry times can be configured.
+   public static void ExecPreparedSQL(Connection conn) throws InterruptedException {
+     int maxRetryTime = 50;
+     int time = 0;
+     boolean success = false;
+     do {
+       time++;
+       try {
+         System.out.println("time:" + time);
+         //Retry until the query succeeds or the maximum number of retries is reached.
+         success = QueryRedo(conn);
+         if (!success) {
+           System.out.println("retry, time:" + time);
+           Thread.sleep(10000);
+         }
+       } catch (Exception e) {
+         e.printStackTrace();
+       }
+     } while (!success && time < maxRetryTime);
+   }
+
+   /**
+   * Main process. Call static methods one by one.
+    * @param args
+  * @throws InterruptedException 
+   */
+   public static void main(String[] args) throws InterruptedException {
+    //Establish a connection to the database.
+     Connection conn = GetConnection("testuser", "test@123");
+
+    //Create a table.
+     CreateTable(conn);
+
+    //Insert data in batches.
+     BatchInsertData(conn);
+
+    //Run the precompilation statement to update data.
+     ExecPreparedSQL(conn);
+
+    //Disconnect from the database.
+     try {
+       conn.close();
+     } catch (SQLException e) {
+       e.printStackTrace();
+     }
+
+   }
+
+ }
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0100.html b/docs/dws/dev/dws_04_0100.html new file mode 100644 index 00000000..9ee98f7e --- /dev/null +++ b/docs/dws/dev/dws_04_0100.html @@ -0,0 +1,222 @@ + + +

Example: Importing and Exporting Data Through Local Files

+

When Java is used for secondary development based on GaussDB(DWS), you can use the CopyManager interface to export data from the database to a local file or import a local file into the database by streaming. The file can be in CSV or TEXT format.

+

The sample program is as follows. Load the GaussDB(DWS) JDBC driver before running it.

+
//gsjdbc4.jar is used as an example.
+import java.sql.Connection; 
+import java.sql.DriverManager; 
+import java.io.IOException;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.sql.SQLException; 
+import org.postgresql.copy.CopyManager; 
+import org.postgresql.core.BaseConnection;
+ 
+public class Copy{ 
+
+     public static void main(String[] args) 
+     { 
+      String urls = new String("jdbc:postgresql://10.180.155.74:8000/gaussdb"); //Database URL
+      String username = new String("jack");            //Username
+      String password = new String("********");       // Password
+      String tablename = new String("migration_table"); //Define table information.
+      String tablename1 = new String("migration_table_1"); //Define table information.
+      String driver = "org.postgresql.Driver"; 
+      Connection conn = null; 
+      
+      try { 
+            Class.forName(driver); 
+            conn = DriverManager.getConnection(urls, username, password);         
+          } catch (ClassNotFoundException e) { 
+               e.printStackTrace(System.out); 
+          } catch (SQLException e) { 
+               e.printStackTrace(System.out); 
+          } 
+      
+      //Export the query result of SELECT * FROM migration_table to the local file d:/data.txt.
+      try {
+     copyToFile(conn, "d:/data.txt", "(SELECT * FROM migration_table)");
+   } catch (SQLException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+   } catch (IOException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+   }    
+      //Import data from the d:/data.txt file to the migration_table_1 table.
+      try {
+      copyFromFile(conn, "d:/data.txt", tablename1);
+   } catch (SQLException e) {
+  // TODO Auto-generated catch block
+         e.printStackTrace();
+ } catch (IOException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+ }  
+
+      //Export the data from the migration_table_1 table to the d:/data1.txt file.
+      try {
+      copyToFile(conn, "d:/data1.txt", tablename1);
+   } catch (SQLException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+   } catch (IOException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+ }        
+     } 
+
+  public static void copyFromFile(Connection connection, String filePath, String tableName)   
+         throws SQLException, IOException {  
+       
+     FileInputStream fileInputStream = null;  
+   
+     try {  
+         CopyManager copyManager = new CopyManager((BaseConnection)connection);  
+         fileInputStream = new FileInputStream(filePath);  
+         copyManager.copyIn("COPY " + tableName + " FROM STDIN", fileInputStream);  
+     } finally {  
+         if (fileInputStream != null) {  
+             try {  
+                 fileInputStream.close();  
+             } catch (IOException e) {  
+                 e.printStackTrace();  
+             }  
+         }  
+     }  
+ }  
+  
+  public static void copyToFile(Connection connection, String filePath, String tableOrQuery)   
+          throws SQLException, IOException {  
+        
+      FileOutputStream fileOutputStream = null;  
+   
+      try {  
+          CopyManager copyManager = new CopyManager((BaseConnection)connection);  
+          fileOutputStream = new FileOutputStream(filePath);  
+          copyManager.copyOut("COPY " + tableOrQuery + " TO STDOUT", fileOutputStream);  
+      } finally {  
+          if (fileOutputStream != null) {  
+              try {  
+                  fileOutputStream.close();  
+              } catch (IOException e) {  
+                  e.printStackTrace();  
+              }  
+          }  
+      }  
+  }  
+}
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0101.html b/docs/dws/dev/dws_04_0101.html new file mode 100644 index 00000000..418cc4b9 --- /dev/null +++ b/docs/dws/dev/dws_04_0101.html @@ -0,0 +1,183 @@ + + +

Example: Migrating Data from MySQL to GaussDB(DWS)

+

The following example shows how to use CopyManager to migrate data from MySQL to GaussDB(DWS).

+
//gsjdbc4.jar is used as an example.
+import java.io.StringReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import org.postgresql.copy.CopyManager;
+import org.postgresql.core.BaseConnection;
+
+public class Migration{
+
+    public static void main(String[] args) {
+        String url = new String("jdbc:postgresql://10.180.155.74:8000/gaussdb"); //Database URL
+        String user = new String("jack");            //Database username
+        String pass = new String("********");             //Database password
+        String tablename = new String("migration_table"); //Define table information.
+        String delimiter = new String("|");              //Define a delimiter.
+        String encoding = new String("UTF8");            //Define a character set.
+        String driver = "org.postgresql.Driver";
+        StringBuffer buffer = new StringBuffer();       //Define the buffer to store formatted data.
+
+        try {
+            //Obtain the query result set of the source database.
+            ResultSet rs = getDataSet();
+
+            //Traverse the result set and obtain records row by row.
+            //The values of columns in each record are separated by the specified delimiter and end with a newline character to form strings.
+            //Add the strings to the buffer.
+            while (rs.next()) {
+                buffer.append(rs.getString(1) + delimiter
+                        + rs.getString(2) + delimiter
+                        + rs.getString(3) + delimiter
+                        + rs.getString(4)
+                        + "\n");
+            }
+            rs.close();
+
+            try {
+                //Connect to the target database.
+                Class.forName(driver);
+                Connection conn = DriverManager.getConnection(url, user, pass);
+                BaseConnection baseConn = (BaseConnection) conn;
+                baseConn.setAutoCommit(false);
+
+                //Initialize table information.
+                String sql = "Copy " + tablename + " from STDIN DELIMITER " + "'" + delimiter + "'" + " ENCODING " + "'" + encoding + "'";
+
+                //Submit data in the buffer.
+                CopyManager cp = new CopyManager(baseConn);
+                StringReader reader = new StringReader(buffer.toString());
+                cp.copyIn(sql, reader);
+                baseConn.commit();
+                reader.close();
+                baseConn.close();
+            } catch (ClassNotFoundException e) {
+                e.printStackTrace(System.out);
+            } catch (SQLException e) {
+                e.printStackTrace(System.out);
+            }
+
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+    //******************************** 
+    //Return the query result set from the source database.
+    //********************************* 
+    private static ResultSet getDataSet() {
+        ResultSet rs = null;
+        try {
+            Class.forName("com.mysql.jdbc.Driver").newInstance();
+            Connection conn = DriverManager.getConnection("jdbc:mysql://10.119.179.227:3306/jack?useSSL=false&allowPublicKeyRetrieval=true", "jack", "********");
+            Statement stmt = conn.createStatement();
+            rs = stmt.executeQuery("select * from migration_table");
+        } catch (SQLException e) {
+            e.printStackTrace();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        return rs;
+    }
+}
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0102.html b/docs/dws/dev/dws_04_0102.html new file mode 100644 index 00000000..23c49f3e --- /dev/null +++ b/docs/dws/dev/dws_04_0102.html @@ -0,0 +1,42 @@ + + +

JDBC Interface Reference

+

The JDBC interface is a set of API methods provided for users. This section describes some common interfaces. For other interfaces, see the JDK 1.6 and JDBC 4.0 documentation.

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0103.html b/docs/dws/dev/dws_04_0103.html new file mode 100644 index 00000000..be28daa2 --- /dev/null +++ b/docs/dws/dev/dws_04_0103.html @@ -0,0 +1,123 @@ + + +

java.sql.Connection

+

This section describes java.sql.Connection, the interface for connecting to a database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.Connection

Method Name

+

Return Type

+

Support JDBC 4

+

close()

+

void

+

Yes

+

commit()

+

void

+

Yes

+

createStatement()

+

Statement

+

Yes

+

getAutoCommit()

+

boolean

+

Yes

+

getClientInfo()

+

Properties

+

Yes

+

getClientInfo(String name)

+

String

+

Yes

+

getTransactionIsolation()

+

int

+

Yes

+

isClosed()

+

boolean

+

Yes

+

isReadOnly()

+

boolean

+

Yes

+

prepareStatement(String sql)

+

PreparedStatement

+

Yes

+

rollback()

+

void

+

Yes

+

setAutoCommit(boolean autoCommit)

+

void

+

Yes

+

setClientInfo(Properties properties)

+

void

+

Yes

+

setClientInfo(String name,String value)

+

void

+

Yes

+
+
+

The AutoCommit mode is used by default within the interface. If you disable it by running setAutoCommit(false), all statements executed later are packaged in explicit transactions, and you cannot execute statements that are not allowed within transactions. A sketch of explicit transaction control follows.
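
A minimal sketch of explicit transaction control (assuming conn is an open Connection and customer_t1 is the two-column table from Example: Common Operations):

conn.setAutoCommit(false);
Statement stmt = conn.createStatement();
try {
    stmt.executeUpdate("INSERT INTO customer_t1 VALUES (1, 'data 1')");
    conn.commit();                // Make the change visible.
} catch (SQLException e) {
    conn.rollback();              // Undo the open transaction on failure.
} finally {
    stmt.close();
    conn.setAutoCommit(true);     // Restore the default mode.
}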

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0104.html b/docs/dws/dev/dws_04_0104.html new file mode 100644 index 00000000..5295f130 --- /dev/null +++ b/docs/dws/dev/dws_04_0104.html @@ -0,0 +1,138 @@ + + +

java.sql.CallableStatement

+

This section describes java.sql.CallableStatement, the stored procedure execution interface.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.CallableStatement

Method Name

+

Return Type

+

Support JDBC 4

+

registerOutParameter(int parameterIndex, int type)

+

void

+

Yes

+

wasNull()

+

boolean

+

Yes

+

getString(int parameterIndex)

+

String

+

Yes

+

getBoolean(int parameterIndex)

+

boolean

+

Yes

+

getByte(int parameterIndex)

+

byte

+

Yes

+

getShort(int parameterIndex)

+

short

+

Yes

+

getInt(int parameterIndex)

+

int

+

Yes

+

getLong(int parameterIndex)

+

long

+

Yes

+

getFloat(int parameterIndex)

+

float

+

Yes

+

getDouble(int parameterIndex)

+

double

+

Yes

+

getBigDecimal(int parameterIndex)

+

BigDecimal

+

Yes

+

getBytes(int parameterIndex)

+

byte[]

+

Yes

+

getDate(int parameterIndex)

+

Date

+

Yes

+

getTime(int parameterIndex)

+

Time

+

Yes

+

getTimestamp(int parameterIndex)

+

Timestamp

+

Yes

+

getObject(int parameterIndex)

+

Object

+

Yes

+
+
+
  • Batch operations are not allowed on statements that contain an OUT parameter.
  • The following methods are inherited from java.sql.Statement: close, execute, executeQuery, executeUpdate, getConnection, getResultSet, getUpdateCount, isClosed, setMaxRows, and setFetchSize.
  • The following methods are inherited from java.sql.PreparedStatement: addBatch, clearParameters, execute, executeQuery, executeUpdate, getMetaData, setBigDecimal, setBoolean, setByte, setBytes, setDate, setDouble, setFloat, setInt, setLong, setNull, setObject, setString, setTime, and setTimestamp.
+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0105.html b/docs/dws/dev/dws_04_0105.html new file mode 100644 index 00000000..3d35297e --- /dev/null +++ b/docs/dws/dev/dws_04_0105.html @@ -0,0 +1,443 @@ + + +

java.sql.DatabaseMetaData

+

This section describes java.sql.DatabaseMetaData, the interface for obtaining metadata about database objects.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.DatabaseMetaData

Method Name

+

Return Type

+

Support JDBC 4

+

getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)

+

ResultSet

+

Yes

+

getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)

+

ResultSet

+

Yes

+

getTableTypes()

+

ResultSet

+

Yes

+

getUserName()

+

String

+

Yes

+

isReadOnly()

+

boolean

+

Yes

+

nullsAreSortedHigh()

+

boolean

+

Yes

+

nullsAreSortedLow()

+

boolean

+

Yes

+

nullsAreSortedAtStart()

+

boolean

+

Yes

+

nullsAreSortedAtEnd()

+

boolean

+

Yes

+

getDatabaseProductName()

+

String

+

Yes

+

getDatabaseProductVersion()

+

String

+

Yes

+

getDriverName()

+

String

+

Yes

+

getDriverVersion()

+

String

+

Yes

+

getDriverMajorVersion()

+

int

+

Yes

+

getDriverMinorVersion()

+

int

+

Yes

+

usesLocalFiles()

+

boolean

+

Yes

+

usesLocalFilePerTable()

+

boolean

+

Yes

+

supportsMixedCaseIdentifiers()

+

boolean

+

Yes

+

storesUpperCaseIdentifiers()

+

boolean

+

Yes

+

storesLowerCaseIdentifiers()

+

boolean

+

Yes

+

supportsMixedCaseQuotedIdentifiers()

+

boolean

+

Yes

+

storesUpperCaseQuotedIdentifiers()

+

boolean

+

Yes

+

storesLowerCaseQuotedIdentifiers()

+

boolean

+

Yes

+

storesMixedCaseQuotedIdentifiers()

+

boolean

+

Yes

+

supportsAlterTableWithAddColumn()

+

boolean

+

Yes

+

supportsAlterTableWithDropColumn()

+

boolean

+

Yes

+

supportsColumnAliasing()

+

boolean

+

Yes

+

nullPlusNonNullIsNull()

+

boolean

+

Yes

+

supportsConvert()

+

boolean

+

Yes

+

supportsConvert(int fromType, int toType)

+

boolean

+

Yes

+

supportsTableCorrelationNames()

+

boolean

+

Yes

+

supportsDifferentTableCorrelationNames()

+

boolean

+

Yes

+

supportsExpressionsInOrderBy()

+

boolean

+

Yes

+

supportsOrderByUnrelated()

+

boolean

+

Yes

+

supportsGroupBy()

+

boolean

+

Yes

+

supportsGroupByUnrelated()

+

boolean

+

Yes

+

supportsGroupByBeyondSelect()

+

boolean

+

Yes

+

supportsLikeEscapeClause()

+

boolean

+

Yes

+

supportsMultipleResultSets()

+

boolean

+

Yes

+

supportsMultipleTransactions()

+

boolean

+

Yes

+

supportsNonNullableColumns()

+

boolean

+

Yes

+

supportsMinimumSQLGrammar()

+

boolean

+

Yes

+

supportsCoreSQLGrammar()

+

boolean

+

Yes

+

supportsExtendedSQLGrammar()

+

boolean

+

Yes

+

supportsANSI92EntryLevelSQL()

+

boolean

+

Yes

+

supportsANSI92IntermediateSQL()

+

boolean

+

Yes

+

supportsANSI92FullSQL()

+

boolean

+

Yes

+

supportsIntegrityEnhancementFacility()

+

boolean

+

Yes

+

supportsOuterJoins()

+

boolean

+

Yes

+

supportsFullOuterJoins()

+

boolean

+

Yes

+

supportsLimitedOuterJoins()

+

boolean

+

Yes

+

isCatalogAtStart()

+

boolean

+

Yes

+

supportsSchemasInDataManipulation()

+

boolean

+

Yes

+

supportsSavepoints()

+

boolean

+

Yes

+

supportsResultSetHoldability(int holdability)

+

boolean

+

Yes

+

getResultSetHoldability()

+

int

+

Yes

+

getDatabaseMajorVersion()

+

int

+

Yes

+

getDatabaseMinorVersion()

+

int

+

Yes

+

getJDBCMajorVersion()

+

int

+

Yes

+

getJDBCMinorVersion()

+

int

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0106.html b/docs/dws/dev/dws_04_0106.html new file mode 100644 index 00000000..2393a782 --- /dev/null +++ b/docs/dws/dev/dws_04_0106.html @@ -0,0 +1,58 @@ + + +

java.sql.Driver

+

This section describes java.sql.Driver, the database driver interface.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.Driver

Method Name

+

Return Type

+

Support JDBC 4

+

acceptsURL(String url)

+

boolean

+

Yes

+

connect(String url, Properties info)

+

Connection

+

Yes

+

jdbcCompliant()

+

boolean

+

Yes

+

getMajorVersion()

+

int

+

Yes

+

getMinorVersion()

+

int

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0107.html b/docs/dws/dev/dws_04_0107.html new file mode 100644 index 00000000..9cd1cc1f --- /dev/null +++ b/docs/dws/dev/dws_04_0107.html @@ -0,0 +1,165 @@ + + +

java.sql.PreparedStatement

+

This section describes java.sql.PreparedStatement, the interface for preparing statements.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.PreparedStatement

Method Name

+

Return Type

+

Support JDBC 4

+

clearParameters()

+

void

+

Yes

+

execute()

+

boolean

+

Yes

+

executeQuery()

+

ResultSet

+

Yes

+

executeUpdate()

+

int

+

Yes

+

getMetaData()

+

ResultSetMetaData

+

Yes

+

setBoolean(int parameterIndex, boolean x)

+

void

+

Yes

+

setBigDecimal(int parameterIndex, BigDecimal x)

+

void

+

Yes

+

setByte(int parameterIndex, byte x)

+

void

+

Yes

+

setBytes(int parameterIndex, byte[] x)

+

void

+

Yes

+

setDate(int parameterIndex, Date x)

+

void

+

Yes

+

setDouble(int parameterIndex, double x)

+

void

+

Yes

+

setFloat(int parameterIndex, float x)

+

void

+

Yes

+

setInt(int parameterIndex, int x)

+

void

+

Yes

+

setLong(int parameterIndex, long x)

+

void

+

Yes

+

setNString(int parameterIndex, String value)

+

void

+

Yes

+

setShort(int parameterIndex, short x)

+

void

+

Yes

+

setString(int parameterIndex, String x)

+

void

+

Yes

+

addBatch()

+

void

+

Yes

+

executeBatch()

+

int[]

+

Yes

+

clearBatch()

+

void

+

Yes

+
+
+
  • Execute addBatch() and execute() only after running clearBatch().
  • Calling executeBatch() does not clear the batch; clear it explicitly by calling clearBatch(), as shown in the sketch after this list.
  • After the bound variables of a batch are added, you can reuse these values (add the batch again) without calling set*() again.
  • The following methods are inherited from java.sql.Statement: close, execute, executeQuery, executeUpdate, getConnection, getResultSet, getUpdateCount, isClosed, setMaxRows, and setFetchSize.
+
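
A minimal sketch of the batch life cycle described above (assuming pstmt was prepared on a single-column table, as in the Batch Processing section):

pstmt.setShort(1, (short) 2);
pstmt.addBatch();
pstmt.executeBatch();    // Runs the batch but does not clear it.
pstmt.clearBatch();      // Clear explicitly before building a new batch.
pstmt.addBatch();        // The previously bound value is reused; no set*() call is needed.
pstmt.executeBatch();
pstmt.clearBatch();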
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0108.html b/docs/dws/dev/dws_04_0108.html new file mode 100644 index 00000000..3bd91a70 --- /dev/null +++ b/docs/dws/dev/dws_04_0108.html @@ -0,0 +1,242 @@ + + +

java.sql.ResultSet

+

This section describes java.sql.ResultSet, the interface for execution result sets.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.ResultSet

Method Name

+

Return Type

+

Support JDBC 4

+

findColumn(String columnLabel)

+

int

+

Yes

+

getBigDecimal(int columnIndex)

+

BigDecimal

+

Yes

+

getBigDecimal(String columnLabel)

+

BigDecimal

+

Yes

+

getBoolean(int columnIndex)

+

boolean

+

Yes

+

getBoolean(String columnLabel)

+

boolean

+

Yes

+

getByte(int columnIndex)

+

byte

+

Yes

+

getBytes(int columnIndex)

+

byte[]

+

Yes

+

getByte(String columnLabel)

+

byte

+

Yes

+

getBytes(String columnLabel)

+

byte[]

+

Yes

+

getDate(int columnIndex)

+

Date

+

Yes

+

getDate(String columnLabel)

+

Date

+

Yes

+

getDouble(int columnIndex)

+

double

+

Yes

+

getDouble(String columnLabel)

+

double

+

Yes

+

getFloat(int columnIndex)

+

float

+

Yes

+

getFloat(String columnLabel)

+

float

+

Yes

+

getInt(int columnIndex)

+

int

+

Yes

+

getInt(String columnLabel)

+

int

+

Yes

+

getLong(int columnIndex)

+

long

+

Yes

+

getLong(String columnLabel)

+

long

+

Yes

+

getShort(int columnIndex)

+

short

+

Yes

+

getShort(String columnLabel)

+

short

+

Yes

+

getString(int columnIndex)

+

String

+

Yes

+

getString(String columnLabel)

+

String

+

Yes

+

getTime(int columnIndex)

+

Time

+

Yes

+

getTime(String columnLabel)

+

Time

+

Yes

+

getTimestamp(int columnIndex)

+

Timestamp

+

Yes

+

getTimestamp(String columnLabel)

+

Timestamp

+

Yes

+

isAfterLast()

+

boolean

+

Yes

+

isBeforeFirst()

+

boolean

+

Yes

+

isFirst()

+

boolean

+

Yes

+

next()

+

boolean

+

Yes

+
+
+
  • One Statement cannot have multiple open ResultSets.
  • The cursor used for traversing the ResultSet cannot remain open after the transaction is committed.
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0109.html b/docs/dws/dev/dws_04_0109.html new file mode 100644 index 00000000..abe55470 --- /dev/null +++ b/docs/dws/dev/dws_04_0109.html @@ -0,0 +1,51 @@ + + +

java.sql.ResultSetMetaData

+

This section describes java.sql.ResultSetMetaData, which provides information about the columns of a ResultSet object.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.ResultSetMetaData

Method Name

+

Return Type

+

Support JDBC 4

+

getColumnCount()

+

int

+

Yes

+

getColumnName(int column)

+

String

+

Yes

+

getColumnType(int column)

+

int

+

Yes

+

getColumnTypeName(int column)

+

String

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0110.html b/docs/dws/dev/dws_04_0110.html new file mode 100644 index 00000000..06c51a71 --- /dev/null +++ b/docs/dws/dev/dws_04_0110.html @@ -0,0 +1,110 @@ + + +

java.sql.Statement

+

This section describes java.sql.Statement, the interface for executing SQL statements.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for java.sql.Statement

Method Name

+

Return Type

+

Support JDBC 4

+

close()

+

void

+

Yes

+

execute(String sql)

+

boolean

+

Yes

+

executeQuery(String sql)

+

ResultSet

+

Yes

+

executeUpdate(String sql)

+

int

+

Yes

+

getConnection()

+

Connection

+

Yes

+

getResultSet()

+

ResultSet

+

Yes

+

getQueryTimeout()

+

int

+

Yes

+

getUpdateCount()

+

int

+

Yes

+

isClosed()

+

boolean

+

Yes

+

setQueryTimeout(int seconds)

+

void

+

Yes

+

setFetchSize(int rows)

+

void

+

Yes

+

cancel()

+

void

+

Yes

+
+
+

Using setFetchSize can reduce the memory occupied by result sets on the client. Result sets are packaged into cursors and processed in segments, which increases communication traffic between the database and the client and may affect performance.

+

Database cursors are valid only within their transaction. If setFetchSize is set, call setAutoCommit(false) and commit the transaction on the connection to flush service data to the database.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0111.html b/docs/dws/dev/dws_04_0111.html new file mode 100644 index 00000000..42bce3e8 --- /dev/null +++ b/docs/dws/dev/dws_04_0111.html @@ -0,0 +1,65 @@ + + +

javax.sql.ConnectionPoolDataSource

+

This section describes javax.sql.ConnectionPoolDataSource, the interface for data source connection pools.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for javax.sql.ConnectionPoolDataSource

Method Name

+

Return Type

+

Support JDBC 4

+

getLoginTimeout()

+

int

+

Yes

+

getLogWriter()

+

PrintWriter

+

Yes

+

getPooledConnection()

+

PooledConnection

+

Yes

+

getPooledConnection(String user,String password)

+

PooledConnection

+

Yes

+

setLoginTimeout(int seconds)

+

void

+

Yes

+

setLogWriter(PrintWriter out)

+

void

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0112.html b/docs/dws/dev/dws_04_0112.html new file mode 100644 index 00000000..5769c823 --- /dev/null +++ b/docs/dws/dev/dws_04_0112.html @@ -0,0 +1,65 @@ + + +

javax.sql.DataSource

+

This section describes javax.sql.DataSource, the interface for data sources. A usage sketch follows the table.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for javax.sql.DataSource

Method Name

+

Return Type

+

Support JDBC 4

+

getConnection()

+

Connection

+

Yes

+

getConnection(String username,String password)

+

Connection

+

Yes

+

getLoginTimeout()

+

int

+

Yes

+

getLogWriter()

+

PrintWriter

+

Yes

+

setLoginTimeout(int seconds)

+

void

+

Yes

+

setLogWriter(PrintWriter out)

+

void

+

Yes

+
+
+
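
A minimal usage sketch of the javax.sql.DataSource interface. PGSimpleDataSource is the plain data source class shipped with the open-source PostgreSQL driver that the GaussDB(DWS) driver is based on; its availability in your driver package and the connection values below are assumptions for illustration.

import java.sql.Connection;
import javax.sql.DataSource;
import org.postgresql.ds.PGSimpleDataSource;

public class DataSourceSketch {
    public static void main(String[] args) throws Exception {
        PGSimpleDataSource pgds = new PGSimpleDataSource();
        pgds.setServerName("10.10.0.13");    // Placeholder server address.
        pgds.setPortNumber(8000);
        pgds.setDatabaseName("gaussdb");
        pgds.setUser("dbadmin");
        pgds.setPassword("password");
        DataSource ds = pgds;                // Use it through the standard interface.
        Connection conn = ds.getConnection();
        System.out.println("connected: " + !conn.isClosed());
        conn.close();
    }
}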
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0113.html b/docs/dws/dev/dws_04_0113.html new file mode 100644 index 00000000..b7c25488 --- /dev/null +++ b/docs/dws/dev/dws_04_0113.html @@ -0,0 +1,65 @@ + + +

javax.sql.PooledConnection

+

This section describes javax.sql.PooledConnection, the connection interface created by a connection pool.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for javax.sql.PooledConnection

Method Name

+

Return Type

+

Support JDBC 4

+

addConnectionEventListener (ConnectionEventListener listener)

+

void

+

Yes

+

close()

+

void

+

Yes

+

getConnection()

+

Connection

+

Yes

+

removeConnectionEventListener (ConnectionEventListener listener)

+

void

+

Yes

+

addStatementEventListener (StatementEventListener listener)

+

void

+

Yes

+

removeStatementEventListener (StatementEventListener listener)

+

void

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0114.html b/docs/dws/dev/dws_04_0114.html new file mode 100644 index 00000000..66788692 --- /dev/null +++ b/docs/dws/dev/dws_04_0114.html @@ -0,0 +1,93 @@ + + +

javax.naming.Context

+

This section describes javax.naming.Context, the context interface for connection configuration.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Support status for javax.naming.Context

Method Name

+

Return Type

+

Support JDBC 4

+

bind(Name name, Object obj)

+

void

+

Yes

+

bind(String name, Object obj)

+

void

+

Yes

+

lookup(Name name)

+

Object

+

Yes

+

lookup(String name)

+

Object

+

Yes

+

rebind(Name name, Object obj)

+

void

+

Yes

+

rebind(String name, Object obj)

+

void

+

Yes

+

rename(Name oldName, Name newName)

+

void

+

Yes

+

rename(String oldName, String newName)

+

void

+

Yes

+

unbind(Name name)

+

void

+

Yes

+

unbind(String name)

+

void

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0115.html b/docs/dws/dev/dws_04_0115.html new file mode 100644 index 00000000..19bb2db9 --- /dev/null +++ b/docs/dws/dev/dws_04_0115.html @@ -0,0 +1,30 @@ + + +

javax.naming.spi.InitialContextFactory

+

This section describes javax.naming.spi.InitialContextFactory, the initial context factory interface.

+ +
+ + + + + + + + + +
Table 1 Support status for javax.naming.spi.InitialContextFactory

Method Name

+

Return Type

+

Support JDBC 4

+

getInitialContext(Hashtable<?,?> environment)

+

Context

+

Yes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0116.html b/docs/dws/dev/dws_04_0116.html new file mode 100644 index 00000000..43325dad --- /dev/null +++ b/docs/dws/dev/dws_04_0116.html @@ -0,0 +1,105 @@ + + +

CopyManager

+

CopyManager is an API interface class provided by the JDBC driver in GaussDB(DWS). It is used to import data to GaussDB(DWS) in batches; a usage sketch follows the method table below.

+

Inheritance Relationship of CopyManager

The CopyManager class is in the org.postgresql.copy package and inherits from the java.lang.Object class. The declaration of the class is as follows:

+
public class CopyManager
+extends Object
+
+

Construction Method

public CopyManager(BaseConnection connection) throws SQLException

+
+

Basic Methods

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Common methods of CopyManager

Return Value

+

Method

+

Description

+

throws

+

CopyIn

+

copyIn(String sql)

+

-

+

SQLException

+

long

+

copyIn(String sql, InputStream from)

+

Uses COPY FROM STDIN to quickly load data to tables in the database from InputStream.

+

SQLException,IOException

+

long

+

copyIn(String sql, InputStream from, int bufferSize)

+

Uses COPY FROM STDIN to quickly load data to tables in the database from InputStream.

+

SQLException,IOException

+

long

+

copyIn(String sql, Reader from)

+

Uses COPY FROM STDIN to quickly load data to tables in the database from Reader.

+

SQLException,IOException

+

long

+

copyIn(String sql, Reader from, int bufferSize)

+

Uses COPY FROM STDIN to quickly load data to tables in the database from Reader.

+

SQLException,IOException

+

CopyOut

+

copyOut(String sql)

+

-

+

SQLException

+

long

+

copyOut(String sql, OutputStream to)

+

Sends the result set of COPY TO STDOUT from the database to the OutputStream class.

+

SQLException,IOException

+

long

+

copyOut(String sql, Writer to)

+

Sends the result set of COPY TO STDOUT from the database to the Writer class.

+

SQLException,IOException

+
+
+
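
A minimal sketch of importing data with copyIn (assuming conn is an open connection to the database and tbl is an existing single-column table; the table name and data are illustrative):

CopyManager cm = new CopyManager((BaseConnection) conn);
// Load two rows into tbl from a Reader; copyIn returns the number of rows imported.
long rows = cm.copyIn("COPY tbl FROM STDIN", new java.io.StringReader("a\nb\n"));
System.out.println(rows + " rows copied");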
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0117.html b/docs/dws/dev/dws_04_0117.html new file mode 100644 index 00000000..9f11459a --- /dev/null +++ b/docs/dws/dev/dws_04_0117.html @@ -0,0 +1,108 @@ + + +

ODBC-Based Development

+

Open Database Connectivity (ODBC) is a Microsoft API for accessing databases based on the X/OPEN CLI. The ODBC API frees applications from operating the database directly, enhancing database portability, extensibility, and maintainability.

+

Figure 1 shows the system structure of ODBC.

+
Figure 1 ODBC system structure
+

GaussDB(DWS) supports ODBC 3.5 in the following environments.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 OSs Supported by ODBC

OS

+

Platform

+

SUSE Linux Enterprise Server 11 SP1/SP2/SP3/SP4

+

SUSE Linux Enterprise Server 12 and SP1/SP2/SP3/SP5

+

x86_64

+

Red Hat Enterprise Linux 6.4/6.5/6.6/6.7/6.8/6.9/7.0/7.1/7.2/7.3/7.4/7.5

+

x86_64

+

Red Hat Enterprise Linux 7.5

+

ARM64

+

CentOS 6.4/6.5/6.6/6.7/6.8/6.9/7.0/7.1/7.2/7.3/7.4

+

x86_64

+

CentOS 7.6

+

ARM64

+

EulerOS 2.0 SP2/SP3

+

x86_64

+

EulerOS 2.0 SP8

+

ARM64

+

NeoKylin 7.5/7.6

+

ARM64

+

Oracle Linux R7U4

+

x86_64

+

Windows 7

+

32-bit

+

Windows 7

+

64-bit

+

Windows Server 2008

+

32-bit

+

Windows Server 2008

+

64-bit

+
+
+

The operating systems listed above refer to the operating systems on which the ODBC program runs. They can be different from the operating systems where databases are deployed.

+

The ODBC Driver Manager running on UNIX or Linux can be unixODBC or iODBC. Here, unixODBC-2.3.0 is selected as the component for connecting to the database.

+

Windows has a native ODBC Driver Manager. You can locate Data Sources (ODBC) by choosing Control Panel > Administrative Tools.

+

The current database ODBC driver is based on an open source version and may be incompatible with vendor-unique data types, such as tinyint, smalldatetime, and nvarchar2.

+
+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0118.html b/docs/dws/dev/dws_04_0118.html new file mode 100644 index 00000000..ef8163da --- /dev/null +++ b/docs/dws/dev/dws_04_0118.html @@ -0,0 +1,14 @@ + + +

ODBC Package and Its Dependent Libraries and Header Files

+

ODBC Package for the Linux OS

Obtain the dws_8.1.x_odbc_driver_for_xxx_xxx.zip package from the release package. In the Linux OS, the header files (including sql.h and sqlext.h) and the library (libodbc.so) are required for application development. These header files and libraries can be obtained from the unixODBC-2.3.0 installation package.

+
+

ODBC Package for the Windows OS

Obtain the dws_8.1.x_odbc_driver_for_windows.zip package from the release package. In the Windows OS, the required header files and library files are system-resident.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0119.html b/docs/dws/dev/dws_04_0119.html new file mode 100644 index 00000000..c4b79049 --- /dev/null +++ b/docs/dws/dev/dws_04_0119.html @@ -0,0 +1,309 @@ + + +

Configuring a Data Source in the Linux OS

+

The ODBC driver (psqlodbcw.so) provided by GaussDB(DWS) can be used after it is configured as a data source. To configure a data source, configure the odbc.ini and odbcinst.ini files on the server. The two files are generated during unixODBC compilation and installation, and are saved in the /usr/local/etc directory by default.

+

Procedure

  1. Obtain the source code package of unixODBC at:

    https://sourceforge.net/projects/unixodbc/files/unixODBC/2.3.0/unixODBC-2.3.0.tar.gz/download

    +

  2. Currently, unixODBC-2.2.1 is not supported. Assume you are installing unixODBC-2.3.0. Run the following commands. By default, unixODBC is installed in the /usr/local directory, the data source files are generated in /usr/local/etc, and the library files are generated in /usr/local/lib.

    tar zxvf unixODBC-2.3.0.tar.gz
    +cd unixODBC-2.3.0
    +# Open the configure file. If it does not exist, open the configure.ac file. Find LIB_VERSION.
    +# Change the value of LIB_VERSION to 1:0:0 to compile a *.so.1 dynamic library with the same dependency on psqlodbcw.so.
    +vim configure
    +
    +./configure --enable-gui=no # To perform the compilation on a TaiShan server, add the configure parameter --build=aarch64-unknown-linux-gnu.
    +make
    +# The installation may require root permissions.
    +make install
    +

    Install unixODBC. If another version of unixODBC has been installed, it will be overwritten after installation.

    +

  3. Replace the GaussDB(DWS) client driver.

    Decompress the dws_8.1.x_odbc_driver_for_xxx_xxx.zip package.

    +
    • Obtain the psqlodbcw.la and psqlodbcw.so files in the /dws_8.1.x_odbc_driver_for_xxx_xxx/odbc/lib directory.
    • The lib file is generated in the /dws_8.1.x_odbc_driver_for_xxx_xxx/lib directory.
    +

  4. Configure the data source.

    1. Configure the ODBC driver file.

      Add the following content to the end of the /usr/local/etc/odbcinst.ini file:

      +
      [GaussMPP]
      +Driver64=/usr/local/lib/psqlodbcw.so
      +setup=/usr/local/lib/psqlodbcw.so
      +

      For descriptions of the parameters in the odbcinst.ini file, see Table 1.

      + +
      + + + + + + + + + + + + + + + + + +
      Table 1 odbcinst.ini configuration parameters

      Parameter

      +

      Description

      +

      Example

      +

      [DriverName]

      +

      Driver name, corresponding to Driver in DSN.

      +

      [DRIVER_N]

      +

      Driver64

      +

      Path of the dynamic driver library

      +

      Driver64=/xxx/odbc/lib/psqlodbcw.so

      +

      setup

      +

      Driver installation path, which is the same as the dynamic library path in Driver64.

      +

      setup=/xxx/odbc/lib/psqlodbcw.so

      +
      +
      +
    2. Configure the data source file.

      Add the following content to the end of the /usr/local/etc/odbc.ini file:

      +
      [MPPODBC]
      +Driver=GaussMPP
      +Servername=10.10.0.13 (database server IP address)
      +Database=gaussdb (database name)
      +Username=dbadmin (database username)
      +Password= (database user password)
      +Port=8000 (database listening port)
      +Sslmode=allow
      +

      For descriptions of the parameters in the odbc.ini file, see Table 2.

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 2 odbc.ini configuration parameters

      Parameter

      +

      Description

      +

      Example

      +

      [DSN]

      +

      Data source name

      +

      [MPPODBC]

      +

      Driver

      +

      Driver name, corresponding to DriverName in odbcinst.ini

      +

      Driver=DRIVER_N

      +

      Servername

      +

      IP address of the server

      +

      Servername=10.145.130.26

      +

      Database

      +

      Name of the database to connect to

      +

      Database=gaussdb

      +

      Username

      +

      Name of the database user

      +

      Username=dbadmin

      +

      Password

      +

      Password of the database user

      +

      Password=

      +
      NOTE:

      After a user establishes a connection, the ODBC driver automatically clears the password stored in memory.

      +

      However, if this parameter is configured, unixODBC will cache data source files, which may cause the password to be stored in the memory for a long time.

      +

      When connecting from an application, you are advised to pass the password through an API instead of writing it in the data source configuration file. After the connection has been established, immediately clear the memory segment where the password is stored.

      +
      +

      Port

      +

      Port ID of the server

      +

      Port=8000

      +

      Sslmode

      +

      Whether to enable the SSL mode

      +

      Sslmode=allow

      +

      UseServerSidePrepare

      +

      Whether to enable the extended query protocol for the database.

      +

      The value can be 0 or 1. The default value is 1, indicating that the extended query protocol is enabled.

      +

      UseServerSidePrepare=1

      +

      UseBatchProtocol

      +

      Whether to enable the batch query protocol. If it is enabled, the DML performance can be improved. The value can be 0 or 1. The default value is 1.

      +

      If this parameter is set to 0, the batch query protocol is disabled (mainly for communication with earlier database versions).

      +

      If this parameter is set to 1 and the support_batch_bind parameter is set to on, the batch query protocol is enabled.

      +

      UseBatchProtocol=1

      +

      ConnectionExtraInfo

      +

      Whether to report the driver deployment path and process owner in the connection_info parameter.

      +

      ConnectionExtraInfo=1

      +
      NOTE:

      The default value is 0. If this parameter is set to 1, the ODBC driver reports the driver deployment path and process owner to the database and displays the information in the connection_info parameter (see connection_info). In this case, you can query the information from PG_STAT_ACTIVITY or PGXC_STAT_ACTIVITY.

      +
      +

      ForExtensionConnector

      +

      ETL tool performance optimization parameter. It can be used to optimize memory usage and reduce the memory consumed by the peer CN, preventing system instability caused by excessive CN memory usage.

      +

      The value can be 0 or 1. The default value is 0, indicating that the optimization item is disabled.

      +

      Do not set this parameter for other services outside the database system. Otherwise, the service correctness may be affected.

      +

      ForExtensionConnector=1

      +

      KeepDisallowPremature

      +

      Specifies whether the cursor in the SQL statement has the with hold attribute when the following conditions are met: UseDeclareFetch is set to 1, and the application invokes SQLNumResultCols, SQLDescribeCol, or SQLColAttribute after invoking SQLPrepare to obtain the column information of the result set.

      +

      The value can be 0 or 1. 0 indicates that the with hold attribute is supported, and 1 indicates that the with hold attribute is not supported. The default value is 0.

      +

      KeepDisallowPremature=1

      +
      NOTE:
      • When UseServerSidePrepare is set to 1, the KeepDisallowPremature parameter does not take effect. To use this parameter, set UseServerSidePrepare to 0. Example configuration:

        UseDeclareFetch=1
        +KeepDisallowPremature=1
        +UseServerSidePrepare=0

        +
      +
      +
      +
      +

      The valid values of sslmode are as follows:

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 3 sslmode options

      sslmode

      +

      Whether SSL Encryption Is Enabled

      +

      Description

      +

      disable

      +

      No

      +

      The SSL secure connection is not used.

      +

      allow

      +

      Probably

      +

      The SSL secure encrypted connection is used if required by the database server, but does not check the authenticity of the server.

      +

      prefer

      +

      Probably

      +

      The SSL secure encrypted connection is used as a preferred mode if supported by the database, but does not check the authenticity of the server.

      +

      require

      +

      Yes

      +

      The SSL secure connection must be used, but it only encrypts data and does not check the authenticity of the server.

      +

      verify-ca

      +

      Yes

      +

      The SSL secure connection must be used, and it checks whether the database has certificates issued by a trusted CA.

      +

      verify-full

      +

      Yes

      +

      The SSL secure connection must be used. In addition to the check scope specified by verify-ca, it checks whether the name of the host where the database resides is the same as that on the certificate. This mode is not supported.

      +
      +
      +
    +

  5. Enable the SSL mode.

    To use SSL certificates for connection, decompress the certificate package contained in the GaussDB(DWS) installation package, and run source sslcert_env.sh in a shell to deploy the certificates to the default location for the current session.

    +

    Or manually declare the following environment variables and ensure that the permission for the client.key* series files is set to 600.

    +
    export PGSSLCERT="/YOUR/PATH/OF/client.crt" # Change the path to the absolute path of client.crt.
    +export PGSSLKEY="/YOUR/PATH/OF/client.key" # Change the path to the absolute path of client.key.
    +

    In addition, change the value of Sslmode in the data source to verify-ca.

    +

  6. In the security group rules of GaussDB(DWS), add the IP address segment of the servers with the client deployed to ensure that the servers can communicate with GaussDB(DWS).
  7. Configure environment variables.

    vim ~/.bashrc
    +

    Add the following content to the end of the configuration file:

    +
    export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
    +export ODBCSYSINI=/usr/local/etc
    +export ODBCINI=/usr/local/etc/odbc.ini
    +

  8. Run the following commands to validate the settings:

    source ~/.bashrc
    +

+
+

Testing Data Source Configuration

Run the isql -v MPPODBC command, where MPPODBC is the data source name configured in the odbc.ini file.

+
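If the data source is configured correctly, the command prints a banner similar to the following (the exact layout depends on the unixODBC version) and leaves you at an interactive SQL prompt:

+---------------------------------------+
| Connected!                            |
|                                       |
| sql-statement                         |
| help [tablename]                      |
| quit                                  |
|                                       |
+---------------------------------------+
SQL>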
+ +

Troubleshooting

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0120.html b/docs/dws/dev/dws_04_0120.html new file mode 100644 index 00000000..1c4228f3 --- /dev/null +++ b/docs/dws/dev/dws_04_0120.html @@ -0,0 +1,112 @@ + + +

Configuring a Data Source in the Windows OS

+

Configure the ODBC data source using the ODBC data source manager preinstalled in the Windows OS.

+

Procedure

  1. Replace the GaussDB(DWS) client driver.

    Decompress GaussDB-8.1.1-Windows-Odbc.tar.gz and install psqlodbc.msi (for 32-bit OS) or psqlodbc_x64.msi (for 64-bit OS).

    +

  2. Open Driver Manager.

    Use the Driver Manager suitable for your OS to configure the data source. (Assume the Windows system drive is drive C.)

    +
    • If you develop 32-bit programs in the 64-bit Windows OS, open the 32-bit Driver Manager at C:\Windows\SysWOW64\odbcad32.exe after you install the 32-bit driver.

      Do not open Driver Manager by choosing Control Panel, clicking Administrative Tools, and clicking Data Sources (ODBC).

      +

      WoW64 is the acronym for "Windows 32-bit on Windows 64-bit". C:\Windows\SysWOW64\ stores the 32-bit environment on a 64-bit system. C:\Windows\System32\ stores the environment consistent with the current OS. For technical details, see Windows technical documents.

      +
      +
    • If you develop 64-bit programs in the 64-bit Windows OS, open the 64-bit Driver Manager at C:\Windows\System32\odbcad32.exe after you install the 64-bit driver.

      Do not open Driver Manager by choosing Control Panel, clicking Administrative Tools, and clicking Data Sources (ODBC).

      +
    • In a 32-bit Windows OS, open C:\Windows\System32\odbcad32.exe.

      In the Windows OS, click Computer, and choose Control Panel. Click Administrative Tools and click Data Sources (ODBC).

      +
    +

  3. Configure the data source.

    On the User DSN tab, click Add, and choose PostgreSQL Unicode for setup. (In a 64-bit OS, the driver name is displayed with a corresponding identifier.)

    +

    +

    +

    +

    The entered username and password will be recorded in the Windows registry and you do not need to enter them again when connecting to the database next time. For security purposes, you are advised to delete sensitive information before clicking Save and enter the required username and password again when using ODBC APIs to connect to the database.

    +
    +

  4. Enable the SSL mode.

    To use SSL certificates for connection, decompress the certificate package contained in the GaussDB(DWS) installation package, and double-click the sslcert_env.bat file to deploy certificates in the default location.

    +

    The sslcert_env.bat file ensures a clean certificate environment. If the %APPDATA%\postgresql directory already exists, a message will be displayed asking you whether to remove the related directories. Back up any files in the directory before you remove it.

    +
    +

    Alternatively, you can copy the client.crt, client.key, client.key.cipher, and client.key.rand files in the certificate file folder to the manually created %APPDATA%\postgresql directory. Change client in the file names to postgres, for example, change client.key to postgres.key. Copy the cacert.pem file to the %APPDATA%\postgresql directory and change its name to root.crt.

    +

    Change the value of SSL Mode in the data source configuration to verify-ca.

    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Table 1 sslmode options

    sslmode

    +

    Whether SSL Encryption Is Enabled

    +

    Description

    +

    disable

    +

    No

    +

    The SSL secure connection is not used.

    +

    allow

    +

    Probably

    +

    The SSL secure encrypted connection is used if required by the database server, but does not check the authenticity of the server.

    +

    prefer

    +

    Probably

    +

    The SSL secure encrypted connection is used as a preferred mode if supported by the database, but does not check the authenticity of the server.

    +

    require

    +

    Yes

    +

    The SSL secure connection must be used, but it only encrypts data and does not check the authenticity of the server.

    +

    verify-ca

    +

    Yes

    +

    The SSL secure connection must be used, and it checks whether the database has certificates issued by a trusted CA.

    +

    verify-full

    +

    Yes

    +

    The SSL secure connection must be used. In addition to the check scope specified by verify-ca, it checks whether the name of the host where the database resides is the same as that on the certificate.

    +
    NOTE:

    This mode cannot be used.

    +
    +
    +
    +

  5. Add the IP address segment of the host where the client is located to the security group rules of GaussDB(DWS) to ensure that the host can communicate with GaussDB(DWS).
+
+

Testing Data Source Configuration

Click Test.

+ +
+

Troubleshooting

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0123.html b/docs/dws/dev/dws_04_0123.html new file mode 100644 index 00000000..475b2504 --- /dev/null +++ b/docs/dws/dev/dws_04_0123.html @@ -0,0 +1,666 @@ + + +

ODBC Development Example

+

Code for Common Functions

// The following example shows how to obtain data from GaussDB(DWS) through the ODBC interface.
+// DBtest.c (compile with: libodbc.so)   
+#include <stdlib.h> 
+#include <stdio.h> 
+#include <sqlext.h>
+#ifdef WIN32
+#include <windows.h>
+#endif 
+SQLHENV       V_OD_Env;        // Handle ODBC environment 
+SQLHSTMT      V_OD_hstmt;      // Handle statement 
+SQLHDBC       V_OD_hdbc;       // Handle connection     
+char          typename[100];
+SQLINTEGER    value = 100;
+SQLINTEGER    V_OD_erg,V_OD_buffer,V_OD_err,V_OD_id;
+SQLLEN        V_StrLen_or_IndPtr;
+int main(int argc,char *argv[]) 
+{         
+      // 1. Apply for an environment handle.       
+      V_OD_erg = SQLAllocHandle(SQL_HANDLE_ENV,SQL_NULL_HANDLE,&V_OD_Env);     
+      if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))        
+      {           
+           printf("Error AllocHandle\n");           
+           exit(0);        
+      } 
+      // 2. Set environment attributes (version information)         
+      SQLSetEnvAttr(V_OD_Env, SQL_ATTR_ODBC_VERSION, (void*)SQL_OV_ODBC3, 0);      
+      // 3. Apply for a connection handle.        
+      V_OD_erg = SQLAllocHandle(SQL_HANDLE_DBC, V_OD_Env, &V_OD_hdbc);     
+      if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))      
+      {                     
+           SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);          
+           exit(0);       
+      }
+      // 4. Set connection attributes.
+      SQLSetConnectAttr(V_OD_hdbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_ON, 0);          
+// 5. Connect to the data source. userName and password indicate the username and password for connecting to the database. Set them as needed.
+// If the username and password have been set in the odbc.ini file, you do not need to set userName or password here, retaining "" for them. However, you are not advised to do so because the username and password will be disclosed if the permission for odbc.ini is abused.
+      V_OD_erg = SQLConnect(V_OD_hdbc, (SQLCHAR*) "gaussdb", SQL_NTS,  
+                           (SQLCHAR*) "userName", SQL_NTS,  (SQLCHAR*) "password", SQL_NTS);        
+      if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))      
+      {           
+          printf("Error SQLConnect %d\n",V_OD_erg);            
+          SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);       
+          exit(0);        
+      }     
+      printf("Connected !\n"); 
+      // 6. Apply for a statement handle. (The handle must be allocated before its attributes can be set.)
+      SQLAllocHandle(SQL_HANDLE_STMT, V_OD_hdbc, &V_OD_hstmt);
+      // 7. Set statement attributes.
+      SQLSetStmtAttr(V_OD_hstmt,SQL_ATTR_QUERY_TIMEOUT,(SQLPOINTER)3,0);
+      // 8. Execute SQL statements directly.
+      SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"drop table IF EXISTS customer_t1",SQL_NTS);
+      SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));",SQL_NTS);
+      SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"insert into customer_t1 values(25,'li')",SQL_NTS);
+      // 9. Prepare a statement for execution. Only c_customer_sk is inserted, so a single parameter marker is used.
+      SQLPrepare(V_OD_hstmt,(SQLCHAR*)"insert into customer_t1(c_customer_sk) values(?)",SQL_NTS);
+      // 10. Bind parameters
+      SQLBindParameter(V_OD_hstmt,1,SQL_PARAM_INPUT,SQL_C_SLONG,SQL_INTEGER,0,0,
+                       &value,0,NULL);
+      // 11. Execute the ready statement
+      SQLExecute(V_OD_hstmt);
+      SQLExecDirect(V_OD_hstmt,"select id from testtable",SQL_NTS);
+      // 12. Obtain the attributes of a certain column in the result set
+      SQLColAttribute(V_OD_hstmt,1,SQL_DESC_TYPE_NAME,typename,sizeof(typename),NULL,NULL);                 
+      printf("SQLColAtrribute %s\n",typename);
+      // 13. Bind the result set
+      SQLBindCol(V_OD_hstmt,1,SQL_C_SLONG, (SQLPOINTER)&V_OD_buffer,150,
+                (SQLLEN *)&V_StrLen_or_IndPtr);
+      // 14. Collect data using SQLFetch
+      V_OD_erg=SQLFetch(V_OD_hstmt);
+      // 15. Obtain and return data using SQLGetData
+      while(V_OD_erg != SQL_NO_DATA)
+      {
+          SQLGetData(V_OD_hstmt,1,SQL_C_SLONG,(SQLPOINTER)&V_OD_id,0,NULL);
+          printf("SQLGetData ----ID = %d\n",V_OD_id);
+          V_OD_erg=SQLFetch(V_OD_hstmt);
+      };
+      printf("Done !\n");
+      // 16. Disconnect from the data source and release handles
+      SQLFreeHandle(SQL_HANDLE_STMT,V_OD_hstmt);    
+      SQLDisconnect(V_OD_hdbc);         
+      SQLFreeHandle(SQL_HANDLE_DBC,V_OD_hdbc);       
+      SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);  
+      return(0);
+ }
+
+ +
+
+

Code for Batch Processing

/**********************************************************************
+* Set UseBatchProtocol to 1 in the data source and set the database parameter support_batch_bind
+* to on.
+* The CHECK_ERROR command is used to check and print error information.
+* This example is used to interactively obtain the DSN, data volume to be processed, and volume of ignored data from users, and insert required data into the test_odbc_batch_insert table.
+***********************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <sql.h>
+#include <sqlext.h>
+#include <string.h>
+
+#include "util.c"
+
+void Exec(SQLHDBC hdbc, SQLCHAR* sql)
+{
+    SQLRETURN retcode;                  // Return status
+    SQLHSTMT hstmt = SQL_NULL_HSTMT;    // Statement handle
+    SQLCHAR     loginfo[2048];
+
+    // Allocate Statement Handle
+    retcode = SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmt);
+    CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_STMT)",
+                hstmt, SQL_HANDLE_STMT);
+
+    // Prepare Statement
+    retcode = SQLPrepare(hstmt, (SQLCHAR*) sql, SQL_NTS);
+    sprintf((char*)loginfo, "SQLPrepare log: %s", (char*)sql);
+    CHECK_ERROR(retcode, loginfo, hstmt, SQL_HANDLE_STMT);
+
+    retcode = SQLExecute(hstmt);
+    sprintf((char*)loginfo, "SQLExecute stmt log: %s", (char*)sql);
+    CHECK_ERROR(retcode, loginfo, hstmt, SQL_HANDLE_STMT);
+
+    retcode = SQLFreeHandle(SQL_HANDLE_STMT, hstmt);
+    sprintf((char*)loginfo, "SQLFreeHandle stmt log: %s", (char*)sql);
+    CHECK_ERROR(retcode, loginfo, hstmt, SQL_HANDLE_STMT);
+}
+
+int main () 
+{
+    SQLHENV  henv  = SQL_NULL_HENV;
+    SQLHDBC  hdbc  = SQL_NULL_HDBC; 
+    int      batchCount = 1000;
+    SQLLEN   rowsCount = 0;
+    int      ignoreCount = 0;
+
+    SQLRETURN   retcode;
+    SQLCHAR     dsn[1024] = {'\0'};
+    SQLCHAR     loginfo[2048];
+
+// Interactively obtain data source names.
+    getStr("Please input your DSN", (char*)dsn, sizeof(dsn), 'N');
+// Interactively obtain the amount of data to be batch processed.
+    getInt("batchCount", &batchCount, 'N', 1);
+    do 
+    {
+// Interactively obtain the amount of batch processing data that is not inserted into the database.
+        getInt("ignoreCount", &ignoreCount, 'N', 1);
+        if (ignoreCount > batchCount)
+        {
+            printf("ignoreCount(%d) should be less than batchCount(%d)\n", ignoreCount, batchCount);
+        }
+    }while(ignoreCount > batchCount);
+
+    retcode = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
+    CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_ENV)",
+                henv, SQL_HANDLE_ENV);
+
+    // Set the ODBC version.
+    retcode = SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION,
+                                        (SQLPOINTER*)SQL_OV_ODBC3, 0);
+    CHECK_ERROR(retcode, "SQLSetEnvAttr(SQL_ATTR_ODBC_VERSION)",
+                henv, SQL_HANDLE_ENV);
+
+    // Allocate Connection
+    retcode = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc);
+    CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_DBC)",
+                henv, SQL_HANDLE_DBC);
+
+    // Set Login Timeout
+    retcode = SQLSetConnectAttr(hdbc, SQL_LOGIN_TIMEOUT, (SQLPOINTER)5, 0);
+    CHECK_ERROR(retcode, "SQLSetConnectAttr(SQL_LOGIN_TIMEOUT)",
+                hdbc, SQL_HANDLE_DBC);
+
+    // Set Auto Commit
+    retcode = SQLSetConnectAttr(hdbc, SQL_ATTR_AUTOCOMMIT,
+                                        (SQLPOINTER)(1), 0);
+    CHECK_ERROR(retcode, "SQLSetConnectAttr(SQL_ATTR_AUTOCOMMIT)",
+                hdbc, SQL_HANDLE_DBC);
+
+    // Connect to DSN
+    sprintf(loginfo, "SQLConnect(DSN:%s)", dsn);
+    retcode = SQLConnect(hdbc, (SQLCHAR*) dsn, SQL_NTS,
+                               (SQLCHAR*) NULL, 0, NULL, 0);
+    CHECK_ERROR(retcode, loginfo, hdbc, SQL_HANDLE_DBC);
+
+    // init table info.
+    Exec(hdbc, "drop table if exists test_odbc_batch_insert");
+    Exec(hdbc, "create table test_odbc_batch_insert(id int primary key, col varchar2(50))");
+
+// The following code constructs the data to be inserted based on the data volume entered by users:
+    {
+        SQLRETURN retcode; 
+        SQLHSTMT hstmtinesrt = SQL_NULL_HSTMT;
+        int          i;
+        SQLCHAR      *sql = NULL;
+        SQLINTEGER   *ids  = NULL;
+        SQLCHAR      *cols = NULL;
+        SQLLEN       *bufLenIds = NULL;
+        SQLLEN       *bufLenCols = NULL;
+        SQLUSMALLINT *operptr = NULL;
+        SQLUSMALLINT *statusptr = NULL;
+        SQLULEN      process = 0;
+
+// Data is constructed by column. Each column is stored continuously.
+        ids = (SQLINTEGER*)malloc(sizeof(ids[0]) * batchCount);
+        cols = (SQLCHAR*)malloc(sizeof(cols[0]) * batchCount * 50);
+// Data size in each row for a column
+        bufLenIds = (SQLLEN*)malloc(sizeof(bufLenIds[0]) * batchCount);
+        bufLenCols = (SQLLEN*)malloc(sizeof(bufLenCols[0]) * batchCount);
+// Whether this row needs to be processed. The value is SQL_PARAM_IGNORE or SQL_PARAM_PROCEED.
+        operptr = (SQLUSMALLINT*)malloc(sizeof(operptr[0]) * batchCount);
+        memset(operptr, 0, sizeof(operptr[0]) * batchCount);
+// Processing result of the row
+// Note: In the database, a statement belongs to one transaction. Therefore, data is processed as a unit. That is, either all data is inserted successfully or all data fails to be inserted.
+        statusptr = (SQLUSMALLINT*)malloc(sizeof(statusptr[0]) * batchCount);
+        memset(statusptr, 88, sizeof(statusptr[0]) * batchCount);
+
+        if (NULL == ids || NULL == cols || NULL == bufLenCols || NULL == bufLenIds)
+        {
+            fprintf(stderr, "FAILED:\tmalloc data memory failed\n");
+            goto exit;
+        }
+
+        for (int i = 0; i < batchCount; i++)
+        {
+            ids[i] = i;
+            sprintf(cols + 50 * i, "column test value %d", i);
+            bufLenIds[i] = sizeof(ids[i]);
+            bufLenCols[i] = strlen(cols + 50 * i);
+            operptr[i] = (i < ignoreCount) ? SQL_PARAM_IGNORE : SQL_PARAM_PROCEED;
+        }
+
+        // Allocate Statement Handle
+        retcode = SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmtinesrt);
+        CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_STMT)",
+                    hstmtinesrt, SQL_HANDLE_STMT);
+
+        // Prepare Statement
+        sql = (SQLCHAR*)"insert into test_odbc_batch_insert values(?, ?)";
+        retcode = SQLPrepare(hstmtinesrt, (SQLCHAR*) sql, SQL_NTS);
+        sprintf((char*)loginfo, "SQLPrepare log: %s", (char*)sql);
+        CHECK_ERROR(retcode, loginfo, hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)batchCount, sizeof(batchCount));
+        CHECK_ERROR(retcode, "SQLSetStmtAttr", hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLBindParameter(hstmtinesrt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_INTEGER, sizeof(ids[0]), 0,&(ids[0]), 0, bufLenIds);
+        CHECK_ERROR(retcode, "SQLBindParameter for id", hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLBindParameter(hstmtinesrt, 2, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_CHAR, 50, 50, cols, 50, bufLenCols);
+        CHECK_ERROR(retcode, "SQLBindParameter for cols", hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAMS_PROCESSED_PTR, (SQLPOINTER)&process, sizeof(process));
+        CHECK_ERROR(retcode, "SQLSetStmtAttr for SQL_ATTR_PARAMS_PROCESSED_PTR", hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAM_STATUS_PTR, (SQLPOINTER)statusptr, sizeof(statusptr[0]) * batchCount);
+        CHECK_ERROR(retcode, "SQLSetStmtAttr for SQL_ATTR_PARAM_STATUS_PTR", hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAM_OPERATION_PTR, (SQLPOINTER)operptr, sizeof(operptr[0]) * batchCount);
+        CHECK_ERROR(retcode, "SQLSetStmtAttr for SQL_ATTR_PARAM_OPERATION_PTR", hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLExecute(hstmtinesrt);
+        sprintf((char*)loginfo, "SQLExecute stmt log: %s", (char*)sql);
+        CHECK_ERROR(retcode, loginfo, hstmtinesrt, SQL_HANDLE_STMT);
+
+        retcode = SQLRowCount(hstmtinesrt, &rowsCount);
+        CHECK_ERROR(retcode, "SQLRowCount execution", hstmtinesrt, SQL_HANDLE_STMT);
+
+        if (rowsCount != (batchCount - ignoreCount))
+        {
+            sprintf(loginfo, "(batchCount - ignoreCount)(%d) != rowsCount(%d)", (batchCount - ignoreCount), rowsCount);
+            CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT);
+        }
+        else
+        {
+            sprintf(loginfo, "(batchCount - ignoreCount)(%d) == rowsCount(%d)", (batchCount - ignoreCount), rowsCount);
+            CHECK_ERROR(SQL_SUCCESS, loginfo, NULL, SQL_HANDLE_STMT);
+        }
+
+        if (rowsCount != process)
+        {
+            sprintf(loginfo, "process(%d) != rowsCount(%d)", process, rowsCount);
+            CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT);
+        }
+        else
+        {
+            sprintf(loginfo, "process(%d) == rowsCount(%d)", process, rowsCount);
+            CHECK_ERROR(SQL_SUCCESS, loginfo, NULL, SQL_HANDLE_STMT);
+        }
+
+        for (int i = 0; i < batchCount; i++)
+        {
+            if (i < ignoreCount)
+            {
+                if (statusptr[i] != SQL_PARAM_UNUSED)
+                {
+                    sprintf(loginfo, "statusptr[%d](%d) != SQL_PARAM_UNUSED", i, statusptr[i]);
+                    CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT);
+                }
+            }
+            else if (statusptr[i] != SQL_PARAM_SUCCESS)
+            {
+                sprintf(loginfo, "statusptr[%d](%d) != SQL_PARAM_SUCCESS", i, statusptr[i]);
+                CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT);
+            }
+        }
+
+        retcode = SQLFreeHandle(SQL_HANDLE_STMT, hstmtinesrt);
+        sprintf((char*)loginfo, "SQLFreeHandle hstmtinesrt");
+        CHECK_ERROR(retcode, loginfo, hstmtinesrt, SQL_HANDLE_STMT);
+    }
+
+
+exit:
+    printf ("\nComplete.\n");
+
+    // Connection
+    if (hdbc != SQL_NULL_HDBC) {
+        SQLDisconnect(hdbc);
+        SQLFreeHandle(SQL_HANDLE_DBC, hdbc);
+    }
+
+    // Environment
+    if (henv != SQL_NULL_HENV)
+        SQLFreeHandle(SQL_HANDLE_ENV, henv);
+
+    return 0;
+}
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0124.html b/docs/dws/dev/dws_04_0124.html new file mode 100644 index 00000000..9964f17b --- /dev/null +++ b/docs/dws/dev/dws_04_0124.html @@ -0,0 +1,58 @@ + + +

ODBC Interfaces

+

The ODBC interface is a set of API functions provided to users. This chapter describes its common interfaces. For details on other interfaces, see "ODBC Programmer's Reference" at MSDN (https://msdn.microsoft.com/en-us/library/windows/desktop/ms714177(v=vs.85).aspx).

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0125.html b/docs/dws/dev/dws_04_0125.html new file mode 100644 index 00000000..8fd4ec5b --- /dev/null +++ b/docs/dws/dev/dws_04_0125.html @@ -0,0 +1,11 @@ + + +

SQLAllocEnv

+

In ODBC 3.x, SQLAllocEnv (an ODBC 2.x function) was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0126.html b/docs/dws/dev/dws_04_0126.html new file mode 100644 index 00000000..0cc19782 --- /dev/null +++ b/docs/dws/dev/dws_04_0126.html @@ -0,0 +1,11 @@ + + +

SQLAllocConnect

+

In ODBC 3.x, SQLAllocConnect (an ODBC 2.x function) was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0127.html b/docs/dws/dev/dws_04_0127.html new file mode 100644 index 00000000..980db577 --- /dev/null +++ b/docs/dws/dev/dws_04_0127.html @@ -0,0 +1,56 @@ + + +

SQLAllocHandle

+

Function

SQLAllocHandle allocates environment, connection, or statement handles. This function is a generic function for allocating handles that replaces the deprecated ODBC 2.x functions SQLAllocEnv, SQLAllocConnect, and SQLAllocStmt.

+
+

Prototype

SQLRETURN SQLAllocHandle(SQLSMALLINT   HandleType,    
+                         SQLHANDLE     InputHandle,     
+                         SQLHANDLE     *OutputHandlePtr);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 SQLAllocHandle parameters

Keyword

+

Description

+

HandleType

+

The type of handle to be allocated by SQLAllocHandle. The value must be one of the following:

+
  • SQL_HANDLE_ENV (environment handle)
  • SQL_HANDLE_DBC (connection handle)
  • SQL_HANDLE_STMT (statement handle)
  • SQL_HANDLE_DESC (description handle)
+

The handle application sequence is: SQL_HANDLE_ENV > SQL_HANDLE_DBC > SQL_HANDLE_STMT. The handle applied later depends on the handle applied prior to it.

+

InputHandle

+

Existing handle to use as a context for the new handle being allocated.

+
  • If HandleType is SQL_HANDLE_ENV, this is SQL_NULL_HANDLE.
  • If HandleType is SQL_HANDLE_DBC, this must be an environment handle.
  • If HandleType is SQL_HANDLE_STMT or SQL_HANDLE_DESC, it must be a connection handle.
+

OutputHandlePtr

+

Output parameter: Pointer to a buffer in which to return the handle to the newly allocated data structure.

+
+
+
+

Return Values

+
+

Precautions

When allocating a non-environment handle, if SQLAllocHandle returns SQL_ERROR, it sets OutputHandlePtr to SQL_NULL_HENV, SQL_NULL_HDBC, SQL_NULL_HSTMT, or SQL_NULL_HDESC. The application can then call SQLGetDiagRec, with HandleType and Handle set to the type and value of InputHandle, to obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
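The following minimal sketch (the function and variable names are illustrative, not part of the API) shows the required allocation order and the cleanup on failure:

#include <sql.h>
+#include <sqlext.h>
+
+// Allocate the environment and connection handles in the required order.
+int alloc_handles(SQLHENV *env, SQLHDBC *dbc)
+{
+    if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, env)))
+        return -1;
+    // The ODBC version must be declared before a connection handle is allocated.
+    SQLSetEnvAttr(*env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);
+    if (!SQL_SUCCEEDED(SQLAllocHandle(SQL_HANDLE_DBC, *env, dbc))) {
+        SQLFreeHandle(SQL_HANDLE_ENV, *env);   // on failure, release what was allocated
+        return -1;
+    }
+    return 0;   // a statement handle can now be allocated on *dbc after connecting
+}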

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0128.html b/docs/dws/dev/dws_04_0128.html new file mode 100644 index 00000000..2fed2319 --- /dev/null +++ b/docs/dws/dev/dws_04_0128.html @@ -0,0 +1,11 @@ + + +

SQLAllocStmt

+

In ODBC 3.x, SQLAllocStmt was deprecated and replaced with SQLAllocHandle. For details, see SQLAllocHandle.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0129.html b/docs/dws/dev/dws_04_0129.html new file mode 100644 index 00000000..22f3b19c --- /dev/null +++ b/docs/dws/dev/dws_04_0129.html @@ -0,0 +1,74 @@ + + +

SQLBindCol

+

Function

SQLBindCol is used to associate (bind) columns in a result set to an application data buffer.

+
+

Prototype

SQLRETURN SQLBindCol(SQLHSTMT       StatementHandle,    
+                     SQLUSMALLINT   ColumnNumber,     
+                     SQLSMALLINT    TargetType,
+                     SQLPOINTER     TargetValuePtr,
+                     SQLLEN         BufferLength,
+                     SQLLEN         *StrLen_or_IndPtr);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQLBindCol parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle.

+

ColumnNumber

+

Number of the column to be bound. The column number starts with 0 and increases in ascending order. Column 0 is the bookmark column. If no bookmark column is set, column numbers start at 1.

+

TargetType

+

The C data type in the buffer.

+

TargetValuePtr

+

Output parameter: pointer to the buffer bound with the column. The SQLFetch function returns data in the buffer. If TargetValuePtr is null, StrLen_or_IndPtr is a valid value.

+

BufferLength

+

Size of the TargetValuePtr buffer in bytes available to store the column data.

+

StrLen_or_IndPtr

+

Output parameter: pointer to the length or indicator of the buffer. If StrLen_or_IndPtr is null, no length or indicator is used.

+
+
+
+

Return Values

+
+

Precautions

If SQLBindCol returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, with HandleType and Handle set to SQL_HANDLE_STMT and StatementHandle, respectively, to obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
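A minimal sketch of the typical binding pattern, assuming hstmt already holds an executed query whose first column is an integer:

#include <stdio.h>
+#include <sql.h>
+#include <sqlext.h>
+
+// Bind column 1 to a C long and print it for every row.
+void print_first_column(SQLHSTMT hstmt)
+{
+    SQLINTEGER val;
+    SQLLEN ind;
+    // BufferLength is ignored for fixed-size types such as SQL_C_SLONG.
+    SQLBindCol(hstmt, 1, SQL_C_SLONG, &val, 0, &ind);
+    while (SQL_SUCCEEDED(SQLFetch(hstmt))) {
+        if (ind != SQL_NULL_DATA)
+            printf("%d\n", (int)val);
+    }
+}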

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0130.html b/docs/dws/dev/dws_04_0130.html new file mode 100644 index 00000000..552988ff --- /dev/null +++ b/docs/dws/dev/dws_04_0130.html @@ -0,0 +1,102 @@ + + +

SQLBindParameter

+

Function

SQLBindParameter is used to associate (bind) parameter markers in an SQL statement to a buffer.

+
+

Prototype

SQLRETURN SQLBindParameter(SQLHSTMT       StatementHandle,    
+                           SQLUSMALLINT   ParameterNumber,     
+                           SQLSMALLINT    InputOutputType,
+                           SQLSMALLINT    ValueType,
+                           SQLSMALLINT    ParameterType,
+                           SQLULEN        ColumnSize,
+                           SQLSMALLINT    DecimalDigits,
+                           SQLPOINTER     ParameterValuePtr,
+                           SQLLEN         BufferLength,
+                           SQLLEN         *StrLen_or_IndPtr);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQLBindParameter

Keyword

+

Description

+

StatementHandle

+

Statement handle.

+

ParameterNumber

+

Parameter marker number, starting at 1 and increasing in an ascending order.

+

InputOutputType

+

Input/output type of the parameter.

+

ValueType

+

C data type of the parameter.

+

ParameterType

+

SQL data type of the parameter.

+

ColumnSize

+

Size of the column or expression of the corresponding parameter marker.

+

DecimalDigits

+

Number of decimal digits of the column or the expression of the corresponding parameter marker.

+

ParameterValuePtr

+

Pointer to the storage parameter buffer.

+

BufferLength

+

Size of the ParameterValuePtr buffer in bytes.

+

StrLen_or_IndPtr

+

Pointer to the length or indicator of the buffer. If StrLen_or_IndPtr is null, no length or indicator is used.

+
+
+
+

Return Values

+
+

Precautions

If SQLBindParameter returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, with HandleType and Handle set to SQL_HANDLE_STMT and StatementHandle, respectively, to obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
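A minimal sketch of binding one input integer parameter; hstmt is assumed to hold a statement prepared with a single parameter marker:

#include <sql.h>
+#include <sqlext.h>
+
+// Bind *id to the first (and only) parameter marker as an input value.
+void bind_id(SQLHSTMT hstmt, SQLINTEGER *id)
+{
+    SQLBindParameter(hstmt, 1,            // ParameterNumber starts at 1
+                     SQL_PARAM_INPUT,     // InputOutputType
+                     SQL_C_SLONG,         // C type of *id
+                     SQL_INTEGER,         // SQL type of the target column
+                     0, 0,                // ColumnSize/DecimalDigits: unused for integers
+                     id, 0, NULL);        // buffer, buffer length, indicator
+}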

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0131.html b/docs/dws/dev/dws_04_0131.html new file mode 100644 index 00000000..70f504de --- /dev/null +++ b/docs/dws/dev/dws_04_0131.html @@ -0,0 +1,81 @@ + + +

SQLColAttribute

+

Function

SQLColAttribute returns the descriptor information about a column in the result set.

+
+

Prototype

SQLRETURN SQLColAttribute(SQLHSTMT        StatementHandle,    
+                          SQLUSMALLINT    ColumnNumber,     
+                          SQLUSMALLINT    FieldIdentifier,
+                          SQLPOINTER      CharacterAtrriburePtr,
+                          SQLSMALLINT     BufferLength,
+                          SQLSMALLINT     *StringLengthPtr,
+                          SQLPOINTER      NumericAttributePtr);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQLColAttribute parameter

Keyword

+

Description

+

StatementHandle

+

Statement handle.

+

ColumnNumber

+

Column number of the field to be queried, starting at 1 and increasing in an ascending order.

+

FieldIdentifier

+

Field identifier of ColumnNumber in IRD.

+

CharacterAttributePtr

+

Output parameter: pointer to the buffer that returns FieldIdentifier field value.

+

BufferLength

+
      • Length of the *CharacterAttributePtr buffer if FieldIdentifier is an ODBC-defined field and CharacterAttributePtr points to a character string or a binary buffer.
  • Ignore this parameter if FieldIdentifier is an ODBC-defined field and CharacterAttributePtr points to an integer.
+

StringLengthPtr

+

Output parameter: pointer to a buffer in which the total number of valid bytes (for string data) available in *CharacterAttributePtr is returned. Ignore this value if the data is not a string.

+

NumericAttributePtr

+

Output parameter: pointer to an integer buffer in which the value of the FieldIdentifier field in the ColumnNumber row of the IRD is returned.

+
+
+
+

Return Values

+
+

Precautions

If SQLColAttribute returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
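A minimal sketch that reads the type name of the first result-set column; hstmt is assumed to hold a statement with a result set:

#include <stdio.h>
+#include <sql.h>
+#include <sqlext.h>
+
+// Query the SQL_DESC_TYPE_NAME field of column 1 from the IRD.
+void show_col_type(SQLHSTMT hstmt)
+{
+    SQLCHAR name[100];
+    SQLSMALLINT len;
+    SQLColAttribute(hstmt, 1, SQL_DESC_TYPE_NAME, name, sizeof(name), &len, NULL);
+    printf("column 1 type: %s\n", (char*)name);
+}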

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0132.html b/docs/dws/dev/dws_04_0132.html new file mode 100644 index 00000000..40dc44db --- /dev/null +++ b/docs/dws/dev/dws_04_0132.html @@ -0,0 +1,81 @@ + + +

SQLConnect

+

Function

SQLConnect establishes a connection between a driver and a data source. After the connection, the connection handle can be used to access all information about the data source, including its application operating status, transaction processing status, and error information.

+
+

Prototype

SQLRETURN  SQLConnect(SQLHDBC        ConnectionHandle,
+                      SQLCHAR        *ServerName,
+                      SQLSMALLINT    NameLength1,
+                      SQLCHAR        *UserName,
+                      SQLSMALLINT    NameLength2,
+                      SQLCHAR        *Authentication,
+                      SQLSMALLINT    NameLength3);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQLConnect parameters

Keyword

+

Description

+

ConnectionHandle

+

Connection handle, obtained from SQLAllocHandle.

+

ServerName

+

Name of the data source to connect to.

+

NameLength1

+

Length of ServerName.

+

UserName

+

User name of the database in the data source.

+

NameLength2

+

Length of UserName.

+

Authentication

+

User password of the database in the data source.

+

NameLength3

+

Length of Authentication.

+
+
+
+

Return Values

+
+

Precautions

If SQLConnect returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_DBC and ConnectionHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
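A minimal sketch that connects through the MPPODBC data source configured earlier in this section; the user name and password are placeholders for your own configuration:

#include <sql.h>
+#include <sqlext.h>
+
+// Connect an allocated connection handle to the DSN from odbc.ini.
+SQLRETURN connect_dsn(SQLHDBC hdbc)
+{
+    return SQLConnect(hdbc,
+                      (SQLCHAR*)"MPPODBC", SQL_NTS,    // data source name
+                      (SQLCHAR*)"dbadmin", SQL_NTS,    // database user
+                      (SQLCHAR*)"password", SQL_NTS);  // placeholder password
+}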

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0133.html b/docs/dws/dev/dws_04_0133.html new file mode 100644 index 00000000..2ab94ad7 --- /dev/null +++ b/docs/dws/dev/dws_04_0133.html @@ -0,0 +1,39 @@ + + +

SQLDisconnect

+

Function

SQLDisconnect closes the connection associated with the database connection handle.

+
+

Prototype

SQLRETURN SQLDisconnect(SQLHDBC    ConnectionHandle);
+
+ +
+
+

Parameter

+
+ + + + + + + +
Table 1 SQLDisconnect parameters

Keyword

+

Description

+

ConnectionHandle

+

Connection handle, obtained from SQLAllocHandle.

+
+
+
+

Return Values

+
+

Precautions

If SQLDisconnect returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_DBC and ConnectionHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0134.html b/docs/dws/dev/dws_04_0134.html new file mode 100644 index 00000000..42154967 --- /dev/null +++ b/docs/dws/dev/dws_04_0134.html @@ -0,0 +1,53 @@ + + +

SQLExecDirect

+

Function

SQLExecDirect executes the SQL statement specified in StatementText. This is the fastest way to execute an SQL statement that is run only once.

+
+

Prototype

SQLRETURN SQLExecDirect(SQLHSTMT         StatementHandle,
+                        SQLCHAR         *StatementText,     
+                        SQLINTEGER       TextLength);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 SQLExecDirect parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle, obtained from SQLAllocHandle.

+

StatementText

+

SQL statement to be executed. One SQL statement can be executed at a time.

+

TextLength

+

Length of StatementText.

+
+
+
+

Return Values

+
+

Precautions

If SQLExecDirect returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
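A minimal sketch of a one-off statement; the table comes from the examples in this guide:

#include <sql.h>
+#include <sqlext.h>
+
+// Execute a statement that is run only once, without preparing it first.
+SQLRETURN run_once(SQLHSTMT hstmt)
+{
+    return SQLExecDirect(hstmt,
+        (SQLCHAR*)"update customer_t1 set c_customer_name = 'lee' where c_customer_sk = 25",
+        SQL_NTS);  // SQL_NTS: StatementText is null-terminated
+}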

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0135.html b/docs/dws/dev/dws_04_0135.html new file mode 100644 index 00000000..2acc40ab --- /dev/null +++ b/docs/dws/dev/dws_04_0135.html @@ -0,0 +1,39 @@ + + +

SQLExecute

+

Function

SQLExecute executes a statement prepared by SQLPrepare. The statement is executed using the current values of any application variables that were bound to parameter markers by SQLBindParameter.

+
+

Prototype

SQLRETURN SQLExecute(SQLHSTMT    StatementHandle);
+
+ +
+
+

Parameter

+
+ + + + + + + +
Table 1 SQLExecute parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle to be executed.

+
+
+
+

Return Values

+
+

Precautions

If SQLExecute returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0136.html b/docs/dws/dev/dws_04_0136.html new file mode 100644 index 00000000..1fdb9258 --- /dev/null +++ b/docs/dws/dev/dws_04_0136.html @@ -0,0 +1,39 @@ + + +

SQLFetch

+

Function

SQLFetch advances the cursor to the next row of the result set and retrieves any bound columns.

+
+

Prototype

SQLRETURN SQLFetch(SQLHSTMT    StatementHandle);
+
+ +
+
+

Parameter

+
+ + + + + + + +
Table 1 SQLFetch parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle, obtained from SQLAllocHandle.

+
+
+
+

Return Values

+
+

Precautions

If SQLFetch returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
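A minimal sketch of the standard fetch loop; any columns bound with SQLBindCol are refreshed on each successful call:

#include <sql.h>
+#include <sqlext.h>
+
+// Advance through every row of the result set and count the rows.
+long count_rows(SQLHSTMT hstmt)
+{
+    long rows = 0;
+    while (SQL_SUCCEEDED(SQLFetch(hstmt)))  // SQLFetch returns SQL_NO_DATA past the last row
+        rows++;
+    return rows;
+}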

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0137.html b/docs/dws/dev/dws_04_0137.html new file mode 100644 index 00000000..f33720bb --- /dev/null +++ b/docs/dws/dev/dws_04_0137.html @@ -0,0 +1,11 @@ + + +

SQLFreeStmt

+

In ODBC 3.x, SQLFreeStmt (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0138.html b/docs/dws/dev/dws_04_0138.html new file mode 100644 index 00000000..afb47dc9 --- /dev/null +++ b/docs/dws/dev/dws_04_0138.html @@ -0,0 +1,11 @@ + + +

SQLFreeConnect

+

In ODBC 3.x, SQLFreeConnect (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0139.html b/docs/dws/dev/dws_04_0139.html new file mode 100644 index 00000000..ca76f984 --- /dev/null +++ b/docs/dws/dev/dws_04_0139.html @@ -0,0 +1,48 @@ + + +

SQLFreeHandle

+

Function

SQLFreeHandle releases resources associated with a specific environment, connection, or statement handle. It replaces the ODBC 2.x functions: SQLFreeEnv, SQLFreeConnect, and SQLFreeStmt.

+
+

Prototype

SQLRETURN SQLFreeHandle(SQLSMALLINT   HandleType,    
+                        SQLHANDLE     Handle);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + +
Table 1 SQLFreeHandle parameters

Keyword

+

Description

+

HandleType

+

The type of handle to be freed by SQLFreeHandle. The value must be one of the following:

+
  • SQL_HANDLE_ENV
  • SQL_HANDLE_DBC
  • SQL_HANDLE_STMT
  • SQL_HANDLE_DESC
+

If HandleType is not one of the preceding values, SQLFreeHandle returns SQL_INVALID_HANDLE.

+

Handle

+

The handle to be freed.

+
+
+
+

Return Values

+
+

Precautions

If SQLFreeHandle returns SQL_ERROR, the handle is still valid.

+
+

Examples

See Examples.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0140.html b/docs/dws/dev/dws_04_0140.html new file mode 100644 index 00000000..ce3128a5 --- /dev/null +++ b/docs/dws/dev/dws_04_0140.html @@ -0,0 +1,11 @@ + + +

SQLFreeEnv

+

In ODBC 3.x, SQLFreeEnv (an ODBC 2.x function) was deprecated and replaced with SQLFreeHandle. For details, see SQLFreeHandle.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0141.html b/docs/dws/dev/dws_04_0141.html new file mode 100644 index 00000000..2cc228d2 --- /dev/null +++ b/docs/dws/dev/dws_04_0141.html @@ -0,0 +1,53 @@ + + +

SQLPrepare

+

Function

SQLPrepare prepares an SQL statement to be executed.

+
+

Prototype

SQLRETURN SQLPrepare(SQLHSTMT      StatementHandle,    
+                     SQLCHAR       *StatementText,     
+                     SQLINTEGER    TextLength);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 SQLPrepare parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle.

+

StatementText

+

SQL text string.

+

TextLength

+

Length of StatementText.

+
+
+
+

Return Values

+
+

Precautions

If SQLPrepare returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
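A minimal sketch of the prepare-once, execute-many pattern; the table and column come from the examples in this guide:

#include <sql.h>
+#include <sqlext.h>
+
+// Prepare one INSERT and execute it for each value in ids.
+SQLRETURN insert_many(SQLHSTMT hstmt, SQLINTEGER *ids, int n)
+{
+    SQLINTEGER value;
+    SQLRETURN rc = SQLPrepare(hstmt,
+        (SQLCHAR*)"insert into customer_t1(c_customer_sk) values(?)", SQL_NTS);
+    SQLBindParameter(hstmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_INTEGER,
+                     0, 0, &value, 0, NULL);
+    for (int i = 0; i < n && SQL_SUCCEEDED(rc); i++) {
+        value = ids[i];
+        rc = SQLExecute(hstmt);  // re-executes the prepared plan with the new value
+    }
+    return rc;
+}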

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0142.html b/docs/dws/dev/dws_04_0142.html new file mode 100644 index 00000000..3fef884a --- /dev/null +++ b/docs/dws/dev/dws_04_0142.html @@ -0,0 +1,74 @@ + + +

SQLGetData

+

Function

SQLGetData retrieves data for a single column in the current row of the result set. It can be called multiple times to retrieve variable-length data in parts.

+
+

Prototype

SQLRETURN SQLGetData(SQLHSTMT        StatementHandle,
+                     SQLUSMALLINT    Col_or_Param_Num,
+                     SQLSMALLINT     TargetType,
+                     SQLPOINTER      TargetValuePtr,
+                     SQLLEN          BufferLength,
+                     SQLLEN          *StrLen_or_IndPtr);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQLGetData parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle, obtained from SQLAllocHandle.

+

Col_or_Param_Num

+

Column number for which the data retrieval is requested. The column number starts with 1 and increases in ascending order. The number of the bookmark column is 0.

+

TargetType

+

C data type in the TargetValuePtr buffer. If TargetType is SQL_ARD_TYPE, the driver uses the data type of the SQL_DESC_CONCISE_TYPE field in ARD. If TargetType is SQL_C_DEFAULT, the driver selects a default data type according to the source SQL data type.

+

TargetValuePtr

+

Output parameter: pointer to the buffer in which to return the data.

+

BufferLength

+

Size of the buffer pointed to by TargetValuePtr.

+

StrLen_or_IndPtr

+

Output parameter: pointer to the buffer where the length or indicator value is returned.

+
+
+
+

Return Values

+
+

Precautions

If SQLFetch returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
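A minimal sketch that reads a character column in fixed-size pieces; hstmt is assumed to be positioned on a row by SQLFetch, and NULL handling is simplified:

#include <stdio.h>
+#include <sql.h>
+#include <sqlext.h>
+
+// Retrieve column 1 piece by piece; SQL_SUCCESS_WITH_INFO means more data remains.
+void print_long_text(SQLHSTMT hstmt)
+{
+    SQLCHAR buf[256];
+    SQLLEN ind;
+    SQLRETURN rc;
+    while ((rc = SQLGetData(hstmt, 1, SQL_C_CHAR, buf, sizeof(buf), &ind)) == SQL_SUCCESS_WITH_INFO)
+        printf("%s", (char*)buf);        // truncated piece, fetch the rest
+    if (rc == SQL_SUCCESS && ind != SQL_NULL_DATA)
+        printf("%s\n", (char*)buf);      // final (or only) piece
+}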

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0143.html b/docs/dws/dev/dws_04_0143.html new file mode 100644 index 00000000..44b2f40c --- /dev/null +++ b/docs/dws/dev/dws_04_0143.html @@ -0,0 +1,152 @@ + + +

SQLGetDiagRec

+

Function

SQLGetDiagRec returns the current values of multiple fields of a diagnostic record that contains error, warning, and status information.

+
+

Prototype

SQLRETURN  SQLGetDiagRec(SQLSMALLINT    HandleType,
+                         SQLHANDLE      Handle,
+                         SQLSMALLINT    RecNumber,
+                         SQLCHAR        *SQLState,
+                         SQLINTEGER     *NativeErrorPtr,
+                         SQLCHAR        *MessageText,
+                         SQLSMALLINT    BufferLength,
+                         SQLSMALLINT    *TextLengthPtr);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQLGetDiagRec parameters

Keyword

+

Description

+

HandleType

+

A handle-type identifier that describes the type of handle for which diagnostics are desired. The value must be one of the following:

+
  • SQL_HANDLE_ENV
  • SQL_HANDLE_DBC
  • SQL_HANDLE_STMT
  • SQL_HANDLE_DESC
+

Handle

+

A handle for the diagnostic data structure. Its type is indicated by HandleType. If HandleType is SQL_HANDLE_ENV, Handle may be a shared or non-shared environment handle.

+

RecNumber

+

Indicates the status record from which the application seeks information. RecNumber starts with 1.

+

SQLState

+

Output parameter: pointer to a buffer that saves the 5-character SQLSTATE code pertaining to RecNumber.

+

NativeErrorPtr

+

Output parameter: pointer to a buffer that saves the native error code.

+

MessageText

+

Pointer to a buffer that saves text strings of diagnostic information.

+

BufferLength

+

Length of MessageText.

+

TextLengthPtr

+

Output parameter: pointer to a buffer in which the total number of bytes in the returned MessageText is stored. If the number of bytes available to return is greater than BufferLength, the diagnostic information text in MessageText is truncated to BufferLength minus the length of the null-termination character.

+
+
+
+

Return Values

+
+

Precautions

SQLGetDiagRec does not release diagnostic records for itself. It uses the following returned values to report execution results:

+ +

If an ODBC function returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec and obtain the SQLSTATE value. The possible SQLSTATE values are listed as follows:

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 SQLSTATE values

SQLSTATE

+

Error

+

Description

+

HY000

+

General error

+

An error occurred for which there is no specific SQLSTATE.

+

HY001

+

Memory allocation error

+

The driver is unable to allocate memory required to support execution or completion of the function.

+

HY008

+

Operation canceled

+

SQLCancel was called to terminate the statement execution, and then the function was called again on StatementHandle.

+

HY010

+

Function sequence error

+

The function was called before data was sent for all data-at-execution parameters or columns.

+

HY013

+

Memory management error

+

The function call could not be processed, possibly because of low memory conditions.

+

HYT01

+

Connection timed out

+

The timeout period expired before the application was able to connect to the data source.

+

IM001

+

Function not supported by the driver

+

The function is not supported by the driver associated with StatementHandle.

+
+
+
+

Examples

See Examples.
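A minimal sketch of a reusable diagnostic dump; it loops until SQLGetDiagRec reports that no more records exist:

#include <stdio.h>
+#include <sql.h>
+#include <sqlext.h>
+
+// Print every diagnostic record attached to a handle, e.g. after SQL_ERROR.
+void print_diagnostics(SQLSMALLINT type, SQLHANDLE handle)
+{
+    SQLCHAR state[6], msg[512];
+    SQLINTEGER native;
+    SQLSMALLINT len, rec = 1;
+    while (SQL_SUCCEEDED(SQLGetDiagRec(type, handle, rec++, state, &native,
+                                       msg, sizeof(msg), &len)))
+        fprintf(stderr, "SQLSTATE %s (native %d): %s\n", (char*)state, (int)native, (char*)msg);
+}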

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0144.html b/docs/dws/dev/dws_04_0144.html new file mode 100644 index 00000000..d06d3a68 --- /dev/null +++ b/docs/dws/dev/dws_04_0144.html @@ -0,0 +1,60 @@ + + +

SQLSetConnectAttr

+

Function

SQLSetConnectAttr sets connection attributes.

+
+

Prototype

SQLRETURN SQLSetConnectAttr(SQLHDBC       ConnectionHandle,
+                            SQLINTEGER    Attribute,    
+                            SQLPOINTER    ValuePtr,     
+                            SQLINTEGER    StringLength);
+
+ +
+
+

Parameter

+
+ + + + + + + + + + + + + + + + +
Table 1 SQLSetConnectAttr parameters

Keyword

+

Description

+

ConnectionHandle

+

Connection handle.

+

Attribute

+

Attribute to set.

+

ValuePtr

+

Pointer to the value for Attribute. Depending on Attribute, ValuePtr can be a 32-bit unsigned integer or a pointer to a null-terminated string. If Attribute is a driver-specific value, ValuePtr may be a signed integer.

+

StringLength

+

If ValuePtr points to a string or a binary buffer, this parameter should be the length of *ValuePtr. If ValuePtr points to an integer, StringLength is ignored.

+
+
+
+

Return Values

+
+

Precautions

If SQLSetConnectAttr returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_DBC and ConnectionHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
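A minimal sketch that disables autocommit, the usual first step when grouping statements into an explicit transaction; hdbc is an allocated connection handle:

#include <sql.h>
+#include <sqlext.h>
+
+// For integer attributes, the value travels in the pointer argument itself,
+// so StringLength is ignored.
+SQLRETURN disable_autocommit(SQLHDBC hdbc)
+{
+    return SQLSetConnectAttr(hdbc, SQL_ATTR_AUTOCOMMIT,
+                             (SQLPOINTER)SQL_AUTOCOMMIT_OFF, 0);
+}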

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0145.html b/docs/dws/dev/dws_04_0145.html new file mode 100644 index 00000000..0c7047b4 --- /dev/null +++ b/docs/dws/dev/dws_04_0145.html @@ -0,0 +1,61 @@ + + +

SQLSetEnvAttr

+

Function

SQLSetEnvAttr sets environment attributes.

+
+

Prototype

SQLRETURN SQLSetEnvAttr(SQLHENV       EnvironmentHandle,
+                        SQLINTEGER    Attribute,    
+                        SQLPOINTER    ValuePtr,     
+                        SQLINTEGER    StringLength);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + +
Table 1 SQLSetEnvAttr parameters

Keyword

+

Description

+

EnvironmentHandle

+

Environment handle.

+

Attribute

+

Environment attribute to be set. Its value must be one of the following:

+
  • SQL_ATTR_ODBC_VERSION: ODBC version
  • SQL_ATTR_CONNECTION_POOLING: connection pooling attribute
  • SQL_ATTR_OUTPUT_NTS: whether strings returned by the driver are null-terminated
+

ValuePtr

+

Pointer to the Attribute value. ValuePtr depends on the Attribute value, and can be a 32-bit integer value or a null-terminated string.

+

StringLength

+

If ValuePtr points to a string or a binary buffer, this parameter should be the length of *ValuePtr. If ValuePtr points to an integer, StringLength is ignored.

+
+
+
+

Return Values

+
+

Precautions

If SQLSetEnvAttr returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_ENV and EnvironmentHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
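A typical first call after allocating the environment handle sets the ODBC version. A minimal C sketch (assuming the standard sql.h/sqlext.h headers) follows:

#include <sql.h>
#include <sqlext.h>

SQLHENV create_env(void)
{
    SQLHENV henv = SQL_NULL_HENV;
    SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
    /* SQL_ATTR_ODBC_VERSION takes an integer in ValuePtr; StringLength is ignored. */
    SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);
    return henv;
}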

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0146.html b/docs/dws/dev/dws_04_0146.html new file mode 100644 index 00000000..2a68b97d --- /dev/null +++ b/docs/dws/dev/dws_04_0146.html @@ -0,0 +1,60 @@ + + +

SQLSetStmtAttr

+

Function

SQLSetStmtAttr sets attributes related to a statement.

+
+

Prototype

1
+2
+3
+4
SQLRETURN SQLSetStmtAttr(SQLHSTMT      StatementHandle,
+                         SQLINTEGER    Attribute,    
+                         SQLPOINTER    ValuePtr,     
+                         SQLINTEGER    StringLength);
+
+ +
+
+

Parameter

+
Table 1 SQLSetStmtAttr parameters

Keyword

+

Description

+

StatementHandle

+

Statement handle.

+

Attribute

+

Attribute to set.

+

ValuePtr

+

Pointer to the value of Attribute. Depending on the attribute, ValuePtr can be a 32-bit unsigned integer or a pointer to a null-terminated string, a binary buffer, or a driver-defined value. If the attribute is driver-specific, the value may be a signed integer.

+

StringLength

+

If ValuePtr points to a string or a binary buffer, this parameter should be the length of *ValuePtr. If ValuePtr points to an integer, StringLength is ignored.

+
+
+
+

Return Values

+
+

Precautions

If SQLSetStmtAttr returns SQL_ERROR or SQL_SUCCESS_WITH_INFO, the application can then call SQLGetDiagRec, set HandleType and Handle to SQL_HANDLE_STMT and StatementHandle, and obtain the SQLSTATE value. The SQLSTATE value provides the detailed function calling information.

+
+

Examples

See Examples.
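As an illustration, the C sketch below sets a 30-second query timeout on a statement handle. The call pattern is standard ODBC; whether the GaussDB(DWS) driver honors SQL_ATTR_QUERY_TIMEOUT is an assumption:

#include <sql.h>
#include <sqlext.h>

void set_query_timeout(SQLHSTMT hstmt)
{
    /* Integer attribute passed through ValuePtr; StringLength is ignored. */
    SQLSetStmtAttr(hstmt, SQL_ATTR_QUERY_TIMEOUT, (SQLPOINTER)(SQLLEN)30, 0);
}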

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0154.html b/docs/dws/dev/dws_04_0154.html new file mode 100644 index 00000000..4cb58146 --- /dev/null +++ b/docs/dws/dev/dws_04_0154.html @@ -0,0 +1,27 @@ + + + +

Importing CSV/TXT Data from OBS

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0155.html b/docs/dws/dev/dws_04_0155.html new file mode 100644 index 00000000..52b826a6 --- /dev/null +++ b/docs/dws/dev/dws_04_0155.html @@ -0,0 +1,29 @@ + + + +

Importing ORC/CarbonData Data from OBS

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0156.html b/docs/dws/dev/dws_04_0156.html new file mode 100644 index 00000000..1d870127 --- /dev/null +++ b/docs/dws/dev/dws_04_0156.html @@ -0,0 +1,291 @@ + + +

Supported Data Types

+

+

In the big data field, ORC is a mainstream file format, and GaussDB(DWS) supports it. You can use Hive to export data to an ORC file and then use a read-only foreign table to query and analyze the data in that file. To do so, the data types in the ORC file must be mapped to the data types supported by GaussDB(DWS); see Table 1. Similarly, GaussDB(DWS) can export data through a write-only foreign table and store it in ORC format, and reading the ORC file with Hive also requires matched data types; Table 2 shows that mapping.

+ +
Table 1 Mapping between ORC read-only foreign tables and Hive data types

Type

+

Type Supported by GaussDB(DWS) Foreign Tables

+

Hive Table Type

+

1-byte integer

+

TINYINT (not recommended)

+

TINYINT

+

SMALLINT (recommended)

+

TINYINT

+

2-byte integer

+

SMALLINT

+

SMALLINT

+

4-byte integer

+

INTEGER

+

INT

+

8-byte integer

+

BIGINT

+

BIGINT

+

Single-precision floating point number

+

FLOAT4 (REAL)

+

FLOAT

+

Double-precision floating point number

+

FLOAT8(DOUBLE PRECISION)

+

DOUBLE

+

Scientific data type

+

DECIMAL[(p[,s])] (The maximum precision is 38.)

+

DECIMAL (The maximum precision is 38.) (Hive 0.11)

+

Date type

+

DATE

+

DATE

+

Time type

+

TIMESTAMP

+

TIMESTAMP

+

Boolean type

+

BOOLEAN

+

BOOLEAN

+

CHAR type

+

CHAR(n)

+

CHAR (n)

+

VARCHAR type

+

VARCHAR(n)

+

VARCHAR (n)

+

String (large text object)

+

TEXT(CLOB)

+

STRING

+
+
+ +
Table 2 Mapping between ORC write-only foreign tables and Hive data types

Type

+

Type Supported by GaussDB(DWS) Internal Tables (Data Source Table)

+

Type Supported by GaussDB(DWS) Write-only Foreign Tables

+

Hive Table Type

+

1-byte integer

+

TINYINT

+

TINYINT (not recommended)

+

SMALLINT

+

SMALLINT (recommended)

+

SMALLINT

+

2-byte integer

+

SMALLINT

+

SMALLINT

+

SMALLINT

+

4-byte integer

+

INTEGER, BINARY_INTEGER

+

INTEGER

+

INT

+

8-byte integer

+

BIGINT

+

BIGINT

+

BIGINT

+

Single-precision floating point number

+

FLOAT4, REAL

+

FLOAT4, REAL

+

FLOAT

+

Double-precision floating point number

+

DOUBLE PRECISION, FLOAT8, BINARY_DOUBLE

+

DOUBLE PRECISION, FLOAT8, BINARY_DOUBLE

+

DOUBLE

+

Scientific data type

+

DECIMAL, NUMERIC

+

DECIMAL[(p[,s])] (The maximum precision is 38.)

+

precision ≤ 38: DECIMAL; precision > 38: STRING

+

Date type

+

DATE

+

TIMESTAMP[(p)] [WITHOUT TIME ZONE]

+

TIMESTAMP

+

+

+

Time type

+

TIME [(p)] [WITHOUT TIME ZONE], TIME [(p)] [WITH TIME ZONE]

+

TEXT

+

STRING

+

TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)][WITH TIME ZONE], SMALLDATETIME

+

TIMESTAMP[(p)] [WITHOUT TIME ZONE]

+

TIMESTAMP

+

INTERVAL DAY (l) TO SECOND (p), INTERVAL [FIELDS] [(p)]

+

VARCHAR(n)

+

VARCHAR(n)

+

Boolean type

+

BOOLEAN

+

BOOLEAN

+

BOOLEAN

+

CHAR type

+

CHAR(n), CHARACTER(n), NCHAR(n)

+

CHAR(n), CHARACTER(n), NCHAR(n)

+

n ≤ 255: CHAR(n); n > 255: STRING

+

VARCHAR type

+

+

VARCHAR(n), CHARACTER VARYING(n), VARCHAR2(n)

+

VARCHAR(n)

+

n ≤ 65535: VARCHAR(n); n > 65535: STRING

+

NVARCHAR2(n)

+

TEXT

+

STRING

+

String (large text object)

+

TEXT, CLOB

+

TEXT, CLOB

+

STRING

+

Monetary type

+

MONEY

+

NUMERIC

+

BIGINT

+
+
+
  1. GaussDB(DWS) foreign tables support the NULL definition, and Hive data tables support the corresponding NULL definition.
  2. The value range of TINYINT in a Hive data table is [-128, 127], while the value range of TINYINT in GaussDB(DWS) is [0, 255]. You are advised to use the SMALLINT type in a GaussDB(DWS) read-only foreign table for a TINYINT column in a Hive table; if TINYINT is used, the value read may differ from the actual value. Similarly, when exporting data of the TINYINT type from GaussDB(DWS), you are advised to use SMALLINT in both the write-only foreign table and the Hive table (see the sketch after this list).
  3. The date and time types of GaussDB(DWS) foreign tables do not support the time zone definition, and neither do Hive tables.
  4. The date type in Hive contains only the date. The date type in GaussDB(DWS) contains both the date and the time.
  5. In GaussDB(DWS), ORC files can be compressed in ZLIB, SNAPPY, LZ4, or NONE mode.
  6. The FLOAT4 type is inherently imprecise, and summation results may vary across environments. You are advised to use the DECIMAL type in high-precision scenarios.
  7. In Teradata-compatible mode, foreign tables do not support the DATE type.
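A minimal sketch of note 2 above, assuming a Hive table stored as ORC with a TINYINT column and a pre-created foreign server (the server name hdfs_server and the folder path are assumptions):

CREATE FOREIGN TABLE demo_orc_ft
(
    flag     SMALLINT,       -- Hive TINYINT, declared as SMALLINT per note 2
    qty      INTEGER,        -- Hive INT
    price    DECIMAL(18,2),  -- Hive DECIMAL
    sold_on  DATE,           -- Hive DATE
    remark   TEXT            -- Hive STRING
)
SERVER hdfs_server
OPTIONS (format 'orc', foldername '/user/hive/warehouse/demo_orc')
DISTRIBUTE BY ROUNDROBIN;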
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0157.html b/docs/dws/dev/dws_04_0157.html new file mode 100644 index 00000000..a347bb5f --- /dev/null +++ b/docs/dws/dev/dws_04_0157.html @@ -0,0 +1,25 @@ + + + +

Exporting CSV/TXT Data to OBS

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0158.html b/docs/dws/dev/dws_04_0158.html new file mode 100644 index 00000000..ffb303e0 --- /dev/null +++ b/docs/dws/dev/dws_04_0158.html @@ -0,0 +1,30 @@ + + +

Exporting Data

+

Procedure

  1. Export data.

    1
    INSERT INTO [Foreign table name] SELECT * FROM [Source table name];
    +
    + +
    +

+
+

Examples

+

Data of a special type, such as RAW, is exported as a binary file, which cannot be recognized by the import tool. Therefore, use the RAWTOHEX() function to convert such data to hexadecimal format before export.
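For instance, a hypothetical sketch (the names are illustrative: raw_tab has a RAW column r, and raw_tab_ext is the write-only foreign table):

INSERT INTO raw_tab_ext SELECT id, RAWTOHEX(r) FROM raw_tab;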

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0159.html b/docs/dws/dev/dws_04_0159.html new file mode 100644 index 00000000..56a7203b --- /dev/null +++ b/docs/dws/dev/dws_04_0159.html @@ -0,0 +1,24 @@ + + +

Exporting ORC Data to MRS

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0160.html b/docs/dws/dev/dws_04_0160.html new file mode 100644 index 00000000..bb97ff06 --- /dev/null +++ b/docs/dws/dev/dws_04_0160.html @@ -0,0 +1,15 @@ + + +

Overview

+
GaussDB(DWS) allows you to export ORC data to MRS using an HDFS foreign table. You can specify the export mode and export data format in the foreign table. Data is exported from GaussDB(DWS) in parallel using multiple DNs and stored in HDFS. In this way, the overall export performance is improved. +
+

Naming Rules of Exported Files

The rules for naming ORC data files exported from GaussDB(DWS) are as follows:

+
+
  1. Data exported to MRS (HDFS): When data is exported from a DN, the data is stored in HDFS in the segment format. The file is named in the format of mpp_Database name_Schema name_Table name_Node name_n.orc. n is a natural number starting from 0 in ascending order, for example, 0, 1, 2, 3.
  2. You are advised to export data from different clusters or databases to different paths. The maximum size of an ORC file is 128 MB, and that of a stripe is 64 MB.
  3. After the export is complete, the _SUCCESS file is generated.
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0161.html b/docs/dws/dev/dws_04_0161.html new file mode 100644 index 00000000..54ddf5d0 --- /dev/null +++ b/docs/dws/dev/dws_04_0161.html @@ -0,0 +1,12 @@ + + +

Planning Data Export

+

For details about the data types that can be exported to MRS, see Table 2.

+

For details about HDFS data export or MRS configuration, see the MapReduce Service User Guide.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0162.html b/docs/dws/dev/dws_04_0162.html new file mode 100644 index 00000000..c7e3c778 --- /dev/null +++ b/docs/dws/dev/dws_04_0162.html @@ -0,0 +1,11 @@ + + +

Creating a Foreign Server

+

For details about creating a foreign server on HDFS, see Manually Creating a Foreign Server.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0163.html b/docs/dws/dev/dws_04_0163.html new file mode 100644 index 00000000..caade022 --- /dev/null +++ b/docs/dws/dev/dws_04_0163.html @@ -0,0 +1,109 @@ + + +

Creating a Foreign Table

+

After operations in Creating a Foreign Server are complete, create an HDFS write-only foreign table in the GaussDB(DWS) database to access data stored in HDFS. The foreign table is write-only and can be used only for data export.

+
The syntax for creating a foreign table is as follows. For details, see CREATE FOREIGN TABLE (SQL on Hadoop or OBS).
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name 
+( [ { column_name type_name 
+    [ { [CONSTRAINT constraint_name] NULL |
+    [CONSTRAINT constraint_name] NOT NULL |
+      column_constraint [...]} ] |
+      table_constraint [, ...]} [, ...] ] ) 
+    SERVER dfs_server 
+    OPTIONS ( { option_name ' value ' } [, ...] ) 
+    [ {WRITE ONLY }]
+    DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
+    [ PARTITION BY ( column_name ) [ AUTOMAPPED ] ] ;
+
+ +
+
+

For example, when creating a foreign table product_info_ext_obs, configure the parameters in the syntax as follows.
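A sketch consistent with the syntax above (the server name hdfs_server, the export folder, and the compression settings are assumptions; adapt them to your environment):

CREATE FOREIGN TABLE product_info_ext_obs
(
    product_price  integer       not null,
    product_id     char(30)      not null,
    product_name   varchar(200)
)
SERVER hdfs_server
OPTIONS (
    format 'orc',
    foldername '/user/hive/warehouse/product_info_orc/',
    compression 'snappy',
    version '0.12'
)
WRITE ONLY
DISTRIBUTE BY ROUNDROBIN;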

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0164.html b/docs/dws/dev/dws_04_0164.html new file mode 100644 index 00000000..4e38146e --- /dev/null +++ b/docs/dws/dev/dws_04_0164.html @@ -0,0 +1,30 @@ + + +

Exporting Data

+

Procedure

  1. Export data.

    1
    INSERT INTO [Foreign table name] SELECT * FROM [Source table name];
    +
    + +
    +

+
+

Examples

+

Data of a special type, such as RAW, is exported as a binary file, which cannot be recognized by the import tool. Therefore, use the RAWTOHEX() function to convert such data to hexadecimal format before export.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0179.html b/docs/dws/dev/dws_04_0179.html new file mode 100644 index 00000000..d899550e --- /dev/null +++ b/docs/dws/dev/dws_04_0179.html @@ -0,0 +1,25 @@ + + +

Data Import

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0180.html b/docs/dws/dev/dws_04_0180.html new file mode 100644 index 00000000..39318ed0 --- /dev/null +++ b/docs/dws/dev/dws_04_0180.html @@ -0,0 +1,121 @@ + + +

Data Migration to GaussDB(DWS)

+

GaussDB(DWS) provides flexible methods for importing data. You can import data from different sources to GaussDB(DWS). The features of each method are listed in Table 1. You can select a method as required. You are advised to use GaussDB(DWS) with Cloud Data Migration (CDM) and Data Lake Factory (DLF). CDM is used for batch data migration, and DLF orchestrates and schedules the entire ETL process and provides a visualized development environment.

+
  • DRS, CDM, OBS, and MRS are cloud services.
  • GDS, DSC, gs_restore, and gs_dump are internal tools.
+
+ +
Table 1 Data migration methods

Data Migration Mode

+

Supported Data Source/Database

+

Description

+

Advantage

+

Importing Data from OBS in Parallel

+

TEXT, CSV, ORC, and CarbonData data file formats

+

You can import data in TEXT, CSV, ORC, or CarbonData format from OBS to GaussDB(DWS) for query, and can remotely read data from OBS.

+

It is recommended for GaussDB(DWS).

+

This method features high performance and flexible scale-out.

+

Using GDS to Import Data from a Remote Server

+

TEXT and CSV data file formats

+

You can use the GDS tool provided by GaussDB(DWS) to import data from the remote server to GaussDB(DWS) in parallel. Multiple DNs are used for the import. This method is efficient and applicable to importing a large amount of data to the database.

+

Importing Data from MRS to a Cluster

+
  • Hive
  • Spark
+

You can configure a GaussDB(DWS) cluster to connect to an MRS cluster, and read data from the HDFS of MRS to GaussDB(DWS).

+
NOTE:

This import method is not supported currently.

+
+

This method features high performance and flexible scale-out.

+

Importing Data from One GaussDB(DWS) Cluster to Another

+

-

+

Data communication between two GaussDB(DWS) clusters is supported. You can use foreign tables to access and import data across GaussDB(DWS) clusters.

+

This method is applicable to data synchronization between multiple GaussDB(DWS) clusters.

+

Using the gsql Meta-Command \COPY to Import Data

+

Local files

+

Unlike the SQL COPY statement, the \copy command can be used to read data from or write data to only local files on a gsql client.

+

This method is easy-to-operate and suitable for importing a small amount of data to the database.

+

Running the COPY FROM STDIN Statement to Import Data

+

Other files or databases

+

When you use Java to develop applications, the CopyManager interface of the JDBC driver is invoked to write data from files or other databases to GaussDB(DWS).

+

Data is directly written from other databases to GaussDB(DWS). Service data does not need to be stored in files.

+

Using CDM to Migrate Data to GaussDB(DWS)

+

+
  • MySQL
  • PostgreSQL
  • Microsoft SQL Server
  • Oracle
  • IBM Db2
  • SAP HANA
  • GaussDB(DWS)
  • Hive
+

CDM can migrate various types of data in batches between homogeneous and heterogeneous data sources. CDM migrates data to GaussDB(DWS) using the COPY statement or the GDS parallel import method.

+

This method supports data import from abundant data sources and is easy-to-operate.

+

Using DSC to Migrate SQL Scripts

+

Databases, NoSQL, file systems, and big data platforms

+

For details, see the documents of the third-party ETL tool.

+

GaussDB(DWS) provides the DSC tool to migrate Teradata/Oracle scripts to GaussDB(DWS).

+

This method supports abundant data sources and provides powerful data conversion through OBS.

+

Using gs_dump and gs_dumpall to Export Metadata

+
  • Plaintext
  • Custom
  • Directory
  • .tar
+

gs_dump exports a single database or its objects. gs_dumpall exports all databases or global objects in a cluster.

+

To migrate database information, you can use a tool to import the exported metadata to a target database.

+

+

This method is applicable to metadata migration.

+

Using gs_restore to Import Data

+

SQL, TMP, and TAR file formats

+

During database migration, you can use the gs_restore tool to import the file exported using the gs_dump tool to a GaussDB(DWS) cluster. In this way, metadata, such as table definitions and database object definitions, is imported. The following data needs to be imported:

+
  • All database object definitions
  • Definition of each database object
  • Definition of each schema
  • Definition of each table
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0181.html b/docs/dws/dev/dws_04_0181.html new file mode 100644 index 00000000..cf757d07 --- /dev/null +++ b/docs/dws/dev/dws_04_0181.html @@ -0,0 +1,19 @@ + + +

Importing Data from OBS in Parallel

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0182.html b/docs/dws/dev/dws_04_0182.html new file mode 100644 index 00000000..cdb014eb --- /dev/null +++ b/docs/dws/dev/dws_04_0182.html @@ -0,0 +1,96 @@ + + +

About Parallel Data Import from OBS

+

The object storage service (OBS) is an object-based cloud storage service, featuring data storage of high security, proven reliability, and cost-effectiveness. OBS provides large storage capacity for you to store files of any type.

+

GaussDB(DWS), a data warehouse service, uses OBS as a platform for converting cluster data and external data, satisfying the requirements for secure, reliable, and cost-effective storage.

+

You can import data in TXT, CSV, ORC, or CarbonData format from OBS to GaussDB(DWS) for query, and can remotely read data from OBS. You are advised to import frequently accessed hot data to GaussDB(DWS) to facilitate queries, and to store cold data in OBS for remote read to reduce costs.

+

Currently, data can be imported using either of the following methods:

+ +
  • Ensure that no Chinese characters are contained in paths used for importing data to or exporting data from OBS.
  • Data cannot be imported to or exported from OBS across regions. Ensure that OBS and the DWS cluster are in the same region.
+
+

Overview

During data migration and Extract-Transform-Load (ETL), a massive volume of data needs to be imported to GaussDB(DWS) in parallel. The common import mode is time-consuming. When you import data in parallel using OBS foreign tables, source data files to be imported are identified based on the import URL and data formats specified in the tables. Data is imported in parallel through DNs to GaussDB(DWS), which improves the overall import performance.

+
+
Advantages: +
+

Disadvantage:

+

You need to create OBS foreign tables and store to-be-imported data on OBS.

+

Application Scenario:

+

A large volume of local data is imported concurrently on many DNs.

+

Related Concepts

+
+

How Data Is Imported

Figure 1 shows how data is imported from OBS. The CN plans and delivers data import tasks. It delivers tasks to each DN by file.

+

The delivery method is as follows:

+

In Figure 1, there are four DNs (DN0 to DN3) and OBS stores six files numbered from t1.data.0 to t1.data.5. The files are delivered as follows:

+

t1.data.0 -> DN0

+

t1.data.1 -> DN1

+

t1.data.2 -> DN2

+

t1.data.3 -> DN3

+

t1.data.4 -> DN0

+

t1.data.5 -> DN1

+

Two files are delivered to DN0 and DN1, respectively. One file is delivered to each of the other DNs.

+

The import performance is best when one OBS file is delivered to each DN and all the files are of the same size. To improve the performance of loading data from OBS, split the data file into multiple files as evenly as possible before uploading them to OBS. The recommended number of split files is an integer multiple of the DN quantity.
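To size the split, you can look up the DN count first. One way (assuming the pgxc_node system catalog is queryable in your cluster, with node_type 'D' marking DNs):

SELECT count(*) AS dn_count FROM pgxc_node WHERE node_type = 'D';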

+
Figure 1 Parallel data import using OBS foreign tables
+
+

Import Flowchart

Figure 2 Parallel import procedure
+ +
Table 1 Procedure description

Procedure

+

Description

+

Subtask

+

Upload data to OBS.

+

Plan the storage path on the OBS server and upload data files.

+

For details, see Uploading Data to OBS.

+

-

+

Create an OBS foreign table.

+

Create a foreign table to identify source data files on the OBS server. The OBS foreign table stores data source information, such as its bucket name, object name, file format, storage location, encoding format, and delimiter.

+

For details, see Creating an OBS Foreign Table.

+

-

+

Import data.

+

After creating the foreign table, run the INSERT statement to efficiently import data to the target tables.

+

For details, see Importing Data.

+

-

+

Handle the table with import errors.

+

If errors occur during data import, handle them based on the displayed error information described in Handling Import Errors to ensure data integrity.

+

-

+

Improve query efficiency.

+

After data is imported, run the ANALYZE statement to generate table statistics. The ANALYZE statement stores the statistics in the PG_STATISTIC system catalog. The plan generator uses these statistics to generate efficient query execution plans.

+

-

+
+
+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0183.html b/docs/dws/dev/dws_04_0183.html new file mode 100644 index 00000000..16e5fb95 --- /dev/null +++ b/docs/dws/dev/dws_04_0183.html @@ -0,0 +1,28 @@ + + +

Creating Access Keys (AK and SK)

+

In this example, OBS data is imported to GaussDB(DWS) databases. When users who have registered with the cloud platform access OBS using clients, APIs, or SDKs, access keys (AK/SK) are required for user authentication. Therefore, if you want to connect to the GaussDB(DWS) database through a client or a JDBC/ODBC application to access OBS, obtain the access keys (AK and SK) first.

+ +

Creating Access Keys (AK and SK)

Before creating an AK/SK pair, ensure that your account (used to log in to the management console) has passed real-name authentication.

+

To create an AK/SK pair on the management console, perform the following steps:

+
  1. Log in to the GaussDB(DWS) management console.
  2. Click the username in the upper right corner and choose My Credentials from the drop-down list.
  3. Select the Access Keys tab.

    If an access key already exists in the access key list, you can directly use it. However, you can view only Access Key ID in the access key list. You can download the key file containing the AK and SK only when adding an access key. If you do not have the key file, click Create Access Key to create one.

    +
    • Each user can create a maximum of two valid access keys. If there are already two access keys, delete them and create one. To delete an access key, you need to enter the current login password and email address or SMS verification code. Deletion is successful only after the verification is passed.
    • To ensure account security, change your access keys periodically and keep them secure.
    +
    +

  4. Click Add Access Key.
  5. In the displayed Add Access Key dialog box, enter the password and its verification code and click OK.

    • If you have not bound an email address or a mobile number, enter only the login password.
    • If you have bound an email address and a mobile phone number, you can use either of them for verification.
    +
    +

  6. In the displayed Download Access Key dialog box, click OK to save the access keys to your browser's default download path.

    • Keep the access keys secure to prevent them from being leaked.
    • If you click Cancel in the dialog box, the access keys will not be downloaded, and you cannot download them later. In this case, re-create access keys.
    +
    +

  7. Open the downloaded credentials.csv file to obtain the access keys (AK and SK).
+
+

Precautions

If you find that your AK/SK pair is abnormally used (for example, the AK/SK pair is lost or leaked) or will be no longer used, delete your AK/SK pair in the access key list or contact the administrator to reset your AK/SK pair.

+

When deleting the access keys, you need to enter the login password and either an email or mobile verification code.

+

Deleted AK/SK pairs cannot be restored.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0184.html b/docs/dws/dev/dws_04_0184.html new file mode 100644 index 00000000..4b717015 --- /dev/null +++ b/docs/dws/dev/dws_04_0184.html @@ -0,0 +1,104 @@ + + +

Uploading Data to OBS

+

Scenarios

Before importing data from OBS to a cluster, prepare source data files and upload these files to OBS. If the data files have been stored on OBS, you only need to complete 2 to 3 in Uploading Data to OBS.

+
+

Preparing Data Files

Prepare source data files to be uploaded to OBS. GaussDB(DWS) supports only source data files in CSV, TXT, ORC, or CarbonData format.

+

If user data cannot be saved in CSV format, store the data as any text file.

+

According to How Data Is Imported, when the source data file contains a large volume of data, evenly split the file into multiple files before storing it to OBS. The import performance is better when the number of files is an integer multiple of the DN quantity.

+
+
+

Assume that you have stored the following three CSV files in OBS:

+ +

Uploading Data to OBS

  1. Upload data to OBS.

    Store the source data files to be imported in the OBS bucket in advance.

    +
    1. Log in to the OBS management console.

      Click Service List and choose Object Storage Service to open the OBS management console.

      +
    2. Create a bucket.

      For details about how to create an OBS bucket, see "OBS Console Operation Guide > Managing Buckets > Creating a Bucket" in the Object Storage Service User Guide.

      +

      For example, create two buckets named mybucket and mybucket02.

      +
    3. Create a folder.

      For details, see "OBS Console Operation Guide > Managing Objects > Creating a Folder" in the Object Storage Service User Guide.

      +

      For example:

      +
      • Create a folder named input_data in the mybucket OBS bucket.
      • Create a folder named input_data in the mybucket02 OBS bucket.
      +
    4. Upload the files.

      For details, see "OBS Console Operation Guide > Managing Objects > Uploading a File" in the Object Storage Service User Guide..

      +

      For example:

      +
      • Upload the following data files to the input_data folder in the mybucket OBS bucket:
        1
        +2
        product_info.0
        +product_info.1
        +
        + +
        +
      • Upload the following data file to the input_data folder in the mybucket02 OBS bucket:
        1
        product_info.2
        +
        + +
        +
      +
    +

  2. Obtain the OBS path for storing source data files.

    After the source data files are uploaded to an OBS bucket, a globally unique access path is generated. The OBS path of the source data files is the value of the location parameter used for creating a foreign table.

    +

    The OBS path in the location parameter is in the obs://bucket_name/file_path/ format.

    +

    For example, the OBS paths are as follows:

    +
    1
    +2
    +3
    obs://mybucket/input_data/product_info.0
    +obs://mybucket/input_data/product_info.1
    +obs://mybucket02/input_data/product_info.2
    +
    + +
    +

  3. Grant the OBS bucket read permission for the user who will import data.

    When importing data from OBS to a cluster, the user must have the read permission for the OBS buckets where the source data files are located. You can configure the ACL for the OBS buckets to grant the read permission to a specific user.

    +

    For details, see "OBS Console Operation Guide > Permission Control > Configuring a Bucket ACL" in the Object Storage Service User Guide.

    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0185.html b/docs/dws/dev/dws_04_0185.html new file mode 100644 index 00000000..a629c322 --- /dev/null +++ b/docs/dws/dev/dws_04_0185.html @@ -0,0 +1,99 @@ + + +

Creating an OBS Foreign Table

+

Procedure

  1. Set location of the foreign table based on the path planned in Uploading Data to OBS.
  2. Obtain the access keys (AK and SK) to access OBS. To obtain access keys, log in to the management console, click the username in the upper right corner, and select My Credentials from the menu. Then choose Access Keys in the navigation tree on the left. On the Access Keys page, you can view the existing AKs or click Add Access Key to create an AK/SK pair.
  3. Set data format parameters for the foreign table based on the formats of data to be imported. You need to collect the following source data information:

    • format: format of the source data file in the foreign table. OBS foreign tables support CSV and TEXT formats. The default value is TEXT.
    • header: Whether the data file contains a table header. Only CSV files can have headers.
    • delimiter: Delimiter specified to separate data fields in a file. If no delimiter is specified, the default one will be used.
    • For more parameters used for foreign tables, see data format parameters.
    +

  4. Plan the error tolerance of parallel import to specify how errors are handled during the import.

    • fill_missing_fields: When the last column in a row of the source data file is empty, this parameter specifies whether to report an error or set this field in the row to NULL.
    • ignore_extra_data: When the number of columns in the source data file is greater than that specified in the foreign table, this parameter specifies whether to report an error or ignore the extra columns.
    • per node reject limit: This parameter specifies the number of data format errors allowed on each DN. If the number of errors recorded in the error table on a DN exceeds the specified value, the import fails and an error message will be reported. This parameter is optional.
    • compatible_illegal_chars: When an illegal character is encountered, this parameter specifies whether to report an error or to convert the character and proceed with the import.

      The following describes the rules for converting an invalid character:

      +
      • \0 is converted to a space.
      • Other invalid characters are converted to question marks (?).
      • If NULL, DELIMITER, QUOTE, or ESCAPE is also set to a space or question mark, an error message such as "illegal chars conversion may confuse COPY escape 0x20" is displayed, prompting you to modify parameter settings that may cause import errors.
      +
    • error_table_name: This parameter specifies the name of the table that records data format errors. After the parallel import, you can query this table for error details.
    • For details about the parameters, see error tolerance parameters.
    +

  5. Create an OBS foreign table based on the parameter settings in the preceding steps. For details about how to create a foreign table, see CREATE FOREIGN TABLE (for GDS Import and Export).
+
+

Example

Create a foreign table in the GaussDB(DWS) database. Parameters are described as follows:

+ + +

Based on the preceding settings, the foreign table is created using the following statements:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
DROP FOREIGN TABLE product_info_ext;
+
+CREATE FOREIGN TABLE product_info_ext
+(
+    product_price                integer        not null,
+    product_id                   char(30)       not null,
+    product_time                 date           ,
+    product_level                char(10)       ,
+    product_name                 varchar(200)   ,
+    product_type1                varchar(20)    ,
+    product_type2                char(10)       ,
+    product_monthly_sales_cnt    integer        ,
+    product_comment_time         date           ,
+    product_comment_num          integer        ,
+    product_comment_content      varchar(200)                   
+) 
+SERVER gsmpp_server 
+OPTIONS(
+
+LOCATION 'obs://mybucket/input_data/product_info | obs://mybucket02/input_data/product_info',
+FORMAT 'CSV' ,
+DELIMITER ',',
+encoding 'utf8',
+header 'false',
+ACCESS_KEY 'access_key_value_to_be_replaced',
+SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced',
+fill_missing_fields 'true',
+ignore_extra_data 'true'
+)
+READ ONLY 
+LOG INTO product_info_err 
+PER NODE REJECT LIMIT 'unlimited';
+
+ +
+

If the following information is displayed, the foreign table has been created:

+
1
CREATE FOREIGN TABLE
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0186.html b/docs/dws/dev/dws_04_0186.html new file mode 100644 index 00000000..8435f3d8 --- /dev/null +++ b/docs/dws/dev/dws_04_0186.html @@ -0,0 +1,77 @@ + + +

Importing Data

+

Context

Before importing data, you are advised to optimize your design and deployment based on the following best practices, to maximize system resource utilization and improve data import performance.

+ + + + +
+

Procedure

  1. Create a table in the GaussDB(DWS) database to store the data imported from the OBS. For details about the syntax, see CREATE TABLE.

    The structure of the table must be consistent with that of the fields in the source data file. That is, the number of fields and field types must be the same. In addition, the structure of the target table must be the same as that of the foreign table. The field names can be different.

    +

  2. (Optional) If the target table has an index, the index information is incrementally updated during the import, affecting data import performance. You are advised to delete the index from the target table before the import and recreate it after the import is complete.
  3. Import data.

    1
    INSERT INTO [Target table name] SELECT * FROM [Foreign table name]
    +
    + +
    +
    • If information similar to the following is displayed, the data has been imported. Query the error information table to check whether any data format errors occurred. For details, see Handling Import Errors.
      1
      INSERT 0 20
      +
      + +
      +
    • If data fails to be loaded, rectify the problem by following the instructions provided in Handling Import Errors and try again.
    +

+
+

Example

For example, create a table named product_info.

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
DROP TABLE IF EXISTS product_info;
+CREATE TABLE product_info
+(
+    product_price                integer        not null,
+    product_id                   char(30)       not null,
+    product_time                 date           ,
+    product_level                char(10)       ,
+    product_name                 varchar(200)   ,
+    product_type1                varchar(20)    ,
+    product_type2                char(10)       ,
+    product_monthly_sales_cnt    integer        ,
+    product_comment_time         date           ,
+    product_comment_num          integer        ,
+    product_comment_content      varchar(200)                   
+) 
+with (
+orientation = column,
+compression=middle
+) 
+DISTRIBUTE BY HASH (product_id);
+
+ +
+

Run the following statement to import data from the product_info_ext foreign table to the product_info table:

+
1
INSERT INTO product_info SELECT * FROM product_info_ext;
+
+ +
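After the import succeeds, generating statistics (as recommended in the import flowchart above) is a one-line follow-up:

ANALYZE product_info;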
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0187.html b/docs/dws/dev/dws_04_0187.html new file mode 100644 index 00000000..97615800 --- /dev/null +++ b/docs/dws/dev/dws_04_0187.html @@ -0,0 +1,141 @@ + + +

Handling Import Errors

+

Scenarios

Handle errors that occurred during data import.

+
+

Querying Error Information

Errors that occur when data is imported are divided into data format errors and non-data format errors.
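For data format errors, a minimal check is to query the error table declared with LOG INTO when the foreign table was created (product_info_err in the earlier example):

SELECT * FROM product_info_err;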

+
+ + +

Handling data import errors

Troubleshoot data import errors based on the obtained error information and the descriptions in the following table.

+ +
Table 2 Handling data import errors

Error Information

+

Cause

+

Solution

+

missing data for column "r_reason_desc"

+
  1. The number of columns in the source data file is less than that in the foreign table.
  2. In a TEXT format source data file, an escape character (for example, \) leads to delimiter or quote mislocation.

    Example: The following source row should contain three columns, but the escape character (\) turns the delimiter (|) into part of the second column's value, so the value of the third column is lost.

    +
    BE|Belgium\|1
    +
+
  1. If an error is reported due to missing columns, perform the following operations:
    • Add the r_reason_desc column to the source data file.
    • When creating a foreign table, set the parameter fill_missing_fields to on. In this way, if the last column of a row in the source data file is missing, it is set to NULL and no error will be reported.
    +
  2. Check whether the row where an error occurred contains the escape character (\). If the row contains such a character, you are advised to set the parameter noescaping to true when creating a foreign table, indicating that the escape character (\) and the characters following it are not escaped.
+

extra data after last expected column

+

The number of columns in the source data file is greater than that in the foreign table.

+
  • Delete the unnecessary columns from the source data file.
  • When creating a foreign table, set the parameter ignore_extra_data to on. In this way, if the number of columns in a source data file is greater than that in the foreign table, the extra columns at the end of rows will not be imported.
+

invalid input syntax for type numeric: "a"

+

The data type is incorrect.

+

In the source data file, change the data type of the columns to be imported. If this error information is displayed, change the data type to numeric.

+

null value in column "staff_id" violates not-null constraint

+

The not-null constraint is violated.

+

+

In the source data file, add values to the specified columns. If this error information is displayed, add values to the staff_id column.

+

duplicate key value violates unique constraint "reg_id_pk"

+

The unique constraint is violated.

+
  • Delete the duplicate rows from the source data file.
  • Run the SELECT statement with the DISTINCT keyword to ensure that all imported rows are unique.
    1
    INSERT INTO reasons SELECT DISTINCT * FROM foreign_tpcds_reasons;
    +
    + +
    +
+

value too long for type character varying(16)

+

The column length exceeds the upper limit.

+

In the source data file, change the column length. If this error information is displayed, reduce the column length to no greater than 16 bytes.

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0189.html b/docs/dws/dev/dws_04_0189.html new file mode 100644 index 00000000..99f98da1 --- /dev/null +++ b/docs/dws/dev/dws_04_0189.html @@ -0,0 +1,29 @@ + + +

Using GDS to Import Data from a Remote Server

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0190.html b/docs/dws/dev/dws_04_0190.html new file mode 100644 index 00000000..fce0739f --- /dev/null +++ b/docs/dws/dev/dws_04_0190.html @@ -0,0 +1,86 @@ + + +

Importing Data In Parallel Using GDS

+

INSERT and COPY statements are serially executed to import a small volume of data. To import a large volume of data to GaussDB(DWS), you can use GDS to import data in parallel using a foreign table.

+

In the current GDS version, you can import data to databases from pipe files.

+ +

Overview

You can import data in parallel from the common file system (excluding HDFS) of a server to GaussDB(DWS).

+

Data files to be imported are specified based on the import policy and data formats set in a foreign table. Data is imported in parallel through multiple DNs from source data files to the database, which improves the overall data import performance. Figure 1 shows an example.

+ +
You can pre-process data, such as replacing invalid characters and handling fault tolerance, by configuring parameters in a foreign table.
Figure 1 Importing data in parallel
+
+

The concepts mentioned in the preceding figure are described as follows:

+ +
+

Parallel Import Using GDS

+
+

Import Process

Figure 3 Concurrent import process using GDS
+ +
Table 1 Process description

Process

+

Description

+

Prepare source data.

+

Prepare the source data files to be imported to the database and upload the files to the data server.

+

For details, see Preparing Source Data.

+

Start GDS.

+

Install, configure, and start GDS on the data server.

+

For details, see Installing, Configuring, and Starting GDS.

+

Create a foreign table.

+

A foreign table is used to identify source files. The foreign table stores information of a source data file, such as location, format, destination location, encoding format, and data delimiter.

+

For details, see Creating a GDS Foreign Table.

+

Import data.

+

After creating the foreign table, run the INSERT statement to quickly import data to the target table. For details, see Importing Data.

+

Handle import errors.

+

If errors occur during parallel data import, handle errors based on the error information to ensure data integrity.

+

For details, see Handling Import Errors.

+

Improve query efficiency.

+

After data is imported, run the ANALYZE statement to generate table statistics. The ANALYZE statement stores the statistics in the PG_STATISTIC system catalog. The plan generator uses these statistics to generate efficient query execution plans.

+

Stop GDS.

+

After data is imported, log in to each data server and stop GDS.

+

For details, see Stopping GDS.

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0192.html b/docs/dws/dev/dws_04_0192.html new file mode 100644 index 00000000..b0326c2a --- /dev/null +++ b/docs/dws/dev/dws_04_0192.html @@ -0,0 +1,17 @@ + + +

Preparing Source Data

+

Scenario

Generally, the data to be imported has been uploaded to the data server. In this case, you only need to check the communication between the data server and GaussDB(DWS), and record the data storage directory on the data server before the import.

+

If the data has not been uploaded to the data server, perform the following operations to upload it:

+
+

Procedure

  1. Log in to the data server as user root.
  2. Create the directory /input_data.

    mkdir -p /input_data
    +

  3. Upload the source data files to the created directory.

    GDS parallel import supports source data only in CSV or TEXT format.

    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0193.html b/docs/dws/dev/dws_04_0193.html new file mode 100644 index 00000000..c5375598 --- /dev/null +++ b/docs/dws/dev/dws_04_0193.html @@ -0,0 +1,175 @@ + + +

Installing, Configuring, and Starting GDS

+

Scenario

GaussDB(DWS) uses GDS to allocate the source data for parallel data import. Deploy GDS on the data server.

+

If a large volume of data is stored on multiple data servers, install, configure, and start GDS on each server. Then, data on all the servers can be imported in parallel. The procedure for installing, configuring, and starting GDS is the same on each data server. This section describes how to perform this procedure on one data server.

+
+

Context

  1. The GDS version must match the cluster version. For example, GDS V100R008C00 matches DWS 1.3.X. Otherwise, the import or export may fail, or the import or export process may fail to respond.

    Therefore, use the latest version of GDS. After the database is upgraded, download the latest version of GaussDB(DWS) GDS as instructed in Procedure. When the import or export starts, GaussDB(DWS) checks the GDS versions. If the versions do not match, an error message is displayed and the import or export is terminated.

    +

    To obtain the version number of GDS, run the following command in the GDS decompression directory:

    +
    gds -V
    +

    To view the database version, run the following SQL statement after connecting to the database:

    +
    1
    SELECT version();
    +
    + +
    +
+
+

Procedure

  1. For details about how to import or export data using GDS, see "Tutorial: Using GDS to Import Data > Step 1: Preparing an ECS as the GDS Server" in the Data Warehouse Service User Guide.
  2. Log in as user root to the data server where GDS is to be installed and run the following command to create the directory for storing the GDS package:

    mkdir -p /opt/bin/dws
    +

  3. Upload the GDS package to the created directory.

    Use the SUSE Linux package as an example. Upload the GDS package dws_client_8.1.x_suse_x64.zip to the directory created in the previous step.

    +

  4. (Optional) If SSL is used, upload the SSL certificates to the directory created in 2.
  5. Go to the directory and decompress the package.

    cd /opt/bin/dws
    +unzip dws_client_8.1.x_suse_x64.zip
    +

  6. Create a GDS user and the user group to which the user belongs. This user is used to start GDS and read source data.

    groupadd gdsgrp
    +useradd -g gdsgrp gds_user
    +

  7. Change the owner of the GDS package directory and source data file directory to the GDS user.

    chown -R gds_user:gdsgrp /opt/bin/dws/gds
    +chown -R gds_user:gdsgrp /input_data 
    +

  8. Switch to user gds_user.

    su - gds_user
    +

    If the current cluster version is 8.0.x or earlier, skip 9 and go to 10.

    +

    If the current cluster version is 8.1.x, go to the next step.

    +

  9. Execute the script on which the environment depends (applicable only to 8.1.x).

    cd /opt/bin/dws/gds/bin
    +source gds_env
    +

  10. Start GDS.

GDS is portable software that can run directly after being decompressed. There are two ways to start GDS. One is to run the gds command and configure the startup parameters on the command line. The other is to write the startup parameters into the gds.conf configuration file and run the gds_ctl.py command to start GDS.

    +
    The first method is recommended when you do not need to import data again. The second method is recommended when you need to import data regularly.
    • Method 1: Run the gds command to start GDS.
      • If data is transmitted in non-SSL mode, run the following command to start GDS:
        gds -d dir -p ip:port -H address_string -l log_file -D -t worker_num
        +

        Example:

        +
        +/opt/bin/dws/gds/bin/gds -d /input_data/ -p 192.168.0.90:5000 -H 10.10.0.1/24 -l /opt/bin/dws/gds/gds_log.txt -D -t 2
        +
      • If data is transmitted in SSL mode, run the following command to start GDS:
        gds -d dir -p ip:port -H address_string -l log_file -D 
        +-t worker_num --enable-ssl --ssl-dir Cert_file
        +

        Example:

        +
        After the SSL certificate mentioned in 4 has been uploaded to /opt/bin, run the following command:
        +/opt/bin/dws/gds/bin/gds -d /input_data/ -p 192.168.0.90:5000 -H 10.10.0.1/24 -l /opt/bin/dws/gds/gds_log.txt -D --enable-ssl --ssl-dir /opt/bin/
        +
        +
      +

      Replace the information in italic as required.

      +
      • -d dir: directory for storing data files that contain data to be imported. This tutorial uses /input_data/ as an example.
      • -p ip:port: listening IP address and port for GDS. The default value is 127.0.0.1. Replace it with the IP address of a 10GE network that can communicate with GaussDB(DWS). The port number ranges from 1024 to 65535. The default port is 8098. This tutorial uses 192.168.0.90:5000 as an example.
      • -H address_string: specifies the hosts that are allowed to connect to and use GDS. The value must be in CIDR format. Configure this parameter to enable a GaussDB(DWS) cluster to access GDS for data import. Ensure that the network segment covers all hosts in a GaussDB(DWS) cluster.
      • -l log_file: GDS log directory and log file name. This tutorial uses /opt/bin/dws/gds/gds_log.txt as an example.
      • -D: GDS in daemon mode. This parameter is used only in Linux.
      • -t worker_num: number of concurrent GDS threads. If the data server and GaussDB(DWS) have available I/O resources, you can increase the number of concurrent GDS threads.

        GDS determines the number of threads based on the number of concurrent import transactions. Even if multi-thread import is configured before GDS startup, the import of a single transaction will not be accelerated. By default, an INSERT statement is an import transaction.

        +
      • --enable-ssl: enables SSL for data transmission.
      • --ssl-dir Cert_file: SSL certificate directory. Set this parameter to the certificate directory in 4.
      • For details about GDS parameters, see "GDS - Parallel Data Loader > gds" in the Data Warehouse Service (DWS) Tool Guide.
      +
    • Method 2: Write the startup parameters into the gds.conf configuration file and run the gds_ctl.py command to start GDS.
      1. Run the following command to go to the config directory of the GDS package and modify the gds.conf configuration file. For details about the parameters in the gds.conf configuration file, see Table 1.
        vim /opt/bin/dws/gds/config/gds.conf
        +

        Example:

        +

        The gds.conf configuration file contains the following information:

        +
        <?xml version="1.0"?>
        +<config>
        +<gds name="gds1" ip="192.168.0.90" port="5000" data_dir="/input_data/" err_dir="/err" data_seg="100MB" err_seg="100MB" log_file="/log/gds_log.txt" host="10.10.0.1/24" daemon='true' recursive="true" parallel="32"></gds>
        +</config>
        +

        Information in the configuration file is described as follows:

        +
        • The data server IP address is 192.168.0.90 and the GDS listening port is 5000.
        • Data files are stored in the /input_data/ directory.
        • Error log files are stored in the /err directory. The directory must be created by a user who has the GDS read and write permissions.
        • The size of a single data file is 100 MB.
        • The size of a single error log file is 100 MB.
        • Logs are stored in the /log/gds_log.txt file. The directory must be created by a user who has the GDS read and write permissions.
        • Only nodes with the IP address 10.10.0.* can be connected.
        • The GDS process is running in daemon mode.
        • Recursive data file directories are used.
        • The number of concurrent import threads is 32.
        +
      2. Start GDS and check whether it has been started.
        python3 gds_ctl.py start
        +

        Example:

        +
        cd /opt/bin/dws/gds/bin
        +python3 gds_ctl.py start
        +Start GDS gds1                  [OK]
        +gds [options]:
        + -d dir            Set data directory.
        + -p port           Set GDS listening port.
        +    ip:port        Set GDS listening ip address and port.
        + -l log_file       Set log file.
        + -H secure_ip_range
        +                   Set secure IP checklist in CIDR notation. Required for GDS to start.
        + -e dir            Set error log directory.
        + -E size           Set size of per error log segment.(0 < size < 1TB)
        + -S size           Set size of data segment.(1MB < size < 100TB)
        + -t worker_num     Set number of worker thread in multi-thread mode, the upper limit is 32. If without setting, the default value is 1.
        + -s status_file    Enable GDS status report.
        + -D                Run the GDS as a daemon process.
        + -r                Read the working directory recursively.
        + -h                Display usage.
        +
      +
    +
    +

+
+

gds.conf Parameter Description

+
Table 1 gds.conf configuration description

Attribute

+

Description

+

Value Range

+

name

+

Identifier

+

-

+

ip

+

Listening IP address

+

The IP address must be valid.

+

Default value: 127.0.0.1

+

port

+

Listening port

+

Value range: 1024 to 65535 (integer)

+

Default value: 8098

+

data_dir

+

Data file directory

+

-

+

err_dir

+

Error log file directory

+

Default value: data file directory

+

log_file

+

Log file path

+

-

+

host

+

Host IP addresses allowed to connect to GDS (The value must be in CIDR format, and this parameter is available for the Linux OS only.)

+

-

+

recursive

+

Whether the data file directories are recursive

+

Value range:

+
  • true: recursive
  • false: not recursive
+

Default value: false

+

daemon

+

Whether the process is running in daemon mode

+

Value range:

+
  • true: The process is running in daemon mode.
  • false: The process is not running in daemon mode.
+

Default value: false

+

parallel

+

Number of concurrent data import threads

+

Value range: 0 to 32 (integer)

+

Default value: 1

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0194.html b/docs/dws/dev/dws_04_0194.html new file mode 100644 index 00000000..6738a948 --- /dev/null +++ b/docs/dws/dev/dws_04_0194.html @@ -0,0 +1,114 @@ + + +

Creating a GDS Foreign Table

+

The source data information and GDS access information are configured in a foreign table. Then, GaussDB(DWS) can import data from a data server to a database table based on the configuration in the foreign table.

+

Procedure

  1. Collect source data information and GDS access information.

    You need to collect the following source data information:

    +
    • format: format of the data to be imported. Only data in CSV, TEXT, or FIXED format can be imported using GDS in parallel.
    • header: whether a source data file has a header. This parameter is set only for files in CSV or FIXED format.
    • delimiter: delimiter in the source data file. For example, it can be a comma (,).
    • encoding: encoding format of the data source file. Assume that the encoding format is UTF-8.
    • eol: line break character in the data file. It can be a default character, such as 0x0D0A or 0X0A, or a customized line break character, such as a string: !@#. This parameter can be set only for TEXT import.
    • For details about more source data information configured in a foreign table, see data format parameters.
    +

    You need to collect the following GDS access information:

    +

    location: GDS URL. GDS information in Installing, Configuring, and Starting GDS is used as an example. In non-SSL mode, location is set to gsfs://192.168.0.90:5000//input_data/. In SSL mode, location is set to gsfss://192.168.0.90:5000//input_data/. 192.168.0.90:5000 indicates the IP address and port number of GDS. input_data indicates the path of data source files managed by GDS. Replace the values as required.

    +

  2. Design an error tolerance mechanism for data import.

    GaussDB(DWS) supports the following error tolerance in data import:
    • fill_missing_fields: This parameter specifies whether to report an error when the last column in a row of the source data file is empty, or to fill the column with NULL.
    • ignore_extra_data: This parameter specifies whether to report an error when the number of columns in the source data file is greater than that specified in the foreign table, or to ignore the extra columns.
    • per node reject limit: This parameter specifies the number of data format errors allowed on each DN. If the number of errors recorded in the error table on a DN exceeds the specified value, the import will fail and an error message will be reported. You can also set it to unlimited.
    • compatible_illegal_chars: This parameter specifies whether to report an error when an illegal character is encountered, or to convert it and proceed with the import.

      The following describes the rules for converting an illegal character:

      +
      • \0 is converted to a space.
      • Other illegal characters are converted to question marks.
      • If NULL, DELIMITER, QUOTE, or ESCAPE is also set to a space or question mark, an error message such as "illegal chars conversion may confuse COPY escape 0x20" is displayed, prompting you to modify parameter settings that may cause import errors.
      +
    • error_table_name: This parameter specifies the name of the table that records data format errors. After the parallel import, you can query this table for error details.
    • remote log 'name': This parameter specifies whether to store data format errors in files on the GDS server. name is the prefix of the error data file.
    • For details about more error tolerance parameters, see error tolerance parameters.
    +
    +

  3. After connecting to the database using gsql or Data Studio, create a GDS foreign table based on the information collected in 1 and the error tolerance mechanism designed in 2.

    For example:

    +
    CREATE FOREIGN TABLE foreign_tpcds_reasons
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +)
    + SERVER gsmpp_server
    + OPTIONS 
    +(
    +LOCATION 'gsfs://192.168.0.90:5000/input_data | gsfs://192.168.0.91:5000/input_data', 
    +FORMAT 'CSV' ,
    +DELIMITER ',',
    +ENCODING 'utf8',
    +HEADER 'false',
    +FILL_MISSING_FIELDS 'true',
    +IGNORE_EXTRA_DATA 'true'
    +)
    +LOG INTO product_info_err 
    +PER NODE REJECT LIMIT 'unlimited';
    +
    + +
    +

    The following describes information in the preceding command:

    +
    • The columns specified in the foreign table must be the same as those in the target table.
    • Retain the value gsmpp_server for SERVER.
    • Set location based on the GDS access information collected in 1. If SSL is used, replace gsfs with gsfss.
    • Set FORMAT, DELIMITER, ENCODING, and HEADER based on the source data information collected in 1.
    • Set FILL_MISSING_FIELDS, IGNORE_EXTRA_DATA, LOG INTO, and PER NODE REJECT LIMIT based on the error tolerance mechanism designed in 2. LOG INTO specifies the name of the error table.
    +

    For details about the CREATE FOREIGN TABLE syntax, see CREATE FOREIGN TABLE (for GDS Import and Export).

    +

+
+

Example

For more examples, see Example of Importing Data Using GDS.

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0195.html b/docs/dws/dev/dws_04_0195.html new file mode 100644 index 00000000..60dad5d5 --- /dev/null +++ b/docs/dws/dev/dws_04_0195.html @@ -0,0 +1,51 @@ + + +

Importing Data

+

This section describes how to create tables in GaussDB(DWS) and import data to the tables.

+

Before importing all the data from a table containing over 10 million records, you are advised to import part of the data first and check whether there is data skew and whether the distribution keys need to be changed (see the query sketch below). Troubleshoot any data skew found. It is costly to address data skew and change the distribution keys after a large amount of data has been imported.

+
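A minimal skew-check sketch for such a partial import: it relies on the implicit xc_node_id system column of distributed tables and uses the example table reasons created later in this section. Markedly uneven row counts across nodes indicate skew.

SELECT xc_node_id, count(1) AS row_cnt
FROM reasons
GROUP BY xc_node_id
ORDER BY row_cnt DESC;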

Prerequisites

The GDS server can communicate with GaussDB(DWS).

+ +
+
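A quick connectivity check before starting an import, assuming the nc (netcat) utility is available and GDS listens on 192.168.0.90:5000 as in the examples in this chapter:

nc -vz 192.168.0.90 5000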

Procedure

  1. Create a table in GaussDB(DWS) to store imported data. For details, see CREATE TABLE.
  2. Import data.

    INSERT INTO [Target table name] SELECT * FROM [Foreign table name]
    +
    + +
    +
    • If information similar to the following is displayed, the data has been imported. Query the error information table to check whether any data format errors occurred. For details, see Handling Import Errors.
      INSERT 0 9
      +
    • If data fails to be loaded, troubleshoot the problem by following the instructions provided in Handling Import Errors and try again.
    +
    • If a data loading error occurs, the entire data import task fails.
    • Compile a batch-processing task script to import data concurrently. The degree of parallelism (DOP) depends on server resource usage. You can test-import several tables, monitor resource utilization, and increase or reduce the concurrency accordingly. Common resource monitoring commands include top for memory and CPU usage, iostat for I/O usage, and sar for network usage.
    • If possible, deploy more GDS servers; this can significantly improve data import efficiency. For an application case, see Importing Data in Parallel from Multiple Data Servers.
    • In a scenario where many GDS servers import data concurrently, increase the TCP Keepalive interval for connections between GDS servers and DNs to keep the connections stable. (The recommended interval is 5 minutes; see the sketch after this list.) Note that the TCP Keepalive settings of the cluster also affect its fault detection response time.
    +
    +
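A sketch of the TCP Keepalive tuning mentioned in the last item above, assuming a Linux host; 300 seconds corresponds to the recommended 5-minute interval. Verify the parameter names and values against your OS documentation before applying them.

# Probe idle connections after 5 minutes, then every 30 seconds, up to 9 times.
sysctl -w net.ipv4.tcp_keepalive_time=300
sysctl -w net.ipv4.tcp_keepalive_intvl=30
sysctl -w net.ipv4.tcp_keepalive_probes=9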

+
+

Example:

  1. Create a target table named reasons.

    CREATE TABLE reasons
    +(
    +  r_reason_sk   integer  not null,
    +  r_reason_id   char(16) not null,   
    +  r_reason_desc char(100)
    +)
    +DISTRIBUTE BY HASH (r_reason_sk);     
    +
    + +
    +

  2. Import data from source data files through the foreign_tpcds_reasons foreign table to the reasons table.

    INSERT INTO reasons SELECT * FROM foreign_tpcds_reasons;
    +
    + +
    +

  3. You can re-create indexes after the import is complete. (Importing into a table without indexes and creating them afterwards is usually faster than importing with indexes in place; see also the statistics note after this procedure.)

    CREATE INDEX reasons_idx ON reasons(r_reason_id);
    +
    + +
    +
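After the import and index creation, collecting statistics helps the optimizer generate good plans. A minimal follow-up for the example table; this step is general GaussDB(DWS) practice rather than part of the original procedure:

ANALYZE reasons;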

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0196.html b/docs/dws/dev/dws_04_0196.html new file mode 100644 index 00000000..141aeb5e --- /dev/null +++ b/docs/dws/dev/dws_04_0196.html @@ -0,0 +1,142 @@ + + +

Handling Import Errors

+

Scenarios

Handle errors that occurred during data import.

+
+

Querying Error Information

Errors that occur when data is imported are divided into data format errors and non-data format errors.

+
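For data format errors, query the error table that was specified by LOG INTO or error_table_name when the foreign table was created. For example, the foreign table created earlier in this chapter logs errors into product_info_err:

SELECT * FROM product_info_err;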

Handling Data Import Errors

Troubleshoot data import errors based on obtained error information and the description in the following table.

+ +
Table 2 Handling data import errors

Error Information

+

Cause

+

Solution

+

missing data for column "r_reason_desc"

+
  1. The number of columns in the source data file is less than that in the foreign table.
  2. In a TEXT format source data file, an escape character (for example, \) leads to delimiter or quote mislocation.

    Example: The target table contains three columns. In the following row, the escape character (\) turns the delimiter (|) into part of the second column's value, so the value of the third column is lost.

    +
    BE|Belgium\|1
    +
+
  1. If an error is reported due to missing columns, perform the following operations:
    • Add the r_reason_desc column to the source data file.
    • When creating a foreign table, set the parameter fill_missing_fields to on. In this way, if the last column of a row in the source data file is missing, it is set to NULL and no error will be reported.
    +
  2. Check whether the row where an error occurred contains the escape character (\). If the row contains such a character, you are advised to set the parameter noescaping to true when creating a foreign table, indicating that the escape character (\) and the characters following it are not escaped.
+

extra data after last expected column

+

The number of columns in the source data file is greater than that in the foreign table.

+
  • Delete the unnecessary columns from the source data file.
  • When creating a foreign table, set the parameter ignore_extra_data to on. In this way, if the number of columns in a source data file is greater than that in the foreign table, the extra columns at the end of rows will not be imported.
+

invalid input syntax for type numeric: "a"

+

The data type is incorrect.

+

In the source data file, correct the data so that it matches the data type of the target column. In this example, change the value to a valid numeric value.

+

null value in column "staff_id" violates not-null constraint

+

The not-null constraint is violated.

+

+

In the source data file, add values to the specified columns. If this error information is displayed, add values to the staff_id column.

+

duplicate key value violates unique constraint "reg_id_pk"

+

The unique constraint is violated.

+
  • Delete the duplicate rows from the source data file.
  • Run the SELECT statement with the DISTINCT keyword to ensure that all imported rows are unique.
    INSERT INTO reasons SELECT DISTINCT * FROM foreign_tpcds_reasons;
    +
    + +
    +
+

value too long for type character varying(16)

+

The column length exceeds the upper limit.

+

In the source data file, shorten the values in the affected column. In this example, reduce them to at most 16 bytes to fit character varying(16).

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0197.html b/docs/dws/dev/dws_04_0197.html new file mode 100644 index 00000000..c59962f8 --- /dev/null +++ b/docs/dws/dev/dws_04_0197.html @@ -0,0 +1,22 @@ + + +

Stopping GDS

+

Scenarios

Stop GDS after data is imported successfully.

+
+

Procedure

  1. Log in as user gds_user to the data server where GDS is installed.
  2. Select the mode of stopping GDS based on the mode of starting it.

    • If GDS is started using the gds command, perform the following operations to stop GDS:
      1. Query the GDS process ID:
        ps -ef|grep gds
        +

        For example, the GDS process ID is 128954.

        +
        ps -ef|grep gds
        +gds_user 128954      1  0 15:03 ?        00:00:00 gds -d /input_data/ -p 192.168.0.90:5000 -l /log/gds_log.txt -D
        +gds_user 129003 118723  0 15:04 pts/0    00:00:00 grep gds
        +
      2. Run the kill command to stop GDS. 128954 in the command is the GDS process ID.
        kill -9 128954
        +
      +
    +
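A one-line sketch combining the two steps above; it assumes a single GDS process is running and that the standard ps, grep, and awk tools are available:

kill -9 $(ps -ef | grep -w gds | grep -v grep | awk '{print $2}')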

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0198.html b/docs/dws/dev/dws_04_0198.html new file mode 100644 index 00000000..1873edb6 --- /dev/null +++ b/docs/dws/dev/dws_04_0198.html @@ -0,0 +1,254 @@ + + +

Example of Importing Data Using GDS

+

Importing Data in Parallel from Multiple Data Servers

The data servers and the cluster reside on the same intranet. The IP addresses are 192.168.0.90 and 192.168.0.91. Source data files are in CSV format.

+
  1. Create the target table tpcds.reasons.
    CREATE TABLE tpcds.reasons
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +);
    +
    + +
    +
  2. Log in to each GDS data server as user root and create the /input_data directory for storing data files on the servers. The following takes the data server whose IP address is 192.168.0.90 as an example. Operations on the other server are the same.
    mkdir -p /input_data
    +
  3. (Optional) Create a user and the user group it belongs to. The user is used to start GDS. If the user and user group exist, skip this step.
    groupadd gdsgrp
    +useradd -g gdsgrp gds_user
    +
  4. Evenly distribute source data files to the /input_data directories on the data servers.
  5. Change the owners of source data files and the /input_data directory on each data server to gds_user. The data server whose IP address is 192.168.0.90 is used as an example.
    chown -R gds_user:gdsgrp /input_data 
    +
  6. Log in to each data server as user gds_user and start GDS.

    The GDS installation path is /opt/bin/dws/gds. Source data files are stored in /input_data/. The IP addresses of the data servers are 192.168.0.90 and 192.168.0.91. The GDS listening port is 5000. GDS runs in daemon mode.

    +
    Start GDS on the data server whose IP address is 192.168.0.90.
    /opt/bin/dws/gds/bin/gds -d /input_data -p 192.168.0.90:5000 -H 10.10.0.1/24 -D
    +
    +

    Start GDS on the data server whose IP address is 192.168.0.91.

    +
    /opt/bin/dws/gds/bin/gds -d /input_data -p 192.168.0.91:5000 -H 10.10.0.1/24  -D
    +
  7. Create the foreign table tpcds.foreign_tpcds_reasons for receiving data from the data server.

    Import mode settings are as follows:

    +
    • Set the import mode to Normal.
    • When GDS is started, the source data file directory is /input_data and the GDS listening port is 5000. Therefore, set location to gsfs://192.168.0.90:5000/* | gsfs://192.168.0.91:5000/*.
    +

    Information about the data format is configured based on data format parameters specified during data export. The parameter configurations are as follows:

    +
    • format is set to CSV.
    • encoding is set to UTF-8.
    • delimiter is set to E'\x08'.
    • quote is set to E'\x1b'.
    • null is set to an empty string without quotation marks.
    • escape defaults to the value of quote.
    • header is set to false, indicating that the first row is identified as a data row in an imported file.
    +

    Configure import error tolerance parameters as follows:

    +
    • Set PER NODE REJECT LIMIT (number of allowed data format errors) to unlimited. In this case, all the data format errors detected during data import will be tolerated.
    • Set LOG INTO to err_tpcds_reasons. The data format errors detected during data import will be recorded in the err_tpcds_reasons table.
    +

    Based on the above settings, the foreign table is created using the following statement:

    +
    CREATE FOREIGN TABLE tpcds.foreign_tpcds_reasons
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +)
    +SERVER gsmpp_server OPTIONS (location 'gsfs://192.168.0.90:5000/* | gsfs://192.168.0.91:5000/*', format 'CSV',mode 'Normal', encoding 'utf8', delimiter E'\x08', quote E'\x1b', null '', fill_missing_fields 'false') LOG INTO err_tpcds_reasons PER NODE REJECT LIMIT 'unlimited';
    +
    + +
    +
  8. Import data through the foreign table tpcds.foreign_tpcds_reasons to the target table tpcds.reasons.
    INSERT INTO tpcds.reasons SELECT * FROM tpcds.foreign_tpcds_reasons;
    +
    + +
    +
  9. Query data import errors in the err_tpcds_reasons table and rectify the errors (if any). For details, see Handling Import Errors.
    SELECT * FROM err_tpcds_reasons;
    +
    + +
    +
  10. After data import is complete, log in to each data server as user gds_user and stop GDS.
    The data server whose IP address is 192.168.0.90 is used as an example. The GDS process ID is 128954.
    ps -ef|grep gds
    +gds_user 128954      1  0 15:03 ?        00:00:00 gds -d /input_data -p 192.168.0.90:5000 -D
    +gds_user 129003 118723  0 15:04 pts/0    00:00:00 grep gds
    +kill -9 128954
    +
    +
+
+

Importing Data Using Multiple Threads

The data servers and the cluster reside on the same intranet. The server IP address is 192.168.0.90. Source data files are in CSV format. Data will be imported to two tables using multiple threads in Normal mode.

+
  1. In the database, create the target tables tpcds.reasons1 and tpcds.reasons2.
    CREATE TABLE tpcds.reasons1
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +) ;
    +
    + +
    +
    CREATE TABLE tpcds.reasons2
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +) ;
    +
    + +
    +
  2. Log in to the GDS data server as user root, and then create the data file directory /input_data and its sub-directories /input_data/import1/ and /input_data/import2/.
    mkdir -p /input_data
    +
  3. Store the source data files of the target table tpcds.reasons1 in /input_data/import1/ and the source data files of the target table tpcds.reasons2 in /input_data/import2/.
  4. (Optional) Create a user and the user group it belongs to. The user is used to start GDS. If the user and user group already exist, skip this step.
    groupadd gdsgrp
    +useradd -g gdsgrp gds_user
    +
  5. Change the owners of source data files and the /input_data directory on the data server to gds_user.
    chown -R gds_user:gdsgrp /input_data 
    +
  6. Log in to the data server as user gds_user and start GDS.
    The GDS installation path is /gds. Source data files are stored in /input_data/. The IP address of the data server is 192.168.0.90. The GDS listening port is 5000. GDS runs in daemon mode. The degree of parallelism is 2. A recursive directory is specified.
    /gds/gds -d /input_data -p 192.168.0.90:5000 -H 10.10.0.1/24  -D -t 2 -r
    +
    +
  7. In the database, create the foreign tables tpcds.foreign_tpcds_reasons1 and tpcds.foreign_tpcds_reasons2 for the source data.

    The foreign table tpcds.foreign_tpcds_reasons1 is used as an example to describe how to configure parameters in a foreign table.

    +

    Import mode settings are as follows:

    +
    • Set the import mode to Normal.
    • When GDS is started, the configured source data file directory is /input_data and the GDS listening port is 5000. However, source data files are actually stored in /input_data/import1/. Therefore, set location to gsfs://192.168.0.90:5000/import1/*.
    +

    Information about the data format is configured based on data format parameters specified during data export. The parameter configurations are as follows:

    +
    • format is set to CSV.
    • encoding is set to UTF-8.
    • delimiter is set to E'\x08'.
    • quote is set to E'\x1b'.
    • null is set to an empty string without quotation marks.
    • escape defaults to the value of quote.
    • header is set to false, indicating that the first row is identified as a data row in an imported file.
    +

    Configure import error tolerance parameters as follows:

    +
    • Set PER NODE REJECT LIMIT (number of allowed data format errors) to unlimited. In this case, all the data format errors detected during data import will be tolerated.
    • Set LOG INTO to err_tpcds_reasons1. The data format errors detected during data import will be recorded in the err_tpcds_reasons1 table.
    • fill_missing_fields is set to on. If the last column of a row in a source data file is missing, it is set to NULL and no error is reported.
    +

    Based on the preceding settings, the foreign table tpcds.foreign_tpcds_reasons1 is created using the following statement:

    +
    CREATE FOREIGN TABLE tpcds.foreign_tpcds_reasons1
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +) SERVER gsmpp_server OPTIONS (location 'gsfs://192.168.0.90:5000/import1/*', format 'CSV',mode 'Normal', encoding 'utf8', delimiter E'\x08', quote E'\x1b', null '',fill_missing_fields 'on')LOG INTO err_tpcds_reasons1 PER NODE REJECT LIMIT 'unlimited';
    +
    + +
    +

    Based on the preceding settings, the foreign table tpcds.foreign_tpcds_reasons2 is created using the following statement:

    +
    CREATE FOREIGN TABLE tpcds.foreign_tpcds_reasons2
    +(
    +  r_reason_sk integer not null,
    +  r_reason_id char(16) not null,
    +  r_reason_desc char(100)
    +) SERVER gsmpp_server OPTIONS (location 'gsfs://192.168.0.90:5000/import2/*', format 'CSV',mode 'Normal', encoding 'utf8', delimiter E'\x08', quote E'\x1b', null '',fill_missing_fields 'on')LOG INTO err_tpcds_reasons2 PER NODE REJECT LIMIT 'unlimited';
    +
    + +
    +
  8. Import data through the foreign table tpcds.foreign_tpcds_reasons1 to tpcds.reasons1 and through tpcds.foreign_tpcds_reasons2 to tpcds.reasons2.
    INSERT INTO tpcds.reasons1 SELECT * FROM tpcds.foreign_tpcds_reasons1;
    +
    + +
    +
    INSERT INTO tpcds.reasons2 SELECT * FROM tpcds.foreign_tpcds_reasons2;
    +
    + +
    +
  9. Query data import errors in the err_tpcds_reasons1 and err_tpcds_reasons2 tables and rectify the errors (if any). For details, see Handling Import Errors.
    SELECT * FROM err_tpcds_reasons1;
    +SELECT * FROM err_tpcds_reasons2;
    +
    + +
    +
  10. After data import is complete, log in to the data server as user gds_user and stop GDS.
    The GDS process ID is 128954.
    ps -ef|grep gds
    +gds_user 128954      1  0 15:03 ?        00:00:00 gds -d /input_data -p 192.168.0.90:5000 -D -t 2 -r
    +gds_user 129003 118723  0 15:04 pts/0    00:00:00 grep gds
    +kill -9 128954
    +
    +
+
+

Importing Data Through a Pipe File

  1. Start GDS.

    gds -d /***/gds_data/ -p 192.168.0.1:7789 -l /***/gds_log/aa.log -H 0/0 -t 10 -D
    +

    If you need to set the timeout interval of a pipe, use the --pipe-timeout parameter.

    +

  2. Import data.

    1. Log in to the database and create an internal table.
      CREATE TABLE test_pipe_1( id integer not null, sex text not null, name  text );
      +
    2. Create a read-only foreign table.
      CREATE FOREIGN TABLE foreign_test_pipe_tr( like test_pipe_1 ) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.1:7789/foreign_test_pipe.pipe', FORMAT 'text', DELIMITER ',',  NULL '', EOL '0x0a' ,file_type 'pipe',auto_create_pipe 'false');
      +
    3. Execute the import statement. The statement blocks until data is written to the pipe.
      INSERT INTO test_pipe_1 select * from foreign_test_pipe_tr;
      +
    +

  3. Import data through a GDS pipe file.

    1. Log in to the GDS server and go to the GDS data directory.
      cd /***/gds_data/   
      +
    2. Create a pipe. If auto_create_pipe is set to true, skip this step.
      mkfifo foreign_test_pipe.pipe;  
      +

      A pipe will be automatically cleared after an operation is complete. To perform another operation, create a pipe again.

      +
      +
    3. Write data to the pipe.
      cat postgres_public_foreign_test_pipe_tw.txt > foreign_test_pipe.pipe
      +
    4. To feed a compressed file into the pipe, run the following command:
      gzip -d < out.gz > foreign_test_pipe.pipe
      +
    5. To feed an HDFS file into the pipe, run the following command:
      hdfs dfs -cat /user/hive/***/test_pipe.txt > foreign_test_pipe.pipe
      +
    +

  4. View the result returned by the import statement.

    INSERT INTO test_pipe_1 select * from foreign_test_pipe_tr;
    +INSERT 0 4
    +SELECT * FROM test_pipe_1;
    +id | sex |      name
    +----+-----+----------------
    +3 | 2   | 11111111111111
    +1 | 2   | 11111111111111
    +2 | 2   | 11111111111111
    +4 | 2   | 11111111111111
    +(4 rows)
    +
    +

+
+

Importing Data Through Multi-Process Pipes

GDS also supports importing data through multi-process pipes; that is, one foreign table corresponds to multiple GDS instances.

+

The following takes importing a local file as an example.

+
  1. Start multiple GDSs. If the GDSs have been started, skip this step.

    gds -d /***/gds_data/ -p 192.168.0.1:7789 -l /***/gds_log/aa.log -H 0/0 -t 10 -D
    +gds -d /***/gds_data_1/ -p 192.168.0.1:7790 -l /***/gds_log_1/aa.log -H 0/0 -t 10 -D
    +

    If you need to set the timeout interval of a pipe, use the --pipe-timeout parameter.

    +

  2. Import data.

    1. Log in to the database and create an internal table.
      CREATE TABLE test_pipe( id integer not null, sex text not null, name  text );
      +
    2. Create a read-only foreign table.
      CREATE FOREIGN TABLE foreign_test_pipe_tr( like test_pipe ) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.1:7789/foreign_test_pipe.pipe|gsfs://192.168.0.1:7790/foreign_test_pipe.pipe', FORMAT 'text', DELIMITER ',', NULL '', EOL '0x0a' , file_type 'pipe', auto_create_pipe 'false');
      +
    3. Execute the import statement. The statement blocks until data is written to the pipes.
      INSERT INTO test_pipe select * from foreign_test_pipe_tr;
      +
    +

  3. Import data through a GDS pipe file.

    1. Log in to GDS and go to each GDS data directory.
      cd /***/gds_data/ 
      +cd /***/gds_data_1/
      +
    2. Create a pipe. If auto_create_pipe is set to true, skip this step.
      mkfifo foreign_test_pipe.pipe;  
      +
    3. Write the data file to each pipe.
      cat postgres_public_foreign_test_pipe_tw.txt > foreign_test_pipe.pipe
      +
    +

  4. View the result returned by the import statement.

    INSERT INTO test_pipe select * from foreign_test_pipe_tr;
    +INSERT 0 4
    +SELECT * FROM test_pipe;
    +id | sex |      name
    +----+-----+----------------
    +3 | 2   | 11111111111111
    +1 | 2   | 11111111111111
    +2 | 2   | 11111111111111
    +4 | 2   | 11111111111111
    +(4 rows)
    +

+
+

Direct Data Import Between Clusters

  1. Start GDS. (If the process has been started, skip this step.)

    gds -d /***/gds_data/ -p GDS_IP:GDS_PORT -l /***/gds_log/aa.log -H 0/0 -t 10 -D
    +

    If you need to set the timeout interval of a pipe, use the --pipe-timeout parameter.

    +

  2. Export data from the source database.

    1. Log in to the source database, create an internal table, and write data to the table.
      CREATE TABLE test_pipe( id integer not null, sex text not null, name  text );
      +INSERT INTO test_pipe values(1,2,'11111111111111');
      +INSERT INTO test_pipe values(2,2,'11111111111111');
      +INSERT INTO test_pipe values(3,2,'11111111111111');
      +INSERT INTO test_pipe values(4,2,'11111111111111');
      +INSERT INTO test_pipe values(5,2,'11111111111111');
      +
    2. Create a write-only foreign table.
      CREATE FOREIGN TABLE foreign_test_pipe( id integer not null, age text not null, name  text ) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://GDS_IP:GDS_PORT/', FORMAT 'text', DELIMITER ',', NULL '', EOL '0x0a' ,file_type 'pipe') WRITE ONLY;
      +
    3. Execute the export statement. The statement blocks until the data is read from the pipe.
      INSERT INTO foreign_test_pipe SELECT * FROM test_pipe;
      +
    +

  3. Import data to the target cluster.

    1. Create an internal table.
      CREATE TABLE test_pipe (id integer not null, sex text not null, name text);
      +
    2. Create a read-only foreign table.
      CREATE FOREIGN TABLE foreign_test_pipe(like test_pipe) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://GDS_IP:GDS_PORT/', FORMAT 'text', DELIMITER ',', NULL '', EOL '0x0a' , file_type 'pipe', auto_create_pipe 'false');
      +
    3. Run the following command to import data to the table:
      INSERT INTO test_pipe SELECT * FROM foreign_test_pipe;
      +
    +

  4. View the result returned by the import statement from the target cluster.

    SELECT * FROM test_pipe;
    + id | sex |      name
    +----+-----+----------------
    +  3 | 2   | 11111111111111
    +  6 | 2   | 11111111111111
    +  7 | 2   | 11111111111111
    +  1 | 2   | 11111111111111
    +  2 | 2   | 11111111111111
    +  4 | 2   | 11111111111111
    +  5 | 2   | 11111111111111
    +  8 | 2   | 11111111111111
    +  9 | 2   | 11111111111111
    +(9 rows)
    +

+

By default, the pipe file exported from or imported to GDS is named in the format Database name_Schema name_Foreign table name.pipe. Therefore, the database name and schema name of the target cluster must be the same as those of the source cluster. If the database or schema differs, you can specify the same pipe file explicitly in the location URL.

+

For example:

+
  • Pipe name specified by a write-only foreign table
    CREATE FOREIGN TABLE foreign_test_pipe(id integer not null, age text not null, name  text) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://GDS_IP:GDS_PORT/foreign_test_pipe.pipe', FORMAT 'text', DELIMITER ',',  NULL '', EOL '0x0a' ,file_type 'pipe') WRITE ONLY;
    +
  • Pipe name specified by a read-only foreign table
    CREATE FOREIGN TABLE foreign_test_pipe(like test_pipe) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://GDS_IP:GDS_PORT/foreign_test_pipe.pipe', FORMAT 'text', DELIMITER ',',  NULL '', EOL '0x0a' ,file_type 'pipe',auto_create_pipe 'false');
    +
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0203.html b/docs/dws/dev/dws_04_0203.html new file mode 100644 index 00000000..06886b73 --- /dev/null +++ b/docs/dws/dev/dws_04_0203.html @@ -0,0 +1,21 @@ + + +

Running the COPY FROM STDIN Statement to Import Data

+
+
+ diff --git a/docs/dws/dev/dws_04_0204.html b/docs/dws/dev/dws_04_0204.html new file mode 100644 index 00000000..37b2a6ea --- /dev/null +++ b/docs/dws/dev/dws_04_0204.html @@ -0,0 +1,13 @@ + + +

Data Import Using COPY FROM STDIN

+

This method is applicable to low-concurrency scenarios where a small volume of data is to be imported.

+

Use either of the following methods to write data to GaussDB(DWS) using the COPY FROM STDIN statement:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0205.html b/docs/dws/dev/dws_04_0205.html new file mode 100644 index 00000000..90e2fa9e --- /dev/null +++ b/docs/dws/dev/dws_04_0205.html @@ -0,0 +1,105 @@ + + +

Introduction to the CopyManager Class

+

CopyManager is an API class provided by the JDBC driver in GaussDB(DWS). It is used to import data to GaussDB(DWS) in batches.

+

Inheritance Relationship of CopyManager

The CopyManager class is in the org.postgresql.copy package and inherits from the java.lang.Object class. The declaration of the class is as follows:

+
public class CopyManager
+extends Object
+
+

Constructor Method

public CopyManager(BaseConnection connection)

+

throws SQLException

+
+

Basic Methods

+
Table 1 Basic methods of CopyManager

Return Value

+

Method

+

Description

+

Throws

+

CopyIn

+

copyIn(String sql)

+

-

+

SQLException

+

long

+

copyIn(String sql, InputStream from)

+

Uses COPY FROM STDIN to quickly import data to tables in a database from InputStream.

+

SQLException,IOException

+

long

+

copyIn(String sql, InputStream from, int bufferSize)

+

Uses COPY FROM STDIN to quickly import data to tables in a database from InputStream.

+

SQLException,IOException

+

long

+

copyIn(String sql, Reader from)

+

Uses COPY FROM STDIN to quickly import data to tables in a database from Reader.

+

SQLException,IOException

+

long

+

copyIn(String sql, Reader from, int bufferSize)

+

Uses COPY FROM STDIN to quickly import data to tables in a database from Reader.

+

SQLException,IOException

+

CopyOut

+

copyOut(String sql)

+

-

+

SQLException

+

long

+

copyOut(String sql, OutputStream to)

+

Sends the result set of COPY TO STDOUT from a database to the OutputStream class.

+

SQLException,IOException

+

long

+

copyOut(String sql, Writer to)

+

Sends the result set of COPY TO STDOUT from a database to the Writer class.

+

SQLException,IOException

+
+
+
+
+
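A minimal usage sketch of the copyIn(String sql, Reader from) method listed above. The connection URL, user, password, and table name mirror the examples in the following sections and are assumptions; error handling is omitted for brevity.

//Import two rows into migration_table from an in-memory string using CopyManager.
import java.io.StringReader;
import java.sql.Connection;
import java.sql.DriverManager;
import org.postgresql.copy.CopyManager;
import org.postgresql.core.BaseConnection;

public class CopyInSketch {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://10.180.155.74:8000/gaussdb", "jack", "********");
        CopyManager copyManager = new CopyManager((BaseConnection) conn);
        //copyIn returns the number of rows written to the table.
        long rows = copyManager.copyIn(
                "COPY migration_table FROM STDIN DELIMITER '|'",
                new StringReader("1|a|b|c\n2|d|e|f\n"));
        System.out.println("Imported rows: " + rows);
        conn.close();
    }
}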
+ +
+ diff --git a/docs/dws/dev/dws_04_0206.html b/docs/dws/dev/dws_04_0206.html new file mode 100644 index 00000000..440d5235 --- /dev/null +++ b/docs/dws/dev/dws_04_0206.html @@ -0,0 +1,222 @@ + + +

Example: Importing and Exporting Data Through Local Files

+

When Java is used for secondary development based on GaussDB(DWS), you can use the CopyManager interface to export data from the database to a local file or import a local file into the database by streaming. The file can be in CSV or TEXT format.

+

The sample program is as follows. Load the GaussDB(DWS) JDBC driver before running it.

+
//gsjdbc4.jar is used as an example.
+import java.sql.Connection; 
+import java.sql.DriverManager; 
+import java.io.IOException;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.sql.SQLException; 
+import org.postgresql.copy.CopyManager; 
+import org.postgresql.core.BaseConnection;
+ 
+public class Copy{ 
+
+     public static void main(String[] args) 
+     { 
+      String urls = new String("jdbc:postgresql://10.180.155.74:8000/gaussdb"); //Database URL
+      String username = new String("jack");            //Username
+      String password = new String("********");       // Password
+      String tablename = new String("migration_table"); //Define table information.
+      String tablename1 = new String("migration_table_1"); //Define table information.
+      String driver = "org.postgresql.Driver"; 
+      Connection conn = null; 
+      
+      try { 
+            Class.forName(driver); 
+            conn = DriverManager.getConnection(urls, username, password);         
+          } catch (ClassNotFoundException e) { 
+               e.printStackTrace(System.out); 
+          } catch (SQLException e) { 
+               e.printStackTrace(System.out); 
+          } 
+      
+      //Export the query result of SELECT * FROM migration_table to the local file d:/data.txt.
+      try {
+     copyToFile(conn, "d:/data.txt", "(SELECT * FROM migration_table)");
+   } catch (SQLException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+   } catch (IOException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+   }    
+      //Import data from the d:/data.txt file to the migration_table_1 table.
+      try {
+      copyFromFile(conn, "d:/data.txt", tablename1);
+   } catch (SQLException e) {
+  // TODO Auto-generated catch block
+         e.printStackTrace();
+ } catch (IOException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+ }  
+
+      //Export the data from the migration_table_1 table to the d:/data1.txt file.
+      try {
+      copyToFile(conn, "d:/data1.txt", tablename1);
+   } catch (SQLException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+   } catch (IOException e) {
+  // TODO Auto-generated catch block
+  e.printStackTrace();
+ }        
+     } 
+
+  public static void copyFromFile(Connection connection, String filePath, String tableName)   
+         throws SQLException, IOException {  
+       
+     FileInputStream fileInputStream = null;  
+   
+     try {  
+         CopyManager copyManager = new CopyManager((BaseConnection)connection);  
+         fileInputStream = new FileInputStream(filePath);  
+         copyManager.copyIn("COPY " + tableName + " FROM STDIN", fileInputStream);  
+     } finally {  
+         if (fileInputStream != null) {  
+             try {  
+                 fileInputStream.close();  
+             } catch (IOException e) {  
+                 e.printStackTrace();  
+             }  
+         }  
+     }  
+ }  
+  
+  public static void copyToFile(Connection connection, String filePath, String tableOrQuery)   
+          throws SQLException, IOException {  
+        
+      FileOutputStream fileOutputStream = null;  
+   
+      try {  
+          CopyManager copyManager = new CopyManager((BaseConnection)connection);  
+          fileOutputStream = new FileOutputStream(filePath);  
+          copyManager.copyOut("COPY " + tableOrQuery + " TO STDOUT", fileOutputStream);  
+      } finally {  
+          if (fileOutputStream != null) {  
+              try {  
+                  fileOutputStream.close();  
+              } catch (IOException e) {  
+                  e.printStackTrace();  
+              }  
+          }  
+      }  
+  }  
+}
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0207.html b/docs/dws/dev/dws_04_0207.html new file mode 100644 index 00000000..f3afb1ee --- /dev/null +++ b/docs/dws/dev/dws_04_0207.html @@ -0,0 +1,183 @@ + + +

Example: Migrating Data from MySQL to GaussDB(DWS)

+

The following example shows how to use CopyManager to migrate data from MySQL to GaussDB(DWS).

+
//gsjdbc4.jar is used as an example.
+import java.io.StringReader;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import org.postgresql.copy.CopyManager;
+import org.postgresql.core.BaseConnection;
+
+public class Migration{
+
+    public static void main(String[] args) {
+        String url = new String("jdbc:postgresql://10.180.155.74:8000/gaussdb"); //Database URL
+        String user = new String("jack");            //Database username
+        String pass = new String("********");             //Database password
+        String tablename = new String("migration_table"); //Define table information.
+        String delimiter = new String("|");              //Define a delimiter.
+        String encoding = new String("UTF8");            //Define a character set.
+        String driver = "org.postgresql.Driver";
+        StringBuffer buffer = new StringBuffer();       //Define the buffer to store formatted data.
+
+        try {
+            //Obtain the query result set of the source database.
+            ResultSet rs = getDataSet();
+
+            //Traverse the result set and obtain records row by row.
+            //The values of columns in each record are separated by the specified delimiter and end with a newline character to form strings.
+//Add the strings to the buffer.
+            while (rs.next()) {
+                buffer.append(rs.getString(1) + delimiter
+                        + rs.getString(2) + delimiter
+                        + rs.getString(3) + delimiter
+                        + rs.getString(4)
+                        + "\n");
+            }
+            rs.close();
+
+            try {
+                //Connect to the target database.
+                Class.forName(driver);
+                Connection conn = DriverManager.getConnection(url, user, pass);
+                BaseConnection baseConn = (BaseConnection) conn;
+                baseConn.setAutoCommit(false);
+
+                //Initialize table information.
+                String sql = "Copy " + tablename + " from STDIN DELIMITER " + "'" + delimiter + "'" + " ENCODING " + "'" + encoding + "'";
+
+                //Submit data in the buffer.
+                CopyManager cp = new CopyManager(baseConn);
+                StringReader reader = new StringReader(buffer.toString());
+                cp.copyIn(sql, reader);
+                baseConn.commit();
+                reader.close();
+                baseConn.close();
+            } catch (ClassNotFoundException e) {
+                e.printStackTrace(System.out);
+            } catch (SQLException e) {
+                e.printStackTrace(System.out);
+            }
+
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+    //******************************** 
+    //Return the query result set from the source database.
+    //********************************* 
+    private static ResultSet getDataSet() {
+        ResultSet rs = null;
+        try {
+            Class.forName("com.mysql.jdbc.Driver").newInstance();
+            Connection conn = DriverManager.getConnection("jdbc:mysql://10.119.179.227:3306/jack?useSSL=false&allowPublicKeyRetrieval=true", "jack", "********");
+            Statement stmt = conn.createStatement();
+            rs = stmt.executeQuery("select * from migration_table");
+        } catch (SQLException e) {
+            e.printStackTrace();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        return rs;
+    }
+}
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0208.html b/docs/dws/dev/dws_04_0208.html new file mode 100644 index 00000000..71d1b6ca --- /dev/null +++ b/docs/dws/dev/dws_04_0208.html @@ -0,0 +1,138 @@ + + +

Using the gsql Meta-Command \COPY to Import Data

+

The gsql tool of GaussDB(DWS) provides the \copy meta-command to import data.

+

\copy Command

For details about the \copy command, see Table 1.

+ +
Table 1 \copy meta-command

Syntax

+

Description

+

\copy { table [ ( column_list ) ] | ( query ) } { from | to }
{ filename | stdin | stdout | pstdin | pstdout }
[ with ] [ binary ] [ oids ]
[ delimiter [ as ] 'character' ] [ null [ as ] 'string' ]
[ csv [ header ] [ quote [ as ] 'character' ] [ escape [ as ] 'character' ]
[ force quote column_list | * ] [ force not null column_list ] ]

+

You can run this command to import or export data after logging in to the database on any gsql client. Different from the COPY statement in SQL, this command performs read/write operations on local files rather than files on database servers. The accessibility and permissions of the local files are restricted to local users.

+
NOTE:

\copy applies only to small-batch data import with uniform formats, and its error tolerance is poor. GDS or COPY is preferred for data import.

+
+
+
+
+

Parameter Description

+
+

Examples

Create the target table copy_example.
create table copy_example
+(
+	col_1 integer,
+	col_2 text,
+	col_3 varchar(12),
+	col_4 date,
+	col_5 time
+);
+
+ +
+
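Continuing the example, a sketch of importing a local CSV file into copy_example with the \copy meta-command; the file path is hypothetical, and the options follow the syntax above:

\copy copy_example from '/home/dbadmin/copy_example.csv' with csv header;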
+ +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0209.html b/docs/dws/dev/dws_04_0209.html new file mode 100644 index 00000000..f00ed48d --- /dev/null +++ b/docs/dws/dev/dws_04_0209.html @@ -0,0 +1,243 @@ + + +

Using gs_restore to Import Data

+

Scenarios

gs_restore is an import tool provided by GaussDB(DWS). You can use gs_restore to import the files exported by gs_dump to a database. gs_restore can import the files in .tar, custom, or directory format.

+

gs_restore can:

+ +

You can specify and sort the data to be imported.

+
+

Procedure

gs_restore imports data incrementally by default. To prevent data exceptions caused by consecutive imports, use the -e and -c parameters for each import. In this way, existing objects are deleted from the target database before each import, and if an SQL statement fails, the system exits the current import task with an error (error messages are displayed after the import process is complete) and proceeds with the next task.

+
+
  1. Log in to the server as the root user and run the following command to go to the data storage path:

    cd /opt/bin
    +
    + +
    +

  2. Use gs_restore to import all object definitions from the exported file of the whole postgres database to the gaussdb database.

    gs_restore -W password -U jack /home//backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d backupdb -s -e -c
    + +
    Table 1 Common parameters

    Parameter

    +

    Description

    +

    Example Value

    +

    -U

    +

    Username for database connection.

    +

    -U jack

    +

    -W

    +

    User password for database connection.

    +
    • This parameter is not required for database administrators if the trust policy is used for authentication.
    • If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password.
    +

    -W Password

    +

    -d

    +

    Database to which data will be imported.

    +

    -d backupdb

    +

    -p

    +

TCP port on which the server is listening, or the local Unix domain socket file name extension. This parameter is configured to ensure connections.

    +

    -p 8000

    +

    -h

    +

    Cluster address: If a public network address is used for connection, set this parameter to Public Network Address or Public Network Domain Name. If a private network address is used for connection, set this parameter to Private Network Address or Private Network Domain Name.

    +

    -h 10.10.10.100

    +

    -e

    +

Exits the current import task and proceeds with the next one if an error occurs when an SQL statement is sent in the current task. Error messages are displayed after the import process is complete.

    +

    -

    +

    -c

    +

    Cleans existing objects from the target database before the import.

    +

    -

    +

    -s

    +

    Imports only object definitions in schemas and does not import data. Sequence values will also not be imported.

    +

    -

    +
    +
    +

    For details about other parameters, see "Server Tools > gs_restore" in the Tool Reference.

    +

+
+

Examples

Example 1: Use gs_restore to run the following command to import data and all object definitions of the gaussdb database from the MPPDB_backup.dmp file (custom format).

+
gs_restore -W password backup/MPPDB_backup.dmp -p 8000 -h 10.10.10.100 -d backupdb
+gs_restore[2017-07-21 19:16:26]: restore operation successful
+gs_restore: total time: 13053  ms
+

Example 2: Use gs_restore to run the following command to import data and all object definitions of the gaussdb database from the MPPDB_backup.tar file.

+
gs_restore backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d backupdb 
+gs_restore[2017-07-21 19:21:32]: restore operation successful
+gs_restore[2017-07-21 19:21:32]: total time: 21203  ms
+

Example 3: Use gs_restore to run the following command to import data and all object definitions of the gaussdb database from the MPPDB_backup directory.

+
gs_restore backup/MPPDB_backup -p 8000 -h 10.10.10.100 -d backupdb
+gs_restore[2017-07-21 19:26:46]: restore operation successful
+gs_restore[2017-07-21 19:26:46]: total time: 21003  ms
+

Example 4: Use gs_restore to run the following command to import all object definitions of the gaussdb database to the backupdb database. Before the import, the gaussdb database contains complete object definitions. After the import, all object definitions exist in the backupdb database, but its tables contain no data.

+
gs_restore -W password /home//backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d backupdb -s -e -c 
+gs_restore[2017-07-21 19:46:27]: restore operation successful
+gs_restore[2017-07-21 19:46:27]: total time: 32993  ms
+

Example 5: Use gs_restore to run the following command to import data and all definitions in the PUBLIC schema from the MPPDB_backup.dmp file. Existing objects are deleted from the target database before the import. If an existing object references an object in another schema, you must manually delete the referenced object first.

+
gs_restore backup/MPPDB_backup.dmp -p 8000 -h 10.10.10.100 -d backupdb -e -c -n PUBLIC
+gs_restore: [archiver (db)] Error while PROCESSING TOC:
+gs_restore: [archiver (db)] Error from TOC entry 313; 1259 337399 TABLE table1 gaussdba
+gs_restore: [archiver (db)] could not execute query: ERROR:  cannot drop table table1 because other objects depend on it
+DETAIL:  view t1.v1 depends on table table1
+HINT:  Use DROP ... CASCADE to drop the dependent objects too.
+Command was: DROP TABLE public.table1;
+

Manually delete the referenced object and create it again after the import is complete.

+
gs_restore backup/MPPDB_backup.dmp -p 8000 -h 10.10.10.100 -d backupdb -e -c -n PUBLIC
+gs_restore[2017-07-21 19:52:26]: restore operation successful
+gs_restore[2017-07-21 19:52:26]: total time: 2203  ms
+

Example 6: Use gs_restore to run the following command to import the definition of the hr.staffs table in the PUBLIC schema from the MPPDB_backup.dmp file. Before the import, the hr.staffs table does not exist.

+
gs_restore backup/MPPDB_backup.dmp -p 8000 -h 10.10.10.100 -d backupdb -e -c -s -n PUBLIC -t hr.staffs
+gs_restore[2017-07-21 19:56:29]: restore operation successful
+gs_restore[2017-07-21 19:56:29]: total time: 21000  ms
+

Example 7: Use gs_restore to run the following command to import data of the hr.staffs table in the PUBLIC schema from the MPPDB_backup.dmp file. Before the import, the hr.staffs table is empty.

+
gs_restore backup/MPPDB_backup.dmp -p 8000 -h 10.10.10.100 -d backupdb -e -a -n PUBLIC -t hr.staffs
+gs_restore[2017-07-21 20:12:32]: restore operation successful
+gs_restore[2017-07-21 20:12:32]: total time: 20203  ms
+
Example 8: Use gs_restore to run the following command to import the definition of the hr.staffs table. Before the import, the hr.staffs table contains data.
human_resource=# select * from hr.staffs;
+ staff_id | first_name  |  last_name  |  email   |    phone_number    |      hire_date      | employment_id |  salary  | commission_pct | manager_id | section_id 
+----------+-------------+-------------+----------+--------------------+---------------------+---------------+----------+----------------+------------+------------
+      200 | Jennifer    | Whalen      | JWHALEN  | 515.123.4444       | 1987-09-17 00:00:00 | AD_ASST       |  4400.00 |                |        101 |         10
+      201 | Michael     | Hartstein   | MHARTSTE | 515.123.5555       | 1996-02-17 00:00:00 | MK_MAN        | 13000.00 |                |        100 |         20
+
+gsql -d human_resource -p 8000
+gsql ((GaussDB 8.1.1 build af002019) compiled at 2020-01-10 05:43:20 commit 6995 last mr 11566 )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+human_resource=# drop table hr.staffs CASCADE;
+NOTICE:  drop cascades to view hr.staff_details_view
+DROP TABLE
+
+gs_restore -W password /home//backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d human_resource -n hr -t staffs -s -e 
+restore operation successful
+total time: 904  ms
+
+human_resource=# select * from hr.staffs;
+ staff_id | first_name | last_name | email | phone_number | hire_date | employment_id | salary | commission_pct | manager_id | section_id 
+----------+------------+-----------+-------+--------------+-----------+---------------+--------+----------------+------------+------------
+(0 rows)
+
+
Example 9: Use gs_restore to run the following command to import data and definitions of the staffs and areas tables. Before the import, the staffs and areas tables do not exist.
human_resource=# \d
+                                 List of relations
+ Schema |        Name        | Type  |  Owner   |             Storage              
+--------+--------------------+-------+----------+----------------------------------
+ hr     | employment_history | table |  | {orientation=row,compression=no}
+ hr     | employments        | table |  | {orientation=row,compression=no}
+ hr     | places             | table |  | {orientation=row,compression=no}
+ hr     | sections           | table |  | {orientation=row,compression=no}
+ hr     | states             | table |  | {orientation=row,compression=no}
+(5 rows)
+
+gs_restore -W password /home/mppdb/backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d human_resource -n hr -t staffs -n hr -t areas 
+restore operation successful
+total time: 724  ms
+
+human_resource=# \d
+                                 List of relations
+ Schema |        Name        | Type  |  Owner   |             Storage              
+--------+--------------------+-------+----------+----------------------------------
+ hr     | areas              | table |  | {orientation=row,compression=no}
+ hr     | employment_history | table |  | {orientation=row,compression=no}
+ hr     | employments        | table |  | {orientation=row,compression=no}
+ hr     | places             | table |  | {orientation=row,compression=no}
+ hr     | sections           | table |  | {orientation=row,compression=no}
+ hr     | staffs             | table |  | {orientation=row,compression=no}
+ hr     | states             | table |  | {orientation=row,compression=no}
+(7 rows)
+
+human_resource=# select * from hr.areas;
+ area_id |       area_name        
+---------+------------------------
+       4 | Iron
+       1 | Wood
+       2 | Lake
+       3 | Desert
+(4 rows)
+
+
Example 10: Use gs_restore to run the following command to import data and all object definitions in the hr schema.
gs_restore -W password  /home//backup/MPPDB_backup1.sql -p 8000 -h 10.10.10.100 -d backupdb -n hr -e -c
+restore operation successful
+total time: 702  ms
+
+
Example 11: Use gs_restore to run the following command to import all object definitions in the hr and hr1 schemas to the backupdb database.
gs_restore -W password /home//backup/MPPDB_backup2.dmp -p 8000 -h 10.10.10.100 -d backupdb -n hr -n hr1 -s
+restore operation successful
+total time: 665  ms
+
+

Example 12: Use gs_restore to run the following command to decrypt the files exported from the human_resource database and import them to the backupdb database.

+
create database backupdb;
+CREATE DATABASE
+
+gs_restore /home//backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d backupdb --with-key=1234567812345678
+restore operation successful
+total time: 23472  ms
+
+gsql -d backupdb -p 8000 -r
+gsql ((GaussDB 8.1.1 build af002019) compiled at 2020-01-10 05:43:20 commit 6995 last mr 11566 )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+backupdb=# select * from hr.areas;
+ area_id |       area_name        
+---------+------------------------
+       4 | Iron
+       1 | Wood
+       2 | Lake
+       3 | Desert
+(4 rows)
+

Example 13: user1 does not have the permission to import data from an exported file to the backupdb database, but role1 has this permission. To import the exported data to the backupdb database, set --role to role1 in the import command to use the permissions of role1.

+
human_resource=# CREATE USER user1 IDENTIFIED BY 'password';
+
+gs_restore -U user1 -W password /home//backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 -d backupdb --role role1 --rolepassword password
+restore operation successful
+total time: 554  ms
+
+gsql -d backupdb -p 8000 -r 
+gsql ((GaussDB 8.1.1 build af002019) compiled at 2020-01-10 05:43:20 commit 6995 last mr 11566 )
+Non-SSL connection (SSL connection is recommended when requiring high-security)
+Type "help" for help.
+
+backupdb=# select * from hr.areas;
+ area_id |       area_name        
+---------+------------------------
+       4 | Iron
+       1 | Wood
+       2 | Lake
+       3 | Desert
+(4 rows)
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0210.html b/docs/dws/dev/dws_04_0210.html new file mode 100644 index 00000000..0d8463d0 --- /dev/null +++ b/docs/dws/dev/dws_04_0210.html @@ -0,0 +1,28 @@ + + +

Importing Data from MRS to a Cluster

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0212.html b/docs/dws/dev/dws_04_0212.html new file mode 100644 index 00000000..edaee1e4 --- /dev/null +++ b/docs/dws/dev/dws_04_0212.html @@ -0,0 +1,97 @@ + + +

Preparing Data in an MRS Cluster

+

Before importing data from MRS to a GaussDB(DWS) cluster, you must have:

+
  1. Created an MRS cluster.
  2. Created the Hive/Spark ORC table in the MRS cluster and stored the table data to the HDFS path corresponding to the table.
+

If you have completed the preparations, skip this section.

+

In this tutorial, the Hive ORC table will be created in the MRS cluster as an example to complete the preparation work. The process for creating the Spark ORC table in the MRS cluster and the SQL syntax are similar to those of Hive.

+

Data File

The sample data of the product_info.txt data file is as follows:

+
100,XHDK-A-1293-#fJ3,2017-09-01,A,2017 Autumn New Shirt Women,red,M,328,2017-09-04,715,good
+205,KDKE-B-9947-#kL5,2017-09-01,A,2017 Autumn New Knitwear Women,pink,L,584,2017-09-05,406,very good!
+300,JODL-X-1937-#pV7,2017-09-01,A,2017 autumn new T-shirt men,red,XL,1245,2017-09-03,502,Bad.
+310,QQPX-R-3956-#aD8,2017-09-02,B,2017 autumn new jacket women,red,L,411,2017-09-05,436,It's really super nice
+150,ABEF-C-1820-#mC6,2017-09-03,B,2017 Autumn New Jeans Women,blue,M,1223,2017-09-06,1200,The seller's packaging is exquisite
+200,BCQP-E-2365-#qE4,2017-09-04,B,2017 autumn new casual pants men,black,L,997,2017-09-10,301,The clothes are of good quality.
+250,EABE-D-1476-#oB1,2017-09-10,A,2017 autumn new dress women,black,S,841,2017-09-15,299,Follow the store for a long time.
+108,CDXK-F-1527-#pL2,2017-09-11,A,2017 autumn new dress women,red,M,85,2017-09-14,22,It's really amazing to buy
+450,MMCE-H-4728-#nP9,2017-09-11,A,2017 autumn new jacket women,white,M,114,2017-09-14,22,Open the package and the clothes have no odor
+260,OCDA-G-2817-#bD3,2017-09-12,B,2017 autumn new woolen coat women,red,L,2004,2017-09-15,826,Very favorite clothes
+980,ZKDS-J-5490-#cW4,2017-09-13,B,2017 Autumn New Women's Cotton Clothing,red,M,112,2017-09-16,219,The clothes are small
+98,FKQB-I-2564-#dA5,2017-09-15,B,2017 autumn new shoes men,green,M,4345,2017-09-18,5473,The clothes are thick and it's better this winter.
+150,DMQY-K-6579-#eS6,2017-09-21,A,2017 autumn new underwear men,yellow,37,2840,2017-09-25,5831,This price is very cost effective
+200,GKLW-l-2897-#wQ7,2017-09-22,A,2017 Autumn New Jeans Men,blue,39,5879,2017-09-25,7200,The clothes are very comfortable to wear
+300,HWEC-L-2531-#xP8,2017-09-23,A,2017 autumn new shoes women,brown,M,403,2017-09-26,607,good
+100,IQPD-M-3214-#yQ1,2017-09-24,B,2017 Autumn New Wide Leg Pants Women,black,M,3045,2017-09-27,5021,very good.
+350,LPEC-N-4572-#zX2,2017-09-25,B,2017 Autumn New Underwear Women,red,M,239,2017-09-28,407,The seller's service is very good
+110,NQAB-O-3768-#sM3,2017-09-26,B,2017 autumn new underwear women,red,S,6089,2017-09-29,7021,The color is very good 
+210,HWNB-P-7879-#tN4,2017-09-27,B,2017 autumn new underwear women,red,L,3201,2017-09-30,4059,I like it very much and the quality is good.
+230,JKHU-Q-8865-#uO5,2017-09-29,C,2017 Autumn New Clothes with Chiffon Shirt,black,M,2056,2017-10-02,3842,very good
+

+
+

Creating a Hive ORC Table in an MRS Cluster

  1. Create an MRS cluster.

    For details, see "Creating a Cluster > Custom Creation of a Cluster" in the MapReduce Service User Guide.

    +
  2. Download the client.
    1. Go back to the MRS cluster page. Click the cluster name. On the Dashboard tab page of the cluster details page, click Access Manager. If a message is displayed indicating that EIP needs to be bound, bind an EIP first.
    2. Enter the username admin and its password for logging in to MRS Manager. The password is the one you entered when creating the MRS cluster.
    3. Choose Services > Download Client. Set Client Type to Only configuration files and set Download To to Server. Click OK.

      +
    +
  3. Log in to the Hive client of the MRS cluster.
    1. Log in to a Master node.

      For details, see "Remote Login Guide > Logging In to a Master Node" in the MapReduce Service User Guide.

      +
    2. Run the following command to switch the user:
      sudo su - omm
      +
    3. Run the following command to go to the client directory:
      cd /opt/client
      +
    4. Run the following command to configure the environment variables:
      source bigdata_env
      +
    5. If Kerberos authentication is enabled for the current cluster, run the following command to authenticate the current user. The user must have the permission to create Hive tables; configure a role with the required permissions and bind it to the user. For details, see "Creating a Role" in the MapReduce Service User Guide. If Kerberos authentication is disabled for the current cluster, skip this step.
      kinit MRS cluster user
      +

      Example: kinit hiveuser

      +
    6. Run the following command to start the Hive client:
      beeline
      +
    +
  4. Create a database demo on Hive.

    Run the following command to create the database demo:

    +
    CREATE DATABASE demo;
    +
  5. Create table product_info of the Hive TEXTFILE type in the database demo and import the Data File (product_info.txt) to the HDFS path corresponding to the table.

    Run the following command to switch to the database demo:

    +
    USE demo;
    +

    Run the following command to create table product_info and define the table fields based on data in the Data File.

    +
    DROP TABLE product_info;
    +
    +CREATE TABLE product_info 
    +(    
    +    product_price                int            ,
    +    product_id                   char(30)       ,
    +    product_time                 date           ,
    +    product_level                char(10)       ,
    +    product_name                 varchar(200)   ,
    +    product_type1                varchar(20)    ,
    +    product_type2                char(10)       ,
    +    product_monthly_sales_cnt    int            ,
    +    product_comment_time         date           ,
    +    product_comment_num          int        ,
    +    product_comment_content      varchar(200)                   
    +) 
    +row format delimited fields terminated by ',' 
    +stored as TEXTFILE;
    +

    For details about how to import data to an MRS cluster, see "Cluster Operation Guide > Managing Active Clusters > Managing Data Files" in the MapReduce Service User Guide. A minimal HiveQL sketch of loading the file is also shown after this procedure.

    +
  6. Create a Hive ORC table named product_info_orc in the database demo.

    Run the following command to create the Hive ORC table product_info_orc. The table fields are the same as those of the product_info table created in the previous step.

    +
    DROP TABLE product_info_orc;
    +
    +CREATE TABLE product_info_orc
    +(    
    +    product_price                int            ,
    +    product_id                   char(30)       ,
    +    product_time                 date           ,
    +    product_level                char(10)       ,
    +    product_name                 varchar(200)   ,
    +    product_type1                varchar(20)    ,
    +    product_type2                char(10)       ,
    +    product_monthly_sales_cnt    int            ,
    +    product_comment_time         date           ,
    +    product_comment_num          int            ,
    +    product_comment_content      varchar(200)                   
    +) 
    +row format delimited fields terminated by ',' 
    +stored as orc;
    +
  7. Insert data in the product_info table to the Hive ORC table product_info_orc.
    insert into product_info_orc select * from product_info;
    +

    Query table product_info_orc.

    +
    select * from product_info_orc;
    +

    If the data from the Data File is returned, the data has been successfully inserted into the ORC table.

    +
+
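As referenced in 5 and 7 above, the following is a minimal HiveQL sketch of loading the data file and verifying the result. It assumes product_info.txt has already been uploaded to an HDFS path such as /tmp/product_info.txt (the path is illustrative):

-- Load the sample file into the TEXTFILE table (the file is moved from the given HDFS path).
LOAD DATA INPATH '/tmp/product_info.txt' INTO TABLE product_info;
-- Copy the rows into the ORC table and verify the count (20 rows are expected for the sample file).
INSERT INTO product_info_orc SELECT * FROM product_info;
SELECT count(*) FROM product_info_orc;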
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0213.html b/docs/dws/dev/dws_04_0213.html new file mode 100644 index 00000000..34e00148 --- /dev/null +++ b/docs/dws/dev/dws_04_0213.html @@ -0,0 +1,119 @@ + + +

Manually Creating a Foreign Server

+

In the syntax CREATE FOREIGN TABLE (SQL on Hadoop or OBS) for creating a foreign table, you need to specify a foreign server associated with the MRS data source connection.

+

When you create an MRS data source connection on the GaussDB(DWS) management console, the database administrator dbadmin automatically creates a foreign server in the default database postgres. If you want to create a foreign table in the default database postgres to read MRS data, skip this section.

+

To allow a common user to create a foreign table in a user-defined database to read MRS data, you must manually create a foreign server in that database. This section describes how a common user can create such a foreign server. The procedure is as follows:

+
  1. Ensure that an MRS data source connection has been created for the GaussDB(DWS) cluster.

    For details, see "Managing MRS Data Sources > Creating an MRS Data Source Connection" in the Data Warehouse Service User Guide.

    +
  2. Creating a User and a Database and Granting the User Foreign Table Permissions
  3. Manually Creating a Foreign Server
+

If you no longer need to read data from the MRS data source and have deleted the MRS data source on the GaussDB(DWS) management console, only the foreign server that was automatically created in the default database postgres is deleted; any manually created foreign server must be deleted manually. For details about the deletion, see Deleting the Manually Created Foreign Server.

+
+

Creating a User and a Database and Granting the User Foreign Table Permissions

In the following example, a common user dbuser and a database mydatabase are created. Then, an administrator is used to grant foreign table permissions to user dbuser.

+
  1. Connect to the default database postgres as a database administrator through the database client tool provided by GaussDB(DWS).

    For example, use the gsql client to connect to the database by running the following command:

    +
    gsql -d postgres -h 192.168.2.30 -U dbadmin -p 8000 -W password -r
    +

    Enter your password as prompted.

    +

  2. Create a common user and use it to create a database.

    Create a user named dbuser that has the permission to create databases.

    +
    CREATE USER dbuser WITH CREATEDB PASSWORD 'password';
    +
    Switch to the created user.
    SET ROLE dbuser PASSWORD 'password';
    +
    +
    Run the following command to create a database:
    CREATE DATABASE mydatabase;
    +
    +

    Query the database.

    +
    SELECT * FROM pg_database;
    +

    The database is successfully created if the returned result contains information about mydatabase.

    +
    datname   | datdba | encoding | datcollate | datctype | datistemplate | datallowconn | datconnlimit | datlastsysoid | datfrozenxid | dattablespace | datcompatibility |                       datacl
    +
    +------------+--------+----------+------------+----------+---------------+--------------+--------------+---------------+--------------+---------------+------------------+--------------------------------------
    +--------------
    + template1  |     10 |        0 | C          | C        | t             | t            |           -1 |         14146 |         1351 |          1663 | ORA              | {=c/Ruby,omm=CTc/Ruby}
    + template0  |     10 |        0 | C          | C        | t             | f            |           -1 |         14146 |         1350 |          1663 | ORA              | {=c/Ruby,Ruby=CTc/Ruby}
    + postgres   |     10 |        0 | C          | C        | f             | t            |           -1 |         14146 |         1352 |          1663 | ORA              | {=Tc/Ruby,Ruby=CTc/Ruby,chaojun=C/Ruby,hu
    +obinru=C/Ruby}
    + mydatabase |  17000 |        0 | C          | C        | f             | t            |           -1 |         14146 |         1351 |          1663 | ORA              |
    +(4 rows)
    +

  3. Grant the permissions for creating foreign servers and using foreign tables to a common user as the administrator.

    Connect to the new database as a database administrator.

    +
    You can run the following command on the gsql client to switch to the administrator account and connect to the new database:
    \c mydatabase dbadmin;
    +
    +

    Enter the password as prompted.

    +

    Note that you must use the administrator account to connect to the database where the foreign server is to be created and the foreign tables will be used, and then grant the permissions to the common user.

    +
    +
    By default, only system administrators can create foreign servers. Common users can create foreign servers only after being authorized. Run the following command to grant the permission:
    GRANT ALL ON FOREIGN DATA WRAPPER hdfs_fdw TO dbuser;
    +
    +

    The name of the FOREIGN DATA WRAPPER must be hdfs_fdw, and dbuser is the user who will create the foreign server.

    +

    Run the following command to grant the user the permission to use foreign tables:

    +
    ALTER USER dbuser USEFT;
    +

    Query for the user.

    +
    SELECT r.rolname, r.rolsuper, r.rolinherit,
    +  r.rolcreaterole, r.rolcreatedb, r.rolcanlogin,
    +  r.rolconnlimit, r.rolvalidbegin, r.rolvaliduntil,
    +  ARRAY(SELECT b.rolname
    +        FROM pg_catalog.pg_auth_members m
    +        JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
    +        WHERE m.member = r.oid) as memberof
    +, r.rolreplication
    +, r.rolauditadmin
    +, r.rolsystemadmin
    +, r.roluseft
    +FROM pg_catalog.pg_roles r
    +ORDER BY 1;
    +

    The authorization is successful if the dbuser row in the returned result shows t for roluseft. A shorter check is also sketched after this procedure.

    +
     rolname  | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolconnlimit | rolvalidbegin | rolvaliduntil | memberof | rolreplication | rolauditadmin | rolsystemadmin | roluseft
    +-----------+----------+------------+---------------+-------------+-------------+--------------+---------------+---------------+----------+----------------+---------------+----------------+----------
    + dbuser    | f        | t          | f             | t           | t           |           -1 |               |               | {}       | f              | f             | f              | t
    + lily      | f        | t          | f             | f           | t           |           -1 |               |               | {}       | f              | f             | f              | f
    + Ruby       | t        | t          | t             | t           | t           |           -1 |               |               | {}       | t              | t             | t              | t
    +

+
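The pg_roles query above can be narrowed down. A minimal sketch, assuming the pg_roles columns shown in the previous output:

SELECT rolname, roluseft FROM pg_catalog.pg_roles WHERE rolname = 'dbuser';

If the returned roluseft value is t, user dbuser has the permission to use foreign tables.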
+

Manually Creating a Foreign Server

  1. Connect to the default database postgres as a database administrator through the database client tool provided by GaussDB(DWS).

    You can use the gsql client to log in to the database in either of the following ways:

    +
    • If you have logged in to the gsql client, run the following command to switch the database and user:
      \c postgres dbadmin;
      +

      Enter the password as prompted.

      +
    • If you have not logged in to the gsql client or have exited the gsql client by running the \q command, run the following command to reconnect to it:
      gsql -d postgres -h 192.168.2.30 -U dbadmin -p 8000 -W password -r
      +
    +

  2. Run the following command to query the information about the foreign server that is automatically created:

    SELECT * FROM pg_foreign_server;
    +

    The returned result is as follows:

    +
                         srvname                      | srvowner | srvfdw | srvtype | srvversion | srvacl |                                                     srvoptions
    +--------------------------------------------------+----------+--------+---------+------------+--------+---------------------------------------------------------------------------------------------------------------------
    + gsmpp_server                                     |       10 |  13673 |         |            |        |
    + gsmpp_errorinfo_server                           |       10 |  13678 |         |            |        |
    + hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca |    16476 |  13685 |         |            |        | {"address=192.168.1.245:25000,192.168.1.218:25000",hdfscfgpath=/MRS/8f79ada0-d998-4026-9020-80d6de2692ca,type=hdfs}
    +(3 rows)
    +

    In the query result, each row contains the information about a foreign server. The foreign server associated with the MRS data source connection contains the following information:

    +
    • The value of srvname contains hdfs_server and the ID of the MRS cluster, which is the same as the MRS ID in the cluster list on the MRS management console.
    • The address parameter in the srvoptions field contains the IP addresses and ports of the active and standby nodes in the MRS cluster.
    +

    You can find the foreign server you want based on the above information and record the values of its srvname and srvoptions.

    +

  3. Switch to the user who is about to create a foreign server to connect to the corresponding database.

    In this example, run the following command to use common user dbuser created in Creating a User and a Database and Granting the User Foreign Table Permissions to connect to mydatabase created by the user:
    \c mydatabase dbuser;
    +
    +

  4. Create a foreign server.

    For details about the syntax for creating foreign servers, see CREATE SERVER. For example:

    +
    CREATE SERVER hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca FOREIGN DATA WRAPPER HDFS_FDW 
    +OPTIONS 
    +(
    +address '192.168.1.245:25000,192.168.1.218:25000', 
    +hdfscfgpath '/MRS/8f79ada0-d998-4026-9020-80d6de2692ca',
    +type 'hdfs'
    +);
    +

    Mandatory parameters are described as follows:

    +
    • Name of the foreign server

      You can customize a name.

      +

      In this example, set the name to the value of the srvname field recorded in 2, for example, hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca.

      +

      Resources in different databases are isolated. Therefore, the names of foreign servers in different databases can be the same.

      +
    • FOREIGN DATA WRAPPER

      This parameter can only be set to HDFS_FDW, which already exists in the database.

      +
    • OPTIONS parameters
      Set the following parameters to the values under srvoptions recorded in 2.
      • address

        Specifies the IP address and port number of the primary and standby nodes of the HDFS cluster.

        +
      • hdfscfgpath

        Specifies the configuration file path of the HDFS cluster. This parameter is available only when type is HDFS. You can set only one path.

        +
      • type

        Its value is hdfs, which indicates that HDFS_FDW connects to HDFS.

        +
      +
      +
    +

  5. View the foreign server.

    SELECT * FROM pg_foreign_server WHERE srvname='hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca';
    +

    The server is successfully created if the returned result is as follows:

    +
                         srvname                      | srvowner | srvfdw | srvtype | srvversion | srvacl |                                                     srvoptions
    +--------------------------------------------------+----------+--------+---------+------------+--------+---------------------------------------------------------------------------------------------------------------------
    + hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca |    16476 |  13685 |         |            |        | {"address=192.168.1.245:25000,192.168.1.218:25000",hdfscfgpath=/MRS/8f79ada0-d998-4026-9020-80d6de2692ca,type=hdfs}
    +(1 row)
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0214.html b/docs/dws/dev/dws_04_0214.html new file mode 100644 index 00000000..4e42e0ae --- /dev/null +++ b/docs/dws/dev/dws_04_0214.html @@ -0,0 +1,227 @@ + + +

Creating a Foreign Table

+

This section describes how to create a Hadoop foreign table in the GaussDB(DWS) database to access the Hadoop structured data stored on MRS HDFS. A Hadoop foreign table is read-only. It can only be queried using SELECT.

+

Prerequisites

+
+

Obtaining the HDFS Path of the MRS Data Source

There are two methods for you to obtain the HDFS path.

+ +
+

Obtaining Information About the Foreign Server Connected to the MRS Data Source

  1. Use the user who creates the foreign server to connect to the corresponding database.

    Determine, based on your requirements, whether to create the foreign table in a user-defined database as a common user.

    +
    • Yes
      1. Ensure that you have created the common user dbuser and its database mydatabase, and manually created a foreign server in mydatabase by following steps in Manually Creating a Foreign Server.
      2. Connect to the database mydatabase as user dbuser through the database client tool provided by GaussDB(DWS).
        If you have connected to the database using the gsql client, run the following command to switch the user and database:
        \c mydatabase dbuser;
        +
        +

        Enter your password as prompted.

        +
      +
    • No

      When you create an MRS data source connection on the GaussDB(DWS) management console, the database administrator dbadmin automatically creates a foreign server in the default database postgres. To create a foreign table in the default database postgres as dbadmin, connect to the database through the database client tool provided by GaussDB(DWS). For example, use the gsql client to connect by running the following command:

      +
      gsql -d postgres -h 192.168.2.30 -U dbadmin -p 8000 -W password -r
      +

      Enter your password as prompted.

      +
    +

  2. Run the following command to view the information about the created foreign server connected to the MRS data source:

    SELECT * FROM pg_foreign_server;
    +

    You can also run the \desc+ command to view the information about the foreign server.

    +
    +

    The returned result is as follows:

    +
                         srvname                      | srvowner | srvfdw | srvtype | srvversion | srvacl |                                                     srvoptions
    +--------------------------------------------------+----------+--------+---------+------------+--------+---------------------------------------------------------------------------------------------------------------------
    + gsmpp_server                                     |       10 |  13673 |         |            |        |
    + gsmpp_errorinfo_server                           |       10 |  13678 |         |            |        |
    + hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca |    16476 |  13685 |         |            |        | {"address=192.168.1.245:25000,192.168.1.218:25000",hdfscfgpath=/MRS/8f79ada0-d998-4026-9020-80d6de2692ca,type=hdfs}
    +(3 rows)
    +

    In the query result, each row contains the information about a foreign server. The foreign server associated with the MRS data source connection contains the following information:

    +
    • The value of srvname contains hdfs_server and the ID of the MRS cluster, which is the same as the MRS ID in the cluster list on the MRS management console.
    • The address parameter in the srvoptions field contains the IP addresses and ports of the active and standby nodes in the MRS cluster.
    +

    You can find the foreign server you want based on the above information and record the values of its srvname and srvoptions.

    +

+
+

Creating a Foreign Table

After completing Obtaining Information About the Foreign Server Connected to the MRS Data Source and Obtaining the HDFS Path of the MRS Data Source, you can create a foreign table to read data from the MRS data source.

+

The syntax for creating a foreign table is as follows. For details, see the syntax CREATE FOREIGN TABLE (SQL on Hadoop or OBS).

+
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name 
+( [ { column_name type_name 
+    [ { [CONSTRAINT constraint_name] NULL |
+    [CONSTRAINT constraint_name] NOT NULL |
+      column_constraint [...]} ] |
+      table_constraint [, ...]} [, ...] ] ) 
+    SERVER dfs_server 
+    OPTIONS ( { option_name ' value ' } [, ...] ) 
+    DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
+    [ PARTITION BY ( column_name ) [ AUTOMAPPED ] ] ;
+

For example, when creating a foreign table named foreign_product_info, set parameters in the syntax as follows:

+ +

Based on the above settings, the foreign table is created using the following statements:

+
DROP FOREIGN TABLE IF EXISTS foreign_product_info;
+
+CREATE FOREIGN TABLE foreign_product_info
+(
+    product_price                integer        ,
+    product_id                   char(30)       ,
+    product_time                 date           ,
+    product_level                char(10)       ,
+    product_name                 varchar(200)   ,
+    product_type1                varchar(20)    ,
+    product_type2                char(10)       ,
+    product_monthly_sales_cnt    integer        ,
+    product_comment_time         date           ,
+    product_comment_num          integer        ,
+    product_comment_content      varchar(200)                      
+) SERVER hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca 
+OPTIONS (
+format 'orc', 
+encoding 'utf8',
+foldername '/user/hive/warehouse/demo.db/product_info_orc/'
+) 
+DISTRIBUTE BY ROUNDROBIN;
+
+

Data Type Conversion

Data is imported to Hive/Spark and stored on HDFS in ORC format. GaussDB(DWS) actually reads the ORC files on HDFS and queries and analyzes the data in these files.

+

Data types supported by Hive/Spark are different from those supported by GaussDB(DWS). Therefore, you need to learn the mapping between them. Table 1 describes the mapping in detail.

Table 1 Data type mapping

Type                                    | GaussDB(DWS) HDFS/OBS Foreign Table Column Type | Hive Table Column Type                    | Spark Table Column Type
----------------------------------------+-------------------------------------------------+-------------------------------------------+------------------------
Integer in two bytes                    | SMALLINT                                        | SMALLINT                                  | SMALLINT
Integer in four bytes                   | INTEGER                                         | INT                                       | INT
Integer in eight bytes                  | BIGINT                                          | BIGINT                                    | BIGINT
Single-precision floating point number  | FLOAT4 (REAL)                                   | FLOAT                                     | FLOAT
Double-precision floating point number  | FLOAT8 (DOUBLE PRECISION)                       | DOUBLE                                    | FLOAT
Scientific data type                    | DECIMAL[p(,s)], maximum precision 38            | DECIMAL, maximum precision 38 (Hive 0.11) | DECIMAL
Date type                               | DATE                                            | DATE                                      | DATE
Time type                               | TIMESTAMP                                       | TIMESTAMP                                 | TIMESTAMP
BOOLEAN type                            | BOOLEAN                                         | BOOLEAN                                   | BOOLEAN
CHAR type                               | CHAR(n)                                         | CHAR(n)                                   | STRING
VARCHAR type                            | VARCHAR(n)                                      | VARCHAR(n)                                | VARCHAR(n)
String                                  | TEXT (CLOB)                                     | STRING                                    | STRING
+
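As a hedged illustration of the mapping in Table 1, the following pairs a Hive ORC table with a matching GaussDB(DWS) foreign table (the table and column names are illustrative; the SERVER name and foldername pattern reuse the examples in this section):

-- Hive DDL: INT, DOUBLE, and STRING columns.
CREATE TABLE sample_src (id INT, price DOUBLE, remark STRING) stored as orc;

-- Matching GaussDB(DWS) foreign-table columns per Table 1: INT maps to INTEGER, DOUBLE to FLOAT8, STRING to TEXT.
CREATE FOREIGN TABLE sample_src_ft
(
    id        integer,
    price     float8,
    remark    text
) SERVER hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca
OPTIONS (format 'orc', encoding 'utf8', foldername '/user/hive/warehouse/demo.db/sample_src/')
DISTRIBUTE BY ROUNDROBIN;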
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0215.html b/docs/dws/dev/dws_04_0215.html new file mode 100644 index 00000000..3bf294b1 --- /dev/null +++ b/docs/dws/dev/dws_04_0215.html @@ -0,0 +1,80 @@ + + +

Importing Data

+

Viewing Data in the MRS Data Source by Directly Querying the Foreign Table

If the data amount is small, you can directly run SELECT to query the foreign table and view the data in the MRS data source.

+
  1. Run the following command to query data from the foreign table:

    SELECT * FROM foreign_product_info;
    +
    + +
    +

    If the query result is the same as the data in Data File, the foreign table reads the MRS data correctly. The following information is displayed at the end of the query result:

    +
    (20 rows)
    +

    After data is queried, you can insert the data to common tables in the database.

    +

+
+

Querying Data After Importing It

You can query the MRS data after importing it to GaussDB(DWS).

+
  1. Create a table in GaussDB(DWS) to store imported data.

    The target table structure must be the same as the structure of the foreign table created in Creating a Foreign Table. That is, both tables must have the same number of columns and column types.

    +

    For example, create a table named product_info. The table example is as follows:

    +
    DROP TABLE IF EXISTS product_info;
    +CREATE TABLE product_info
    +(
    +    product_price                integer        ,
    +    product_id                   char(30)       ,
    +    product_time                 date           ,
    +    product_level                char(10)       ,
    +    product_name                 varchar(200)   ,
    +    product_type1                varchar(20)    ,
    +    product_type2                char(10)       ,
    +    product_monthly_sales_cnt    integer        ,
    +    product_comment_time         date           ,
    +    product_comment_num          integer        ,
    +    product_comment_content      varchar(200)                   
    +) 
    +with (
    +orientation = column,
    +compression=middle
    +) 
    +DISTRIBUTE BY HASH (product_id);
    +
    + +
    +

  2. Run the INSERT INTO .. SELECT .. command to import data from the foreign table to the target table.

    Example:

    +
    INSERT INTO product_info SELECT * FROM foreign_product_info;
    +
    + +
    +
    If information similar to the following is displayed, the data has been imported.
    INSERT 0 20
    +
    +

  3. Run the following SELECT command to view data imported from MRS to GaussDB(DWS):

    SELECT * FROM product_info;
    +
    + +
    +

    If the query result is the same as the data in Data File, the import is successful; a quick count check is also sketched after this procedure. The following information is displayed at the end of the query result:

    +
    (20 rows)
    +

+
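A quick hedged check after the import: count the rows in the target table (20 rows are expected for the sample data) and sample a few of them.

SELECT count(*) FROM product_info;
SELECT product_id, product_name FROM product_info ORDER BY product_id LIMIT 5;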
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0216.html b/docs/dws/dev/dws_04_0216.html new file mode 100644 index 00000000..911ca409 --- /dev/null +++ b/docs/dws/dev/dws_04_0216.html @@ -0,0 +1,52 @@ + + +

Deleting Resources

+

After completing operations in this tutorial, if you no longer need to use the resources created during the operations, you can delete them to avoid resource waste or quota occupation.

+

Deleting the Foreign Table and Target Table

  1. (Optional) If operations in Querying Data After Importing It have been performed, run the following command to delete the target table:

    DROP TABLE product_info;
    +

  2. Run the following command to delete the foreign table:

    DROP FOREIGN TABLE foreign_product_info;
    +

+
+

Deleting the Manually Created Foreign Server

If operations in Manually Creating a Foreign Server have been performed, perform the following steps to delete the foreign server, database, and user:

+
  1. Use the client provided by GaussDB(DWS) to connect to the database where the foreign server resides as the user who created the foreign server.

    You can use the gsql client to log in to the database in either of the following ways:

    +
    • If you have logged in to the gsql client, run the following command to switch the database and user:
      \c mydatabase dbuser;
      +

      Enter the password as prompted.

      +
    • If you have logged in to the gsql client, you can run the \q command to exit gsql, and run the following command to reconnect to it:
      gsql -d mydatabase -h 192.168.2.30 -U dbuser -p 8000 -r
      +

      Enter the password as prompted.

      +
    +

  2. Delete the manually created foreign server.

    Run the following command to delete the server. For details about the syntax, see DROP SERVER.

    +
    DROP SERVER hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca;
    +

    The foreign server is deleted if the following information is displayed:

    +
    DROP SERVER
    +

    View the foreign server.

    +
    SELECT * FROM pg_foreign_server WHERE srvname='hdfs_server_8f79ada0_d998_4026_9020_80d6de2692ca';
    +

    The server is successfully deleted if the returned result is as follows:

    +
     srvname | srvowner | srvfdw | srvtype | srvversion | srvacl | srvoptions
    +---------+----------+--------+---------+------------+--------+------------
    +(0 rows)
    +

  3. Delete the customized database.

    Connect to the default database gaussdb through the database client tool provided by GaussDB(DWS).

    +

    If you have logged in to the database using the gsql client, run the following command to switch the database and user:

    +
    \c gaussdb
    +

    Enter your password as prompted.

    +

    Run the following command to delete the customized database:

    +
    DROP DATABASE mydatabase;
    +

    The database is deleted if the following information is displayed:

    +
    DROP DATABASE
    +

  4. Delete the common user created in this example as the administrator.

    Connect to the database as a database administrator through the database client tool provided by GaussDB(DWS).

    +

    If you have logged in to the database using the gsql client, run the following command to switch the database and user:

    +
    \c gaussdb dbadmin
    +
    Run the following command to reclaim the permission for creating foreign servers:
    REVOKE ALL ON FOREIGN DATA WRAPPER hdfs_fdw FROM dbuser;
    +
    +

    The name of the FOREIGN DATA WRAPPER must be hdfs_fdw, and dbuser is the user who created the server.

    +

    Run the following command to delete the user:

    +
    DROP USER dbuser;
    +

    You can run the \du command to query for the user and check whether the user has been deleted.

    +

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0217.html b/docs/dws/dev/dws_04_0217.html new file mode 100644 index 00000000..fa8067fe --- /dev/null +++ b/docs/dws/dev/dws_04_0217.html @@ -0,0 +1,13 @@ + + +

Error Handling

+

The following error indicates that GaussDB(DWS) attempted to read an ORC data file, but the actual file is in text format. To resolve this, create a table of the Hive ORC type and store the data in that table, as sketched below.

+
ERROR:  dn_6009_6010: Error occurs while creating an orc reader for file /user/hive/warehouse/products_info.txt, detail can be found in dn log of dn_6009_6010.
+
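The following is a minimal HiveQL sketch of the fix, using the same pattern as Preparing Data in an MRS Cluster (the products_info table and its two columns are illustrative):

-- Create an ORC table with the same columns as the TEXTFILE table, then copy the data into it.
CREATE TABLE products_info_orc
(
    product_id      char(30),
    product_name    varchar(200)
)
row format delimited fields terminated by ','
stored as orc;
insert into products_info_orc select * from products_info;

Then point the foreign table's foldername option at the HDFS directory of the ORC table instead of the text file.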

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0219.html b/docs/dws/dev/dws_04_0219.html new file mode 100644 index 00000000..2a2b1daf --- /dev/null +++ b/docs/dws/dev/dws_04_0219.html @@ -0,0 +1,15 @@ + + +

Using CDM to Migrate Data to GaussDB(DWS)

+

You can use CDM to migrate data from other data sources (for example, MySQL) to the databases in clusters on GaussDB(DWS).

+ + +

For details about scenarios where CDM is used to migrate data to GaussDB(DWS), see the following sections of Cloud Data Migration User Guide:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0228.html b/docs/dws/dev/dws_04_0228.html new file mode 100644 index 00000000..797e1b5b --- /dev/null +++ b/docs/dws/dev/dws_04_0228.html @@ -0,0 +1,188 @@ + + +

Checking for Data Skew

+

Scenarios

Data skew degrades query performance. Before importing all the data from a table containing over 10 million records, you are advised to import part of the data first, check whether data skew occurs, and change the distribution key if necessary. Addressing data skew and changing the distribution key after a large amount of data has been imported is costly.

+
+

Background

GaussDB(DWS) uses a massively parallel processing (MPP) system with a shared-nothing architecture. Service data tables are horizontally partitioned, and their tuples are stored across all DNs using proper distribution policies.

+

The following user table distribution policies are supported:

+ +
+

If an inappropriate distribution key is used, data skew may occur under the hash policy. Check for data skew when you use hash distribution so that data is evenly distributed across all DNs. You are advised to use a column with few duplicate values as the distribution key.

+

Procedure

  1. Analyze the data source and select candidate distribution columns that have many distinct values and evenly distributed data.
  2. Select a candidate column from 1 to create a target table.

    CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name 
    +    ({ column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
    +    | table_constraint    | LIKE source_table [ like_option [...] ] }
    +    [, ... ])    [ WITH ( {storage_parameter = value} [, ... ] ) ]
    +    [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
    +    [ COMPRESS | NOCOMPRESS ]    [ TABLESPACE tablespace_name ]
    +    [ DISTRIBUTE BY { REPLICATION 
    +  
    +                    | { HASH ( column_name [,...] ) } } ];
    +
    + +
    +

  3. Import a small batch of data to the target table.

    When importing a single data file, you can evenly split this file and import a part of it to check for the data skew in the target table.

    +

  4. Check for data skew. (Replace table_name with the actual table name.)

    SELECT a.count,b.node_name FROM (SELECT count(*) AS count,xc_node_id FROM table_name GROUP BY xc_node_id) a, pgxc_node b WHERE a.xc_node_id=b.node_id ORDER BY a.count desc;
    +
    + +
    +

  5. If the data distribution deviation is less than 10% across DNs, data is evenly distributed and an appropriate distribution key has been selected. Delete the small batch of imported data and import full data to complete data migration.

    If data distribution deviation across DNs is greater than or equal to 10%, data skew occurs. Remove this distribution key from the candidates in 1, delete the target table, and repeat 2 through 5.

    +

    The data distribution deviation is the difference between the actual data volume on a DN and the average data volume across all DNs. A sketch for computing the deviation percentage is provided after this procedure.

    +
    +

  6. (Optional) If you fail to select an appropriate distribution key after performing the preceding steps, select multiple columns from the candidates as distribution keys.
+
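The following is a minimal sketch for computing the maximum deviation percentage from the per-DN counts used in 4 (replace table_name with the actual table name):

SELECT round(100.0 * (max(count) - avg(count)) / avg(count), 2) AS max_deviation_pct
FROM (SELECT count(*) AS count, xc_node_id FROM table_name GROUP BY xc_node_id) a;

A result of 10 or higher indicates data skew as defined above.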

+
+

Examples

Assume you want to select an appropriate distribution key for the staffs table.

+
  1. Analyze the source data for the staffs table and select the staff_ID, FIRST_NAME, and LAST_NAME columns as candidate distribution keys.
  2. Select the staff_ID column as the distribution key and create the target table staffs.
    CREATE TABLE staffs
    +(
    +  staff_ID       NUMBER(6) not null,
    +  FIRST_NAME     VARCHAR2(20),
    +  LAST_NAME      VARCHAR2(25),
    +  EMAIL          VARCHAR2(25),
    +  PHONE_NUMBER   VARCHAR2(20),
    +  HIRE_DATE      DATE,
    +  employment_ID  VARCHAR2(10),
    +  SALARY         NUMBER(8,2),
    +  COMMISSION_PCT NUMBER(2,2),
    +  MANAGER_ID     NUMBER(6),
    +  section_ID     NUMBER(4)
    +)   
    +DISTRIBUTE BY hash(staff_ID);
    +
    + +
    +
  3. Import a small batch of data to the target table staffs.
    There are eight DNs in the cluster based on the following query, and you are advised to import 80,000 records.
    SELECT count(*) FROM pgxc_node where node_type='D';
    + count 
    +-------
    +     8
    +(1 row)
    +
    + +
    +
    +
  4. Verify the data skew of the target table staffs whose distribution key is staff_ID:
    SELECT a.count,b.node_name FROM (select count(*) as count,xc_node_id FROM staffs GROUP BY xc_node_id) a, pgxc_node b WHERE a.xc_node_id=b.node_id ORDER BY a.count desc;
    +count | node_name
    +------+-----------
    +11010 | datanode4
    +10000 | datanode3
    +12001 | datanode2
    + 8995 | datanode1
    +10000 | datanode5
    + 7999 | datanode6
    + 9995 | datanode7
    +10000 | datanode8
    +(8 rows)
    +
    + +
    +
  5. The preceding query result indicates that the data distribution deviation across DNs is greater than 10%, so data skew has occurred. Therefore, remove staff_ID from the distribution key candidates and delete the staffs table.
    DROP TABLE staffs;
    +
    + +
    +
  6. Use staff_ID, FIRST_NAME, and LAST_NAME as distribution keys and create the target table staffs.
    CREATE TABLE staffs
    +(  
    +  staff_ID       NUMBER(6) not null,
    +  FIRST_NAME     VARCHAR2(20),
    +  LAST_NAME      VARCHAR2(25),
    +  EMAIL          VARCHAR2(25),
    +  PHONE_NUMBER   VARCHAR2(20),
    +  HIRE_DATE      DATE,
    +  employment_ID  VARCHAR2(10),
    +  SALARY         NUMBER(8,2),
    +  COMMISSION_PCT NUMBER(2,2),
    +  MANAGER_ID     NUMBER(6),
    +  section_ID     NUMBER(4)
    +) 
    +DISTRIBUTE BY hash(staff_ID,FIRST_NAME,LAST_NAME);
    +
    + +
    +
  7. Verify the data skew of the target table staffs whose distribution keys are staff_ID, FIRST_NAME, and LAST_NAME.
    SELECT a.count,b.node_name FROM (select count(*) as count,xc_node_id FROM staffs GROUP BY xc_node_id) a, pgxc_node b WHERE a.xc_node_id=b.node_id ORDER BY a.count desc;
    +count | node_name
    +------+-----------
    +10010 | datanode4
    +10000 | datanode3
    +10001 | datanode2
    + 9995 | datanode1
    +10000 | datanode5
    + 9999 | datanode6
    + 9995 | datanode7
    +10000 | datanode8
    +(8 rows)
    +
    + +
    +
  8. The preceding query result indicates that the data distribution deviation across DNs is less than 10%. The data is evenly distributed, and appropriate distribution keys have been selected.
  9. Delete the imported small-batch data.
    TRUNCATE TABLE staffs;
    +
    + +
    +
  10. Import the full data to complete data migration.
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0243.html b/docs/dws/dev/dws_04_0243.html new file mode 100644 index 00000000..24f3d0a0 --- /dev/null +++ b/docs/dws/dev/dws_04_0243.html @@ -0,0 +1,60 @@ + + +

Preparing Data on OBS

+

Scenarios

Before you use the SQL on OBS feature to query OBS data:

+
  1. You have stored the ORC data on OBS.

    For example, the ORC table has been created when you use the Hive or Spark component, and the ORC data has been stored on OBS.

    +

    Assume that there are two ORC data files, named product_info.0 and product_info.1, whose original data is stored in the demo.db/product_info_orc/ directory of the mybucket OBS bucket. You can view their original data in Original Data.

    +
  2. If your data files are already on OBS, perform steps in Obtaining the OBS Path of Original Data and Setting Read Permission.

    This section uses the ORC format as an example to describe how to import data. The method for importing CarbonData data is similar.

    +
    +
+
+

Original Data

Assume that you have stored the two ORC data files on OBS and their original data is as follows:

+ +
+

Obtaining the OBS Path of Original Data and Setting Read Permission

  1. Log in to the OBS management console.

    Click Service List and choose Object Storage Service to open the OBS management console.

    +

  2. Obtain the OBS path for storing source data files.

    After the source data files are uploaded to an OBS bucket, a globally unique access path is generated. You need to specify the OBS paths of the source data files when creating a foreign table; the foldername option of the foreign table takes the path portion that follows the endpoint, for example, /mybucket/demo.db/product_info_orc/.

    +

    For details about how to view an OBS path, see "OBS Console Operation Guide > Managing Objects > Accessing an Object Using Its Object URL" in the Object Storage Service User Guide.

    +

    For example, the OBS paths are as follows:

    +
    https://obs.xxx.com/mybucket/demo.db/product_info_orc/product_info.0
    +https://obs.xxx.com/mybucket/demo.db/product_info_orc/product_info.1
    +
    + +
    +

  3. Grant the OBS bucket read permission for the user.

    The user who executes the SQL on OBS function needs to obtain the read permission on the OBS bucket where the source data file is located. You can configure the ACL for the OBS buckets to grant the read permission to a specific user.

    +

    For details, see "OBS Console Operation Guide > Permission Control > Configuring a Bucket ACL" in the Object Storage Service User Guide.

    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0244.html b/docs/dws/dev/dws_04_0244.html new file mode 100644 index 00000000..3397c901 --- /dev/null +++ b/docs/dws/dev/dws_04_0244.html @@ -0,0 +1,194 @@ + + +

Creating a Foreign Server

+

This section describes how to create a foreign server that is used to define the information about OBS servers and is invoked by foreign tables. For details about the syntax for creating foreign servers, see CREATE SERVER.

+

(Optional) Creating a User and a Database and Granting the User Foreign Table Permissions

Common users do not have permissions to create foreign servers and tables. If you want to use a common user to create foreign servers and tables in a customized database, perform the following steps to create a user and a database, and grant the user foreign table permissions.

+

In the following example, a common user dbuser and a database mydatabase are created. Then, an administrator is used to grant foreign table permissions to user dbuser.

+
  1. Connect to the default database gaussdb as a database administrator through the database client tool provided by GaussDB(DWS).

    For example, use the gsql client to connect to the database by running the following command:

    +
    gsql -d gaussdb -h 192.168.2.30 -U dbadmin -p 8000 -W password -r
    +
    + +
    +

  2. Create a common user and use it to create a database.

    Create a user named dbuser that has the permission to create databases.

    +
    CREATE USER dbuser WITH CREATEDB PASSWORD 'password';
    +
    + +
    +
    Switch to the created user.
    SET ROLE dbuser PASSWORD 'password';
    +
    + +
    +
    +
    Run the following command to create a database named mydatabase:
    CREATE DATABASE mydatabase;
    +
    + +
    +
    +

    Query the database.

    +
    SELECT * FROM pg_database;
    +
    + +
    +

    The database is successfully created if the returned result contains information about mydatabase.

    +
    datname   | datdba | encoding | datcollate | datctype | datistemplate | datallowconn | datconnlimit | datlastsysoid | datfrozenxid | dattablespace | datcompatibility |                       datacl
    +
    +------------+--------+----------+------------+----------+---------------+--------------+--------------+---------------+--------------+---------------+------------------+--------------------------------------
    +--------------
    + template1  |     10 |        0 | C          | C        | t             | t            |           -1 |         14146 |         1351 |          1663 | ORA              | {=c/Ruby,Ruby=CTc/Ruby}
    + template0  |     10 |        0 | C          | C        | t             | f            |           -1 |         14146 |         1350 |          1663 | ORA              | {=c/Ruby,Ruby=CTc/Ruby}
    + gaussdb   |     10 |        0 | C          | C        | f             | t            |           -1 |         14146 |         1352 |          1663 | ORA              | {=Tc/Ruby,Ruby=CTc/Ruby,chaojun=C/Ruby,hu
    +obinru=C/Ruby}
    + mydatabase |  17000 |        0 | C          | C        | f             | t            |           -1 |         14146 |         1351 |          1663 | ORA              |
    +(4 rows)
    +
    + +
    +

  3. Grant the permissions for creating foreign servers and using foreign tables to a common user as the administrator.

    Connect to the new database as a database administrator through the database client tool provided by GaussDB(DWS).

    +

    You can use the gsql client to run the following command to switch to an administrator user and connect to the new database:

    +
    \c mydatabase dbadmin;
    +
    + +
    +

    Enter the password of the system administrator as prompted.

    +

    Note that you must use the administrator account to connect to the database where the foreign server is to be created and the foreign tables will be used, and then grant the permissions to the common user.

    +
    +
    By default, only system administrators can create foreign servers. Common users can create foreign servers only after being authorized. Run the following command to grant the permission:
    GRANT ALL ON SCHEMA public TO dbuser;
    +GRANT ALL ON FOREIGN DATA WRAPPER dfs_fdw TO dbuser;
    +
    + +
    +
    +

    The FOREIGN DATA WRAPPER name (fdw_name) can be hdfs_fdw or dfs_fdw, and dbuser is the user who will create the server.

    +

    Run the following command to grant the user the permission to use foreign tables:

    +
    ALTER USER dbuser USEFT;
    +
    + +
    +

    Query for the user.

    +
    SELECT r.rolname, r.rolsuper, r.rolinherit,
    +  r.rolcreaterole, r.rolcreatedb, r.rolcanlogin,
    +  r.rolconnlimit, r.rolvalidbegin, r.rolvaliduntil,
    +  ARRAY(SELECT b.rolname
    +        FROM pg_catalog.pg_auth_members m
    +        JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
    +        WHERE m.member = r.oid) as memberof
    +, r.rolreplication
    +, r.rolauditadmin
    +, r.rolsystemadmin
    +, r.roluseft
    +FROM pg_catalog.pg_roles r
    +ORDER BY 1;
    +
    + +
    +

    The authorization is successful if the dbuser information in the returned result contains the UseFT permission.

    +
    rolname  | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolconnlimit | rolvalidbegin | rolvaliduntil | memberof | rolreplication | rolauditadmin | rolsystemadmin | roluseft
    +-----------+----------+------------+---------------+-------------+-------------+--------------+---------------+---------------+----------+----------------+---------------+----------------+----------
    + dbuser    | f        | t          | f             | t           | t           |           -1 |               |               | {}       | f              | f             | f              | t
    + lily      | f        | t          | f             | f           | t           |           -1 |               |               | {}       | f              | f             | f              | f
    + Ruby       | t        | t          | t             | t           | t           |           -1 |               |               | {}       | t              | t             | t              | t  
    +
    + +
    +

+
+

Creating a Foreign Server

  1. Use the user who is about to create a foreign server to connect to the corresponding database.

    In this example, use common user dbuser created in (Optional) Creating a User and a Database and Granting the User Foreign Table Permissions to connect to mydatabase created by the user. You need to connect to the database through the database client tool provided by GaussDB(DWS).

    +

    You can use the gsql client to log in to the database in either of the following ways:

    +
    • If you have logged in to the gsql client, run the following command to switch the database and user:
      \c mydatabase dbuser;
      +
      + +
      +

      Enter the password as prompted.

      +
    • If you have not logged in to the gsql client or have exited the gsql client by running the \q command, run the following command to reconnect to it:
      gsql -d mydatabase -h 192.168.2.30 -U dbuser -p 8000 -r
      +
      + +
      +

      Enter the password as prompted.

      +
    +

  2. Create a foreign server.

    For details about the syntax for creating foreign servers, see CREATE SERVER.

    +

    For example, run the following command to create a foreign server named obs_server.

    +
    CREATE SERVER obs_server FOREIGN DATA WRAPPER dfs_fdw 
    +OPTIONS ( 
    +  address 'obs.otc.t-systems.com' , 
    +  ACCESS_KEY 'access_key_value_to_be_replaced', 
    +  SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced', 
    +  encrypt 'on', 
    +  type 'obs' 
    +);
    +
    + +
    +

    Mandatory parameters are described as follows:

    +
    • Name of the foreign server

      You can customize a name.

      +

      In this example, the name is set to obs_server.

      +
    • FOREIGN DATA WRAPPER

      fdw_name can be hdfs_fdw or dfs_fdw, which already exists in the database.

      +
    • OPTIONS parameters
      • address

        Specifies the endpoint of the OBS service.

        +

        Obtain the address as follows:

        +
        1. Obtain the OBS path by performing 2 in Preparing Data on OBS.
        2. The OBS endpoint viewed on OBS is obs.xxx.xxx.com.
        +
      • (Mandatory) Access keys (AK and SK)
        GaussDB(DWS) needs to use the access keys (AK and SK) to access OBS. Therefore, you must obtain the access keys first.
        • (Mandatory) access_key: specifies users' AK information.
        • (Mandatory) secret_access_key: specifies users' SK information.
        +
        +

        For details about how to obtain the access keys, see Creating Access Keys (AK and SK).

        +
      • type

        Its value is obs, which indicates that dfs_fdw connects to OBS.

        +
      +
    +

  3. View the foreign server.

    SELECT * FROM pg_foreign_server WHERE srvname='obs_server';
    +
    + +
    +

    The server is successfully created if the returned result is as follows:

    +
      srvname   | srvowner | srvfdw | srvtype | srvversion | srvacl |                                                                                      srvoptions
    +
    +------------+----------+--------+---------+------------+--------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
    +-----------
    + obs_server |    24661 |  13686 |         |            |        | {address=xxx.xxx.x.xxx,access_key=xxxxxxxxxxxxxxxxxxxx,type=obs,secret_access_key=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx}
    +(1 row)
    +
    + +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0245.html b/docs/dws/dev/dws_04_0245.html new file mode 100644 index 00000000..b40778fa --- /dev/null +++ b/docs/dws/dev/dws_04_0245.html @@ -0,0 +1,152 @@ + + +

Creating a Foreign Table

+

After performing steps in Creating a Foreign Server, create an OBS foreign table in the GaussDB(DWS) database to access the data stored in OBS. An OBS foreign table is read-only. It can only be queried using SELECT.

+

Creating a Foreign Table

The syntax for creating a foreign table is as follows. For details, see the syntax CREATE FOREIGN TABLE (SQL on Hadoop or OBS).

+
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name 
+( [ { column_name type_name 
+    [ { [CONSTRAINT constraint_name] NULL |
+    [CONSTRAINT constraint_name] NOT NULL |
+      column_constraint [...]} ] |
+      table_constraint [, ...]} [, ...] ] ) 
+    SERVER dfs_server 
+    OPTIONS ( { option_name ' value ' } [, ...] ) 
+    DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
+    [ PARTITION BY ( column_name ) [ AUTOMAPPED ] ] ;
+
+ +
+

For example, when creating a foreign table named product_info_ext_obs, set parameters in the syntax as follows:

+ +

Based on the preceding settings, the command for creating the foreign table is as follows:

+
Create an OBS foreign table that does not contain partition columns. The foreign server associated with the table is obs_server, the file format of the table data on OBS is ORC, and the data storage path on OBS is /mybucket/demo.db/product_info_orc/.
DROP FOREIGN TABLE IF EXISTS product_info_ext_obs;
+CREATE FOREIGN TABLE product_info_ext_obs
+(
+    product_price                integer        not null,
+    product_id                   char(30)       not null,
+    product_time                 date           ,
+    product_level                char(10)       ,
+    product_name                 varchar(200)   ,
+    product_type1                varchar(20)    ,
+    product_type2                char(10)       ,
+    product_monthly_sales_cnt    integer        ,
+    product_comment_time         date           ,
+    product_comment_num          integer        ,
+    product_comment_content      varchar(200)                      
+) SERVER obs_server 
+OPTIONS (
+format 'orc', 
+foldername '/mybucket/demo.db/product_info_orc/',
+encoding 'utf8',
+totalrows '10'
+) 
+DISTRIBUTE BY ROUNDROBIN;
+
+ +
+
+
+

Create an OBS foreign table that contains partition columns. The product_info_ext_obs foreign table uses the product_manufacturer column as the partition key. The following partition directories exist in /mybucket/demo.db/product_info_orc/ on OBS (a sample query that filters on the partition column is shown after the example):

+

Partition directory 1: product_manufacturer=10001

+

Partition directory 2: product_manufacturer=10010

+

Partition directory 3: product_manufacturer=10086

+
...
DROP FOREIGN TABLE IF EXISTS product_info_ext_obs;
+CREATE FOREIGN TABLE product_info_ext_obs
+(
+    product_price                integer        not null,
+    product_id                   char(30)       not null,
+    product_time                 date           ,
+    product_level                char(10)       ,
+    product_name                 varchar(200)   ,
+    product_type1                varchar(20)    ,
+    product_type2                char(10)       ,
+    product_monthly_sales_cnt    integer        ,
+    product_comment_time         date           ,
+    product_comment_num          integer        ,
+    product_comment_content      varchar(200)   ,
+    product_manufacturer	 integer
+) SERVER obs_server 
+OPTIONS (
+format 'orc', 
+foldername '/mybucket/demo.db/product_info_orc/',
+encoding 'utf8',
+totalrows '10'
+) 
+DISTRIBUTE BY ROUNDROBIN
+PARTITION BY (product_manufacturer) AUTOMAPPED;
+
+ +
+
+
+
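Because each partition value maps to its own directory on OBS, a query that filters on the partition column can skip the other partition directories. For example (a sketch; the manufacturer value 10001 is taken from the directory names above):

SELECT * FROM product_info_ext_obs WHERE product_manufacturer = 10001;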
+ +
+ diff --git a/docs/dws/dev/dws_04_0246.html b/docs/dws/dev/dws_04_0246.html new file mode 100644 index 00000000..47be1e70 --- /dev/null +++ b/docs/dws/dev/dws_04_0246.html @@ -0,0 +1,81 @@ + + +

Querying Data on OBS Through Foreign Tables

+

Viewing Data on OBS by Directly Querying the Foreign Table

If the data amount is small, you can directly run SELECT to query the foreign table and view the data on OBS.

+
  1. Run the following command to query data from the foreign table:

    SELECT * FROM product_info_ext_obs;
    +
    + +
    +

    If the query result is the same as the data in Original Data, the foreign table reads the OBS data correctly. The following information is displayed at the end of the query result:

    +
    (10 rows)
    +

    After the data is queried, you can insert it into common tables in the database.

    +

+
+

Querying Data After Importing It

  1. Create a table in GaussDB(DWS) to store imported data.

    The target table structure must be the same as the structure of the foreign table created in Creating a Foreign Table. That is, both tables must have the same number of columns and column types.

    +

    For example, create a table named product_info. The table example is as follows:

    +
    DROP TABLE IF EXISTS product_info;
    +
    +CREATE TABLE product_info
    +(
    +    product_price                integer        not null,
    +    product_id                   char(30)       not null,
    +    product_time                 date           ,
    +    product_level                char(10)       ,
    +    product_name                 varchar(200)   ,
    +    product_type1                varchar(20)    ,
    +    product_type2                char(10)       ,
    +    product_monthly_sales_cnt    integer        ,
    +    product_comment_time         date           ,
    +    product_comment_num          integer        ,
    +    product_comment_content      varchar(200)                   
    +) 
    +with (
    +orientation = column,
    +compression=middle
    +) 
    +DISTRIBUTE BY HASH (product_id);
    +
    + +
    +

  2. Run the INSERT INTO ... SELECT ... command to import data from the foreign table to the target table.

    Example:

    +
    INSERT INTO product_info SELECT * FROM product_info_ext_obs;
    +
    + +
    +
    If information similar to the following is displayed, the data has been imported.
    INSERT 0 10
    +
    +

  3. Run the following SELECT command to view data imported from OBS to GaussDB(DWS):

    SELECT * FROM product_info;
    +
    + +
    +

    If the query result is the same as the data in Original Data, the import is successful. The following information is displayed at the end of the query result:

    +
    (10 rows)
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0247.html b/docs/dws/dev/dws_04_0247.html new file mode 100644 index 00000000..45181910 --- /dev/null +++ b/docs/dws/dev/dws_04_0247.html @@ -0,0 +1,88 @@ + + +

Deleting Resources

+

After completing operations in this tutorial, if you no longer need to use the resources created during the operations, you can delete them to avoid resource waste or quota occupation. The procedure is as follows:

+
  1. Deleting the Foreign Table and Target Table
  2. Deleting the Created Foreign Server
  3. Deleting the Database and the User to Which the Database Belongs

    If you have performed steps in (Optional) Creating a User and a Database and Granting the User Foreign Table Permissions, delete the database and the user to which the database belongs.

    +
+

Deleting the Foreign Table and Target Table

  1. (Optional) If you have performed steps in Querying Data After Importing It, run the following command to delete the target table:

    DROP TABLE product_info;
    +
    + +
    +

    If the following information is displayed, the table has been deleted.

    +
    DROP TABLE
    +

  2. Run the following statement to delete the foreign table:

    DROP FOREIGN TABLE product_info_ext_obs;
    +
    + +
    +

    If the following information is displayed, the table has been deleted.

    +
    DROP FOREIGN TABLE
    +

+
+

Deleting the Created Foreign Server

  1. Use the user who created the foreign server to connect to the database where the foreign server is located.

    In this example, common user dbuser is used to create the foreign server in mydatabase. You need to connect to the database through the database client tool provided by GaussDB(DWS). You can use the gsql client to log in to the database in either of the following ways:

    +
    • If you have logged in to the gsql client, run the following command to switch the database and user:
      \c mydatabase dbuser;
      +
      + +
      +

      Enter the password as prompted.

      +
    • If you have logged in to the gsql client, you can run the \q command to exit gsql, and run the following command to reconnect to it:
      gsql -d mydatabase -h 192.168.2.30 -U dbuser -p 8000 -r
      +
      + +
      +

      Enter the password as prompted.

      +
    +

  2. Delete the created foreign server.

    Run the following command to delete the server. For details about the syntax, see DROP SERVER.

    +
    DROP SERVER obs_server;
    +
    + +
    +

    The server is deleted if the following information is displayed:

    +
    DROP SERVER
    +

    View the foreign server.

    +
    SELECT * FROM pg_foreign_server WHERE srvname='obs_server';
    +
    + +
    +

    The server is successfully deleted if the returned result is as follows:

    +
     srvname | srvowner | srvfdw | srvtype | srvversion | srvacl | srvoptions
    +---------+----------+--------+---------+------------+--------+------------
    +(0 rows)
    +

+
+

Deleting the Database and the User to Which the Database Belongs

If you have performed steps in (Optional) Creating a User and a Database and Granting the User Foreign Table Permissions, perform the following steps to delete the database and the user to which the database belongs.

+
  1. Delete the customized database.

    Connect to the default database gaussdb through the database client tool provided by GaussDB(DWS).

    +

    If you have logged in to the database using the gsql client, run the following command to switch the database and user:

    +

    Switch to the default database.

    +
    \c gaussdb
    +

    Enter your password as prompted.

    +

    Run the following command to delete the customized database:

    +
    DROP DATABASE mydatabase;
    +
    + +
    +

    The database is deleted if the following information is displayed:

    +
    DROP DATABASE
    +

  2. Delete the common user created in this example as the administrator.

    Connect to the database as a database administrator through the database client tool provided by GaussDB(DWS).

    +

    If you have logged in to the database using the gsql client, run the following command to switch the database and user:

    +
    \c gaussdb dbadmin
    +
    Run the following command to reclaim the permission for creating foreign servers:
    REVOKE ALL ON FOREIGN DATA WRAPPER dfs_fdw FROM dbuser;
    +
    + +
    +
    +

    The FOREIGN DATA WRAPPER name must be dfs_fdw, and dbuser is the user who created the foreign server.

    +

    Run the following command to delete the user:

    +
    DROP USER dbuser;
    +
    + +
    +

    You can run the \du meta-command to list users and verify that the user has been deleted.

    +
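    For example, a quick check in gsql (a sketch, assuming the user created earlier was named dbuser):

    \du dbuser

    If the returned list of roles contains no entry for dbuser, the user has been deleted.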

+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0249.html b/docs/dws/dev/dws_04_0249.html new file mode 100644 index 00000000..30f0a5d9 --- /dev/null +++ b/docs/dws/dev/dws_04_0249.html @@ -0,0 +1,19 @@ + + +

Data Export

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0250.html b/docs/dws/dev/dws_04_0250.html new file mode 100644 index 00000000..645127e4 --- /dev/null +++ b/docs/dws/dev/dws_04_0250.html @@ -0,0 +1,19 @@ + + +

Exporting Data to OBS

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0251.html b/docs/dws/dev/dws_04_0251.html new file mode 100644 index 00000000..74feb2b1 --- /dev/null +++ b/docs/dws/dev/dws_04_0251.html @@ -0,0 +1,73 @@ + + +

Parallel OBS Data Export

+

Overview

GaussDB(DWS) databases allow you to export data in parallel using OBS foreign tables, in which the export mode and the exported data format are specified. Data is exported in parallel through multiple DNs from GaussDB(DWS) to the OBS server, improving the overall export performance.
  • The CN only plans data export tasks and delivers the tasks to DNs. In this case, the CN is released to process external requests.
  • The computing capability and bandwidth of all the DNs are fully leveraged to export data.
  • You can concurrently export data using multiple OBS services, but the bucket and object paths specified for the export tasks must be different and cannot be null.
  • The OBS server connects to GaussDB(DWS) cluster nodes. The export rate is affected by the network bandwidth.
  • The TEXT and CSV data file formats are supported. The size of data in a single row must be less than 1 GB.
  • Data in ORC format is supported only by 8.1.0 or later.
+
+
+

Related Concepts

+
+

Principles

The following describes the principles of exporting data from a cluster to OBS by using a hash-distributed table or a replication table.

+ +
+

Naming Rules of Exported Files

Rules for naming the files exported from GaussDB(DWS) to OBS are as follows:

+ +
+

Data Export Process

Figure 2 Concurrent data export
+ +
Table 1 Process description

1. Plan data export: create an OBS bucket and a folder in the bucket as the directory for storing exported data files. For details, see Planning Data Export.
2. Create an OBS foreign table: the foreign table specifies information about the data files to be exported, such as the destination location, format, encoding, and delimiter. For details, see Creating an OBS Foreign Table.
3. Export data: after the foreign table is created, run the INSERT statement to efficiently export data to data files. For details, see Exporting Data.

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0252.html b/docs/dws/dev/dws_04_0252.html new file mode 100644 index 00000000..45ca035e --- /dev/null +++ b/docs/dws/dev/dws_04_0252.html @@ -0,0 +1,39 @@ + + +

Planning Data Export

+

Scenarios

Plan the storage location of exported data in OBS.

+
+

Planning OBS Save Path and File

Specify the OBS directory for storing the data files to be exported. Exported data can be saved in CSV format; TEXT format is also supported, so that the exported data can be imported into various applications.

+

The target directory cannot contain any files.

+
+

Planning OBS Bucket Permissions

The user used to export data must:

+ +
+

Planning Data to Be Exported and Foreign Tables

Prepare the data to be exported in a database table; the data volume per row must be less than 1 GB. Based on the data to be exported, plan foreign tables whose attributes, such as the number of columns, column types, and lengths, match the user data.

+
+

Granting Write Permission to OBS Storage Location and OBS Bucket as Planned

  1. Create an OBS bucket and a folder in the OBS bucket as the directory for storing exported data.

    1. Log in to the OBS management console.

      Click Service List and choose Object Storage Service to open the OBS management console.

      +
    2. Create a bucket.

      For details about how to create an OBS bucket, see "OBS Console Operation Guide > Managing Buckets > Creating a Bucket" in the Object Storage Service User Guide.

      +

      For example, create a bucket named mybucket.

      +
    3. Create a folder.

      In the OBS bucket, create a folder for storing exported data.

      +

      For details, see "OBS Console Operation Guide > Managing Objects > Creating a Folder" in the .

      +

      For example, create a folder named output_data in the created mybucket OBS bucket.

      +
    +

  2. Determine the path of the created OBS folder.

    Specify the OBS path for storing exported data files. This path is the value of the location parameter used for creating a foreign table.

    +

    The OBS folder path in the location parameter consists of obs://, a bucket name, and a file path.

    +

    In this example, the OBS folder path is as follows:

    +
    obs://mybucket/output_data/
    +

    The OBS directory to be used for storing data files must be empty.

    +
    +

  3. Grant the OBS bucket write permission to the user who wants to export data.

    When exporting data, a user must have the write permission on the OBS bucket where the data export path is located. You can configure ACL permissions for the OBS bucket to grant the write permission to a specific user.

    +

    For details, see "OBS Console Operation Guide > Permission Control > Configuring a Bucket ACL" in the Object Storage Service User Guide.

    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0253.html b/docs/dws/dev/dws_04_0253.html new file mode 100644 index 00000000..6fbdba7f --- /dev/null +++ b/docs/dws/dev/dws_04_0253.html @@ -0,0 +1,141 @@ + + +

Creating an OBS Foreign Table

+

Procedure

  1. Based on the path planned in Planning Data Export, determine the value of the location parameter used for creating a foreign table.
  2. Obtain the access keys (AK and SK) to access OBS.

    To obtain access keys, log in to the management console, click the username in the upper right corner, and select My Credential from the menu. Then choose Access Keys in the navigation tree on the left. On the Access Keys page, you can view the existing AKs or click Add Access Key to create and download access keys.

    +

  3. Examine the formats of data to be exported and determine the values of data format parameters used for creating a foreign table. For details, see data format parameters.
  4. Create an OBS foreign table based on the parameter settings in the preceding steps. For details about how to create a foreign table, see CREATE FOREIGN TABLE (for GDS Import and Export).
+
+

Example 1

For example, in the GaussDB(DWS) database, create a write-only foreign table with the format parameter as text to export text files. Set parameters as follows:

+ + +

Based on the above settings, the foreign table is created using the following statement:

+
DROP FOREIGN TABLE IF EXISTS product_info_output_ext1;
+CREATE FOREIGN TABLE product_info_output_ext1
+(
+ c_bigint bigint,
+ c_char char(30),
+ c_varchar varchar(30),
+ c_nvarchar2 nvarchar2(30) ,
+ c_data date,
+ c_time time ,
+ c_test varchar(30)) 
+ server gsmpp_server 
+ options (
+ LOCATION 'obs://mybucket/output_data/', 
+ ACCESS_KEY 'access_key_value_to_be_replaced',
+ SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced',
+ format 'text',
+ delimiter '|',
+ encoding 'utf-8',
+ encrypt 'on' 
+  )
+ WRITE ONLY;
+
+ +
+

If the following information is displayed, the foreign table has been created:

+
CREATE FOREIGN TABLE
+
+

Example 2

For example, in the GaussDB(DWS) database, create a write-only foreign table with the format parameter as CSV to export CSV files. Set parameters as follows:

+ + +

Based on the preceding settings, the foreign table is created using the following statements:

+
DROP FOREIGN TABLE IF EXISTS product_info_output_ext2;
+CREATE FOREIGN TABLE product_info_output_ext2
+(
+    product_price                integer        not null,
+    product_id                   char(30)       not null,
+    product_time                 date           ,
+    product_level                char(10)       ,
+    product_name                 varchar(200)   ,
+    product_type1                varchar(20)    ,
+    product_type2                char(10)       ,
+    product_monthly_sales_cnt    integer        ,
+    product_comment_time         date           ,
+    product_comment_num          integer        ,
+    product_comment_content      varchar(200)                   
+) 
+SERVER gsmpp_server 
+OPTIONS(
+location 'obs://mybucket/output_data/',
+FORMAT 'CSV' ,
+DELIMITER ',',
+encoding 'utf8',
+header 'false',
+ACCESS_KEY 'access_key_value_to_be_replaced',
+SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced'
+)
+WRITE ONLY ;
+
+ +
+

If the following information is displayed, the foreign table has been created:

+
CREATE FOREIGN TABLE
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0254.html b/docs/dws/dev/dws_04_0254.html new file mode 100644 index 00000000..831abeae --- /dev/null +++ b/docs/dws/dev/dws_04_0254.html @@ -0,0 +1,30 @@ + + +

Exporting Data

+

Procedure

  1. Export data.

    INSERT INTO [Foreign table name] SELECT * FROM [Source table name];
    +
    + +
    +

+
+

Examples

+
  • The directory to be used for data storage must be empty, or the export will fail.
  • Data of a special type, such as RAW, is exported as a binary file, which cannot be recognized by the import tool. You need to use the RAWTOHEX() function to convert it to the hexadecimal format before export.
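For example, using the CSV write-only foreign table product_info_output_ext2 from Creating an OBS Foreign Table and the product_info table from the import tutorial (a sketch; replace the names with your own tables):

INSERT INTO product_info_output_ext2 SELECT * FROM product_info;

For RAW data, convert the column before export, for example:

-- raw_output_ext and raw_tab are hypothetical names used for illustration only.
INSERT INTO raw_output_ext SELECT RAWTOHEX(r) FROM raw_tab;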
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0255.html b/docs/dws/dev/dws_04_0255.html new file mode 100644 index 00000000..3d377332 --- /dev/null +++ b/docs/dws/dev/dws_04_0255.html @@ -0,0 +1,265 @@ + + +

Examples

+

Exporting a Table

Create two foreign tables and use them to export tables from a database to two buckets in OBS.

+
  1. Log in to the OBS data server through the management console. On the OBS server, create the buckets /input-data1 and /input-data2 for storing data files, and create data directories /input-data1/data and /input-data2/data, respectively, in the two buckets.
  2. On the GaussDB(DWS) database, create the foreign tables tpcds.customer_address_ext1 and tpcds.customer_address_ext2 for the OBS data server to receive data exported from the database.

    OBS and the database are in the same region. The example GaussDB(DWS) table to be exported is tpcds.customer_address.

    +

    Export information is set as follows:

    +
    • The source data file directories are /input-data1/data/ and /input-data2/data/, so location of tpcds.customer_address_ext1 and tpcds.customer_address_ext2 is set to obs://input-data1/data/ and obs://input-data2/data/, respectively.
    +

    Information about data formats is set based on the detailed data format parameters specified during data export from a database. The parameter settings are as follows:

    +
    • format is set to CSV.
    • encoding is set to UTF-8.
    • delimiter is set to E'\x08'.
    • Configure encrypt. Its default value is off.
    • access_key is set to the AK you have obtained. (mandatory)
    • secret_access_key is set to the SK you have obtained. (mandatory)

      access_key and secret_access_key have been obtained during user creation. Replace the placeholder values with the actual keys.

      +
      +
    +

    Based on the preceding settings, the foreign table is created using the following statements:

    +
    CREATE FOREIGN TABLE tpcds.customer_address_ext1
    +(
    +ca_address_sk             integer                       ,
    +ca_address_id             char(16)                      ,
    +ca_street_number          char(10)                      ,
    +ca_street_name            varchar(60)                   ,
    +ca_street_type            char(15)                      ,
    +ca_suite_number           char(10)                      ,
    +ca_city                   varchar(60)                   ,
    +ca_county                 varchar(30)                   ,
    +ca_state                  char(2)                       ,
    +ca_zip                    char(10)                      ,
    +ca_country                varchar(20)                   ,
    +ca_gmt_offset             decimal(5,2)                  ,
    +ca_location_type          char(20)
    +)
    +SERVER gsmpp_server
    +OPTIONS(LOCATION 'obs://input-data1/data/',
    +FORMAT 'CSV',
    +ENCODING 'utf8', 
    +DELIMITER E'\x08', 
    +ENCRYPT 'off',
    +ACCESS_KEY 'access_key_value_to_be_replaced',
    +SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced' 
+)WRITE ONLY;
    +
    + +
    +
    CREATE FOREIGN TABLE tpcds.customer_address_ext2
    +(
    +ca_address_sk             integer                       ,
    +ca_address_id             char(16)                      ,
    +ca_street_number          char(10)                      ,
    +ca_street_name            varchar(60)                   ,
    +ca_street_type            char(15)                      ,
    +ca_suite_number           char(10)                      ,
    +ca_city                   varchar(60)                   ,
    +ca_county                 varchar(30)                   ,
    +ca_state                  char(2)                       ,
    +ca_zip                    char(10)                      ,
    +ca_country                varchar(20)                   ,
    +ca_gmt_offset             decimal(5,2)                  ,
    +ca_location_type          char(20)
    +)
    +SERVER gsmpp_server
    +OPTIONS(LOCATION 'obs://input-data2/data/',
    +FORMAT 'CSV',
    +ENCODING 'utf8', 
    +DELIMITER E'\x08', 
    +ENCRYPT 'off',
    +ACCESS_KEY 'access_key_value_to_be_replaced',
    +SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced'
+)WRITE ONLY;
    +
    + +
    +

  3. In GaussDB(DWS), export the data table tpcds.customer_address to the foreign tables tpcds.customer_address_ext1 and tpcds.customer_address_ext2 concurrently.

    INSERT INTO tpcds.customer_address_ext1 SELECT * FROM tpcds.customer_address;
    +
    + +
    +
    INSERT INTO tpcds.customer_address_ext2 SELECT * FROM tpcds.customer_address;
    +
    + +
    +

    By design, OBS foreign tables cannot export files to a non-empty path. If multiple concurrent export tasks write files to the same path, an error occurs.

    +

    Assume that two SQL statements concurrently export data from the same table through the same OBS foreign table, and the second statement starts before the first has written any file to the OBS server. Both statements may succeed, but some data is overwritten. Therefore, do not concurrently export data through the same OBS foreign table.

    +
    +

+
+

Concurrently Exporting Tables

Use the two foreign tables to export tables from the database to two buckets in OBS.

+
  1. Log in to the OBS data server through the management console. On the OBS server, create the buckets /input-data1 and /input-data2 for storing data files, and create data directories /input-data1/data and /input-data2/data, respectively, in the two buckets.
  2. In GaussDB(DWS), create foreign tables tpcds.customer_address_ext1 and tpcds.customer_address_ext2 for the OBS server to receive exported data.

    OBS and the database are in the same region. The tables to be exported are tpcds.customer_address and tpcds.warehouse.

    +
    Export information is set as follows:
    • The source data file directories are /input-data1/data/ and /input-data2/data/, so location of tpcds.customer_address_ext1 and tpcds.customer_address_ext2 is set to obs://input-data1/data/ and obs://input-data2/data/, respectively.
    +
    +

    Information about data formats is set based on the detailed data format parameters specified during data export from GaussDB(DWS). The parameter settings are as follows:

    +
    • format is set to CSV.
    • encoding is set to UTF-8.
    • delimiter is set to E'\x08'.
    • Configure encrypt. Its default value is off.
    • access_key is set to the AK you have obtained. (mandatory)
    • secret_access_key is set to the SK you have obtained. (mandatory)

      access_key and secret_access_key have been obtained during user creation. Replace the placeholder values with the actual keys.

      +
      +
    +

    Based on the preceding settings, the foreign table is created using the following statements:

    +
    CREATE FOREIGN TABLE tpcds.customer_address_ext1
    +(
    +ca_address_sk             integer               ,
    +ca_address_id             char(16)              ,
    +ca_street_number          char(10)                      ,
    +ca_street_name            varchar(60)                   ,
    +ca_street_type            char(15)                      ,
    +ca_suite_number           char(10)                      ,
    +ca_city                   varchar(60)                   ,
    +ca_county                 varchar(30)                   ,
    +ca_state                  char(2)                       ,
    +ca_zip                    char(10)                      ,
    +ca_country                varchar(20)                   ,
    +ca_gmt_offset             decimal(5,2)                  ,
    +ca_location_type          char(20)
    +)
    +SERVER gsmpp_server
    +OPTIONS(LOCATION 'obs://input-data1/data/',
    +FORMAT 'CSV',
    +ENCODING 'utf8', 
    +DELIMITER E'\x08',
    +ENCRYPT 'off',
    +ACCESS_KEY 'access_key_value_to_be_replaced',
    +SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced' 
+)WRITE ONLY;
    +
    + +
    +
    CREATE FOREIGN TABLE tpcds.customer_address_ext2
    +(
    +ca_address_sk             integer               ,
    +ca_address_id             char(16)              ,
    +ca_address_name           varchar(20)           ,
    +ca_address_code           integer               ,
    +ca_street_number          char(10)                      ,
    +ca_street_name            varchar(60)                   ,
    +ca_street_type            char(15)                      ,
    +ca_suite_number           char(10)                      ,
    +ca_city                   varchar(60)                   ,
    +ca_county                 varchar(30)                   ,
    +ca_state                  char(2)                       ,
    +ca_zip                    char(10)                      ,
    +ca_country                varchar(20)                   ,
    +ca_gmt_offset             decimal(5,2)                  
    +)
    +SERVER gsmpp_server
+OPTIONS(LOCATION 'obs://input-data2/data/',
    +FORMAT 'CSV',
    +ENCODING 'utf8', 
    +DELIMITER E'\x08', 
    +QUOTE E'\x1b',
    +ENCRYPT 'off',
    +ACCESS_KEY 'access_key_value_to_be_replaced',
    +SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced'
+)WRITE ONLY;
    +
    + +
    +

  3. In GaussDB(DWS), export the data tables tpcds.customer_address and tpcds.warehouse in parallel to the foreign tables tpcds.customer_address_ext1 and tpcds.customer_address_ext2, respectively.

    INSERT INTO tpcds.customer_address_ext1 SELECT * FROM tpcds.customer_address;
    +
    + +
    +
    INSERT INTO tpcds.customer_address_ext2 SELECT * FROM tpcds.warehouse;
    +
    + +
    +

+

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0256.html b/docs/dws/dev/dws_04_0256.html new file mode 100644 index 00000000..60d637b7 --- /dev/null +++ b/docs/dws/dev/dws_04_0256.html @@ -0,0 +1,22 @@ + + +

Exporting ORC Data to OBS

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0258.html b/docs/dws/dev/dws_04_0258.html new file mode 100644 index 00000000..d1ae5d6b --- /dev/null +++ b/docs/dws/dev/dws_04_0258.html @@ -0,0 +1,13 @@ + + +

Planning Data Export

+

For details about exporting data to OBS, see Planning Data Export.

+

For details about the data types that can be exported to OBS, see Table 2.

+

For details about HDFS data export or MRS configuration, see the MapReduce Service User Guide.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0259.html b/docs/dws/dev/dws_04_0259.html new file mode 100644 index 00000000..8c00e4d5 --- /dev/null +++ b/docs/dws/dev/dws_04_0259.html @@ -0,0 +1,12 @@ + + +

Creating a Foreign Server

+

For details about creating a foreign server on OBS, see Creating a Foreign Server.

+

For details about creating a foreign server in HDFS, see Manually Creating a Foreign Server.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0260.html b/docs/dws/dev/dws_04_0260.html new file mode 100644 index 00000000..5c85118a --- /dev/null +++ b/docs/dws/dev/dws_04_0260.html @@ -0,0 +1,109 @@ + + +

Creating a Foreign Table

+

After operations in Creating a Foreign Server are complete, create an OBS/HDFS write-only foreign table in the GaussDB(DWS) database to access data stored in OBS/HDFS. The foreign table is write-only and can be used only for data export.

+
The syntax for creating a foreign table is as follows. For details, see the syntax CREATE FOREIGN TABLE (SQL on Hadoop or OBS).
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name 
+( [ { column_name type_name 
+    [ { [CONSTRAINT constraint_name] NULL |
+    [CONSTRAINT constraint_name] NOT NULL |
+      column_constraint [...]} ] |
+      table_constraint [, ...]} [, ...] ] ) 
+    SERVER dfs_server 
+    OPTIONS ( { option_name ' value ' } [, ...] ) 
+    [ {WRITE ONLY }]
+    DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
+    [ PARTITION BY ( column_name ) [ AUTOMAPPED ] ] ;
+
+ +
+
+

For example, when creating a foreign table named product_info_ext_obs, set parameters in the syntax as follows:

+ +
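The concrete statement is not included on this page. The following is a minimal sketch based on the read-only import example in Creating a Foreign Table, assuming the obs_server foreign server and the ORC directory /mybucket/demo.db/product_info_orc/, with WRITE ONLY added for export (the column list is truncated for brevity; align it with your source table):

DROP FOREIGN TABLE IF EXISTS product_info_ext_obs;
CREATE FOREIGN TABLE product_info_ext_obs
(
    product_price    integer,
    product_id       char(30),
    product_name     varchar(200)
) SERVER obs_server
OPTIONS (
    format 'orc',
    foldername '/mybucket/demo.db/product_info_orc/',
    encoding 'utf8'
)
WRITE ONLY
DISTRIBUTE BY ROUNDROBIN;

After the table is created, an INSERT INTO product_info_ext_obs SELECT ... statement exports the selected rows to the OBS directory.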
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0261.html b/docs/dws/dev/dws_04_0261.html new file mode 100644 index 00000000..c4778677 --- /dev/null +++ b/docs/dws/dev/dws_04_0261.html @@ -0,0 +1,27 @@ + + +

Using GDS to Export Data to a Remote Server

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0262.html b/docs/dws/dev/dws_04_0262.html new file mode 100644 index 00000000..50175dd9 --- /dev/null +++ b/docs/dws/dev/dws_04_0262.html @@ -0,0 +1,82 @@ + + +

Exporting Data In Parallel Using GDS

+

In high-concurrency scenarios, you can use GDS to export data from a database to a common file system.

+

In the current GDS version, data can be exported from a database to a pipe file.

+ +

Overview

Using foreign tables: A GDS foreign table specifies the exported file format and export mode. Data is exported in parallel through multiple DNs from the database to data files, which improves the overall data export performance. The data files cannot be directly exported to HDFS.
  • The CN only plans data export tasks and delivers the tasks to DNs. In this case, the CN is released to process other tasks.
  • In this way, the computing capabilities and bandwidths of all the DNs are fully leveraged to export data.
    Figure 1 Exporting data using foreign tables
    +
+
+
+

Related Concepts

+
+

Export Mode

Data can be exported from GaussDB(DWS) in Remote mode.

+ +
+

Data Export Process

Figure 2 Concurrent data export
+

+ +
Table 1 Process description

1. Plan data export: prepare the data to be exported and plan the export path for the selected mode. For details, see Planning Data Export.
2. Start GDS: if Remote mode is used, install, configure, and start GDS on the data servers. For details, see Installing, Configuring, and Starting GDS.
3. Create a foreign table: the foreign table specifies information about a data file, such as its location, format, encoding, and delimiter. For details, see Creating a GDS Foreign Table.
4. Export data: after the foreign table is created, run the INSERT statement to efficiently export data to data files. For details, see Exporting Data.
5. Stop GDS: stop GDS after data is exported. For details, see Stopping GDS.

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0263.html b/docs/dws/dev/dws_04_0263.html new file mode 100644 index 00000000..e586e503 --- /dev/null +++ b/docs/dws/dev/dws_04_0263.html @@ -0,0 +1,22 @@ + + +

Planning Data Export

+

Scenarios

Before you use GDS to export data from a cluster, prepare data to be exported and plan the export path.

+
+

Planning an Export Path

+
  1. Log in to the GDS data server as user root and create the /output_data directory for storing data files.

    mkdir -p /output_data
    +

  2. (Optional) Create a user and the user group to which it belongs. This user is used to start GDS and must have the write permission on the directory for storing data files.

    groupadd gdsgrp
    +useradd -g gdsgrp gdsuser
    +

    If the following information is displayed, the user and user group already exist. Skip this step.

    +
    useradd: Account 'gdsuser' already exists.
    +groupadd: Group 'gdsgrp' already exists.
    +

  3. Change the directory owner to gdsuser.

    chown -R gdsuser:gdsgrp /output_data 
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0264.html b/docs/dws/dev/dws_04_0264.html new file mode 100644 index 00000000..6d322964 --- /dev/null +++ b/docs/dws/dev/dws_04_0264.html @@ -0,0 +1,12 @@ + + +

Installing, Configuring, and Starting GDS

+

GDS is a data service tool provided by GaussDB(DWS). Using the foreign table mechanism, this tool helps export data at a high speed.

+

For details, see Installing, Configuring, and Starting GDS.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0265.html b/docs/dws/dev/dws_04_0265.html new file mode 100644 index 00000000..da314e9b --- /dev/null +++ b/docs/dws/dev/dws_04_0265.html @@ -0,0 +1,60 @@ + + +

Creating a GDS Foreign Table

+

Procedure

  1. Set the location parameter for the foreign table based on the path planned in Planning Data Export.

    • Remote mode

      Set the location parameter to the URL of the directory that stores the data files.

      +

      You do not need to specify any file.

      +

      For example:

      +

      The IP address of the GDS data server is 192.168.0.90. The listening port number set during GDS startup is 5000. The directory for storing data files is /output_data.

      +

      In this case, set the location parameter to gsfs://192.168.0.90:5000/.

      +
      • location can be set to a subdirectory, for example, gsfs://192.168.0.90:5000/2019/11/, so that the same table can be exported to different directories by date.
      • In the current version, when an export task is executed, the system checks whether the /output_data/2019/11 directory exists. If the directory does not exist, the system creates it. During the export, files are written to this directory. In this way, you do not need to manually run the mkdir -p /output_data/2019/11 command after creating or modifying a foreign table.
      +
      +
    +

  2. Set data format parameters in the foreign table based on the planned data file formats. For details about format parameters, see data format parameters.
  3. Create a GDS foreign table based on the parameter settings in the preceding steps. For details about how to create a foreign table, see CREATE FOREIGN TABLE (for GDS Import and Export).
+
+

Example

+
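The example content is not included on this page. The following sketch mirrors the Remote-mode example in Examples of Exporting Data Using GDS, assuming GDS listens on 192.168.0.90:5000 and writes exported files to its data directory:

CREATE FOREIGN TABLE foreign_tpcds_reasons
(
  r_reason_sk    integer     not null,
  r_reason_id    char(16)    not null,
  r_reason_desc  char(100)
) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.90:5000/', FORMAT 'CSV', ENCODING 'utf8', DELIMITER E'\x08', QUOTE E'\x1b', NULL '') WRITE ONLY;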
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0266.html b/docs/dws/dev/dws_04_0266.html new file mode 100644 index 00000000..168453cb --- /dev/null +++ b/docs/dws/dev/dws_04_0266.html @@ -0,0 +1,20 @@ + + +

Exporting Data

+

Prerequisites

Ensure that the IP addresses and ports of servers where CNs and DNs are deployed can connect to those of the GDS server.

+
+

Procedure

  1. Export data.

    INSERT INTO  [Foreign table name] SELECT * FROM [Source table name];
    +
    + +
    +

    Create batch processing scripts to export data in parallel, as shown in the sketch after this step. The degree of parallelism depends on the server resource usage. You can test with several tables and monitor resource usage to decide whether to increase or reduce the number of concurrent tasks. Common resource monitoring commands include top (memory and CPU usage), iostat (I/O usage), and sar (network). For details about application cases, see Exporting Data Using Multiple Threads.

    +
    +
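    A minimal sketch of such a batch script, assuming the two GDS foreign tables from Exporting Data Using Multiple Threads and placeholder gsql connection values (database, host, and port must match your cluster):

    #!/bin/bash
    # Run two export statements in parallel and wait for both to finish.
    gsql -d mydatabase -h 10.10.0.1 -p 8000 -c "INSERT INTO foreign_tpcds_reasons1 SELECT * FROM tpcds.reason;" &
    gsql -d mydatabase -h 10.10.0.1 -p 8000 -c "INSERT INTO foreign_tpcds_reasons2 SELECT * FROM tpcds.reason;" &
    wait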

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0267.html b/docs/dws/dev/dws_04_0267.html new file mode 100644 index 00000000..49730fc4 --- /dev/null +++ b/docs/dws/dev/dws_04_0267.html @@ -0,0 +1,12 @@ + + +

Stopping GDS

+

GDS is a data service tool provided by GaussDB(DWS). Using the foreign table mechanism, this tool helps export data at a high speed.

+

For details, see Stopping GDS.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0268.html b/docs/dws/dev/dws_04_0268.html new file mode 100644 index 00000000..098c3fb8 --- /dev/null +++ b/docs/dws/dev/dws_04_0268.html @@ -0,0 +1,167 @@ + + +

Examples of Exporting Data Using GDS

+

Exporting Data in Remote Mode

The data server and the cluster reside on the same intranet, the IP address of the data server is 192.168.0.90, and data source files are in CSV format. In this scenario, data is exported in parallel in Remote mode.

+

To export data in parallel in Remote mode, perform the following operations:

+
  1. Log in to the GDS data server as user root and create the /output_data directory for storing data files.
    mkdir -p /output_data
    +
  2. (Optional) Create a user and the user group it belongs to. The user is used to start GDS. If the user and user group exist, skip this step.
    groupadd gdsgrp
    +useradd -g gdsgrp gds_user
    +
  3. Change the owner of the /output_data directory on the data server to gds_user.
    chown -R gds_user:gdsgrp /output_data 
    +
  4. Log in to the data server as user gds_user and start GDS.
    The GDS installation path is /opt/bin/dws/gds. Exported data files are stored in /output_data/. The IP address of the data server is 192.168.0.90. The GDS listening port is 5000. GDS runs in daemon mode.
    /opt/bin/dws/gds/bin/gds -d /output_data -p 192.168.0.90:5000 -H 10.10.0.1/24 -D
    +
    +
  5. In the database, create the foreign table foreign_tpcds_reasons for receiving data from the data server.

    Data export mode settings are as follows:

    +
    • When GDS is started, the directory for storing exported files is /output_data/ and the listening port is 5000. Therefore, the location parameter is set to gsfs://192.168.0.90:5000/.
    +

    Data format parameter settings are as follows:

    +
    • format is set to CSV.
    • encoding is set to UTF-8.
    • delimiter is set to E'\x08'.
    • quote is set to E'\x1b'.
    • null is set to an empty string without quotation marks.
    • escape defaults to the value of quote.
    • header is set to false, indicating that the exported files contain no header row (the first row is data).
    +

    Based on the above settings, the foreign table is created using the following statement:

    +
    CREATE FOREIGN TABLE foreign_tpcds_reasons
    +(
    +  r_reason_sk    integer        not null,
    +  r_reason_id    char(16)       not null,
    +  r_reason_desc  char(100)
    +) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.90:5000/', FORMAT 'CSV',ENCODING 'utf8',DELIMITER E'\x08', QUOTE E'\x1b', NULL '') WRITE ONLY;
    +
    + +
    +
  6. In the database, export data to data files through the foreign table foreign_tpcds_reasons.
    INSERT INTO foreign_tpcds_reasons SELECT * FROM tpcds.reason;
    +
    + +
    +
  7. After data export is complete, log in to the data server as user gds_user and stop GDS.
    The GDS process ID is 128954.
    ps -ef|grep gds
    +gds_user 128954      1  0 15:03 ?        00:00:00 gds -d /output_data -p 192.168.0.90:5000 -D
    +gds_user 129003 118723  0 15:04 pts/0    00:00:00 grep gds
    +kill -9 128954
    +
    +
+
+

Exporting Data Using Multiple Threads

The data server and the cluster reside on the same intranet, the IP address of the data server is 192.168.0.90, and data source files are in CSV format. In this scenario, data is concurrently exported to two target tables using multiple threads in Remote mode.

+

To concurrently export data using multiple threads in Remote mode, perform the following operations:

+
  1. Log in to the GDS data server as user root, create the /output_data directory for storing data files, and create the database user and its user group.
    mkdir -p /output_data
    +groupadd gdsgrp
    +useradd -g gdsgrp gds_user
    +
  2. Change the owner of the /output_data directory on the data server to gds_user.
    chown -R gds_user:gdsgrp /output_data 
    +
  3. Log in to the data server as user gds_user and start GDS.
    The GDS installation path is /opt/bin/dws/gds. Exported data files are stored in /output_data/. The IP address of the data server is 192.168.0.90. The GDS listening port is 5000. GDS runs in daemon mode. The degree of parallelism is 2.
    /opt/bin/dws/gds/bin/gds -d /output_data -p 192.168.0.90:5000 -H 10.10.0.1/24 -D -t 2 
    +
    +
  4. In GaussDB(DWS), create the foreign tables foreign_tpcds_reasons1 and foreign_tpcds_reasons2 for receiving data from the data server.
    • Data export mode settings are as follows:
      • When GDS is started, the directory for storing exported files is /output_data/ and the listening port is 5000. Therefore, the location parameter is set to gsfs://192.168.0.90:5000/.
      +
    • Data format parameter settings are as follows:
      • format is set to CSV.
      • encoding is set to UTF-8.
      • delimiter is set to E'\x08'.
      • quote is set to E'\x1b'.
      • null is set to an empty string without quotation marks.
      • escape defaults to the value of quote.
      • header is set to false, indicating that the exported files contain no header row (the first row is data).
      +
    +

    Based on the preceding settings, the foreign table foreign_tpcds_reasons1 is created using the following statement:

    +
    CREATE FOREIGN TABLE foreign_tpcds_reasons1
    +(  
    +  r_reason_sk    integer     not null,
    +  r_reason_id    char(16)    not null,
    +  r_reason_desc  char(100)
    +) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.90:5000/', FORMAT 'CSV',ENCODING 'utf8', DELIMITER E'\x08', QUOTE E'\x1b', NULL '') WRITE ONLY;
    +
    + +
    +

    Based on the preceding settings, the foreign table foreign_tpcds_reasons2 is created using the following statement:

    +
    CREATE FOREIGN TABLE foreign_tpcds_reasons2
    +(  
    +  r_reason_sk    integer     not null,
    +  r_reason_id    char(16)    not null,
    +  r_reason_desc  char(100)
    +) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.90:5000/', FORMAT 'CSV', DELIMITER E'\x08', QUOTE E'\x1b', NULL '') WRITE ONLY;
    +
    + +
    +
  5. In the database, export data from table reasons1 through the foreign table foreign_tpcds_reasons1 and from table reasons2 through the foreign table foreign_tpcds_reasons2 to /output_data.
    INSERT INTO foreign_tpcds_reasons1 SELECT * FROM tpcds.reason;
    +
    + +
    +
    INSERT INTO foreign_tpcds_reasons2 SELECT * FROM tpcds.reason;
    +
    + +
    +
  6. After data export is complete, log in to the data server as user gds_user and stop GDS.
    The GDS process ID is 128954.
    ps -ef|grep gds
    +gds_user 128954      1  0 15:03 ?        00:00:00 gds -d /output_data -p 192.168.0.90:5000 -D -t 2 
    +gds_user 129003 118723  0 15:04 pts/0    00:00:00 grep gds
    +kill -9 128954
    +
    +
+
+

Exporting Data Through a Pipe

  1. Start GDS.

    gds -d /***/gds_data/ -D -p 192.168.0.1:7789 -l /***/gds_log/aa.log -H 0/0 -t 10
    +

    If you need to set the timeout interval of a pipe, use the --pipe-timeout parameter.

    +
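    For example (a sketch; the flag name comes from this note, and the value 300 seconds is illustrative):

    gds -d /***/gds_data/ -p 192.168.0.1:7789 -l /***/gds_log/aa.log -H 0/0 -t 10 -D --pipe-timeout 300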

  2. Export data.

    1. Log in to the database, create an internal table, and write data to the table.
      CREATE TABLE test_pipe( id integer not null, sex text not null, name text ) ;
      +
      +INSERT INTO test_pipe values(1,2,'11111111111111');
      +INSERT INTO test_pipe values(2,2,'11111111111111');
      +INSERT INTO test_pipe values(3,2,'11111111111111');
      +INSERT INTO test_pipe values(4,2,'11111111111111');
      +
    2. Create a write-only foreign table.
      CREATE FOREIGN TABLE foreign_test_pipe_tw( id integer not null, age text not null, name  text ) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.1:7789/', FORMAT 'text', DELIMITER ',',  NULL '', EOL '0x0a' ,file_type 'pipe', auto_create_pipe 'false') WRITE ONLY;
      +
    3. Execute the export statement. The statement blocks until the pipe is read on the GDS server.
      INSERT INTO foreign_test_pipe_tw select * from test_pipe; 
      +
    +

  3. Export data through the GDS pipe.

    1. Log in to the GDS server and go to the GDS data directory.
      cd /***/gds_data/  
      +
    2. Create a pipe. If auto_create_pipe is set to true, skip this step.
      mkfifo postgres_public_foreign_test_pipe_tw.pipe 
      +

      A pipe will be automatically cleared after an operation is complete. To perform another operation, create a pipe again.

      +
      +
    3. Read data from the pipe and write it to a new file.
      cat postgres_public_foreign_test_pipe_tw.pipe > postgres_public_foreign_test_pipe_tw.txt
      +
    4. To compress the exported files, run the following command:
      gzip -9 -c < postgres_public_foreign_test_pipe_tw.pipe  > out.gz 
      +
    5. To export the content from the pipe to the HDFS server, run the following command:
      cat postgres_public_foreign_test_pipe_tw.pipe  | hdfs dfs -put -  /user/hive/***/test_pipe.txt
      +
    +

  4. Verify the exported data.

    1. Check whether the exported file is correct.
      cat postgres_public_foreign_test_pipe_tw.txt
      +3,2,11111111111111
      +1,2,11111111111111
      +2,2,11111111111111
      +4,2,11111111111111
      +
    2. Check the compressed file.
      vim out.gz
      +3,2,11111111111111
      +1,2,11111111111111
      +2,2,11111111111111
      +4,2,11111111111111
      +
    3. Check the data exported to the HDFS server.
      hdfs dfs -cat /user/hive/***/test_pipe.txt
      +3,2,11111111111111
      +1,2,11111111111111
      +2,2,11111111111111
      +4,2,11111111111111
      +
    +

+
+

Exporting Data Through Multi-Process Pipes

GDS also supports importing and exporting data through multi-process pipes; that is, one foreign table corresponds to multiple GDS instances.

+

The following uses exporting to local files as an example.

+
  1. Start multiple GDSs.

    gds -d /***/gds_data/ -D -p 192.168.0.1:7789 -l /***/gds_log/aa.log -H 0/0 -t 10
    +gds -d /***/gds_data_1/ -D -p 192.168.0.1:7790 -l /***/gds_log/aa.log -H 0/0 -t 10
    +

    If you need to set the timeout interval of a pipe, use the --pipe-timeout parameter.

    +

  2. Export data.

    1. Log in to the database and create an internal table.
      CREATE TABLE test_pipe (id integer not null, sex text not null, name  text);
      +
    2. Write data.
      INSERT INTO test_pipe values(1,2,'11111111111111');
      +INSERT INTO test_pipe values(2,2,'11111111111111');
      +INSERT INTO test_pipe values(3,2,'11111111111111');
      +INSERT INTO test_pipe values(4,2,'11111111111111');
      +
    3. Create a write-only foreign table.
      CREATE FOREIGN TABLE foreign_test_pipe_tw( id integer not null, age text not null, name  text ) SERVER gsmpp_server OPTIONS (LOCATION 'gsfs://192.168.0.1:7789/|gsfs://192.168.0.1:7790/', FORMAT 'text', DELIMITER ',',  NULL '', EOL '0x0a' ,file_type 'pipe', auto_create_pipe 'false') WRITE ONLY;
      +
    4. Execute the export statement. The statement blocks until the pipes are read on the GDS servers.
      INSERT INTO foreign_test_pipe_tw select * from test_pipe; 
      +
    +

  3. Export data through the GDS pipes.

    1. Log in to each GDS server and go to its GDS data directory.
      cd /***/gds_data/ 
      +cd /***/gds_data_1/ 
      +
    2. Create a pipe. If auto_create_pipe is set to true, skip this step.
      mkfifo postgres_public_foreign_test_pipe_tw.pipe 
      +
    3. Read data from each pipe and write it to a new file.
      cat postgres_public_foreign_test_pipe_tw.pipe > postgres_public_foreign_test_pipe_tw.txt
      +
    +

  4. Verify the exported data.

    cat /***/gds_data/postgres_public_foreign_test_pipe_tw.txt
    +3,2,11111111111111
    +
    cat /***/gds_data_1/postgres_public_foreign_test_pipe_tw.txt
    +1,2,11111111111111
    +2,2,11111111111111
    +4,2,11111111111111
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0269.html b/docs/dws/dev/dws_04_0269.html new file mode 100644 index 00000000..a171d8a0 --- /dev/null +++ b/docs/dws/dev/dws_04_0269.html @@ -0,0 +1,21 @@ + + +

Using gs_dump and gs_dumpall to Export Metadata

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0270.html b/docs/dws/dev/dws_04_0270.html new file mode 100644 index 00000000..cc0b900c --- /dev/null +++ b/docs/dws/dev/dws_04_0270.html @@ -0,0 +1,68 @@ + + +

Overview

+

GaussDB(DWS) provides gs_dump and gs_dumpall to export required database objects and related information. To migrate database information, you can use a tool to import the exported metadata to a target database. gs_dump exports a single database or its objects. gs_dumpall exports all databases or global objects in a cluster. For details, see Table 1.

+ +
Table 1 Application scenarios

Exporting a single database (export formats: plain text, custom, directory, and .tar):
• Database-level export: export full information of a database (to re-create an identical database containing the same data), export all object definitions of a database (database, functions, schemas, tables, indexes, and stored procedures, to quickly re-create an identical database without data), or export only the data of a database.
• Schema-level export: export full information, data, or all object definitions (tables, stored procedures, and indexes) of a schema.
• Table-level export: export full information, data, or the definition of a table.

Exporting all databases in a cluster (export format: plain text; for details about how to import the exported data files, see Using the gsql Meta-Command \COPY to Import Data):
• Database-level export: export full information of a cluster (to re-create an identical cluster containing the same databases, global objects, and data), export all object definitions of a cluster (tablespaces, databases, functions, schemas, tables, indexes, and stored procedures, to quickly re-create an identical cluster without data), or export only the data of a cluster.
• Global object export: export tablespaces, roles, or both.
+
+
+
+

gs_dump and gs_dumpall use -U to specify the user that performs the export. If the specified user does not have the required permission, data cannot be exported. In this case, you can set --role in the export command to the role that has the permission. Then, gs_dump or gs_dumpall uses the specified role to export data. See Table 1 for application scenarios and Data Export By a User Without Required Permissions for operation details.

+

gs_dump and gs_dumpall can encrypt the exported data files. Encrypted files must be decrypted before being imported, which prevents data disclosure and improves database security.

+

When gs_dump or gs_dumpall is used to export data from a cluster, other users can still access (read data from and write data to) databases in the cluster.

+

gs_dump and gs_dumpall can export complete, consistent data. For example, if gs_dump is used to export database A or gs_dumpall is used to export all databases from a cluster at T1, data of database A or all databases in the cluster at that time point will be exported, and modifications on the databases after that time point will not be exported.

+

Obtain gs_dump and gs_dumpall by decompressing the gsql CLI client package.

+
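For example, a minimal gs_dumpall invocation exporting all databases in a cluster as plain text might look as follows (a sketch; the connection values are placeholders mirroring the gs_dump examples later in this section):

gs_dumpall -W password -U dbadmin -f /home//backup/all_backup.sql -p 8000 -h 10.10.10.100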

Precautions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0271.html b/docs/dws/dev/dws_04_0271.html new file mode 100644 index 00000000..004a8ee6 --- /dev/null +++ b/docs/dws/dev/dws_04_0271.html @@ -0,0 +1,20 @@ + + +

Exporting a Single Database

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0272.html b/docs/dws/dev/dws_04_0272.html new file mode 100644 index 00000000..552f685b --- /dev/null +++ b/docs/dws/dev/dws_04_0272.html @@ -0,0 +1,115 @@ + + +

Exporting a Database

+

You can use gs_dump to export data and all object definitions of a database from GaussDB(DWS). You can specify the information to be exported as follows:

+ +

Procedure

  1. Prepare an ECS as the gsql client host. For details, see "Preparing an ECS as the gsql Client Host" in the Data Warehouse Service (DWS) User Guide.
  2. Download the gsql client and use an SSH transfer tool (such as WinSCP) to upload it to the Linux server where gsql is to be installed. For details, see "Downloading the Client" in Data Warehouse Service User Guide.

    The user who uploads the client must have the full control permission on the target directory on the host to which the client is uploaded.

    +

  3. Run the following commands to decompress the client:

    cd <Path_for_storing_the_client>
    +unzip dws_client_8.1.x_redhat_x64.zip
    +

    Where,

    +
    • <Path_for_storing_the_client>: Replace it with the actual path.
    • dws_client_8.1.x_redhat_x64.zip: the client tool package for RedHat x64, as used in the command above. Replace it with the actual package name.
    +

  4. Run the following command to configure the GaussDB(DWS) client:

    source gsql_env.sh
    +

    If the following information is displayed, the GaussDB(DWS) client is successfully configured:

    +
    All things done.
    +

  5. Use gs_dump to export data of the database gaussdb.

    gs_dump -W password -U jack -f /home//backup/postgres_backup.tar -p 8000 gaussdb  -h 10.10.10.100 -F t 
    + +
    Table 1 Common parameters

    • -U: Username for connecting to the database. If this parameter is not specified, the username of the current database connection is used. Example: -U jack
    • -W: Password for the database connection. This parameter is not required for database administrators when trust authentication is used; if you omit it and are not a database administrator, you are prompted to enter the password. Example: -W Password
    • -f: Path for storing the exported file. If this parameter is not specified, the export result is sent to the standard output. Example: -f /home//backup/postgres_backup.tar
    • -p: TCP port on which the server is listening, or the file name extension of the local Unix domain socket, used to ensure the connection. Example: -p 8000
    • -h: Cluster address. If a public network address is used for the connection, set this parameter to the public network address or public network domain name; if a private network address is used, set it to the private network address or private network domain name. Example: -h 10.10.10.100
    • dbname: Name of the database to be exported. Example: gaussdb
    • -F: Format of the exported file. The value can be p (plain text), c (custom), d (directory), or t (.tar). Example: -F t

    +
    +
    +

    For details about other parameters, see "gs_dump" in the Tool Guide.

    +

+
+

Examples

Example 1: Run the following command to use gs_dump to export full information of the gaussdb database. The exported file is in plain-text format and compressed at level 8.

gs_dump -W password -U jack -f /home//backup/postgres_backup.sql -p 8000 -h 10.10.10.100 gaussdb -Z 8 -F p
gs_dump[port=''][gaussdb][2017-07-21 15:36:13]: dump database gaussdb successfully
gs_dump[port=''][gaussdb][2017-07-21 15:36:13]: total time: 3793 ms

Example 2: Run the following command to use gs_dump to export only the data of the gaussdb database, excluding object definitions. The exported file is in custom format.

gs_dump -W Password -U jack -f /home//backup/postgres_data_backup.dmp -p 8000 -h 10.10.10.100 gaussdb -a -F c
gs_dump[port=''][gaussdb][2017-07-21 15:36:13]: dump database gaussdb successfully
gs_dump[port=''][gaussdb][2017-07-21 15:36:13]: total time: 3793 ms

Example 3: Run the following command to use gs_dump to export only the object definitions of the gaussdb database. The exported file is in plain-text format. Before the export, the nation table contains data:

select n_nationkey,n_name,n_regionkey from nation limit 3;
 n_nationkey |          n_name           | n_regionkey 
-------------+---------------------------+-------------
           0 | ALGERIA                   |           0
           3 | CANADA                    |           1
          11 | IRAQ                      |           4
(3 rows)

gs_dump -W password -U jack -f /home//backup/postgres_def_backup.sql -p 8000 -h 10.10.10.100 gaussdb -s -F p
gs_dump[port=''][gaussdb][2017-07-20 15:04:14]: dump database gaussdb successfully
gs_dump[port=''][gaussdb][2017-07-20 15:04:14]: total time: 472 ms

Example 4: Run the following command to use gs_dump to export the object definitions of the gaussdb database, encrypting the exported file with AES128. The exported file is in plain-text format.

gs_dump -W password -U jack -f /home//backup/postgres_def_backup.sql -p 8000 -h 10.10.10.100 gaussdb --with-encryption AES128 --with-key 1234567812345678 -s -F p
gs_dump[port=''][gaussdb][2018-11-14 11:25:18]: dump database gaussdb successfully
gs_dump[port=''][gaussdb][2018-11-14 11:25:18]: total time: 1161 ms
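An archive such as the .tar file produced in step 5 is typically loaded back with gs_restore. The following is a minimal sketch, not taken from this guide: the connection options are assumed to mirror gs_dump's, and -d names an existing target database. Verify the options against "gs_restore" in the Tool Guide before relying on them.

# Hypothetical import of the .tar archive created in step 5;
# check gs_restore options in the Tool Guide before use.
gs_restore -W password -U jack -p 8000 -h 10.10.10.100 -d gaussdb /home//backup/postgres_backup.tar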

Exporting a Schema

You can use gs_dump to export the data and all object definitions of one or more schemas from GaussDB(DWS). You can export a schema's full information, only its data, or only its object definitions, as shown in the examples below.

Procedure

  1. Prepare an ECS as the gsql client host. For details, see "Preparing an ECS as the gsql Client Host" in the Data Warehouse Service (DWS) User Guide.
  2. Download the gsql client and use an SSH transfer tool (such as WinSCP) to upload it to the Linux server where gsql is to be installed. For details, see "Downloading the Client" in the Data Warehouse Service (DWS) User Guide.

    The user who uploads the client must have full control permission on the target directory on the host to which the client is uploaded.

  3. Run the following commands to decompress the client:

    cd <Path_for_storing_the_client>
    unzip dws_client_8.1.x_redhat_x64.zip

    Where,

    • <Path_for_storing_the_client>: Replace it with the actual path.
    • dws_client_8.1.x_redhat_x64.zip: This is the client tool package for RedHat x64. Replace it with the name of the actual package you downloaded.

  4. Run the following command to configure the GaussDB(DWS) client:

    source gsql_env.sh

    If the following information is displayed, the GaussDB(DWS) client is successfully configured:

    All things done.
  5. Run the following command to use gs_dump to export the hr schema of the human_resource database:

    gs_dump -W Password -U jack -f /home//backup/MPPDB_schema_backup -p 8000 -h 10.10.10.100 human_resource -n hr -F d
    Table 1 Common parameters

    -U: Username for connecting to the database. If this parameter is not configured, the username of the connected database is used. Example: -U jack
    -W: User password for database connection. This parameter is not required for database administrators if the trust policy is used for authentication. If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. Example: -W Password
    -f: File or folder to store the exported files. If this parameter is not specified, the exported files are written to the standard output. Example: -f /home//backup/MPPDB_schema_backup
    -p: TCP port on which the server is listening, or the file name extension of the local Unix domain socket, used to ensure a correct connection. Example: -p 8000
    -h: Cluster address. If a public network address is used for connection, set this parameter to the public network address or public network domain name. If a private network address is used for connection, set this parameter to the private network address or private network domain name. Example: -h 10.10.10.100
    dbname: Name of the database to be exported. Example: human_resource
    -n: Names of the schemas to be exported; data of the specified schemas is also exported. For a single schema, enter -n schemaname; for multiple schemas, repeat -n schemaname for each one. Example: -n hr (single schema); -n hr -n public (multiple schemas)
    -F: Format of the exported files. The values of -F are p (plain text), c (custom), d (directory), and t (.tar). Example: -F d

    For details about other parameters, see "gs_dump" in the Tool Guide.

Examples

Example 1: Run the following command to use gs_dump to export full information of the hr schema. The exported file is in plain-text format and compressed at level 6.

gs_dump -W password -U jack -f /home//backup/MPPDB_schema_backup.sql -p 8000 -h 10.10.10.100 human_resource -n hr -Z 6 -F p
gs_dump[port=''][human_resource][2017-07-21 16:05:55]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 16:05:55]: total time: 2425 ms

Example 2: Run the following command to use gs_dump to export only the data of the hr schema. The exported file is in .tar format.

gs_dump -W password -U jack -f /home//backup/MPPDB_schema_data_backup.tar -p 8000 -h 10.10.10.100 human_resource -n hr -a -F t
gs_dump[port=''][human_resource][2018-11-14 15:07:16]: dump database human_resource successfully
gs_dump[port=''][human_resource][2018-11-14 15:07:16]: total time: 1865 ms

Example 3: Run the following command to use gs_dump to export only the definition of the hr schema. The exported files are stored in a directory.

gs_dump -W password -U jack -f /home//backup/MPPDB_schema_def_backup -p 8000 -h 10.10.10.100 human_resource -n hr -s -F d
gs_dump[port=''][human_resource][2018-11-14 15:11:34]: dump database human_resource successfully
gs_dump[port=''][human_resource][2018-11-14 15:11:34]: total time: 1652 ms

Example 4: Run the following command to use gs_dump to export the human_resource database excluding the hr schema. The exported file is in custom format.

gs_dump -W password -U jack -f /home//backup/MPPDB_schema_backup.dmp -p 8000 -h 10.10.10.100 human_resource -N hr -F c
gs_dump[port=''][human_resource][2017-07-21 16:06:31]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 16:06:31]: total time: 2522 ms

Example 5: Run the following command to use gs_dump to export the object definitions of the hr and public schemas, encrypting the exported file with AES128. The exported file is in .tar format.

gs_dump -W password -U jack -f /home//backup/MPPDB_schema_backup1.tar -p 8000 -h 10.10.10.100 human_resource -n hr -n public -s --with-encryption AES128 --with-key 1234567812345678 -F t
gs_dump[port=''][human_resource][2017-07-21 16:07:16]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 16:07:16]: total time: 2132 ms

Example 6: Run the following command to use gs_dump to export the human_resource database excluding the hr and public schemas. The exported file is in custom format.

gs_dump -W password -U jack -f /home//backup/MPPDB_schema_backup2.dmp -p 8000 -h 10.10.10.100 human_resource -N hr -N public -F c
gs_dump[port=''][human_resource][2017-07-21 16:07:55]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 16:07:55]: total time: 2296 ms

Example 7: Run the following command to use gs_dump to export all tables (including views, sequences, and foreign tables) in the public schema and the staffs table in the hr schema, including data and table definitions. The exported file is in custom format.

gs_dump -W password -U jack -f /home//backup/MPPDB_backup3.dmp -p 8000 -h 10.10.10.100 human_resource -t public.* -t hr.staffs -F c
gs_dump[port=''][human_resource][2018-12-13 09:40:24]: dump database human_resource successfully
gs_dump[port=''][human_resource][2018-12-13 09:40:24]: total time: 896 ms

Exporting a Table

You can use gs_dump to export the data and definitions of table-level objects from GaussDB(DWS); views, sequences, and foreign tables are treated as special tables. You can export one or more specified tables, and for each you can export full information, only the data, or only the definition, as shown in the examples below.

Procedure

  1. Prepare an ECS as the gsql client host. For details, see "Preparing an ECS as the gsql Client Host" in the Data Warehouse Service (DWS) User Guide.
  2. Download the gsql client and use an SSH transfer tool (such as WinSCP) to upload it to the Linux server where gsql is to be installed. For details, see "Downloading the Client" in the Data Warehouse Service (DWS) User Guide.

    The user who uploads the client must have full control permission on the target directory on the host to which the client is uploaded.

  3. Run the following commands to decompress the client:

    cd <Path_for_storing_the_client>
    unzip dws_client_8.1.x_redhat_x64.zip

    Where,

    • <Path_for_storing_the_client>: Replace it with the actual path.
    • dws_client_8.1.x_redhat_x64.zip: This is the client tool package for RedHat x64. Replace it with the name of the actual package you downloaded.

  4. Run the following command to configure the GaussDB(DWS) client:

    source gsql_env.sh

    If the following information is displayed, the GaussDB(DWS) client is successfully configured:

    All things done.
  5. Run the following command to use gs_dump to export the hr.staffs table of the human_resource database:

    gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup -p 8000 -h 10.10.10.100 human_resource -t hr.staffs -F d
    Table 1 Common parameters

    -U: Username for connecting to the database. If this parameter is not configured, the username of the connected database is used. Example: -U jack
    -W: User password for database connection. This parameter is not required for database administrators if the trust policy is used for authentication. If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. Example: -W password
    -f: File or folder to store the exported files. If this parameter is not specified, the exported files are written to the standard output. Example: -f /home//backup/MPPDB_table_backup
    -p: TCP port on which the server is listening, or the file name extension of the local Unix domain socket, used to ensure a correct connection. Example: -p 8000
    -h: Cluster address. If a public network address is used for connection, set this parameter to the public network address or public network domain name. If a private network address is used for connection, set this parameter to the private network address or private network domain name. Example: -h 10.10.10.100
    dbname: Name of the database to be exported. Example: human_resource
    -t: Table (or view, sequence, or foreign table) to be exported. You can specify multiple tables by repeating -t, or match several at once with wildcard characters; quote wildcard patterns with single quotation marks ('') so that the shell does not expand them (a usage sketch follows this table). Example: -t hr.staffs (single table); -t hr.staffs -t hr.employments (multiple tables)
    -F: Format of the exported files. The values of -F are p (plain text), c (custom), d (directory), and t (.tar). Example: -F d

    For details about other parameters, see "gs_dump" in the Tool Guide.
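As a sketch of the quoting rule for -t described in Table 1: the pattern hr.staff* below is a hypothetical example that matches every table in the hr schema whose name starts with staff, and the single quotation marks keep the shell from expanding * before gs_dump sees it.

# Hypothetical wildcard export; the quotes pass 'hr.staff*' through
# to gs_dump unchanged instead of letting the shell expand it.
gs_dump -W password -U jack -f /home//backup/MPPDB_table_wildcard_backup -p 8000 -h 10.10.10.100 human_resource -t 'hr.staff*' -F d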

Examples

Example 1: Run the following command to use gs_dump to export full information of the hr.staffs table. The exported file is in plain-text format and compressed at level 6.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup.sql -p 8000 -h 10.10.10.100 human_resource -t hr.staffs -Z 6 -F p
gs_dump[port=''][human_resource][2017-07-21 17:05:10]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 17:05:10]: total time: 3116 ms

Example 2: Run the following command to use gs_dump to export only the data of the hr.staffs table. The exported file is in .tar format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_data_backup.tar -p 8000 -h 10.10.10.100 human_resource -t hr.staffs -a -F t
gs_dump[port=''][human_resource][2017-07-21 17:04:26]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 17:04:26]: total time: 2570 ms

Example 3: Run the following command to use gs_dump to export only the definition of the hr.staffs table. The exported files are stored in a directory.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_def_backup -p 8000 -h 10.10.10.100 human_resource -t hr.staffs -s -F d
gs_dump[port=''][human_resource][2017-07-21 17:03:09]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 17:03:09]: total time: 2297 ms

Example 4: Run the following command to use gs_dump to export the human_resource database excluding the hr.staffs table. The exported file is in custom format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup4.dmp -p 8000 -h 10.10.10.100 human_resource -T hr.staffs -F c
gs_dump[port=''][human_resource][2017-07-21 17:14:11]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 17:14:11]: total time: 2450 ms

Example 5: Run the following command to use gs_dump to export the hr.staffs and hr.employments tables. The exported file is in plain-text format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup1.sql -p 8000 -h 10.10.10.100 human_resource -t hr.staffs -t hr.employments -F p
gs_dump[port=''][human_resource][2017-07-21 17:19:42]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 17:19:42]: total time: 2414 ms

Example 6: Run the following command to use gs_dump to export the human_resource database excluding the hr.staffs and hr.employments tables. The exported file is in plain-text format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup2.sql -p 8000 -h 10.10.10.100 human_resource -T hr.staffs -T hr.employments -F p
gs_dump[port=''][human_resource][2017-07-21 17:21:02]: dump database human_resource successfully
gs_dump[port=''][human_resource][2017-07-21 17:21:02]: total time: 3165 ms

Example 7: Run the following command to use gs_dump to export the data and definition of the hr.staffs table and only the definition of the hr.employments table. The exported file is in .tar format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup3.tar -p 8000 -h 10.10.10.100 human_resource -t hr.staffs -t hr.employments --exclude-table-data hr.employments -F t
gs_dump[port=''][human_resource][2018-11-14 11:32:02]: dump database human_resource successfully
gs_dump[port=''][human_resource][2018-11-14 11:32:02]: total time: 1645 ms

Example 8: Run the following command to use gs_dump to export the data and definition of the hr.staffs table, encrypting the exported file with AES128. The exported file is in plain-text format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup4.sql -p 8000 -h 10.10.10.100 human_resource -t hr.staffs --with-encryption AES128 --with-key 1212121212121212 -F p
gs_dump[port=''][human_resource][2018-11-14 11:35:30]: dump database human_resource successfully
gs_dump[port=''][human_resource][2018-11-14 11:35:30]: total time: 6708 ms

Example 9: Run the following command to use gs_dump to export all tables (including views, sequences, and foreign tables) in the public schema and the staffs table in the hr schema, including data and table definitions. The exported file is in custom format.

gs_dump -W password -U jack -f /home//backup/MPPDB_table_backup5.dmp -p 8000 -h 10.10.10.100 human_resource -t public.* -t hr.staffs -F c
gs_dump[port=''][human_resource][2018-12-13 09:40:24]: dump database human_resource successfully
gs_dump[port=''][human_resource][2018-12-13 09:40:24]: total time: 896 ms

Example 10: Run the following command to use gs_dump to export the definitions of the views that reference the test1 table in the t1 schema, excluding the test1 table itself. The exported files are stored in a directory.

gs_dump -W password -U jack -f /home//backup/MPPDB_view_backup6 -p 8000 -h 10.10.10.100 human_resource -t t1.test1 --include-depend-objs --exclude-self -F d
gs_dump[port=''][jack][2018-11-14 17:21:18]: dump database human_resource successfully
gs_dump[port=''][jack][2018-11-14 17:21:23]: total time: 4239 ms

Exporting All Databases


Exporting All Databases

You can use gs_dumpall to export the full information of all databases in a cluster from GaussDB(DWS), including information about each database and the global objects in the cluster. You can export all information, only the object definitions, or only the data, as shown in the examples below.

Procedure

  1. Prepare an ECS as the gsql client host. For details, see "Preparing an ECS as the gsql Client Host" in the Data Warehouse Service (DWS) User Guide.
  2. Download the gsql client and use an SSH transfer tool (such as WinSCP) to upload it to the Linux server where gsql is to be installed. For details, see "Downloading the Client" in the Data Warehouse Service (DWS) User Guide.

    The user who uploads the client must have full control permission on the target directory on the host to which the client is uploaded.

  3. Run the following commands to decompress the client:

    cd <Path_for_storing_the_client>
    unzip dws_client_8.1.x_redhat_x64.zip

    Where,

    • <Path_for_storing_the_client>: Replace it with the actual path.
    • dws_client_8.1.x_redhat_x64.zip: This is the client tool package for RedHat x64. Replace it with the name of the actual package you downloaded.

  4. Run the following command to configure the GaussDB(DWS) client:

    source gsql_env.sh

    If the following information is displayed, the GaussDB(DWS) client is successfully configured:

    All things done.
  5. Run the following command to use gs_dumpall to export information about all databases:

    gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_backup.sql -p 8000 -h 10.10.10.100
    Table 1 Common parameters

    -U: Username for database connection. The user must be a cluster administrator. Example: -U dbadmin
    -W: User password for database connection. This parameter is not required for database administrators if the trust policy is used for authentication. If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. Example: -W password
    -f: File or folder to store the exported files. If this parameter is not specified, the exported files are written to the standard output. Example: -f /home/dbadmin/backup/MPPDB_backup.sql
    -p: TCP port on which the server is listening, or the file name extension of the local Unix domain socket, used to ensure a correct connection. Example: -p 8000
    -h: Cluster address. If a public network address is used for connection, set this parameter to the public network address or public network domain name. If a private network address is used for connection, set this parameter to the private network address or private network domain name. Example: -h 10.10.10.100

    For details about other parameters, see "gs_dumpall" in the Tool Guide.

Examples

Example 1: Run the following command as the cluster administrator dbadmin to use gs_dumpall to export information about all databases in a cluster. The exported file is in plain-text format. After the command is executed, a large amount of output information is displayed; total time at the end of it indicates that the export succeeded. Only the relevant output is shown here.

gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_backup.sql -p 8000 -h 10.10.10.100
gs_dumpall[port=''][2017-07-21 15:57:31]: dumpall operation successful
gs_dumpall[port=''][2017-07-21 15:57:31]: total time: 9627 ms

Example 2: Run the following command as the cluster administrator dbadmin to use gs_dumpall to export the definitions of all databases in a cluster. The exported file is in plain-text format. Only the relevant output is shown here.

gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_backup.sql -p 8000 -h 10.10.10.100 -s
gs_dumpall[port=''][2018-11-14 11:28:14]: dumpall operation successful
gs_dumpall[port=''][2018-11-14 11:28:14]: total time: 4147 ms

Example 3: Run the following command to use gs_dumpall to export only the data of all databases in a cluster, encrypting the exported file with AES128. The exported file is in plain-text format. Only the relevant output is shown here.

gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_backup.sql -p 8000 -h 10.10.10.100 -a --with-encryption AES128 --with-key 1234567812345678
gs_dumpall[port=''][2018-11-14 11:32:26]: dumpall operation successful
gs_dumpall[port=''][2018-11-14 11:23:26]: total time: 4147 ms
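A plain-text script exported by gs_dumpall is replayed with gsql rather than gs_restore. The following is a minimal sketch using the connection values of Example 1; choosing gaussdb as the database to connect to first is an assumption, and any existing database works as the entry point.

# Hypothetical replay of the exported script: gsql executes the SQL in
# MPPDB_backup.sql against the cluster, recreating the dumped objects.
gsql -h 10.10.10.100 -p 8000 -d gaussdb -U dbadmin -W password -f /home/dbadmin/backup/MPPDB_backup.sql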

Exporting Global Objects

You can use gs_dumpall to export global objects from GaussDB(DWS), including database users, user groups, tablespaces, and their attributes (for example, global access permissions).

Procedure

  1. Prepare an ECS as the gsql client host. For details, see "Preparing an ECS as the gsql Client Host" in the Data Warehouse Service (DWS) User Guide.
  2. Download the gsql client and use an SSH transfer tool (such as WinSCP) to upload it to the Linux server where gsql is to be installed. For details, see "Downloading the Client" in the Data Warehouse Service (DWS) User Guide.

    The user who uploads the client must have full control permission on the target directory on the host to which the client is uploaded.

  3. Run the following commands to decompress the client:

    cd <Path_for_storing_the_client>
    unzip dws_client_8.1.x_redhat_x64.zip

    Where,

    • <Path_for_storing_the_client>: Replace it with the actual path.
    • dws_client_8.1.x_redhat_x64.zip: This is the client tool package for RedHat x64. Replace it with the name of the actual package you downloaded.

  4. Run the following command to configure the GaussDB(DWS) client:

    source gsql_env.sh

    If the following information is displayed, the GaussDB(DWS) client is successfully configured:

    All things done.
  5. Run the following command to use gs_dumpall to export tablespace objects:

    gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_tablespace.sql -p 8000 -h 10.10.10.100 -t
    Table 1 Common parameters

    -U: Username for database connection. The user must be a cluster administrator. Example: -U dbadmin
    -W: User password for database connection. This parameter is not required for database administrators if the trust policy is used for authentication. If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. Example: -W password
    -f: File or folder to store the exported files. If this parameter is not specified, the exported files are written to the standard output. Example: -f /home/dbadmin/backup/MPPDB_tablespace.sql
    -p: TCP port on which the server is listening, or the file name extension of the local Unix domain socket, used to ensure a correct connection. Example: -p 8000
    -h: Cluster address. If a public network address is used for connection, set this parameter to the public network address or public network domain name. If a private network address is used for connection, set this parameter to the private network address or private network domain name. Example: -h 10.10.10.100
    -t: Dumps only tablespaces. You can also use --tablespaces-only instead.

    For details about other parameters, see "gs_dumpall" in the Tool Guide.

Examples

Example 1: Run the following command as the cluster administrator dbadmin to use gs_dumpall to export the global tablespaces and users of a cluster. The exported file is in plain-text format.

gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_globals.sql -p 8000 -h 10.10.10.100 -g
gs_dumpall[port=''][2018-11-14 19:06:24]: dumpall operation successful
gs_dumpall[port=''][2018-11-14 19:06:24]: total time: 1150 ms

Example 2: Run the following command as the cluster administrator dbadmin to use gs_dumpall to export the global tablespaces of a cluster, encrypting the exported file with AES128. The exported file is in plain-text format.

gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_tablespace.sql -p 8000 -h 10.10.10.100 -t --with-encryption AES128 --with-key 1212121212121212
gs_dumpall[port=''][2018-11-14 19:00:58]: dumpall operation successful
gs_dumpall[port=''][2018-11-14 19:00:58]: total time: 186 ms

Example 3: Run the following command as the cluster administrator dbadmin to use gs_dumpall to export the global users of a cluster. The exported file is in plain-text format.

gs_dumpall -W password -U dbadmin -f /home/dbadmin/backup/MPPDB_user.sql -p 8000 -h 10.10.10.100 -r
gs_dumpall[port=''][2018-11-14 19:03:18]: dumpall operation successful
gs_dumpall[port=''][2018-11-14 19:03:18]: total time: 162 ms

Data Export By a User Without Required Permissions

gs_dump and gs_dumpall use -U to specify the user that performs the export. If the specified user does not have the required permission, data cannot be exported. In this case, set --role in the export command to a role that has the permission; gs_dump or gs_dumpall then uses that role to export data. A sketch of granting such a role is shown below.
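For the switch to succeed, the role must exist with the needed privileges and must have been granted to the connecting user. The following gsql sketch is hypothetical: the role name, password, and exact privileges (here, read access to the hr schema) are assumptions to adapt to your own objects.

-- Hypothetical setup: a role with the export privileges, granted to jack.
human_resource=# CREATE ROLE role1 PASSWORD 'password';
human_resource=# GRANT USAGE ON SCHEMA hr TO role1;
human_resource=# GRANT SELECT ON ALL TABLES IN SCHEMA hr TO role1;
human_resource=# GRANT role1 TO jack;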

Procedure

  1. Prepare an ECS as the gsql client host. For details, see "Preparing an ECS as the gsql Client Host" in the Data Warehouse Service (DWS) User Guide.
  2. Download the gsql client and use an SSH transfer tool (such as WinSCP) to upload it to the Linux server where gsql is to be installed. For details, see "Downloading the Client" in the Data Warehouse Service (DWS) User Guide.

    The user who uploads the client must have full control permission on the target directory on the host to which the client is uploaded.

  3. Run the following commands to decompress the client:

    cd <Path_for_storing_the_client>
    unzip dws_client_8.1.x_redhat_x64.zip

    Where,

    • <Path_for_storing_the_client>: Replace it with the actual path.
    • dws_client_8.1.x_redhat_x64.zip: This is the client tool package for RedHat x64. Replace it with the name of the actual package you downloaded.

  4. Run the following command to configure the GaussDB(DWS) client:

    source gsql_env.sh

    If the following information is displayed, the GaussDB(DWS) client is successfully configured:

    All things done.
  5. Use gs_dump to export data of the human_resource database.

    User jack does not have the permission to export the human_resource database, but the role role1 does. To export the database, set --role to role1 in the export command. The exported file is in .tar format.

    gs_dump -U jack -W password -f /home//backup/MPPDB_backup.tar -p 8000 -h 10.10.10.100 human_resource --role role1 --rolepassword password -F t
    Table 1 Common parameters

    -U: Username for database connection. Example: -U jack
    -W: User password for database connection. This parameter is not required for database administrators if the trust policy is used for authentication. If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. Example: -W password
    -f: File or folder to store the exported files. If this parameter is not specified, the exported files are written to the standard output. Example: -f /home//backup/MPPDB_backup.tar
    -p: TCP port on which the server is listening, or the file name extension of the local Unix domain socket, used to ensure a correct connection. Example: -p 8000
    -h: Cluster address. If a public network address is used for connection, set this parameter to the public network address or public network domain name. If a private network address is used for connection, set this parameter to the private network address or private network domain name. Example: -h 10.10.10.100
    dbname: Name of the database to be exported. Example: human_resource
    --role: Role name for the export operation. After this parameter is set, the SET ROLE command is issued once gs_dump or gs_dumpall connects to the database. When the user specified by -U does not have the permissions required by gs_dump or gs_dumpall, this parameter lets the user switch to a role that does. Example: --role role1
    --rolepassword: Password of the role. Example: --rolepassword password
    -F: Format of the exported files. The values of -F are p (plain text), c (custom), d (directory), and t (.tar). Example: -F t

    For details about other parameters, see "gs_dump" or "gs_dumpall" in the Tool Guide.

Examples

Example 1: User jack does not have the permission to export the human_resource database, but the role role1 does. To export the database, set --role to role1 in the export command. The exported file is in .tar format.

human_resource=# CREATE USER jack IDENTIFIED BY "password";

gs_dump -U jack -W password -f /home//backup/MPPDB_backup11.tar -p 8000 -h 10.10.10.100 human_resource --role role1 --rolepassword password -F t
gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: dump database human_resource successfully
gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: total time: 4239 ms

Example 2: User jack does not have the permission to export the public schema, but the role role1 does. To export the schema, set --role to role1 in the export command. The exported file is in .tar format.

human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc";

gs_dump -U jack -W password -f /home//backup/MPPDB_backup12.tar -p 8000 -h 10.10.10.100 human_resource -n public --role role1 --rolepassword password -F t
gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: dump database human_resource successfully
gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: total time: 3278 ms

Example 3: User jack does not have the permission to export all databases in the cluster, but the role role1 does. To export all databases, set --role to role1 in the export command. The exported file is in plain-text format.

human_resource=# CREATE USER jack IDENTIFIED BY "password";

gs_dumpall -U jack -W password -f /home//backup/MPPDB_backup.sql -p 8000 -h 10.10.10.100 --role role1 --rolepassword password
gs_dumpall[port='8000'][human_resource][2018-11-14 17:26:18]: dumpall operation successful
gs_dumpall[port='8000'][human_resource][2018-11-14 17:26:18]: total time: 6437 ms

GDS Pipe FAQs


Precautions


Common Troubleshooting Methods:


PostGIS Extension


PostGIS

  • The third-party software that PostGIS Extension depends on needs to be installed separately. If you need to use PostGIS, submit a service ticket or contact technical support to submit an application.
  • If the error message "ERROR: EXTENSION is not yet supported." is displayed, the PostGIS software package is not installed. Contact technical support.

GaussDB(DWS) provides PostGIS Extension (PostGIS-2.4.2). PostGIS Extension is a spatial database extender for PostgreSQL. It provides the following spatial information services: spatial objects, spatial indexes, spatial functions, and spatial operators. PostGIS Extension complies with the OpenGIS specifications.

In GaussDB(DWS), PostGIS Extension depends on third-party open-source software that is installed separately, as noted above.

Using PostGIS

  • The third-party software that PostGIS Extension depends on needs to be installed separately. If you need to use PostGIS, submit a service ticket or contact technical support to submit an application.
  • If the error message "ERROR: EXTENSION is not yet supported." is displayed, the PostGIS software package is not installed. Contact technical support.

Creating PostGIS Extension

Run the CREATE EXTENSION command to create PostGIS Extension.

CREATE EXTENSION postgis;

Using PostGIS Extension

Invoke a PostGIS function as follows:

SELECT GisFunction (Param1, Param2,......);

GisFunction is the function to invoke, and Param1 and Param2 are its parameters. The following SQL statements briefly illustrate the use of PostGIS. For details about the related functions, see the PostGIS 2.4.2 Manual.

Example 1: Create a geometry table.

CREATE TABLE cities ( id integer, city_name varchar(50) );
SELECT AddGeometryColumn('cities', 'position', 4326, 'POINT', 2);

Example 2: Insert geometry data.

INSERT INTO cities (id, position, city_name) VALUES (1,ST_GeomFromText('POINT(-9.5 23)',4326),'CityA');
INSERT INTO cities (id, position, city_name) VALUES (2,ST_GeomFromText('POINT(-10.6 40.3)',4326),'CityB');
INSERT INTO cities (id, position, city_name) VALUES (3,ST_GeomFromText('POINT(20.8 30.3)',4326), 'CityC');

Example 3: Calculate the distance between each pair of the three cities.

SELECT p1.city_name,p2.city_name,ST_Distance(p1.position,p2.position) FROM cities AS p1, cities AS p2 WHERE p1.id > p2.id;

Deleting PostGIS Extension

Run the following command to delete PostGIS Extension from GaussDB(DWS):

DROP EXTENSION postgis [CASCADE];

If other objects (for example, geometry tables) depend on PostGIS Extension, add the CASCADE keyword to delete all of these objects together.
+ diff --git a/docs/dws/dev/dws_04_0305.html b/docs/dws/dev/dws_04_0305.html new file mode 100644 index 00000000..9ff44cec --- /dev/null +++ b/docs/dws/dev/dws_04_0305.html @@ -0,0 +1,146 @@ + + +

PostGIS Support and Constraints


Supported Data Types

In GaussDB(DWS), PostGIS Extension supports the spatial data types provided by PostGIS, such as geometry, geography, box2d, and box3d.

If PostGIS is used by a user other than its creator, set the following GUC parameter:

SET behavior_compat_options = 'bind_procedure_searchpath';

Supported Operators and Functions

Table 1 Operators and functions supported by PostGIS

Management functions: AddGeometryColumn, DropGeometryColumn, DropGeometryTable, PostGIS_Full_Version, PostGIS_GEOS_Version, PostGIS_Liblwgeom_Version, PostGIS_Lib_Build_Date, PostGIS_Lib_Version, PostGIS_PROJ_Version, PostGIS_Scripts_Build_Date, PostGIS_Scripts_Installed, PostGIS_Version, PostGIS_LibXML_Version, PostGIS_Scripts_Released, Populate_Geometry_Columns, UpdateGeometrySRID

Geometry constructors: ST_BdPolyFromText, ST_BdMPolyFromText, ST_Box2dFromGeoHash, ST_GeogFromText, ST_GeographyFromText, ST_GeogFromWKB, ST_GeomCollFromText, ST_GeomFromEWKB, ST_GeomFromEWKT, ST_GeometryFromText, ST_GeomFromGeoHash, ST_GeomFromGML, ST_GeomFromGeoJSON, ST_GeomFromKML, ST_GMLToSQL, ST_GeomFromText, ST_GeomFromWKB, ST_LineFromMultiPoint, ST_LineFromText, ST_LineFromWKB, ST_LinestringFromWKB, ST_MakeBox2D, ST_3DMakeBox, ST_MakeEnvelope, ST_MakePolygon, ST_MakePoint, ST_MakePointM, ST_MLineFromText, ST_MPointFromText, ST_MPolyFromText, ST_Point, ST_PointFromGeoHash, ST_PointFromText, ST_PointFromWKB, ST_Polygon, ST_PolygonFromText, ST_WKBToSQL, ST_WKTToSQL

Geometry accessors: GeometryType, ST_Boundary, ST_CoordDim, ST_Dimension, ST_EndPoint, ST_Envelope, ST_ExteriorRing, ST_GeometryN, ST_GeometryType, ST_InteriorRingN, ST_IsClosed, ST_IsCollection, ST_IsEmpty, ST_IsRing, ST_IsSimple, ST_IsValid, ST_IsValidReason, ST_IsValidDetail, ST_M, ST_NDims, ST_NPoints, ST_NRings, ST_NumGeometries, ST_NumInteriorRings, ST_NumInteriorRing, ST_NumPatches, ST_NumPoints, ST_PatchN, ST_PointN, ST_SRID, ST_StartPoint, ST_Summary, ST_X, ST_XMax, ST_XMin, ST_Y, ST_YMax, ST_YMin, ST_Z, ST_ZMax, ST_Zmflag, ST_ZMin

Geometry editors: ST_AddPoint, ST_Affine, ST_Force2D, ST_Force3D, ST_Force3DZ, ST_Force3DM, ST_Force4D, ST_ForceCollection, ST_ForceSFS, ST_ForceRHR, ST_LineMerge, ST_CollectionExtract, ST_CollectionHomogenize, ST_Multi, ST_RemovePoint, ST_Reverse, ST_Rotate, ST_RotateX, ST_RotateY, ST_RotateZ, ST_Scale, ST_Segmentize, ST_SetPoint, ST_SetSRID, ST_SnapToGrid, ST_Snap, ST_Transform, ST_Translate, ST_TransScale

Geometry outputs: ST_AsBinary, ST_AsEWKB, ST_AsEWKT, ST_AsGeoJSON, ST_AsGML, ST_AsHEXEWKB, ST_AsKML, ST_AsLatLonText, ST_AsSVG, ST_AsText, ST_AsX3D, ST_GeoHash

Operators: &&, &&&, &<, &<|, &>, <<, <<|, =, >>, @, |&>, |>>, ~, ~=, <->, <#>

Spatial relationships and measurements: ST_3DClosestPoint, ST_3DDistance, ST_3DDWithin, ST_3DDFullyWithin, ST_3DIntersects, ST_3DLongestLine, ST_3DMaxDistance, ST_3DShortestLine, ST_Area, ST_Azimuth, ST_Centroid, ST_ClosestPoint, ST_Contains, ST_ContainsProperly, ST_Covers, ST_CoveredBy, ST_Crosses, ST_LineCrossingDirection, ST_Disjoint, ST_Distance, ST_HausdorffDistance, ST_MaxDistance, ST_DistanceSphere, ST_DistanceSpheroid, ST_DFullyWithin, ST_DWithin, ST_Equals, ST_HasArc, ST_Intersects, ST_Length, ST_Length2D, ST_3DLength, ST_Length_Spheroid, ST_Length2D_Spheroid, ST_3DLength_Spheroid, ST_LongestLine, ST_OrderingEquals, ST_Overlaps, ST_Perimeter, ST_Perimeter2D, ST_3DPerimeter, ST_PointOnSurface, ST_Project, ST_Relate, ST_RelateMatch, ST_ShortestLine, ST_Touches, ST_Within

Geometry processing: ST_Buffer, ST_BuildArea, ST_Collect, ST_ConcaveHull, ST_ConvexHull, ST_CurveToLine, ST_DelaunayTriangles, ST_Difference, ST_Dump, ST_DumpPoints, ST_DumpRings, ST_FlipCoordinates, ST_Intersection, ST_LineToCurve, ST_MakeValid, ST_MemUnion, ST_MinimumBoundingCircle, ST_Polygonize, ST_Node, ST_OffsetCurve, ST_RemoveRepeatedPoints, ST_SharedPaths, ST_Shift_Longitude, ST_Simplify, ST_SimplifyPreserveTopology, ST_Split, ST_SymDifference, ST_Union, ST_UnaryUnion

Linear referencing: ST_LineInterpolatePoint, ST_LineLocatePoint, ST_LineSubstring, ST_LocateAlong, ST_LocateBetween, ST_LocateBetweenElevations, ST_InterpolatePoint, ST_AddMeasure

Miscellaneous functions: ST_Accum, Box2D, Box3D, ST_Expand, ST_Extent, ST_3Dextent, Find_SRID, ST_MemSize

Exceptional functions: PostGIS_AddBBox, PostGIS_DropBBox, PostGIS_HasBBox

Raster Management Functions: AddRasterConstraints, DropRasterConstraints, AddOverviewConstraints, DropOverviewConstraints, PostGIS_GDAL_Version, PostGIS_Raster_Lib_Build_Date, PostGIS_Raster_Lib_Version, ST_GDALDrivers, UpdateRasterSRID

Raster Constructors: ST_AddBand, ST_AsRaster, ST_Band, ST_MakeEmptyRaster, ST_Tile, ST_FromGDALRaster

Raster Accessors: ST_GeoReference, ST_Height, ST_IsEmpty, ST_MetaData, ST_NumBands, ST_PixelHeight, ST_PixelWidth, ST_ScaleX, ST_ScaleY, ST_RasterToWorldCoord, ST_RasterToWorldCoordX, ST_RasterToWorldCoordY, ST_Rotation, ST_SkewX, ST_SkewY, ST_SRID, ST_Summary, ST_UpperLeftX, ST_UpperLeftY, ST_Width, ST_WorldToRasterCoord, ST_WorldToRasterCoordX, ST_WorldToRasterCoordY

Raster Band Accessors: ST_BandMetaData, ST_BandNoDataValue, ST_BandIsNoData, ST_BandPath, ST_BandPixelType, ST_HasNoBand

Raster Pixel Accessors and Setters: ST_PixelAsPolygon, ST_PixelAsPolygons, ST_PixelAsPoint, ST_PixelAsPoints, ST_PixelAsCentroid, ST_PixelAsCentroids, ST_Value, ST_NearestValue, ST_Neighborhood, ST_SetValue, ST_SetValues, ST_DumpValues, ST_PixelOfValue

Raster Editors: ST_SetGeoReference, ST_SetRotation, ST_SetScale, ST_SetSkew, ST_SetSRID, ST_SetUpperLeft, ST_Resample, ST_Rescale, ST_Reskew, ST_SnapToGrid, ST_Resize, ST_Transform

Raster Band Editors: ST_SetBandNoDataValue, ST_SetBandIsNoData

Raster Band Statistics and Analytics: ST_Count, ST_CountAgg, ST_Histogram, ST_Quantile, ST_SummaryStats, ST_SummaryStatsAgg, ST_ValueCount

Raster Outputs: ST_AsBinary, ST_AsGDALRaster, ST_AsJPEG, ST_AsPNG, ST_AsTIFF

Raster Processing: ST_Clip, ST_ColorMap, ST_Intersection, ST_MapAlgebra, ST_Reclass, ST_Union, ST_Distinct4ma, ST_InvDistWeight4ma, ST_Max4ma, ST_Mean4ma, ST_Min4ma, ST_MinDist4ma, ST_Range4ma, ST_StdDev4ma, ST_Sum4ma, ST_Aspect, ST_HillShade, ST_Roughness, ST_Slope, ST_TPI, ST_TRI, Box3D, ST_ConvexHull, ST_DumpAsPolygons, ST_Envelope, ST_MinConvexHull, ST_Polygon, ST_Contains, ST_ContainsProperly, ST_Covers, ST_CoveredBy, ST_Disjoint, ST_Intersects, ST_Overlaps, ST_Touches, ST_SameAlignment, ST_NotSameAlignmentReason, ST_Within, ST_DWithin, ST_DFullyWithin

Raster Operators: &&, &<, &>, =, @, ~=, ~

Spatial Indexes

In GaussDB(DWS), PostGIS Extension supports Generalized Search Tree (GiST) spatial indexes. This index type is inapplicable to partitioned tables. Unlike B-tree indexes, GiST indexes are adaptable to all kinds of irregular data structures, which can effectively improve retrieval efficiency for geometry and geographic data.

Run the following command to create a GiST index:

CREATE INDEX indexname ON tablename USING GIST ( geometryfield );
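As a sketch of how such an index is used, the following assumes the cities table from Using PostGIS; the index name and the bounding box passed to ST_MakeEnvelope are arbitrary. The && bounding-box operator (listed in Table 1) can then use the index to narrow the scan.

CREATE INDEX cities_position_gist ON cities USING GIST ( position );
SELECT c.city_name FROM cities AS c WHERE c.position && ST_MakeEnvelope(-15, 20, 0, 45, 4326);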

Extension Constraints


OPEN SOURCE SOFTWARE NOTICE (For PostGIS)

+

This document contains open source software notice for the product. And this document is confidential information of copyright holder. Recipient shall protect it in due care and shall not disseminate it without permission.

+

+

Warranty Disclaimer

+

This document is provided "as is" without any warranty whatsoever, including the accuracy or comprehensiveness. Copyright holder of this document may change the contents of this document at any time without prior notice, and copyright holder disclaims any liability in relation to recipient's use of this document.

+

Open source software is provided by the author "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the author be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of data or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of open source software, even if advised of the possibility of such damage.

+

+

Copyright Notice And License Texts

+

Software: postgis-2.4.2

+

Copyright notice:

+

"Copyright (C) 1996-2015 Free Software Foundation, Inc.

+

Copyright (C) 1989, 1991 Free Software Foundation, Inc.,

+

51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

+

Copyright 2008 Kevin Neufeld

+

Copyright (c) 2009 Walter Bruce Sinclair

+

Copyright 2006-2013 Stephen Woodbridge.

+

Copyright (c) 2008 Walter Bruce Sinclair

+

Copyright (c) 2012 TJ Holowaychuk <tj@vision-media.ca>

+

Copyright (c) 2008, by Attractive Chaos <attractivechaos@aol.co.uk>

+

Copyright (c) 2001-2012 Walter Bruce Sinclair

+

Copyright (c) 2010 Walter Bruce Sinclair

+

Copyright 2006 Stephen Woodbridge

+

Copyright 2006-2010 Stephen Woodbridge.

+

Copyright (c) 2006-2014 Stephen Woodbridge.

+

Copyright (c) 2017, Even Rouault <even.rouault at spatialys.com>

+

Copyright (C) 2004-2015 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2008-2011 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2008 Mark Cave-Ayland <mark.cave-ayland@siriusit.co.uk>

+

Copyright 2015 Nicklas Avén <nicklas.aven@jordogskog.no>

+

Copyright 2008 Paul Ramsey

+

Copyright (C) 2012 Sandro Santilli <strk@kbt.io>

+

Copyright 2012 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2014 Sandro Santilli <strk@kbt.io>

+

Copyright 2013 Olivier Courtin <olivier.courtin@oslandia.com>

+

Copyright 2009 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright 2008 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright 2011 Sandro Santilli <strk@kbt.io>

+

Copyright 2015 Daniel Baston

+

Copyright 2009 Olivier Courtin <olivier.courtin@oslandia.com>

+

Copyright 2014 Kashif Rasul <kashif.rasul@gmail.com> and

+

Shoaib Burq <saburq@gmail.com>

+

Copyright 2013 Sandro Santilli <strk@kbt.io>

+

Copyright 2010 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2017 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2015 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2009 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2011 Sandro Santilli <strk@kbt.io>

+

Copyright 2010 Olivier Courtin <olivier.courtin@oslandia.com>

+

Copyright 2014 Nicklas Avén

+

Copyright 2011-2016 Regina Obe

+

Copyright (C) 2008 Paul Ramsey

+

Copyright (C) 2011-2015 Sandro Santilli <strk@kbt.io>

+

Copyright 2010-2012 Olivier Courtin <olivier.courtin@oslandia.com>

+

Copyright (C) 2015 Daniel Baston <dbaston@gmail.com>

+

Copyright (C) 2013 Nicklas Avén

+

Copyright (C) 2016 Sandro Santilli <strk@kbt.io>

+

Copyright 2017 Darafei Praliaskouski <me@komzpa.net>

+

Copyright (c) 2016, Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2011-2012 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2011 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2007-2008 Mark Cave-Ayland

+

Copyright (C) 2001-2006 Refractions Research Inc.

+

Copyright 2015 Daniel Baston <dbaston@gmail.com>

+

Copyright 2009 David Skea <David.Skea@gov.bc.ca>

+

Copyright (C) 2012-2015 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2012-2015 Sandro Santilli <strk@kbt.io>

+

Copyright 2001-2006 Refractions Research Inc.

+

Copyright (C) 2004 Refractions Research Inc.

+

Copyright 2011-2014 Sandro Santilli <strk@kbt.io>

+

Copyright 2009-2010 Sandro Santilli <strk@kbt.io>

+

Copyright 2015-2016 Daniel Baston <dbaston@gmail.com>

+

Copyright 2011-2015 Sandro Santilli <strk@kbt.io>

+

Copyright 2007-2008 Mark Cave-Ayland

+

Copyright 2012-2013 Oslandia <infos@oslandia.com>

+

Copyright (C) 2015-2017 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2001-2003 Refractions Research Inc.

+

Copyright 2016 Sandro Santilli <strk@kbt.io>

+

Copyright 2011 Kashif Rasul <kashif.rasul@gmail.com>

+

Copyright (C) 2014 Nicklas Avén

+

Copyright (C) 2010 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2010-2015 Paul Ramsey <pramsey@cleverelephant.ca>

+

Copyright (C) 2011 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 2011-2014 Sandro Santilli <strk@kbt.io>

+

Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.

+

Copyright (C) 2011 Paul Ramsey

+

Copyright 2001-2003 Refractions Research Inc.

+

Copyright 2009-2010 Olivier Courtin <olivier.courtin@oslandia.com>
Copyright 2010-2012 Oslandia
Copyright 2006 Corporacion Autonoma Regional de Santander
Copyright 2013 Nicklas Avén
Copyright 2011-2016 Arrival 3D, Regina Obe
Copyright (C) 2009 David Skea <David.Skea@gov.bc.ca>
Copyright (C) 2017 Sandro Santilli <strk@kbt.io>
Copyright (C) 2009-2012 Paul Ramsey <pramsey@cleverelephant.ca>
Copyright (C) 2010 - Oslandia
Copyright (C) 2006 Mark Leslie <mark.leslie@lisasoft.com>
Copyright (C) 2008-2009 Mark Cave-Ayland <mark.cave-ayland@siriusit.co.uk>
Copyright (C) 2009-2015 Paul Ramsey <pramsey@cleverelephant.ca>
Copyright (C) 2010 Olivier Courtin <olivier.courtin@camptocamp.com>
Copyright 2010 Nicklas Avén
Copyright 2012 Paul Ramsey
Copyright 2011 Nicklas Avén
Copyright 2002 Thamer Alharbash
Copyright 2011 OSGeo
Copyright (C) 2009-2011 Paul Ramsey <pramsey@cleverelephant.ca>
Copyright (C) 2008 Mark Cave-Ayland <mark.cave-ayland@siriusit.co.uk>
Copyright (C) 2004-2007 Refractions Research Inc.
Copyright 2010 LISAsoft Pty Ltd
Copyright 2010 Mark Leslie
Copyright (c) 1999, Frank Warmerdam
Copyright 2009 Mark Cave-Ayland <mark.cave-ayland@siriusit.co.uk>
Copyright (c) 2007, Frank Warmerdam
Copyright 2008 OpenGeo.org
Copyright (C) 2008 OpenGeo.org
Copyright (C) 2009 Mark Cave-Ayland <mark.cave-ayland@siriusit.co.uk>
Copyright 2010 LISAsoft
Copyright (C) 2010 Mark Cave-Ayland <mark.cave-ayland@siriusit.co.uk>
Copyright (c) 1999, 2001, Frank Warmerdam
Copyright (C) 2016-2017 Björn Harrtell <bjorn@wololo.org>
Copyright (C) 2017 Danny Götte <danny.goette@fem.tu-ilmenau.de>
Copyright 2009-2011 Paul Ramsey <pramsey@cleverelephant.ca>
Copyright 2012 (C) Paul Ramsey <pramsey@cleverelephant.ca>
Copyright (C) 2006 Refractions Research Inc.
Copyright 2009 Paul Ramsey <pramsey@opengeo.org>
Copyright 2001-2009 Refractions Research Inc.
Copyright (C) 2010 Olivier Courtin <olivier.courtin@oslandia.com>
By Nathan Wagner, copyright disclaimed, this entire file is in the public domain
Copyright 2009-2011 Olivier Courtin <olivier.courtin@oslandia.com>
Copyright (C) 2001-2005 Refractions Research Inc.
Copyright 2001-2011 Refractions Research Inc.
Copyright 2009-2014 Sandro Santilli <strk@kbt.io>
Copyright (C) 2008 Paul Ramsey <pramsey@cleverelephant.ca>
Copyright (C) 2007 Refractions Research Inc.
Copyright (C) 2010 Sandro Santilli <strk@kbt.io>
Copyright 2012 J Smith <dark.panda@gmail.com>
Copyright 2009 - 2010 Oslandia
Copyright 2009 Oslandia
Copyright 2001-2005 Refractions Research Inc.
Copyright 2016 Paul Ramsey <pramsey@cleverelephant.ca>
Copyright 2016 Daniel Baston <dbaston@gmail.com>
Copyright (C) 2011 OpenGeo.org
Copyright (c) 2003-2017, Troy D. Hanson http://troydhanson.github.com/uthash/
Copyright (C) 2011 Regents of the University of California
Copyright (C) 2011-2013 Regents of the University of California
Copyright (C) 2010-2011 Jorge Arevalo <jorge.arevalo@deimos-space.com>
Copyright (C) 2010-2011 David Zwarg <dzwarg@azavea.com>
Copyright (C) 2009-2011 Pierre Racine <pierre.racine@sbf.ulaval.ca>
Copyright (C) 2009-2011 Mateusz Loskot <mateusz@loskot.net>
Copyright (C) 2008-2009 Sandro Santilli <strk@kbt.io>
Copyright (C) 2013 Nathaniel Hunter Clay <clay.nathaniel@gmail.com>
Copyright (C) 2013 Nathaniel Hunter Clay <clay.nathaniel@gmail.com>
Copyright (C) 2013 Bborie Park <dustymugs@gmail.com>
Copyright (C) 2013 Nathaniel Hunter Clay <clay.nathaniel@gmail.com>
(C) 2009 Mateusz Loskot <mateusz@loskot.net>
Copyright (C) 2009 Mateusz Loskot <mateusz@loskot.net>
Copyright (C) 2009-2010 Mateusz Loskot <mateusz@loskot.net>
Copyright (C) 2009-2010 Jorge Arevalo <jorge.arevalo@deimos-space.com>
Copyright (C) 2012 Regents of the University of California
Copyright (C) 2013 Regents of the University of California
Copyright (C) 2012-2013 Regents of the University of California
Copyright (C) 2009 Sandro Santilli <strk@kbt.io>

License: The GPL v2 License.

GNU GENERAL PUBLIC LICENSE
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.

Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.

Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.

The precise terms and conditions for copying, distribution and modification follow.

GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.

1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:

a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.

b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.

c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.

In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.

3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:

a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,

b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,

c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.

If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.

4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.

6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.

7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.

10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

NO WARRANTY

11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this when it starts in an interactive mode:

Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:

Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.

<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice

This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.

Software:Geos
Copyright notice:
Copyright (C) 2009 Sandro Santilli <strk@keybit.net>
Copyright (C) 2006 Refractions Research Inc.
Copyright (C) 2013 Sandro Santilli <strk@keybit.net>
Copyright (C) 2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2009 Sandro Santilli <strk@keybit.net>
Copyright (C) 2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2005-2011 Refractions Research Inc.
Copyright (C) 2009 Ragi Y. Burhum <ragi@burhum.com>
Copyright (C) 2010 Sandro Santilli <strk@keybit.net>
Copyright (C) 2009 2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2005 2006 Refractions Research Inc.
Copyright (C) 2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2006-2011 Refractions Research Inc.
Copyright (C) 2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2009-2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2016 Daniel Baston
Copyright (C) 2008 Sean Gillies
Copyright (C) 2009 Sandro Santilli <strk@keybit.net>
Copyright (C) 2006 Refractions Research Inc.
Copyright (C) 2012 Sandro Santilli <strk@keybit.net>
Copyright (C) 2009 Sandro Santilli <strk@keybit.net>
Copyright (C) 2008-2010 Safe Software Inc.
Copyright (C) 2006-2007 Refractions Research Inc.
Copyright (C) 2005-2007 Refractions Research Inc.
Copyright (C) 2007 Refractions Research Inc.
Copyright (C) 2014 Mika Heiskanen <mika.heiskanen@fmi.fi>
Copyright (C) 2009-2010 Sandro Santilli <strk@keybit.net>
Copyright (C) 2009 2011 Sandro Santilli <strk@keybit.net>
Copyright (C) 2010 Sandro Santilli <strk@keybit.net>
Copyright (C) 2009 Mateusz Loskot
Copyright (C) 2005-2009 Refractions Research Inc.
Copyright (C) 2001-2009 Vivid Solutions Inc.
Copyright (C) 2012 Sandro Santilli <strk@keybit.net>
Copyright (C) 2006 Wu Yongwei
Copyright (C) 2012 Excensus LLC.
Copyright (C) 1996-2015 Free Software Foundation, Inc.
Copyright (c) 1995 Olivier Devillers <Olivier.Devillers@sophia.inria.fr>
Copyright (C) 2007-2010 Safe Software Inc.
Copyright (C) 2010 Safe Software Inc.
Copyright (C) 2006 Refractions Research
Copyright 2004 Sean Gillies, sgillies@frii.com
Copyright (C) 2011 Mateusz Loskot <mateusz@loskot.net>
Copyright (C) 2015 Nyall Dawson <nyall dot dawson at gmail dot com>
Original code (2.0 and earlier) copyright (c) 2000-2006 Lee Thomason (www.grinninglizard.com)
Original code (2.0 and earlier) copyright (c) 2000-2002 Lee Thomason (www.grinninglizard.com)

License: LGPL V2.1

+

+

GNU LESSER GENERAL PUBLIC LICENSE

+

Version 2.1, February 1999

+

Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

+

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

+

+

[This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.]

+

+

Preamble

+

+

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public

+

Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users.

+

+

This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below.

+

+

When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things.

+

+

To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it.

+

+

For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights.

+

+

We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library.

+

+

To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others.

+

+

Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license.

+

+

Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and

+

is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs.

+

+

When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library.

+

+

We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances.

+

+

For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License.

+

+

In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in

+

non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system.

+

+

Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library.

+

The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The

+

former contains code derived from the library, whereas the latter must be combined with the library in order to run.

+

+

GNU LESSER GENERAL PUBLIC LICENSE

+

TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

+

+

0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you".

+

+

A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables.

+

+

The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under

+

copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".)

+

+

"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library.

+

+

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does.

+

+

1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an

+

appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the

+

Library.

+

+

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

+

+

2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1

+

above, provided that you also meet all of these conditions:

+

+

a) The modified work must itself be a software library.

+

+

b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change.

+

+

c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License.

+

+

d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of

+

its purpose remains meaningful.

+

+

(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.)

+

+

These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in

+

themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

+

+

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or

+

collective works based on the Library.

+

+

In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.

+

+

3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices.

+

+

Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy.

+

+

This option is useful when you wish to copy part of the code of the Library into a program that is not a library.

+

+

4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany

+

it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange.

+

+

If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to

+

distribute the source code, even though third parties are not compelled to copy the source along with the object code.

+

+

5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a

+

work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License.

+

+

However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License.

+

Section 6 states terms for distribution of such executables.

+

+

When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law.

+

+

If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object

+

file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.)

+

+

Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6,

+

whether or not they are linked directly with the Library itself.

+

+

6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work

+

under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications.

+

+

You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things:

+

+

a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.)

+

+

b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system,

+

rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with.

+

+

c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution.

+

+

d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place.

+

+

e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy.

+

+

For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception,

+

the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on

+

which the executable runs, unless that component itself accompanies the executable.

+

+

It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot

+

use both them and the Library together in an executable that you distribute.

+

+

7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things:

+

+

a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above.

+

+

b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work.

+

+

8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

+

+

9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it.

+

+

10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein.

+

You are not responsible for enforcing compliance by third parties with this License.

+

+

11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library.

+

+

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances.

+

+

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the

+

integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot

+

impose that choice.

+

+

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

+

+

12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

+

+

13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time.

+

Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

+

+

Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation.

+

+

14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is

+

copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status

+

of all derivatives of our free software and of promoting the sharing and reuse of software generally.

+

+

NO WARRANTY

+

+

15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

+

+

16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING

+

RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

+

+

END OF TERMS AND CONDITIONS

+

+

How to Apply These Terms to Your New Libraries

+

+

If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License).

+

+

To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

+

+

<one line to give the library's name and a brief idea of what it does.>

+

Copyright (C) <year> <name of author>

+

+

This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.

+

+

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU

+

Lesser General Public License for more details.

+

+

You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

+

+

Also add information on how to contact you by electronic and paper mail.

+

+

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names:

+

+

Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker.

+

+

<signature of Ty Coon>, 1 April 1990

+

Ty Coon, President of Vice

+

+

That's all there is to it!

+

+

Software: JSON-C

+

+

Copyright notice:

+

Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.

+

Copyright (c) 2009-2012 Eric Haszlakiewicz

+

Copyright (c) 2004, 2005 Metaparadigm Pte Ltd

+

Copyright (c) 2009 Hewlett-Packard Development Company, L.P.

+

Copyright 2011, John Resig

+

Copyright 2011, The Dojo Foundation

+

Copyright (c) 2012 Eric Haszlakiewicz

+

Copyright (c) 2009-2012 Hewlett-Packard Development Company, L.P.

+

Copyright (c) 2008-2009 Yahoo! Inc. All rights reserved.

+

Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,

+

2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.

+

Copyright (c) 2013 Metaparadigm Pte. Ltd.

+

+

License: MIT License

+

+

Copyright (c) 2009-2012 Eric Haszlakiewicz

+

+

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

+

+

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

+

+

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+

+

----------------------------------------------------------------

+

+

Copyright (c) 2004, 2005 Metaparadigm Pte Ltd

+

+

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

+

+

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

+

+

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+

+

Software: proj

+

Copyright notice:

+

"Copyright (C) 2010 Mateusz Loskot <mateusz@loskot.net>

+

Copyright (C) 2007 Douglas Gregor <doug.gregor@gmail.com>

+

Copyright (C) 2007 Troy Straszheim

+

CMake, Copyright (C) 2009-2010 Mateusz Loskot <mateusz@loskot.net> )

+

Copyright (C) 2011 Nicolas David <nicolas.david@ign.fr>

+

Copyright (c) 2000, Frank Warmerdam

+

Copyright (c) 2011, Open Geospatial Consortium, Inc.

+

Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,

+

2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.

+

Copyright (c) Charles Karney (2012-2015) <charles@karney.com> and licensed

+

Copyright (c) 2005, Antonello Andrea

+

Copyright (c) 2010, Frank Warmerdam

+

Copyright (c) 1995, Gerald Evenden

+

Copyright (c) 2000, Frank Warmerdam <warmerdam@pobox.com>

+

Copyright (c) 2010, Frank Warmerdam <warmerdam@pobox.com>

+

Copyright (c) 2013, Frank Warmerdam

+

Copyright (c) 2003 Gerald I. Evenden

+

Copyright (c) 2012, Frank Warmerdam <warmerdam@pobox.com>

+

Copyright (c) 2002, Frank Warmerdam

+

Copyright (c) 2004 Gerald I. Evenden

+

Copyright (c) 2012 Martin Raspaud

+

Copyright (c) 2001, Thomas Flemming, tf@ttqv.com

+

Copyright (c) 2002, Frank Warmerdam <warmerdam@pobox.com>

+

Copyright (c) 2009, Frank Warmerdam

+

Copyright (c) 2003, 2006 Gerald I. Evenden

+

Copyright (c) 2011, 2012 Martin Lambers <marlam@marlam.de>

+

Copyright (c) 2006, Andrey Kiselev

+

Copyright (c) 2008-2012, Even Rouault <even dot rouault at mines-paris dot org>

+

Copyright (c) 2001, Frank Warmerdam

+

Copyright (c) 2001, Frank Warmerdam <warmerdam@pobox.com>

+

Copyright (c) 2008 Gerald I. Evenden

+

"

+

+

License: MIT License

+

Please see above

+

+

Software: libxml2

+

Copyright notice:

+

+

"See Copyright for the status of this software.

+

Copyright (C) 1998-2003 Daniel Veillard. All Rights Reserved.

+

Copyright (C) 2003 Daniel Veillard.

+

copy: see Copyright for the status of this software.

+

copy: see Copyright for the status of this software

+

copy: see Copyright for the status of this software.

+

Copyright (C) 2000 Bjorn Reese and Daniel Veillard.

+

Copy: See Copyright for the status of this software.

+

See COPYRIGHT for the status of this software

+

Copyright (C) 2000 Gary Pennington and Daniel Veillard.

+

Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,

+

2007 Free Software Foundation, Inc.

+

Copyright (C) 1998 Bjorn Reese and Daniel Stenberg.

+

Copyright (C) 2001 Bjorn Reese <breese@users.sourceforge.net>

+

Copyright (C) 2000 Bjorn Reese and Daniel Stenberg.

+

Copyright (C) 2001 Bjorn Reese and Daniel Stenberg.

+

See Copyright for the status of this software

+

"

+

License: MIT License

+

Please see above

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0393.html b/docs/dws/dev/dws_04_0393.html new file mode 100644 index 00000000..d367334f --- /dev/null +++ b/docs/dws/dev/dws_04_0393.html @@ -0,0 +1,26 @@ + + +

Resource Monitoring

+

GaussDB(DWS) provides multi-dimensional resource monitoring views that show the real-time and historical resource usage of tasks.

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0394.html b/docs/dws/dev/dws_04_0394.html new file mode 100644 index 00000000..a021414c --- /dev/null +++ b/docs/dws/dev/dws_04_0394.html @@ -0,0 +1,62 @@ + + +

User Resource Query

+

Context

In the multi-tenant management framework, you can query the real-time or historical usage of all user resources (including memory, CPU cores, storage space, temporary space, and I/O). Note the following; a query sketch follows the notes.

+
  • User real-time resource views/functions: PG_TOTAL_USER_RESOURCE_INFO and GS_WLM_USER_RESOURCE_INFO; user historical resource table: GS_WLM_USER_RESOURCE_HISTORY.
  • In the preceding views and tables, the value of used_cpu indicates the CPU usage of a user's resource pool. The resource pool records only the CPU usage of long queries.
  • In the preceding views and tables, I/O-related resource statistics only record I/O read and write data of long queries executed by users.
  • In a large cluster with many users, querying these real-time views incurs network latency because the CNs and DNs must communicate in real time.
  • User memory and CPU monitoring does not apply to short queries or administrator jobs.
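For example, a minimal query sketch (the function-style call for GS_WLM_USER_RESOURCE_INFO and the user name user1 are illustrative assumptions):

    SELECT * FROM pg_total_user_resource_info;          -- real-time usage of all users
    SELECT * FROM gs_wlm_user_resource_info('user1');   -- real-time usage of one user (hypothetical name)
    SELECT * FROM gs_wlm_user_resource_history;         -- historical usage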
+
+
+

Procedure

+ + +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0395.html b/docs/dws/dev/dws_04_0395.html new file mode 100644 index 00000000..146641df --- /dev/null +++ b/docs/dws/dev/dws_04_0395.html @@ -0,0 +1,71 @@ + + +

Monitoring Memory Resources

+

Monitoring the Memory

GaussDB(DWS) provides a view for monitoring the memory usage of the entire cluster.

+
Query the pgxc_total_memory_detail view as a user with sysadmin permissions.
SELECT * FROM pgxc_total_memory_detail;
+
+ +
+
+
If the following error message is returned during the query, enable the memory management function.
SELECT * FROM pgxc_total_memory_detail;
ERROR:  unsupported view for memory protection feature is disabled.
CONTEXT:  PL/pgSQL function pgxc_total_memory_detail() line 12 at FOR over EXECUTE statement
+
+ +
+
+
To enable memory management, you can set enable_memory_limit and max_process_memory on the GaussDB(DWS) console as follows:
  1. Log in to the GaussDB(DWS) management console.
  2. In the navigation pane on the left, click Clusters.
  3. In the cluster list, find the target cluster and click its name. The Basic Information page is displayed.
  4. Click the Parameter Modification tab, change the value of enable_memory_limit to on, and click Save.
  5. Change the value of max_process_memory to an appropriate value. For modification suggestions, see max_process_memory. Then click Save.
  6. In the Modification Preview dialog box, confirm the modifications and click Save. Restart the cluster for the modifications to take effect.
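Once memory management is enabled, the view can also be filtered per node or memory type; a sketch (the nodename, memorytype, and memorymbytes column names and the two memory-type values are assumptions based on the view's typical output):

    SELECT nodename, memorytype, memorymbytes
    FROM pgxc_total_memory_detail
    WHERE memorytype IN ('max_process_memory', 'process_used_memory');  -- assumed memory types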
+
+
+

Monitoring the Shared Memory

You can query the context information about the shared memory in the pg_shared_memory_detail view.

+
SELECT * FROM pg_shared_memory_detail;
           contextname           | level |             parent              | totalsize | freesize | usedsize 
---------------------------------+-------+---------------------------------+-----------+----------+----------
 ProcessMemory                   |     0 |                                 |     24576 |     9840 |    14736
 Workload manager memory context |     1 | ProcessMemory                   |   2105400 |     7304 |  2098096
 wlm collector hash table        |     2 | Workload manager memory context |      8192 |     3736 |     4456
 Resource pool hash table        |     2 | Workload manager memory context |     24576 |    15968 |     8608
 wlm cgroup hash table           |     2 | Workload manager memory context |     24576 |    15968 |     8608
(5 rows)
+
+ +
+

This view lists each memory context's name, level, and parent context, along with the total, free, and used sizes of the shared memory.
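For example, to list the largest contexts (column names as shown in the sample output above):

    SELECT contextname, totalsize, freesize, usedsize
    FROM pg_shared_memory_detail
    ORDER BY totalsize DESC
    LIMIT 5;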

+

In the database, the GUC parameter memory_tracking_mode configures how memory statistics are collected; it supports the following options:

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0396.html b/docs/dws/dev/dws_04_0396.html new file mode 100644 index 00000000..1a8217d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0396.html @@ -0,0 +1,82 @@ + + +

Instance Resource Monitoring

+

GaussDB(DWS) provides system catalogs for monitoring the resource usage of CNs and DNs (including memory, CPU usage, disk I/O, process physical I/O, and process logical I/O), and system catalogs for monitoring the resource usage of the entire cluster.

+

For details about the system catalog GS_WLM_INSTANCE_HISTORY, see GS_WLM_INSTANCE_HISTORY.

+

Data in the system catalog GS_WLM_INSTANCE_HISTORY is distributed across the corresponding instances: CN monitoring data is stored on the CN instance, and DN monitoring data is stored on the DN instance. Each DN has a standby node, so when the primary DN is abnormal, the DN's monitoring data can be restored from the standby node. A CN, however, has no standby node; when a CN becomes abnormal and is then restored, its monitoring data is lost.
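A minimal query sketch (the quoted timestamp column name is an assumption based on common GS_WLM_INSTANCE_HISTORY layouts):

    SELECT * FROM gs_wlm_instance_history ORDER BY "timestamp" DESC;  -- most recent samples first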

+
+

Procedure

+
+ + + +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0397.html b/docs/dws/dev/dws_04_0397.html new file mode 100644 index 00000000..00e17a0f --- /dev/null +++ b/docs/dws/dev/dws_04_0397.html @@ -0,0 +1,143 @@ + + +

Real-time TopSQL

+

You can query real-time Top SQL in real-time resource monitoring views at different levels. These views record the resource usage (including memory, disk, CPU time, and I/O) and performance alarm information during job running.

+

The following table describes the external interfaces of the real-time views.

Table 1 Real-time resource monitoring views

    Level                     Monitored Node   View
    Query level/perf level    Current CN       GS_WLM_SESSION_STATISTICS
                              All CNs          PGXC_WLM_SESSION_STATISTICS
    Operator level            Current CN       GS_WLM_OPERATOR_STATISTICS
                              All CNs          PGXC_WLM_OPERATOR_STATISTICS
+
  • The view level is determined by the resource monitoring level, that is, the resource_track_level configuration.
  • The perf and operator levels affect the values of the query_plan and warning columns in GS_WLM_SESSION_STATISTICS/PGXC_WLM_SESSION_INFO. For details, see SQL Self-Diagnosis.
  • Prefixes gs and pgxc indicate views showing single CN information and those showing cluster information, respectively. Common users can log in to a CN in the cluster to query only views with the gs prefix.
  • When you query these views, there is network latency because the views obtain resource usage in real time.
  • If an instance fault occurs, some SQL statement information may fail to be recorded in real-time resource monitoring views.
  • SQL statements are recorded in real-time resource monitoring views as follows:
    • DDL statements are not recorded, such as the execution of CREATE, ALTER, DROP, GRANT, REVOKE, and VACUUM statements.
    • DML statements are recorded, including:
      • the execution of SELECT, INSERT, UPDATE, and DELETE
      • the execution of EXPLAIN ANALYZE and EXPLAIN PERFORMANCE
      • the use of a query-level/perf-level view, which also supports the CREATE TABLE AS syntax.
      +
    • Statements in functions and stored procedures and statements used for calling functions and stored procedures are recorded. Statements in loop bodies (if any) of functions and stored procedures are not recorded.
    • Statements in anonymous blocks are not recorded.
    • Statements in transaction blocks are recorded. Statements in loop bodies (if any) of transaction blocks are not recorded.
    • Cursor statements are recorded.
    • Jobs in a redistribution process are not monitored.
    +
+
+

Prerequisites

+

Resource monitoring is controlled by two GUC parameters: enable_resource_track, a system-level parameter that specifies whether to enable resource monitoring, and resource_track_level, a session-level parameter that sets the resource monitoring level of a session as needed. The following table describes the values of the two parameters.

Table 2 Setting the resource monitoring level to collect statistics

    enable_resource_track   resource_track_level   Query-Level Information   Operator-Level Information
    on (default)            none                   Not collected             Not collected
                            query (default)        Collected                 Not collected
                            perf                   Collected                 Not collected
                            operator               Collected                 Collected
    off                     none/query/operator    Not collected             Not collected
+
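For example, to have operator-level statistics collected in the current session (values as listed in Table 2; enable_resource_track itself is a system-level switch and is typically set on the console):

    SET resource_track_level = operator;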

Procedure

  1. Query for the real-time CPU information in the gs_session_cpu_statistics view.

    SELECT * FROM gs_session_cpu_statistics;
    +
    + +
    +

  2. Query for the real-time memory information in the gs_session_memory_statistics view.

    SELECT * FROM gs_session_memory_statistics;
    +
    + +
    +

  3. Query for the real-time resource information about the current CN in the gs_wlm_session_statistics view.

    SELECT * FROM gs_wlm_session_statistics;
    +
    + +
    +

  4. Query for the real-time resource information about all CNs in the pgxc_wlm_session_statistics view.

    SELECT * FROM pgxc_wlm_session_statistics;
    +
    + +
    +

  5. Query for the real-time resource information about job operators on the current CN in the gs_wlm_operator_statistics view.

    SELECT * FROM gs_wlm_operator_statistics;
    +
    + +
    +

  6. Query for the real-time resource information about job operators on all CNs in the pgxc_wlm_operator_statistics view.

    SELECT * FROM pgxc_wlm_operator_statistics;
    +
    + +
    +

  7. Query for the load management information about the jobs executed by the current user in the PG_SESSION_WLMSTAT view.

    SELECT * FROM pg_session_wlmstat;
    +
    + +
    +

  8. Query the job execution status of the current user on each CN in the pgxc_wlm_workload_records view (this view is available when the dynamic load function is enabled, that is, enable_dynamic_workload is set to on).

    SELECT * FROM pgxc_wlm_workload_records;
    +
    + +
    +
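The views can also be sorted and filtered like ordinary tables. A sketch that lists the five statements with the highest peak memory, using columns that appear later in this guide:

    SELECT query, max_peak_memory, average_peak_memory, memory_skew_percent
    FROM gs_wlm_session_statistics
    ORDER BY max_peak_memory DESC
    LIMIT 5;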

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0398.html b/docs/dws/dev/dws_04_0398.html new file mode 100644 index 00000000..9abda6e8 --- /dev/null +++ b/docs/dws/dev/dws_04_0398.html @@ -0,0 +1,156 @@ + + +

Historical TopSQL

+

You can query historical Top SQL in historical resource monitoring views. These views record the resource usage (memory, disk, CPU time, and I/O), running status (including errors, termination, and exceptions), and performance alarm information during job running. Queries that terminate abnormally due to FATAL or PANIC errors are displayed with the status aborted, and no detailed information is recorded for them. Status information about query parsing in the optimization phase cannot be monitored.

+

The following table describes the external interfaces of the historical views.

    Level                     Monitored Node   Record Type                             View
    Query level/perf level    Current CN       History (Database Manager interface)    GS_WLM_SESSION_HISTORY
                                               History (internal dump interface)       GS_WLM_SESSION_INFO
                              All CNs          History (Database Manager interface)    PGXC_WLM_SESSION_HISTORY
                                               History (internal dump interface)       PGXC_WLM_SESSION_INFO
    Operator                  Current CN       History (Database Manager interface)    GS_WLM_OPERATOR_HISTORY
                                               History (internal dump interface)       GS_WLM_OPERATOR_INFO
                              All CNs          History (Database Manager interface)    PGXC_WLM_OPERATOR_HISTORY
                                               History (internal dump interface)       PGXC_WLM_OPERATOR_INFO
+
+
  • The view level is determined by the resource monitoring level, that is, the resource_track_level configuration.
  • The perf and operator levels affect the values of the query_plan and warning columns in GS_WLM_SESSION_STATISTICS/PGXC_WLM_SESSION_INFO. For details, see SQL Self-Diagnosis.
  • Prefixes gs and pgxc indicate views showing single CN information and those showing cluster information, respectively. Common users can log in to a CN in the cluster to query only views with the gs prefix.
  • You can query the gs_wlm_session_info, gs_wlm_operator_info, pgxc_wlm_session_info, and pgxc_wlm_operator_info views only after connecting to the gaussdb database.
  • If an instance fault occurs, some SQL statement information may fail to be recorded in historical resource monitoring views.
  • In some abnormal cases, the status column in historical Top SQL records may be displayed as unknown, and the recorded monitoring information may be inaccurate.
  • The SQL statements that can be recorded in historical resource monitoring views are the same as those recorded in real-time resource monitoring views. For details, see SQL statements recorded in real-time resource monitoring views.
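For example, to query the archived info views, first switch to the gaussdb database, as noted above (a gsql sketch; the row-count query is illustrative):

    \c gaussdb
    SELECT count(*) FROM gs_wlm_session_info;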
+
+

Prerequisites

+
+

Procedure

  1. Query the load records of the current CN after its latest job is complete in the gs_wlm_session_history view.

    SELECT * FROM gs_wlm_session_history;
    +
    + +
    +

  2. Query the load records of all the CNs after their latest jobs are complete in the pgxc_wlm_session_history view.

    SELECT * FROM pgxc_wlm_session_history;
    +
    + +
    +

  3. Query the load records of the current CN through the gs_wlm_session_info table after the task is complete. To query the historical records successfully, set enable_resource_record to on.

    SELECT * FROM gs_wlm_session_info;
    +
    + +
    +
    • Top 10 queries that consume the most memory (You can specify a query period.)
    +
    SELECT * FROM gs_wlm_session_info order by max_peak_memory desc limit 10;
    +
    + +
    +
    SELECT * FROM gs_wlm_session_info WHERE start_time >= '2022-05-15 21:00:00' and finish_time <= '2022-05-15 23:30:00' order by max_peak_memory desc limit 10;
    +
    + +
    +
    • Top 10 queries that consume the most CPU (You can specify a query period.)
    +
    SELECT * FROM gs_wlm_session_info order by total_cpu_time desc limit 10;
    +
    + +
    +
    SELECT * FROM gs_wlm_session_info WHERE start_time >= '2022-05-15 21:00:00' and finish_time <= '2022-05-15 23:30:00' order by total_cpu_time desc limit 10;
    +
    + +
    +

  4. Query for the load records of all the CNs after their jobs are complete in the pgxc_wlm_session_info view. To query the historical records successfully, set enable_resource_record to on.

    SELECT * FROM pgxc_wlm_session_info;
    +
    + +
    +
    • Query the top 10 queries that take up the most CN processing time (You can specify a query period.)
    +
    SELECT * FROM pgxc_wlm_session_info order by duration desc limit 10;
    +
    + +
    +
    SELECT * FROM pgxc_wlm_session_info WHERE start_time >= '2022-05-15 21:00:00' and finish_time <= '2022-05-15 23:30:00' order by nodename, max_peak_memory desc limit 10;
    +
    + +
    +

  5. Use the pgxc_get_wlm_session_info_bytime function to filter and query the pgxc_wlm_session_info view. To query the historical records successfully, set enable_resource_record to on. You are advised to use this function if the view contains a large number of records.

    A GaussDB(DWS) cluster uses UTC time by default, which has an 8-hour difference from the local system time. Before querying, ensure that the database time is consistent with the system time (to check it, see the sketch below).

    +
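    To check the time zone the cluster is currently using, a quick sketch:

    SHOW timezone;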
    +
    • Return the queries started between 2019-09-10 15:30:00 and 2019-09-10 15:35:00 on all CNs. For each CN, a maximum of 10 queries will be returned.
    +
    SELECT * FROM pgxc_get_wlm_session_info_bytime('start_time', '2019-09-10 15:30:00', '2019-09-10 15:35:00', 10);
    +
    + +
    +
    • Return the queries ended between 2019-09-10 15:30:00 and 2019-09-10 15:35:00 on all CNs. For each CN, a maximum of 10 queries will be returned.
    +
    SELECT * FROM pgxc_get_wlm_session_info_bytime('finish_time', '2019-09-10 15:30:00', '2019-09-10 15:35:00', 10);
    +
    + +
    +

  6. Query the recent resource information of the job operators on the current CN in the gs_wlm_operator_history view. Ensure that resource_track_level is set to operator.

    SELECT * FROM gs_wlm_operator_history;
    +
    + +
    +

  7. Query the recent resource information of the job operators on all the CNs in the pgxc_wlm_operator_history view. Ensure that resource_track_level is set to operator.

    SELECT * FROM pgxc_wlm_operator_history;
    +
    + +
    +

  8. Query the recent resource information of the job operators on the current CN in the gs_wlm_operator_info view. Ensure that resource_track_level is set to operator and enable_resource_record to on.

    SELECT * FROM gs_wlm_operator_info;
    +
    + +
    +

  9. Query for the historical resource information of job operators on all the CNs in the pgxc_wlm_operator_info view. Ensure that resource_track_level is set to operator and enable_resource_record to on.

    SELECT * FROM pgxc_wlm_operator_info;
    +
    + +
    +

+
  • The number of data records that can be retained in memory is limited because some memory is reserved. After a real-time query is complete, its information is moved to the historical views and is deleted once it has been stored in the in-memory hash table for 3 minutes. The upper limit of records varies by view. For a query-level view, whether a record can be stored depends on the upper limit of records allowed by the memory at that time. On each CN, the maximum number of real-time query-level records that can be stored in memory (default: 5 MB) is max_session_realt_num (12850 by default), and the maximum number of historical records that can be stored in memory (default: 100 MB) is max_session_hist_num (137970 by default). Assume the average execution time of queries in a service system is run_time (in seconds). The maximum number of concurrent jobs allowed by real-time views on each CN is num_realt_active = max_session_realt_num; for historical views it is num_hist_active = max_session_hist_num/(180/run_time). (A worked example follows this list.)
  • For operator-level views, whether a record can be stored depends on the upper limit allowed by the memory at that time. If the number of plan nodes plus the number of records in memory exceeds the upper limit, the record cannot be stored. On each CN, the maximum numbers of real-time and historical operator-level records that can be stored in memory are max_oper_realt_num (56987 by default) and max_oper_hist_num (113975 by default), respectively. Assume the average number of plan nodes of a query is num_plan_node. The maximum number of concurrent jobs allowed by real-time views on each CN is num_realt_active = max_oper_realt_num/num_plan_node; for historical views it is num_hist_active = max_oper_hist_num/(180/run_time)/num_plan_node.
  • In high concurrency, ensure that the number of queries to be recorded does not exceed the maximum values set for query-level and operator-level queries. You can resize the memory of the real-time query view with session_statistics_memory and that of the historical query view with session_history_memory. The memory size increases in direct proportion to the maximum number of queries that can be recorded.
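For example, plugging the defaults into the historical-view formula above: if run_time is 10 s, each CN allows num_hist_active = 137970/(180/10) = 137970/18 = 7665 concurrent jobs in historical views, while real-time views allow up to max_session_realt_num = 12850 concurrent jobs.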
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0399.html b/docs/dws/dev/dws_04_0399.html new file mode 100644 index 00000000..3007fa8d --- /dev/null +++ b/docs/dws/dev/dws_04_0399.html @@ -0,0 +1,122 @@ + + +

TopSQL Query Example

+

In this section, TPC-DS sample data is used as an example to describe how to query Real-time TopSQL and Historical TopSQL.

+

Configuring Cluster Parameters

To query historical or archived resource monitoring information about top SQL jobs, you must first set the related GUC parameters. The procedure is as follows:

+
  1. Log in to the GaussDB(DWS) management console.
  2. On the Cluster Management page, locate the required cluster and click the cluster name. The cluster details page is displayed.
  3. Click the Parameter Modifications tab to view the values of cluster parameters.
  4. Set an appropriate value for parameter resource_track_duration and click Save.

    If enable_resource_record is set to on, storage space expansion may occur, which slightly affects performance. Therefore, set it to off if record archiving is unnecessary.

    +
    +
  5. Go back to the Cluster Management page, click the refresh button in the upper right corner, and wait until the cluster parameter settings are applied.
+
+

Example for Querying for Top SQLs

The TPC-DS sample data is used as an example.

+
  1. Open the SQL client tool and connect to your database.
  2. Run the EXPLAIN statement to obtain the estimated cost of the SQL statement to be executed; the cost determines whether the statement's resource usage will be monitored.

    By default, only resources of a query whose execution cost is greater than the value (default: 100000) of resource_track_cost are monitored and can be queried by users.

    +

    For example, run the following statements to query for the estimated execution cost of the SQL statement:

    +
    SET CURRENT_SCHEMA = tpcds;
    EXPLAIN WITH customer_total_return AS
    ( SELECT sr_customer_sk as ctr_customer_sk,
    sr_store_sk as ctr_store_sk,
    sum(SR_FEE) as ctr_total_return
    FROM store_returns, date_dim
    WHERE sr_returned_date_sk = d_date_sk AND d_year = 2000
    GROUP BY sr_customer_sk, sr_store_sk )
    SELECT c_customer_id
    FROM customer_total_return ctr1, store, customer
    WHERE ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
    FROM customer_total_return ctr2
    WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk)
    AND s_store_sk = ctr1.ctr_store_sk
    AND s_state = 'TN'
    AND ctr1.ctr_customer_sk = c_customer_sk
    ORDER BY c_customer_id
    limit 100;
    +
    + +
    +

    In the following query result, the value in the first row of the E-costs column is the estimated cost of the SQL statement.

    +
    Figure 1 EXPLAIN result
    +

    In this example, to demonstrate the resource monitoring function of top SQLs, you need to set resource_track_cost to a value smaller than the estimated cost in the EXPLAIN result, for example, 100. For details about the parameter setting, see resource_track_cost.

    +

    After completing this example, you still need to reset resource_track_cost to its default value 100000 or a proper value. An overly small parameter value will compromise the database performance.
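
For reference, a session-level sketch of lowering the threshold and restoring it afterwards (this assumes resource_track_cost can be changed with SET in your version; otherwise, change it on the console as described above):

SET resource_track_cost = 100;
-- ... run and observe the monitored statement ...
RESET resource_track_cost;   -- back to the default value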

    +
    +

  3. Run SQL statements.

    SET CURRENT_SCHEMA = tpcds;
    WITH customer_total_return AS
    (SELECT sr_customer_sk as ctr_customer_sk, 
    sr_store_sk as ctr_store_sk, 
    sum(SR_FEE) as ctr_total_return
    FROM store_returns,date_dim
    WHERE sr_returned_date_sk = d_date_sk
    AND d_year =2000
    GROUP BY sr_customer_sk ,sr_store_sk)
    SELECT  c_customer_id
    FROM customer_total_return ctr1, store, customer
    WHERE ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2 
    FROM customer_total_return ctr2
    WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk)
    AND s_store_sk = ctr1.ctr_store_sk
    AND s_state = 'TN'
    AND ctr1.ctr_customer_sk = c_customer_sk
    ORDER BY c_customer_id
    limit 100;
    +
    + +
    +

  4. During statement execution, query for the real-time memory peak information about the SQL statement on the current CN.

    SELECT query,max_peak_memory,average_peak_memory,memory_skew_percent FROM gs_wlm_session_statistics ORDER BY start_time DESC;
    +
    + +
    +

    The preceding command queries the real-time peak information at the query level. The peak information includes the maximum memory peak among all DNs per second, the average memory peak among all DNs per second, and the memory usage skew across DNs.

    +

    For more examples of querying for the real-time resource monitoring information of top SQLs, see Real-time TopSQL.

    +

  5. Wait until the SQL statement execution in 3 is complete, and then query for the historical resource monitoring information of the statement.

    select query,start_time,finish_time,duration,status from gs_wlm_session_history order by start_time desc;
    +
    + +
    +

    The preceding command queries the historical information at the query level, including the execution start time, execution duration (unit: ms), and execution status.

    +

    For more examples of querying for the historical resource monitoring information of top SQLs, see Historical TopSQL.

    +

  6. Wait for 3 minutes after the execution of the SQL statement in 3 is complete, and then query the historical resource monitoring information of the statement in the info view.

    If enable_resource_record is set to on and the execution time of the SQL statement in 3 is no less than the value of resource_track_duration, historical information about the SQL statement will be archived to the gs_wlm_session_info view 3 minutes after the execution of the SQL statement is complete.

    +

    The info view can be queried only when the gaussdb database is connected. Therefore, switch to the gaussdb database before running the following statement:

    +
    select query,start_time,finish_time,duration,status from gs_wlm_session_info order by start_time desc;
    +
    + +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0400.html b/docs/dws/dev/dws_04_0400.html new file mode 100644 index 00000000..e736097b --- /dev/null +++ b/docs/dws/dev/dws_04_0400.html @@ -0,0 +1,25 @@ + + +

Query Performance Optimization

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0402.html b/docs/dws/dev/dws_04_0402.html new file mode 100644 index 00000000..0d9144e9 --- /dev/null +++ b/docs/dws/dev/dws_04_0402.html @@ -0,0 +1,12 @@ + + +

Overview of Query Performance Optimization

+

The aim of SQL optimization is to maximize the utilization of resources, including CPU, memory, disk I/O, and network I/O; that is, to run SQL statements as efficiently as possible and achieve the highest performance at the lowest cost. For example, a typical point query can be executed as a sequential scan plus a filter (reading every tuple and checking it against the query conditions), or as an index scan, which produces the same result at a much lower cost.
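
For instance, a minimal sketch of comparing the two access paths for a point query (the table and index names are hypothetical):

CREATE TABLE t_point (id int, payload varchar(40)) DISTRIBUTE BY hash(id);
CREATE INDEX idx_t_point_id ON t_point(id);
EXPLAIN SELECT * FROM t_point WHERE id = 42;   -- an Index Scan here is far cheaper than Seq Scan + Filter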

+

This chapter describes how to analyze and improve query performance, and provides common cases and troubleshooting methods.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0403.html b/docs/dws/dev/dws_04_0403.html new file mode 100644 index 00000000..55efd126 --- /dev/null +++ b/docs/dws/dev/dws_04_0403.html @@ -0,0 +1,23 @@ + + +

Query Analysis

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0409.html b/docs/dws/dev/dws_04_0409.html new file mode 100644 index 00000000..f4a332a7 --- /dev/null +++ b/docs/dws/dev/dws_04_0409.html @@ -0,0 +1,66 @@ + + +

Query Execution Process

+

The process from receiving SQL statements to the statement execution by the SQL engine is shown in Figure 1 and Table 1. The texts in red are steps where database administrators can optimize queries.

+
Figure 1 Execution process of query-related SQL statements by the SQL engine
+ +
+ + + + + + + + + + + + + + + + + + + +
Table 1 Execution process of query-related SQL statements by the SQL engine

Procedure

+

Description

+

1. Perform syntax and lexical parsing.

+

Converts the input SQL statements from the string data type to the formatted structure stmt based on the specified SQL statement rules.

+

2. Perform semantic parsing.

+

Converts the formatted structure obtained from the previous step into objects that can be recognized by the database.

+

3. Rewrite the query statements.

+

Converts the output of the last step into the structure that optimizes the query execution.

+

4. Optimize the query.

+

Determines the execution mode of SQL statements (the execution plan) based on the result obtained from the last step and the internal database statistics. For details about the impact of statistics and GUC parameters on query optimization (execution plan), see Optimizing Queries Using Statistics and Optimizing Queries Using GUC parameters.

+

5. Perform the query.

+

Executes the SQL statements based on the execution path specified in the last step. Selecting a proper underlying storage mode improves the query execution efficiency. For details, see Optimizing Queries Using the Underlying Storage.

+
+
+

Optimizing Queries Using Statistics

The GaussDB(DWS) optimizer is a typical cost-based optimizer (CBO). With CBO, the database calculates, for each candidate execution plan, the number of tuples and the execution cost of each step based on the number of table tuples, column width, NULL record ratio, and characteristic values such as distinct, MCV, and HB values, together with certain cost calculation methods. The database then selects the plan with the lowest cost for the overall execution or for returning the first tuple. These characteristic values are the statistics, which are the core of query optimization. Accurate statistics help the planner select the most appropriate query plan. Generally, you can collect statistics for a table, or for some columns of a table, using ANALYZE. You are advised to execute ANALYZE periodically, or immediately after a large portion of a table's contents has been modified.
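
For example, a minimal sketch (tpcds.store_sales is used here only as an illustration):

ANALYZE tpcds.store_sales;                     -- refresh statistics for one table
ANALYZE tpcds.store_sales (ss_sold_date_sk);   -- or for specific columns only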

+
+

Optimizing Queries Using GUC parameters

Optimizing queries aims to select an efficient execution mode.

+

Take the following statement as an example:

+
select count(1) 
from customer inner join store_sales on (ss_customer_sk = c_customer_sk);
+
+ +
+

During execution of customer inner join store_sales, GaussDB(DWS) supports nested loop, merge join, and hash join. The optimizer estimates the result set value and the execution cost under each join mode based on the statistics of the customer and store_sales tables and selects the execution plan that takes the lowest execution cost.

+

As described in the preceding content, the execution cost is calculated based on certain methods and statistics. If the actual execution cost cannot be accurately estimated, you need to optimize the execution plan by setting the GUC parameters.
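
For example, if the estimation is off and the optimizer picks a nested loop where a hash join would win, you can steer the plan at session level. A sketch using the standard planner switch enable_nestloop:

SET enable_nestloop = off;   -- discourage nested-loop joins for this session
EXPLAIN select count(1) 
from customer inner join store_sales on (ss_customer_sk = c_customer_sk);
RESET enable_nestloop;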

+
+

Optimizing Queries Using the Underlying Storage

GaussDB(DWS) supports row- and column-based tables. The selection of an underlying storage mode strongly depends on specific customer business scenarios. You are advised to use column-store tables for computing service scenarios (mainly involving association and aggregation operations) and row-store tables for service scenarios, such as point queries and massive UPDATE or DELETE executions.
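
A minimal sketch of declaring each storage mode at table creation time (table and column names are hypothetical):

CREATE TABLE fact_sales (id bigint, amount decimal(15,2))
WITH (orientation = column) DISTRIBUTE BY hash(id);    -- analytic workloads: association and aggregation
CREATE TABLE order_status (id bigint, status char(1))
WITH (orientation = row) DISTRIBUTE BY hash(id);       -- point queries, frequent UPDATE/DELETE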

+

Optimization methods for each storage mode are described in detail in the performance optimization chapter.

+
+

Optimizing Queries by Rewriting SQL Statements

Besides the preceding methods that improve the performance of the execution plan generated by the SQL engine, database administrators can also enhance SQL statement performance by rewriting SQL statements while retaining the original service logic based on the execution mechanism of the database and abundant practical experience.

+

This requires that the system administrators know the customer business well and have professional knowledge of SQL statements.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0410.html b/docs/dws/dev/dws_04_0410.html new file mode 100644 index 00000000..ad1c8245 --- /dev/null +++ b/docs/dws/dev/dws_04_0410.html @@ -0,0 +1,25 @@ + + +

Overview of the SQL Execution Plan

+

The SQL execution plan is a node tree that displays the detailed steps GaussDB(DWS) performs to run an SQL statement. Each node corresponds to one database operator, that is, one step.

+

You can run the EXPLAIN command to view the execution plan that the optimizer generates for each query. EXPLAIN outputs a row of information for each execution node, showing the basic node type and the cost estimate that the optimizer made for executing the node. See Figure 1.

+
Figure 1 SQL execution plan example
+ +

Execution Plan Display Format

GaussDB(DWS) provides four display formats: normal, pretty, summary, and run.

+ +
+
Figure 2 Example of an execution plan using the pretty format
+

You can change the display format of execution plans by setting explain_perf_mode. Later examples use the pretty format by default.
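
For example, a session-level sketch:

SET explain_perf_mode = pretty;
EXPLAIN SELECT count(1) FROM customer;   -- output now uses the pretty format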

+

Execution Plan Information

In addition to setting different display formats for an execution plan, you can use different EXPLAIN syntax to display execution plan information in details. The following lists the common EXPLAIN syntax. For details, see EXPLAIN.

+ +

To measure the run-time cost of each node in the execution plan, EXPLAIN ANALYZE or EXPLAIN PERFORMANCE adds profiling overhead to the query execution. Running EXPLAIN ANALYZE or EXPLAIN PERFORMANCE on a query can therefore take longer than executing the query normally. The amount of overhead depends on the nature of the query and the platform being used.

+

Therefore, if an SQL statement has been running for a long time and has not finished, run the EXPLAIN statement to view the execution plan and locate the fault. If the SQL statement executes properly, run the EXPLAIN ANALYZE or EXPLAIN PERFORMANCE statement to check the execution plan and execution information to locate the fault.

+

The lightweight execution mode of EXPLAIN PERFORMANCE produces output consistent with the full mode but greatly reduces the time spent on performance analysis.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0411.html b/docs/dws/dev/dws_04_0411.html new file mode 100644 index 00000000..695d83aa --- /dev/null +++ b/docs/dws/dev/dws_04_0411.html @@ -0,0 +1,85 @@ + + +

Deep Dive on the SQL Execution Plan

+

As described in Overview of the SQL Execution Plan, EXPLAIN displays the execution plan but does not actually run the SQL statement. EXPLAIN ANALYZE and EXPLAIN PERFORMANCE both actually run the SQL statement and return the execution information. This section describes the execution plan and execution information in detail.

+

Execution Plan

The following SQL statement is used as an example:

+
select 
    cjxh, 
    count(1) 
from dwcjk
group by cjxh;
+
+ +
+

Run the EXPLAIN command and the output is as follows:

+

+

Interpretation of the execution plan column (horizontal):

+ +

Interpretation of the execution plan level (vertical):

+
  1. Layer 1: CStore Scan on dwcjk

    The table scan operator scans the table dwcjk using CStore Scan. This layer reads data of the table dwcjk from the buffer or disks and transfers it to the upper-layer node to participate in the calculation.

    +
  2. Layer 2: Vector Hash Aggregate

    The aggregation operator performs the aggregation operation (GROUP BY) on the results returned by the lower-layer operator.

    +
  3. Layer 3: Vector Streaming (type: GATHER)

    The GATHER-typed Shuffle operator aggregates data from DNs to the CN.

    +
  4. Layer 4: Row Adapter

    The storage format conversion operator converts column-store data in memory into row format for client display.

    +
+

It should be noted that when the top-layer operator is Data Node Scan, you need to set enable_fast_query_shipping to off to view the detailed execution plan, as follows:

+

+

After enable_fast_query_shipping is set, the execution plan is displayed as follows:

+

+

Keywords in the execution plan:

+
  1. Table access modes
    • Seq Scan

      Scans all rows of the table in sequence.

      +
    • Index Scan

      The optimizer uses a two-step plan: the child plan node visits an index to find the locations of rows matching the index condition, and then the upper plan node actually fetches those rows from the table itself. Fetching rows separately is much more expensive than reading them sequentially, but because not all pages of the table have to be visited, this is still cheaper than a sequential scan. The upper plan node first sorts the index-identified row locations into physical order before reading them, which minimizes the overhead of the separate fetches.

      +

      If there are separate indexes on multiple columns referenced in WHERE, the optimizer might choose to use an AND or OR combination of the indexes. However, this requires the visiting of both indexes, so it is not necessarily a win compared to using just one index and treating the other condition as a filter.

      +

      The following Index scans featured with different sorting mechanisms are involved:

      +
      • Bitmap Index Scan

        Fetches data pages using a bitmap.

        +
      • Index Scan using index_name

        Fetches table rows in index order, which makes them even more expensive to read. However, when only a few rows are fetched, the extra cost of sorting the row locations is not worthwhile. This plan type is used mainly for queries fetching just a single row and for queries with an ORDER BY condition that matches the index order, because no extra sorting step is needed to satisfy the ORDER BY.

        +
      +
    +
  2. Table connection modes
    • Nested Loop

      Nested-loop joins are used for queries that join small data sets. In a nested-loop join, the outer table drives the inner table, and each row returned from the outer table is matched against rows in the inner table. The result set returned by the query should contain fewer than 10,000 rows. Use the table that returns the smaller subset as the outer table, and create indexes on the join columns of the inner table.

      +
    • (Sonic) Hash Join

      A Hash join is used for large tables. The optimizer uses a hash join, in which rows of one table are entered into an in-memory hash table, after which the other table is scanned and the hash table is probed for matches to each row. Sonic and non-Sonic hash joins differ in their hash table structures, which do not affect the execution result set.

      +
    • Merge Join

      In a merge join, data in the two joined tables is sorted by join columns. Then, data is extracted from the two tables to a sorted table for matching.

      +

      Merge join requires more resources for sorting and its performance is lower than that of hash join. If the source data has been sorted, it does not need to be sorted again when merge join is performed. In this case, the performance of merge join is better than that of hash join.

      +
    +
  3. Operators
    • sort

      Sorts the result set.

      +
    • filter

      The EXPLAIN output shows the WHERE clause being applied as a Filter condition attached to the Seq Scan plan node. This means that the plan node checks the condition for each row it scans, and returns only the ones that meet the condition. The estimated number of output rows has been reduced because of the WHERE clause. However, the scan will still have to visit all 10000 rows. As a result, the cost is not decreased. It increases a bit (by 10000 x cpu_operator_cost) to reflect the extra CPU time spent on checking the WHERE condition.

      +
    • LIMIT

      LIMIT limits the number of output execution results. If a LIMIT condition is added, not all rows are retrieved.

      +
    +
+
+

Task Execution

You can use EXPLAIN ANALYZE or EXPLAIN PERFORMANCE to check the SQL statement execution information and compare the actual execution and the optimizer's estimation to find what to optimize. EXPLAIN PERFORMANCE provides the execution information on each DN, whereas EXPLAIN ANALYZE does not.

+

The following SQL statement is used as an example:

+
select count(1) from tb1;
+
+ +
+

The output of running EXPLAIN PERFORMANCE is as follows:

+

+

+

+

+

This figure shows that the execution information can be classified into the following 7 aspects.

+
  1. The plan is displayed as a table containing 11 columns: id, operation, A-time, A-rows, E-rows, E-distinct, Peak Memory, E-memory, A-width, E-width, and E-costs. The plan-type columns (id, operation, and the columns starting with E) have the same definitions as in EXPLAIN output; for details, see Execution Plan above. The columns A-time, A-rows, E-distinct, Peak Memory, and A-width are described as follows:
    • A-time: indicates the execution completion time of the current operator. Generally, the A-time of the operator executed on the DN is two values enclosed by square brackets ([]), indicating the shortest time and longest time for completing the operator on all DNs, respectively.
    • A-rows: indicates the actual number of tuples output by the current operator.
    • E-distinct: indicates the estimated distinct value of the hashjoin operator.
    • Peak Memory: indicates the peak memory usage of an operator on each DN.
    • A-width: indicates the actual tuple width of the current operator. It is displayed only for memory-intensive operators, including (Vec)HashJoin, (Vec)HashAgg, (Vec)HashSetOp, (Vec)Sort, and (Vec)Materialize. For (Vec)HashJoin, the width calculated is that of its right subtree operator, and it is displayed on the right subtree.
    +
  2. Predicate Information (identified by plan id):

    This part displays the static information that does not change during the plan execution process, such as some join conditions and filter information.

    +
  3. Memory Information (identified by plan id):

    This part displays the memory usage information printed by certain operators (mainly Hash and Sort), including peak memory, control memory, operator memory, width, auto spread num, and early spilled, as well as spill details, including spill Time(s), inner/outer partition spill num, temp file num, split data volume, and written disk IO [min, max]. The Sort operator does not display the number of files written to disks, and displays disk information only when showing the sorting method.

    +
  4. Targetlist Information (identified by plan id):

    This part displays the target columns provided by each operator.

    +
  5. DataNode Information (identified by plan id):

    This part displays the execution time of each operator (including the execution time of filtering and projection, if any), CPU usage, and buffer usage.

    +
  6. User Define Profiling:

    This part displays CNs and DNs, DN and DN connection time, and some execution information in the storage layer.

    +
  7. ====== Query Summary =====:

    This part displays the total execution time and network traffic, including the maximum and minimum execution times of the initialization and end phases on each DN, the initialization, execution, and end-phase times on each CN, the system memory available during the execution of the current statement, and the statement's estimated memory.

    +
+
  • The difference between A-rows and E-rows shows the deviation between the optimizer's estimation and the actual execution. Generally, if the deviation is large, the plan generated by the optimizer is untrustworthy, and you need to update statistics to reduce the deviation.
  • If the difference of the A-time values is large, it indicates that the operator computing skew (difference between execution time on DNs) is large and that manual performance tuning is required. Generally, for two adjacent operators, the execution time of the upper-layer operator includes that of the lower-layer operator. However, if the upper-layer operator is a stream operator, its execution time may be less than that of the lower-layer operator, as there is no driving relationship between threads.
  • Max Query Peak Memory is often used to estimate the consumed memory of SQL statements, and is also used as an important basis for setting a memory parameter during SQL statement optimization. Generally, the output from EXPLAIN ANALYZE or EXPLAIN PERFORMANCE is provided for the input for further optimization.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0412.html b/docs/dws/dev/dws_04_0412.html new file mode 100644 index 00000000..6a32380a --- /dev/null +++ b/docs/dws/dev/dws_04_0412.html @@ -0,0 +1,57 @@ + + +

Querying SQL Statements That Affect Performance Most

+

This section describes how to query SQL statements whose execution takes a long time, leading to poor system performance.

+

Procedure

  1. Query the statements that are run for a long time in the database.

    SELECT current_timestamp - query_start AS runtime, datname, usename, query FROM pg_stat_activity where state != 'idle' ORDER BY 1 desc;
    +
    + +
    +

    The query returns a list of statements ranked by execution time in descending order; the first result is the statement with the longest execution time in the system. The returned results contain SQL statements invoked by the system as well as SQL statements run by users. Find the user-run statements that have been running for a long time.

    +
    Alternatively, you can set current_timestamp - query_start to be greater than a threshold to identify query statements that are executed for a duration longer than this threshold.
    SELECT query FROM pg_stat_activity WHERE current_timestamp - query_start > interval '1 days';
    +
    + +
    +
    +

  2. Set the parameter track_activities to on.

    SET track_activities = on;
    +
    + +
    +

    The database collects the running information about active queries only if the parameter is set to on.

    +

  3. View the running query statements.

    Viewing pg_stat_activity is used as an example here.

    +
    SELECT datname, usename, state FROM pg_stat_activity;
     datname  | usename | state  
    ----------+---------+--------
     postgres |   omm   | idle   
     postgres |   omm   | active 
    (2 rows)
    +
    + +
    +

    If the state column is idle, the connection is idle and requires a user to enter a command.

    +

    To identify only active query statements, run the following command:

    +
    SELECT datname, usename, state FROM pg_stat_activity WHERE state != 'idle';
    +
    + +
    +

  4. Analyze the status of the query statements that were run for a long time.

    • If the query statement is normal, wait until the execution is complete.
    • If a query statement is blocked, run the following command to view this query statement:
      SELECT datname, usename, state, query FROM pg_stat_activity WHERE waiting = true;
      +
      + +
      +

      The command output lists a query statement in the block state. The lock resource requested by this query statement is occupied by another session, so this query statement is waiting for the session to release the lock resource.

      +

      The waiting field is true only when a query is blocked by internal lock resources. In most cases, blocking happens when query statements are waiting for lock resources to be released. However, query statements may also be blocked waiting for file writes or timers; such blocked queries are not displayed in the pg_stat_activity view.

      +
      +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0413.html b/docs/dws/dev/dws_04_0413.html new file mode 100644 index 00000000..8c872672 --- /dev/null +++ b/docs/dws/dev/dws_04_0413.html @@ -0,0 +1,53 @@ + + +

Checking Blocked Statements

+

During database running, query statements are blocked in some service scenarios and run for an excessively long time. In this case, you can forcibly terminate the faulty session.

+

Procedure

  1. View blocked query statements and information about the tables and schemas that block the query statements.

    SELECT w.query as waiting_query,
    w.pid as w_pid,
    w.usename as w_user,
    l.query as locking_query,
    l.pid as l_pid,
    l.usename as l_user,
    t.schemaname || '.' || t.relname as tablename
    from pg_stat_activity w join pg_locks l1 on w.pid = l1.pid
    and not l1.granted join pg_locks l2 on l1.relation = l2.relation
    and l2.granted join pg_stat_activity l on l2.pid = l.pid join pg_stat_user_tables t on l1.relation = t.relid
    where w.waiting;
    +
    + +
    +

    The thread ID, user information, query status, as well as information about the tables and schemas that block the query statements are returned.

    +

  2. Run the following command to terminate the required session, where 139834762094352 is the thread ID:

    SELECT PG_TERMINATE_BACKEND(139834762094352);
    +
    + +
    +

    If information similar to the following is displayed, the session is successfully terminated:

    +
     PG_TERMINATE_BACKEND
    +----------------------
    + t
    +(1 row)
    +

    If a command output similar to the following is displayed, a user is attempting to terminate the session, and the session will be reconnected rather than being terminated.

    +
    FATAL:  terminating connection due to administrator command
    +FATAL:  terminating connection due to administrator command
    +The connection to the server was lost. Attempting reset: Succeeded.
    +

    If the PG_TERMINATE_BACKEND function is used to terminate the background threads of the session, the gsql client will be reconnected rather than be logged out.

    +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0430.html b/docs/dws/dev/dws_04_0430.html new file mode 100644 index 00000000..e4a41bff --- /dev/null +++ b/docs/dws/dev/dws_04_0430.html @@ -0,0 +1,33 @@ + + +

Query Improvement

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0435.html b/docs/dws/dev/dws_04_0435.html new file mode 100644 index 00000000..56e22b75 --- /dev/null +++ b/docs/dws/dev/dws_04_0435.html @@ -0,0 +1,13 @@ + + +

Optimization Process

+

You can analyze slow SQL statements to optimize them.

+

Procedure

  1. Collect statistics for all tables associated with the SQL statements. In a database, statistics are the source data for the plans generated by the planner. If statistics are unavailable or out of date, the execution plan may seriously deteriorate, leading to low performance. Experience shows that about 10% of performance problems are caused by missing statistics. For details, see Updating Statistics.
  2. View the execution plan to find out the cause. If the SQL statements have been running for a long period of time and not ended, run the EXPLAIN command to view the execution plan and then locate the fault. If the SQL statement has been executed, run the EXPLAIN ANALYZE or EXPLAIN PERFORMANCE command to check the execution plan and actual running situation and then accurately locate the fault.
  3. Review and modify the table definition.
  4. For details about EXPLAIN or EXPLAIN PERFORMANCE, the reason why SQL statements are slowly located, and how to solve this problem, see Typical SQL Optimization Methods.
  5. Generally, some SQL statements can be converted to equivalent statements in all or certain scenarios by rewriting queries. Rewritten SQL statements are simpler, and some execution steps can be simplified to improve performance. The query rewriting method is universal across databases. Experience in Rewriting SQL Statements describes several optimization methods based on rewriting SQL statements.
  6. Specify a join order; join, stream, or scan operations; number of rows in a result; or redistribution skew information to optimize an execution plan, improving query performance. For details, see Hint-based Tuning.
  7. To maintain high database performance, you are advised to perform Routinely Maintaining Tables and Routinely Recreating an Index.
  8. (Optional) Improve performance by using operators if resources are sufficient in GaussDB(DWS). For details, see SMP Manual Optimization Suggestions.
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0436.html b/docs/dws/dev/dws_04_0436.html new file mode 100644 index 00000000..c3e4bbbc --- /dev/null +++ b/docs/dws/dev/dws_04_0436.html @@ -0,0 +1,44 @@ + + +

Updating Statistics

+

In a database, statistics are the source data for the plans generated by the planner. If statistics are unavailable or out of date, the execution plan may seriously deteriorate, leading to low performance.

+

Context

The ANALYZE statement collects statistics about table contents in the database and stores the results in the system catalog PG_STATISTIC. The query optimizer then uses the statistics to work out the most efficient execution plan.

+

After executing batch insertions or deletions, you are advised to run the ANALYZE statement on the table or the entire database to update statistics. By default, 30,000 rows of statistics are sampled; that is, the default value of the GUC parameter default_statistics_target is 100. If the total number of rows in the table exceeds 1,600,000, you are advised to set default_statistics_target to -2, indicating that 2% of the rows are sampled.
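
A sketch of temporarily raising the sampling ratio for a very large table (session level; -2 means sampling 2% of the rows; the table name is illustrative):

SET default_statistics_target = -2;
ANALYZE lineitem;
RESET default_statistics_target;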

+

For an intermediate table generated during the execution of a batch script or stored procedure, you also need to run the ANALYZE statement.

+

If there are multiple inter-related columns in a table and the conditions or grouping operations based on these columns are involved in the query, collect statistics about these columns so that the query optimizer can accurately estimate the number of rows and generate an effective execution plan.

+
+

Procedure

Run the following commands to update the statistics about a table or the entire database:

+
ANALYZE tablename;                        --Update statistics about a table.
ANALYZE;                                  --Update statistics about the entire database.
+
+ +
+
+

Run the following statements to perform statistics-related operations on multiple columns:

+
ANALYZE tablename ((column_1, column_2));                       --Collect statistics about column_1 and column_2 of tablename.

ALTER TABLE tablename ADD STATISTICS ((column_1, column_2));    --Declare statistics about column_1 and column_2 of tablename.
ANALYZE tablename;                                              --Collect statistics about one or more columns.

ALTER TABLE tablename DELETE STATISTICS ((column_1, column_2)); --Delete statistics about column_1 and column_2 of tablename or their statistics declaration.
+
+ +
+

After the statistics are declared for multiple columns by running the ALTER TABLE tablename ADD STATISTICS statement, the system collects the statistics about these columns next time ANALYZE is performed on the table or the entire database.

+

To collect the statistics, run the ANALYZE statement.

+
+

Use EXPLAIN to show the execution plan of each SQL statement. If rows=10 (the default value, probably indicating the table has not been analyzed) is displayed in the SEQ SCAN output of a table, run the ANALYZE statement for this table.
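
A quick check along these lines (t1 is a hypothetical table):

EXPLAIN SELECT * FROM t1;   -- if the scan shows the default row estimate, statistics are likely missing
ANALYZE t1;
EXPLAIN SELECT * FROM t1;   -- the estimate should now reflect the actual table size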

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0437.html b/docs/dws/dev/dws_04_0437.html new file mode 100644 index 00000000..44e151ff --- /dev/null +++ b/docs/dws/dev/dws_04_0437.html @@ -0,0 +1,27 @@ + + +

Reviewing and Modifying a Table Definition

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0438.html b/docs/dws/dev/dws_04_0438.html new file mode 100644 index 00000000..290f5b3b --- /dev/null +++ b/docs/dws/dev/dws_04_0438.html @@ -0,0 +1,14 @@ + + +

Reviewing and Modifying a Table Definition

+

In a distributed framework, data is distributed on DNs. Data on one or more DNs is stored on a physical storage device. To properly define a table, you must:

+
  1. Evenly distribute data on each DN to avoid the available capacity decrease of a cluster caused by insufficient storage space of the storage device associated with a DN. Specifically, select a proper distribution key to avoid data skew.
  2. Evenly assign table scanning tasks to each DN to prevent a single DN from being overloaded by table scan tasks. Specifically, do not select columns used in equivalence filters of a base table as the distribution key.
  3. Reduce the data volume scanned by using the partition pruning mechanism.
  4. Avoid the use of random I/O by using clustering or partial clustering.
  5. Avoid data shuffle to reduce the network pressure by selecting the join-condition column or group by column as the distribution column.
+

The distribution column is the core for defining a table. The following figure shows the procedure of defining a table. The table definition is created during the database design and is reviewed and modified during the SQL statement optimization.

+
Figure 1 Procedure of defining a table
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0439.html b/docs/dws/dev/dws_04_0439.html new file mode 100644 index 00000000..50d69263 --- /dev/null +++ b/docs/dws/dev/dws_04_0439.html @@ -0,0 +1,33 @@ + + +

Selecting a Storage Model

+

During database design, some key factors about table design will greatly affect the subsequent query performance of the database. Table design affects data storage as well. Scientific table design reduces I/O operations and minimizes memory usage, improving the query performance.

+

Selecting a model for table storage is the first step of table definition. Select a proper storage model for your service based on the following table.

+ +
+ + + + + + + + + + +

Storage Model

+

Application Scenario

+

Row storage

+

Point query (simple index–based query that returns only a few records).

+

Query involving many INSERT, UPDATE, and DELETE operations.

+

Column storage

+

Statistics analysis query, in which operations, such as group and join, are performed many times.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0440.html b/docs/dws/dev/dws_04_0440.html new file mode 100644 index 00000000..cfee328e --- /dev/null +++ b/docs/dws/dev/dws_04_0440.html @@ -0,0 +1,40 @@ + + +

Selecting a Distribution Mode

+

In replication mode, full data in a table is copied to each DN in the cluster. This mode is used for tables containing a small volume of data. Full data in a table stored on each DN avoids data redistribution during the JOIN operation. This reduces network costs and plan segments (each with a thread), but generates much redundant data. Generally, replication is only used for small dimension tables.

+

In hash mode, hash values are generated for one or more columns. You can obtain the storage location of a tuple based on the mapping between DNs and the hash values. In a hash table, I/O resources on each node can be used for data read/write, which greatly accelerates the read/write of a table. Generally, a table containing a large amount of data is defined as a hash table.
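
A minimal sketch of both policies (table names are hypothetical; see also the summary table below):

CREATE TABLE dim_region (region_id int, region_name varchar(25))
DISTRIBUTE BY replication;                     -- small dimension table, copied in full to every DN
CREATE TABLE fact_sales (sale_id bigint, region_id int, amount decimal(15,2))
DISTRIBUTE BY hash(sale_id);                   -- large fact table, spread across DNs by hash value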

+ +
+ + + + + + + + + + + + + +

Policy

+

Description

+

Scenario

+

Hash

+

Table data is distributed on all DNs in the cluster.

+

Fact tables containing a large amount of data

+

Replication

+

Full data in a table is stored on each DN in the cluster.

+

Small tables and dimension tables

+
+
+

As shown in Figure 1, T1 is a replication table and T2 is a hash table.

+
Figure 1 Replication table and hash table
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0441.html b/docs/dws/dev/dws_04_0441.html new file mode 100644 index 00000000..c7fc5f2d --- /dev/null +++ b/docs/dws/dev/dws_04_0441.html @@ -0,0 +1,27 @@ + + +

Selecting a Distribution Column

+

The distribution column in a hash table must meet the following requirements, which are ranked by priority in descending order:

+
  1. The value of the distribution column should be discrete so that data can be evenly distributed on each DN. For example, you are advised to select the primary key of a table as the distribution column, and the ID card number as the distribution column in a personnel information table.
  2. Do not select the column where a constant filter exists. For example, if a constant constraint (for example, zqdh= '000001') exists on the zqdh column in some queries on the dwcjk table, you are not advised to use zqdh as the distribution column.
  3. Select the join condition as the distribution column, so that join tasks can be pushed down to DNs to execute, reducing the amount of data transferred between the DNs.

    For a hash table, an improper distribution key may cause data skew or poor I/O performance on certain DNs. Therefore, you need to check the table to ensure that data is evenly distributed on each DN. You can run the following SQL statements to check data skew:

    +
    select 
    xc_node_id, count(1) 
    from tablename 
    group by xc_node_id 
    order by xc_node_id desc;
    +
    + +
    +

    xc_node_id corresponds to a DN. Generally, over 5% difference between the amount of data on different DNs is regarded as data skew. If the difference is over 10%, choose another distribution column.

    +
  4. You are not advised to use a newly added column as the distribution column, especially one filled with SEQUENCE values, because SEQUENCE may cause performance bottlenecks and unnecessary maintenance costs.
+

Multiple distribution columns can be selected in GaussDB(DWS) to evenly distribute data.
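
For example, a minimal sketch (all names are hypothetical):

CREATE TABLE person_info (id_card varchar(18), name varchar(25))
DISTRIBUTE BY hash(id_card);        -- a discrete key distributes data evenly
CREATE TABLE t_multi (a int, b int)
DISTRIBUTE BY hash(a, b);           -- multiple distribution columns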

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0442.html b/docs/dws/dev/dws_04_0442.html new file mode 100644 index 00000000..90800e27 --- /dev/null +++ b/docs/dws/dev/dws_04_0442.html @@ -0,0 +1,12 @@ + + +

Using Partial Clustering

+

Partial Cluster Key (PCK) is a column-store technology. It uses min/max sparse indexes to quickly filter base tables. A partial cluster key can specify multiple columns, but you are advised to specify no more than two. Use the following principles to specify columns (a sketch follows the list):

+
  1. The selected columns must be restricted by simple expressions in base tables. Such constraints usually take the form Col Op Const, where Col is a column name, Op is an operator (=, >, >=, <=, or <), and Const is a constant.
  2. Select columns that are frequently selected (to filter much more undesired data) in simple expressions.
  3. List the less frequently selected columns on the top.
  4. List the columns of the enumerated type at the top.
+
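
A minimal sketch of declaring a partial cluster key on a column-store table (names are hypothetical):

CREATE TABLE sales_pck
(
sale_date date,
region int,
amount decimal(15,2),
PARTIAL CLUSTER KEY(sale_date, region)
) WITH (orientation = column);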
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0443.html b/docs/dws/dev/dws_04_0443.html new file mode 100644 index 00000000..a5460628 --- /dev/null +++ b/docs/dws/dev/dws_04_0443.html @@ -0,0 +1,14 @@ + + +

Using Partitioned Tables

+

Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The table based on the logic is called a partitioned table, and a physical piece is called a partition. Data is stored on these smaller physical pieces, namely, partitions, instead of the larger logical partitioned table. A partitioned table has the following advantages over an ordinary table:

+
  1. High query performance: The system queries only the concerned partitions rather than the whole table, improving the query efficiency.
  2. High availability: If a partition is faulty, data in the other partitions is still available.
  3. Easy maintenance: You only need to fix the faulty partition.
+

GaussDB(DWS) supports range-partitioned tables.

+

Range-partitioned table: Data within a specific range is mapped onto each partition. The range is determined by the partition key specified during the partitioned table creation. The partition key is usually a date. For example, sales data is partitioned by month.
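
A minimal sketch of a range-partitioned table, partitioned by month (names and boundaries are hypothetical):

CREATE TABLE sales_part
(
sale_date date,
amount decimal(15,2)
)
PARTITION BY RANGE (sale_date)
(
PARTITION p202401 VALUES LESS THAN ('2024-02-01'),
PARTITION p202402 VALUES LESS THAN ('2024-03-01'),
PARTITION pmax VALUES LESS THAN (MAXVALUE)
);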

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0444.html b/docs/dws/dev/dws_04_0444.html new file mode 100644 index 00000000..52bff546 --- /dev/null +++ b/docs/dws/dev/dws_04_0444.html @@ -0,0 +1,15 @@ + + +

Selecting a Data Type

+

Use the following principles to obtain efficient data types:

+
  1. Using the data type that can be efficiently executed

    Generally, calculation of integers (including common comparison calculations, such as =, >, <, ≥, ≤, and ≠ and group by) is more efficient than that of strings and floating point numbers. For example, if you need to filter data in a column containing numeric data for a column-store table where point query is performed, the execution takes over 10s. However, the execution time is reduced to 1.8s when you change the data type from NUMERIC to INT.

    +
  2. Using the data type of short length column

    Using the data type with a shorter length reduces both the data file size and the memory used for computing, improving the I/O and computing performance. For example, use SMALLINT instead of INT, and INT instead of BIGINT.

    +
  3. Using the same data type for associated columns

    Use the same data type for associated columns. If columns with different data types are joined, the database must dynamically convert them to the same type for comparison, and this conversion incurs performance overhead. See the sketch after this list.
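
A minimal sketch of the join-column principle (hypothetical tables):

CREATE TABLE t_order (cust_id int, amount decimal(15,2)) DISTRIBUTE BY hash(cust_id);
CREATE TABLE t_cust  (cust_id int, name varchar(25)) DISTRIBUTE BY hash(cust_id);  -- same type as t_order.cust_id, not bigint or varchar
SELECT count(1) FROM t_order o JOIN t_cust c ON o.cust_id = c.cust_id;             -- no implicit cast needed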

    +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0445.html b/docs/dws/dev/dws_04_0445.html new file mode 100644 index 00000000..6662497f --- /dev/null +++ b/docs/dws/dev/dws_04_0445.html @@ -0,0 +1,26 @@ + + +

Typical SQL Optimization Methods

+

SQL optimization involves continuous analysis and adjustment. You need to test-run a query, locate and fix its performance issues (if any) based on its execution plan, and run it again, until the execution performance meets your requirements.

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0446.html b/docs/dws/dev/dws_04_0446.html new file mode 100644 index 00000000..737ff3d4 --- /dev/null +++ b/docs/dws/dev/dws_04_0446.html @@ -0,0 +1,71 @@ + + +

SQL Self-Diagnosis

+

Performance issues may occur when you query data or run the INSERT, DELETE, UPDATE, or CREATE TABLE AS statement. You can query the warning column in the GS_WLM_SESSION_STATISTICS, GS_WLM_SESSION_HISTORY, and GS_WLM_SESSION_INFO views to obtain performance diagnosis information for tuning.

+

Alarms that can trigger SQL self-diagnosis depend on the settings of resource_track_level. If resource_track_level is set to query or perf, you can diagnose alarms indicating that statistics of multiple columns or a single column are not collected or SQL statements are not pushed down. If resource_track_level is set to operator, all alarm scenarios can be diagnosed.

+

Whether an SQL plan will be diagnosed depends on the setting of resource_track_cost. An SQL plan will be diagnosed only if its execution cost is greater than resource_track_cost. You can use the EXPLAIN keyword to check the plan's execution cost.

+

Alarms

Currently, the following performance alarms will be reported:

+ + + + + + + + +
+

Restrictions

  1. An alarm contains a maximum of 2048 characters. If the length of an alarm exceeds this value (for example, a large number of long table names and column names are displayed in the alarm when their statistics are not collected), a warning instead of an alarm will be reported.
    WARNING, "Planner issue report is truncated, the rest of planner issues will be skipped"
    +
  2. If a query statement contains the Limit operator, alarms of operators lower than Limit will not be reported.
  3. For alarms about data skew and inaccurate estimation, only alarms on the lower-layer nodes in a plan tree will be reported. This is because the same alarms on the upper-level nodes may be triggered by problems on the lower-layer nodes. For example, if data skew occurs on the Scan node, data skew may also occur in operators (for example, Hashagg) at the upper layer.
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0447.html b/docs/dws/dev/dws_04_0447.html new file mode 100644 index 00000000..0faf288d --- /dev/null +++ b/docs/dws/dev/dws_04_0447.html @@ -0,0 +1,402 @@ + + +

Optimizing Statement Pushdown

+

Statement Pushdown

Currently, the GaussDB(DWS) optimizer can use three methods to develop statement execution policies in the distributed framework: generating a statement pushdown plan, a distributed execution plan, or a distributed execution plan for sending statements.

+ +

The third policy sends many intermediate results from the DNs to a CN for further execution. In this case, the CN performance bottleneck (in bandwidth, storage, and computing) is caused by statements that cannot be pushed down to DNs. Therefore, you are not advised to use the query statements that only the third policy is applicable to.

+

Statements cannot be pushed down to DNs if they have Functions That Do Not Support Pushdown or Syntax That Does Not Support Pushdown. Generally, you can rewrite the execution statements to solve the problem.

+
+

Viewing Whether the Execution Plan Has Been Pushed Down to DNs

Perform the following procedure to quickly determine whether the execution plan can be pushed down to DNs:

+
  1. Set the GUC parameter enable_fast_query_shipping to off to use the distributed framework policy for the query optimizer.

    SET enable_fast_query_shipping = off;
    +
    + +
    +

  2. View the execution plan.

    If the execution plan contains Data Node Scan, the SQL statements cannot be pushed down to DNs. If the execution plan contains Streaming, the SQL statements can be pushed down to DNs.

    +

    For example:

    +
    select
    count(ss.ss_sold_date_sk order by ss.ss_sold_date_sk)c1 
    from store_sales ss, store_returns sr 
    where 
    sr.sr_customer_sk = ss.ss_customer_sk;
    +
    + +
    +

    The execution plan is as follows, which indicates that the SQL statement cannot be pushed down.

    +

    +
                              QUERY PLAN
    --------------------------------------------------------------------------
     Aggregate
       ->  Hash Join
             Hash Cond: (ss.ss_customer_sk = sr.sr_customer_sk)
             ->  Data Node Scan on store_sales "_REMOTE_TABLE_QUERY_"
                   Node/s: All datanodes
             ->  Hash
                   ->  Data Node Scan on store_returns "_REMOTE_TABLE_QUERY_"
                         Node/s: All datanodes
    (8 rows)
    +

+
+

Syntax That Does Not Support Pushdown

SQL syntax that does not support pushdown is described using the following table definition examples:

+
CREATE TABLE CUSTOMER1
(
    C_CUSTKEY     BIGINT NOT NULL
  , C_NAME        VARCHAR(25) NOT NULL
  , C_ADDRESS     VARCHAR(40) NOT NULL
  , C_NATIONKEY   INT NOT NULL
  , C_PHONE       CHAR(15) NOT NULL
  , C_ACCTBAL     DECIMAL(15,2)   NOT NULL
  , C_MKTSEGMENT  CHAR(10) NOT NULL
  , C_COMMENT     VARCHAR(117) NOT NULL
)
DISTRIBUTE BY hash(C_CUSTKEY);
CREATE TABLE test_stream(a int, b float);  --float does not support redistribution.
CREATE TABLE sal_emp ( c1 integer[] ) DISTRIBUTE BY replication;
+
+ +
+ +
+ +

Functions That Do Not Support Pushdown

This module describes the variability of functions. The function variability in GaussDB(DWS) is as follows:

+ +

The volatility of a function can be obtained by querying its provolatile column in pg_proc. The value i indicates immutable, s indicates stable, and v indicates volatile. The valid values of the proshippable column in pg_proc are t, f, and NULL. This column and the provolatile column together describe whether a function is pushed down.

+ +

For a UDF, you can specify the values of provolatile and proshippable during its creation. For details, see CREATE FUNCTION.

+

In scenarios where a function does not support pushdown, perform one of the following as required:

+ +
+

Example: UDF

If a user-defined function always generates the same output for the same input, define it as the IMMUTABLE type.

+

Take the sales information of TPCDS as an example. If you want to write a function to calculate the discount data of a product, you can define the function as follows:

+
CREATE FUNCTION func_percent_2 (NUMERIC, NUMERIC) RETURNS NUMERIC
AS 'SELECT $1 / $2 WHERE $2 > 0.01'
LANGUAGE SQL
VOLATILE;
+
+ +
+

Run the following statement:

+
SELECT func_percent_2(ss_sales_price, ss_list_price)
FROM store_sales;
+
+ +
+

The execution plan is as follows:

+

+

func_percent_2 is not pushed down, and ss_sales_price and ss_list_price are executed on a CN. In this case, a large amount of resources on the CN is consumed, and the performance deteriorates as a result.

+

In this example, the function returns certain output when certain input is entered. Therefore, we can modify the function to the following one:

+
CREATE FUNCTION func_percent_1 (NUMERIC, NUMERIC) RETURNS NUMERIC
AS 'SELECT $1 / $2 WHERE $2 > 0.01'
LANGUAGE SQL
IMMUTABLE;
+
+ +
+

Run the following statement:

+
SELECT func_percent_1(ss_sales_price, ss_list_price)
FROM store_sales;
+
+ +
+

The execution plan is as follows:

+

+

func_percent_1 is pushed down to DNs for quicker execution. (In TPCDS 1000X, where three CNs and 18 DNs are used, the query efficiency is improved by over 100 times).

+
+

Example 2: Pushing Down the Sorting Operation

For details, see Case: Pushing Down Sort Operations to DNs.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0448.html b/docs/dws/dev/dws_04_0448.html new file mode 100644 index 00000000..848b0cc0 --- /dev/null +++ b/docs/dws/dev/dws_04_0448.html @@ -0,0 +1,591 @@ + + +

Optimizing Subqueries

+

What Is a Subquery

When an application runs SQL statements to operate the database, a large number of subqueries are used because they are clearer than table joins. Especially in complicated query statements, subqueries have more complete and independent semantics, making the SQL statements clearer and easier to understand. Therefore, subqueries are widely used.

+

In GaussDB(DWS), subqueries can also be called sublinks based on the location of subqueries in SQL statements.

+ +
+

GaussDB(DWS) SubLink Optimization

A subquery is pulled up to join with tables in outer queries, preventing the subquery from being converted into the combination of a subplan and broadcast. You can run the EXPLAIN statement to check whether a subquery is converted into the combination of a subplan and broadcast.

+

Example:

+

+ + +
+

More Optimization Examples

1. Change the base table to a replication table and create an index on the filter column.

+
create table master_table (a int);
create table sub_table(a int, b int);
select a from master_table group by a having a in (select a from sub_table);
+
+ +
+

In this example, a correlated subquery is contained. To improve the query performance, you can change sub_table to a replication table and create an index on the a column.

+
+

2. Modify the SELECT statement, change the subquery to a JOIN relationship between the primary table and the parent query, or modify the subquery to improve the query performance. Ensure that the subquery to be used is semantically correct.

+
explain (costs off)select * from master_table as t1 where t1.a in (select t2.a from sub_table as t2 where t1.a = t2.b);
                        QUERY PLAN
----------------------------------------------------------
 Streaming (type: GATHER)
   Node/s: All datanodes
   ->  Seq Scan on master_table t1
         Filter: (SubPlan 1)
         SubPlan 1
           ->  Result
                 Filter: (t1.a = t2.b)
                 ->  Materialize
                       ->  Streaming(type: BROADCAST)
                             Spawn on: All datanodes
                             ->  Seq Scan on sub_table t2
(11 rows)
+
+ +
+

In the preceding example, a subplan is used. To remove the subplan, you can modify the statement as follows:

+
explain(costs off) select * from master_table as t1 where exists (select t2.a from sub_table as t2 where t1.a = t2.b and t1.a = t2.a);
                    QUERY PLAN
--------------------------------------------------
 Streaming (type: GATHER)
   Node/s: All datanodes
   ->  Hash Semi Join
         Hash Cond: (t1.a = t2.b)
         ->  Seq Scan on master_table t1
         ->  Hash
               ->  Streaming(type: REDISTRIBUTE)
                     Spawn on: All datanodes
                     ->  Seq Scan on sub_table t2
(9 rows)
+
+ +
+

In this way, the subplan is replaced by the semi-join between the two tables, greatly improving the execution efficiency.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0449.html b/docs/dws/dev/dws_04_0449.html new file mode 100644 index 00000000..2662a670 --- /dev/null +++ b/docs/dws/dev/dws_04_0449.html @@ -0,0 +1,399 @@ + + +

Optimizing Statistics

+

What Is Statistic Optimization

GaussDB(DWS) generates optimal execution plans based on cost estimation. The optimizer estimates the number of data rows and the cost based on statistics collected using ANALYZE, so statistics are vital for these estimations. Global statistics are collected using ANALYZE: relpages and reltuples in the pg_class catalog; stadistinct, stanullfrac, stanumbersN, stavaluesN, and histogram_bounds in the pg_statistic catalog.

+
+

Example 1: Poor Query Performance Due to the Lack of Statistics

In most cases, the lack of statistics in tables or columns involved in the query greatly affects the query performance.

+

The table structure is as follows:

+
CREATE TABLE LINEITEM
(
L_ORDERKEY         BIGINT        NOT NULL
, L_PARTKEY        BIGINT        NOT NULL
, L_SUPPKEY        BIGINT        NOT NULL
, L_LINENUMBER     BIGINT        NOT NULL
, L_QUANTITY       DECIMAL(15,2) NOT NULL
, L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL
, L_DISCOUNT       DECIMAL(15,2) NOT NULL
, L_TAX            DECIMAL(15,2) NOT NULL
, L_RETURNFLAG     CHAR(1)       NOT NULL
, L_LINESTATUS     CHAR(1)       NOT NULL
, L_SHIPDATE       DATE          NOT NULL
, L_COMMITDATE     DATE          NOT NULL
, L_RECEIPTDATE    DATE          NOT NULL
, L_SHIPINSTRUCT   CHAR(25)      NOT NULL
, L_SHIPMODE       CHAR(10)      NOT NULL
, L_COMMENT        VARCHAR(44)   NOT NULL
) with (orientation = column, COMPRESSION = MIDDLE) distribute by hash(L_ORDERKEY);

CREATE TABLE ORDERS
(
O_ORDERKEY        BIGINT        NOT NULL
, O_CUSTKEY       BIGINT        NOT NULL
, O_ORDERSTATUS   CHAR(1)       NOT NULL
, O_TOTALPRICE    DECIMAL(15,2) NOT NULL
, O_ORDERDATE     DATE NOT NULL
, O_ORDERPRIORITY CHAR(15)      NOT NULL
, O_CLERK         CHAR(15)      NOT NULL
, O_SHIPPRIORITY  BIGINT        NOT NULL
, O_COMMENT       VARCHAR(79)   NOT NULL
)with (orientation = column, COMPRESSION = MIDDLE) distribute by hash(O_ORDERKEY);
+
+ +
+

The query statements are as follows:

+
explain verbose select
+count(*) as numwait 
+from
+lineitem l1,
+orders 
+where
+o_orderkey = l1.l_orderkey
+and o_orderstatus = 'F'
+and l1.l_receiptdate > l1.l_commitdate
+and not exists (
+select
+*
+from
+lineitem l3
+where
+l3.l_orderkey = l1.l_orderkey
+and l3.l_suppkey <> l1.l_suppkey
+and l3.l_receiptdate > l3.l_commitdate
+)
+order by
+numwait desc;
+
+ +
+

If such an issue occurs, you can use the following methods to check whether statistics in the tables or columns have been collected using ANALYZE.

+
  1. Execute EXPLAIN VERBOSE to analyze the execution plan and check the warning information:
    WARNING:Statistics in some tables or columns(public.lineitem.l_receiptdate, public.lineitem.l_commitdate, public.lineitem.l_orderkey, public.lineitem.l_suppkey, public.orders.o_orderstatus, public.orders.o_orderkey) are not collected.
    +HINT:Do analyze for them in order to generate optimized plan.
    +
  2. Check whether the following information exists in the log file in the pg_log directory. If it does, the poor query performance was caused by the lack of statistics in some tables or columns.
    2017-06-14 17:28:30.336 CST 140644024579856 20971684 [BACKEND] LOG:Statistics in some tables or columns(public.lineitem.l_receiptdate, public.lineitem.l_commitdate, public.lineitem.l_orderkey, public.linei
    +tem.l_suppkey, public.orders.o_orderstatus, public.orders.o_orderkey) are not collected.
    +2017-06-14 17:28:30.336 CST 140644024579856 20971684 [BACKEND] HINT:Do analyze for them in order to generate optimized plan.
    +
+

By using either of the preceding methods, you can identify the tables or columns whose statistics have not been collected using ANALYZE. Execute ANALYZE on the tables and columns reported in the warnings or recorded in the logs to resolve the problem.

+
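For example, based on the warning above, column-level ANALYZE statements such as the following resolve the problem (a sketch; analyzing the whole table also works but takes longer):

-- Collect statistics only for the columns reported in the warning.
ANALYZE lineitem (l_receiptdate, l_commitdate, l_orderkey, l_suppkey);
ANALYZE orders (o_orderstatus, o_orderkey);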
+

Example 2: Setting cost_param to Optimize Query Performance

For details, see Case: Configuring cost_param for Better Query Performance.

+
+

Example 3: Optimization is Not Accurate When Intermediate Results Exist in the Query Where JOIN Is Used for Multiple Tables

Symptom: Query the personnel who have checked in an Internet cafe within 15 minutes before and after the check-in of a specified person.

+
SELECT
+C.WBM,
+C.DZQH,
+C.DZ,
+B.ZJHM,
+B.SWKSSJ,
+B.XWSJ
+FROM
+b_zyk_wbswxx A,
+b_zyk_wbswxx B,
+b_zyk_wbcs C
+WHERE
+A.ZJHM = '522522******3824'
+AND A.WBDM = B.WBDM
+AND A.WBDM = C.WBDM
+AND abs(to_date(A.SWKSSJ,'yyyymmddHH24MISS') - to_date(B.SWKSSJ,'yyyymmddHH24MISS')) < INTERVAL '15 MINUTES'
+ORDER BY
+B.SWKSSJ,
+B.ZJHM
+limit 10 offset 0
+;
+
+ +
+

Figure 1 shows the execution plan. This query takes about 12s.

+
Figure 1 Using an unlogged table (1)
+

+

Optimization analysis:

+
  1. In the execution plan, index scan is used for node scanning. The Join Filter calculation in the outer NEST LOOP IN operator consumes most of the query time, because the filter involves string addition and subtraction and unequal-value comparisons.
  2. Use an unlogged table to record the Internet access time of the specified person. The start time and end time are processed during data insertion, and this reduces subsequent addition and subtraction operations.
    //Create a temporary unlogged table.
    +CREATE UNLOGGED TABLE temp_tsw
    +(
    +ZJHM         NVARCHAR2(18),
    +WBDM         NVARCHAR2(14),
    +SWKSSJ_START NVARCHAR2(14),
    +SWKSSJ_END   NVARCHAR2(14),
    +WBM          NVARCHAR2(70),
    +DZQH         NVARCHAR2(6),
    +DZ           NVARCHAR2(70),
    +IPDZ         NVARCHAR2(39)
    +)
    +;
    +//Insert the Internet access record of the specified person, and process the start time and end time.
    +INSERT INTO
    +temp_tsw
    +SELECT
    +A.ZJHM,
    +A.WBDM,
    +to_char((to_date(A.SWKSSJ,'yyyymmddHH24MISS') - INTERVAL '15 MINUTES'),'yyyymmddHH24MISS'),
    +to_char((to_date(A.SWKSSJ,'yyyymmddHH24MISS') + INTERVAL '15 MINUTES'),'yyyymmddHH24MISS'),
    +B.WBM,B.DZQH,B.DZ,B.IPDZ
    +FROM
    +b_zyk_wbswxx A,
    +b_zyk_wbcs B
    +WHERE
    +A.ZJHM='522522******3824' AND A.WBDM = B.WBDM
    +;
    +
+//Query the personnel who checked in an Internet cafe within 15 minutes before or after the check-in of the specified person. Convert the check-in time to int8 for comparison.
    +SELECT
    +A.WBM,
    +A.DZQH,
    +A.DZ,
    +A.IPDZ,
    +B.ZJHM,
    +B.XM,
    +to_date(B.SWKSSJ,'yyyymmddHH24MISS') as SWKSSJ,
    +to_date(B.XWSJ,'yyyymmddHH24MISS') as XWSJ,
    +B.SWZDH
    +FROM temp_tsw A,
    +b_zyk_wbswxx B
    +WHERE
    +A.ZJHM <> B.ZJHM
    +AND A.WBDM = B.WBDM
    +AND (B.SWKSSJ)::int8 > (A.swkssj_start)::int8
    +AND (B.SWKSSJ)::int8 < (A.swkssj_end)::int8
    +order by
    +B.SWKSSJ,
    +B.ZJHM
    +limit 10 offset 0
    +;
    +
    + +
    +

    The query takes about 7s. Figure 2 shows the execution plan.

    +
    Figure 2 Using an unlogged table (2)
    +
  3. In the preceding plan, a hash join is used, and a hash table is created for the large table b_zyk_wbswxx. Because the table contains a large amount of data, creating the hash table takes a long time.

    temp_tsw contains only hundreds of records and joins b_zyk_wbswxx through an equi-join on wbdm (the Internet cafe code). Therefore, if the join is changed to NEST LOOP JOIN, index scan can be used for node scanning, and the performance will be boosted.

    +
  4. Execute the following statement to change JOIN to NEST LOOP JOIN.
    SET enable_hashjoin = off;
    +
    + +
    +

    Figure 3 shows the execution plan. The query takes about 3s.

    +
    Figure 3 Using an unlogged table (3)
    +
  5. Save the query result set in the unlogged table for paging display.

    If paging display is required on the upper-layer application page, the offset value is changed to fetch the result set of the target page. In that case, the previous query statement is executed again after every page-turning operation, causing long response latency.

    +

    To resolve this problem, you are advised to use the unlogged table to save the result set.

    +
    //Create an unlogged table to save the result set.
    +CREATE UNLOGGED TABLE temp_result
    +(
    +WBM      NVARCHAR2(70),
    +DZQH     NVARCHAR2(6),
    +DZ       NVARCHAR2(70),
    +IPDZ     NVARCHAR2(39),
    +ZJHM     NVARCHAR2(18),
    +XM       NVARCHAR2(30),
    +SWKSSJ   date,
    +XWSJ     date,
    +SWZDH    NVARCHAR2(32)
    +);
    +
    +//Insert the result set to the unlogged table. The insertion takes about 3s.
    +INSERT INTO
    +temp_result
    +SELECT
    +A.WBM,
    +A.DZQH,
    +A.DZ,
    +A.IPDZ,
    +B.ZJHM,
    +B.XM,
    +to_date(B.SWKSSJ,'yyyymmddHH24MISS') as SWKSSJ,
    +to_date(B.XWSJ,'yyyymmddHH24MISS') as XWSJ,
    +B.SWZDH
    +FROM temp_tsw A,
    +b_zyk_wbswxx B
    +WHERE
    +A.ZJHM <> B.ZJHM
    +AND A.WBDM = B.WBDM
    +AND (B.SWKSSJ)::int8 > (A.swkssj_start)::int8
    +AND (B.SWKSSJ)::int8 < (A.swkssj_end)::int8
    +;
    +
    +//Perform paging query on the result set. The paging query takes about 10 ms.
    +SELECT
    +*
    +FROM
    +temp_result
    +ORDER BY
    +SWKSSJ,
    +ZJHM
    +LIMIT 10 OFFSET 0;
    +
    + +
    +

    Collecting global statistics using ANALYZE improves query performance.

    +

    If a performance problem occurs, you can use plan hints to restore the earlier query plan. For details, see Hint-based Tuning.

    +
    +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0450.html b/docs/dws/dev/dws_04_0450.html new file mode 100644 index 00000000..058ea2cd --- /dev/null +++ b/docs/dws/dev/dws_04_0450.html @@ -0,0 +1,63 @@ + + +

Optimizing Operators

+

What Is Operator Optimization

A query statement goes through multiple operator procedures to generate the final result. Sometimes, the overall query performance deteriorates due to the long execution time of certain operators, which are regarded as bottleneck operators. In this case, execute the EXPLAIN ANALYZE or EXPLAIN PERFORMANCE command to identify the bottleneck operators, and then optimize them.

+
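A minimal sketch of the command (t1 is a hypothetical table):

-- EXPLAIN PERFORMANCE prints per-operator execution times, so the
-- operator that consumes the most time can be identified directly.
EXPLAIN PERFORMANCE SELECT b, count(*) FROM t1 GROUP BY b;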

For example, in the following execution process, the execution time of the Hashagg operator accounts for about 66% [(51016-13535)/56476 ≈ 66%] of the total execution time. Therefore, the Hashagg operator is the bottleneck operator for this query. Optimize this operator first.

+

+
+

Operator Optimization Example

1. Scan the base table. For queries that filter out a large proportion of data, such as point queries or small-range queries, a full table scan using SeqScan takes a long time. You are advised to create an index on the filter column and use IndexScan for index-based scanning.

+
 explain (analyze on, costs off) select * from store_sales where ss_sold_date_sk = 2450944;
+ id |             operation          |       A-time        | A-rows | Peak Memory  | A-width
+----+--------------------------------+---------------------+--------+--------------+---------
+  1 | ->  Streaming (type: GATHER)   | 3666.020            |   3360 | 195KB        |
+  2 |    ->  Seq Scan on store_sales | [3594.611,3594.611] |   3360 | [34KB, 34KB] |
+
+ Predicate Information (identified by plan id) 
+-----------------------------------------------
+   2 --Seq Scan on store_sales
+         Filter: (ss_sold_date_sk = 2450944)
+         Rows Removed by Filter: 4968936
+
+ +
+
 create index idx on store_sales_row(ss_sold_date_sk);
+CREATE INDEX
+ explain (analyze on, costs off) select * from store_sales_row where ss_sold_date_sk = 2450944;
+ id |                   operation                    |     A-time      | A-rows | Peak Memory  | A-width
+----+------------------------------------------------+-----------------+--------+--------------+----------
+  1 | ->  Streaming (type: GATHER)                   | 81.524          |   3360 | 195KB        |
+  2 |    ->  Index Scan using idx on store_sales_row | [13.352,13.352] |   3360 | [34KB, 34KB] |
+
+ +
+

In this example, the full table scan filters out most of the data and returns 3360 records. After an index is created on the ss_sold_date_sk column, IndexScan reduces the scan time from 3.6s to 13 ms.

+

2. If NestLoop is used for joining tables with a large number of rows, the join may take a long time. In the following example, NestLoop takes 181s. After enable_mergejoin=off is set to disable merge join and enable_nestloop=off is set to disable NestLoop so that the optimizer selects hash join, the join takes just over 200 ms.

+

+

+

3. Generally, query performance can be improved by selecting HashAgg. If Sort and GroupAgg are used for a large result set, set enable_sort to off so that the optimizer prefers HashAgg, which consumes less time than Sort plus GroupAgg.

+

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0451.html b/docs/dws/dev/dws_04_0451.html new file mode 100644 index 00000000..8450091a --- /dev/null +++ b/docs/dws/dev/dws_04_0451.html @@ -0,0 +1,297 @@ + + +

Optimizing Data Skew

+

Data skew breaks the balance among nodes in the distributed MPP architecture. If the amount of data stored or processed by a node is much greater than that by other nodes, the following problems may occur:

• Storage skew: the cluster's effective capacity is limited by the node storing the most data, and space on other nodes is wasted.
• Computing skew: the overall query performance is determined by the slowest node, prolonging response time.

GaussDB(DWS) provides a complete solution for data skew, including storage and computing skew.

+

Data Skew in the Storage Layer

In the GaussDB(DWS) database, data is distributed and stored across DNs, and distributed execution improves query efficiency. However, if data skew occurs, some DNs become bottlenecks during distributed execution, affecting query performance. Data skew is usually caused by an improperly selected distribution column and can be solved by adjusting the distribution column.

+

For example:

+
explain performance select count(*) from inventory;
+5 --CStore Scan on lmz.inventory
+         dn_6001_6002 (actual time=0.444..83.127 rows=42000000 loops=1)
+         dn_6003_6004 (actual time=0.512..63.554 rows=27000000 loops=1)
+         dn_6005_6006 (actual time=0.722..99.033 rows=45000000 loops=1)
+         dn_6007_6008 (actual time=0.529..100.379 rows=51000000 loops=1)
+         dn_6009_6010 (actual time=0.382..71.341 rows=36000000 loops=1)
+         dn_6011_6012 (actual time=0.547..100.274 rows=51000000 loops=1)
+         dn_6013_6014 (actual time=0.596..118.289 rows=60000000 loops=1)
+         dn_6015_6016 (actual time=1.057..132.346 rows=63000000 loops=1)
+         dn_6017_6018 (actual time=0.940..110.310 rows=54000000 loops=1)
+         dn_6019_6020 (actual time=0.231..41.198 rows=21000000 loops=1)
+         dn_6021_6022 (actual time=0.927..114.538 rows=54000000 loops=1)
+         dn_6023_6024 (actual time=0.637..118.385 rows=60000000 loops=1)
+         dn_6025_6026 (actual time=0.288..32.240 rows=15000000 loops=1)
+         dn_6027_6028 (actual time=0.566..118.096 rows=60000000 loops=1)
+         dn_6029_6030 (actual time=0.423..82.913 rows=42000000 loops=1)
+         dn_6031_6032 (actual time=0.395..78.103 rows=39000000 loops=1)
+         dn_6033_6034 (actual time=0.376..51.052 rows=24000000 loops=1)
+         dn_6035_6036 (actual time=0.569..79.463 rows=39000000 loops=1)
+
+ +
+

In the performance information, you can view the number of rows scanned on each DN of the inventory table. The row counts differ greatly across DNs, from 15000000 to 63000000. This difference is acceptable for scan performance alone, but if a join operator exists at the upper layer, the impact on join performance cannot be ignored.

+

Generally, the data table is hash distributed on each DN; therefore, it is important to choose a proper distribution column. Run table_skewness() to view data skew of each DN in the inventory table. The query result is as follows:

+
select table_skewness('inventory');
+              table_skewness              
+------------------------------------------
+ ("dn_6015_6016        ",63000000,8.046%)
+ ("dn_6013_6014        ",60000000,7.663%)
+ ("dn_6023_6024        ",60000000,7.663%)
+ ("dn_6027_6028        ",60000000,7.663%)
+ ("dn_6017_6018        ",54000000,6.897%)
+ ("dn_6021_6022        ",54000000,6.897%)
+ ("dn_6007_6008        ",51000000,6.513%)
+ ("dn_6011_6012        ",51000000,6.513%)
+ ("dn_6005_6006        ",45000000,5.747%)
+ ("dn_6001_6002        ",42000000,5.364%)
+ ("dn_6029_6030        ",42000000,5.364%)
+ ("dn_6031_6032        ",39000000,4.981%)
+ ("dn_6035_6036        ",39000000,4.981%)
+ ("dn_6009_6010        ",36000000,4.598%)
+ ("dn_6003_6004        ",27000000,3.448%)
+ ("dn_6033_6034        ",24000000,3.065%)
+ ("dn_6019_6020        ",21000000,2.682%)
+ ("dn_6025_6026        ",15000000,1.916%)
+(18 rows)
+
+ +
+

The table definition indicates that the table uses the inv_date_sk column as the distribution column, which causes data skew. Based on the data distribution of each column, change the distribution column to inv_item_sk. The skew status is then as follows:

+
select table_skewness('inventory');
+              table_skewness              
+------------------------------------------
+ ("dn_6001_6002        ",43934200,5.611%)
+ ("dn_6007_6008        ",43829420,5.598%)
+ ("dn_6003_6004        ",43781960,5.592%)
+ ("dn_6031_6032        ",43773880,5.591%)
+ ("dn_6033_6034        ",43763280,5.589%)
+ ("dn_6011_6012        ",43683600,5.579%)
+ ("dn_6013_6014        ",43551660,5.562%)
+ ("dn_6027_6028        ",43546340,5.561%)
+ ("dn_6009_6010        ",43508700,5.557%)
+ ("dn_6023_6024        ",43484540,5.554%)
+ ("dn_6019_6020        ",43466800,5.551%)
+ ("dn_6021_6022        ",43458500,5.550%)
+ ("dn_6017_6018        ",43448040,5.549%)
+ ("dn_6015_6016        ",43247700,5.523%)
+ ("dn_6005_6006        ",43200240,5.517%)
+ ("dn_6029_6030        ",43181360,5.515%)
+ ("dn_6025_6026        ",43179700,5.515%)
+ ("dn_6035_6036        ",42960080,5.487%)
+(18 rows)
+
+ +
+

Data skew is solved.

+
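Depending on the cluster version, the distribution column can be changed by rebuilding the table or, in newer GaussDB(DWS) versions, in place; a sketch of the in-place form:

-- Change the distribution column (supported in newer versions).
ALTER TABLE inventory DISTRIBUTE BY HASH (inv_item_sk);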

In addition to the table_skewness() view, you can use the table_distribution function and the PGXC_GET_TABLE_SKEWNESS view to efficiently query the data skew of each table.

+
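A sketch of both interfaces:

-- Query the data distribution of a single table across DNs.
SELECT table_distribution('public', 'inventory');
-- Query the skew of all tables, sorted by skew ratio.
SELECT schemaname, tablename, totalsize, skewratio
FROM pgxc_get_table_skewness
ORDER BY skewratio DESC;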
+

Data Skew in the Computing Layer

Even if data is balanced across nodes after you change the distribution key of a table, data skew may still occur during queries. If the result set of an operator on a DN is skewed, the computation that consumes this operator's output is also skewed. Generally, this is caused by data redistribution during execution.

+

During a query, if the JOIN keys or GROUP BY keys are not the distribution columns, data is redistributed among DNs based on the hash values of the keys. The redistribution is implemented by the Redistribute operator in the execution plan. Data skew in the redistribution columns leads to data skew during execution: after redistribution, some nodes hold much more data, process more data, and therefore perform much worse than others.

+

In the following example, the s and t tables are joined, and the s.x and t.x columns in the join condition are not distribution keys, so table data is redistributed using the REDISTRIBUTE operator. Data skew occurs in the s.x column but not in the t.x column. The result set of the Streaming operator (id 6) on datanode2 is about three times that of the other DNs, causing a skew.

+
select * from skew s,test t where s.x = t.x order by s.a limit 1;
+
+ +
+
 id |                      operation                      |        A-time         
+----+-----------------------------------------------------+-----------------------
+  1 | ->  Limit                                           | 52622.382             
+  2 |    ->  Streaming (type: GATHER)                     | 52622.374             
+  3 |       ->  Limit                                     | [30138.494,52598.994] 
+  4 |          ->  Sort                                   | [30138.486,52598.986] 
+  5 |             ->  Hash Join (6,8)                     | [30127.013,41483.275] 
+  6 |                ->  Streaming(type: REDISTRIBUTE)    | [11365.110,22024.845] 
+  7 |                   ->  Seq Scan on public.skew s     | [2019.168,2175.369]   
+  8 |                ->  Hash                             | [2460.108,2499.850]   
+  9 |                   ->  Streaming(type: REDISTRIBUTE) | [1056.214,1121.887]   
+ 10 |                      ->  Seq Scan on public.test t  | [310.848,325.569]     
+ 
+6 --Streaming(type: REDISTRIBUTE)
+         datanode1 (rows=5050368)
+         datanode2 (rows=15276032)
+         datanode3 (rows=5174272)
+         datanode4 (rows=5219328)
+

It is more difficult to detect skew in computing than in storage. To solve computing skew, GaussDB(DWS) provides the Runtime Load Balance Technology (RLBT) solution, which is controlled by the skew_option parameter. RLBT addresses both how to detect and how to solve data skew.

+
  1. Detect data skew.

    The solution first checks whether skew data exists in redistribution columns used for computing. RLBT can detect data skew based on statistics, specified hints, or rules.

    +
    • Detection based on statistics

      Run the ANALYZE statement to collect statistics on tables. The optimizer will automatically identify skew data on redistribution keys based on the statistics and generate optimization plans for queries having potential skew. When the redistribution key has multiple columns, statistics information can be used for identification only when all columns belong to the same base table.

      +

      The statistics can only describe the skew of the base table. When a column of the base table is skewed but other columns have filter conditions or the table is joined with other tables, it cannot be determined whether the skew still exists in the column afterwards. If skew_option is set to normal, the optimizer assumes that the skew persists and optimizes the query accordingly. If skew_option is set to lazy, the optimizer assumes that the skew no longer exists and stops the optimization.

      +
    • Detection based on specified hints

      The intermediate results of complex queries are difficult to estimate based on statistics. In this case, you can specify hints to provide the skew information, based on which the optimizer optimizes queries. For details about the syntax of hints, see Skew Hints.

      +
    • Detection based on rules

      In a business intelligence (BI) system, a large number of SQL statements having outer joins (including left joins, right joins, and full joins) are generated, and many NULL values will be generated in empty columns that have no match for outer joins. If JOIN or GROUP BY operations are performed on the columns, data skew will occur. RLBT can automatically identify this scenario and generate an optimization plan for NULL value skew.

      +
    +
  2. Solve computing skew.
    Join and Aggregate operators are optimized to solve skew.
    • Join optimization
    +
    +

    Skew and non-skew data is separately processed. Details are as follows:

    +
    1. When redistribution is required on both sides of a join:

      Use PART_REDISTRIBUTE_PART_ROUNDROBIN on the side with skew. Specifically, perform round-robin on skew data and redistribution on non-skew data.

      +

      Use PART_REDISTRIBUTE_PART_BROADCAST on the side with no skew. Specifically, perform broadcast on skew data and redistribution on non-skew data.

      +
    2. When redistribution is required on only one side of a join:

      Use PART_REDISTRIBUTE_PART_ROUNDROBIN on the side where redistribution is required.

      +

      Use PART_LOCAL_PART_BROADCAST on the side where redistribution is not required. Specifically, perform broadcast on skew data and retain other data locally.

      +
    3. When a table has NULL values padded:

      Use PART_REDISTRIBUTE_PART_LOCAL on the table. Specifically, retain the NULL values locally and perform redistribution on other data.

      +
    +

    In the example query, the s.x column contains skewed data and its value is 0. The optimizer identifies the skew data in statistics and generates the following optimization plan:

    +
     id |                                operation                                |        A-time         
    +----+-------------------------------------------------------------------------+-----------------------
    +  1 | ->  Limit                                                               | 23642.049             
    +  2 |    ->  Streaming (type: GATHER)                                         | 23642.041             
    +  3 |       ->  Limit                                                         | [23310.768,23618.021] 
    +  4 |          ->  Sort                                                       | [23310.761,23618.012] 
    +  5 |             ->  Hash Join (6,8)                                         | [20898.341,21115.272] 
    +  6 |                ->  Streaming(type: PART REDISTRIBUTE PART ROUNDROBIN)   | [7125.834,7472.111]   
    +  7 |                   ->  Seq Scan on public.skew s                         | [1837.079,1911.025]   
    +  8 |                ->  Hash                                                 | [2612.484,2640.572]   
    +  9 |                   ->  Streaming(type: PART REDISTRIBUTE PART BROADCAST) | [1193.548,1297.894]   
    + 10 |                      ->  Seq Scan on public.test t                      | [314.343,328.707]     
    +
    +   5 --Vector Hash Join (6,8)
    +         Hash Cond: s.x = t.x
+         Skew Join Optimized by Statistic
    +   6 --Streaming(type: PART REDISTRIBUTE PART ROUNDROBIN)
    +         datanode1 (rows=7635968)
    +         datanode2 (rows=7517184)
    +         datanode3 (rows=7748608)
    +         datanode4 (rows=7818240)
    +

    In the preceding execution plan, Skew Join Optimized by Statistic indicates that this is an optimized plan used for handling data skew. The Statistic keyword indicates that the plan optimization is based on statistics; Hint indicates that the optimization is based on hints; Rule indicates that the optimization is based on rules. In this plan, skew and non-skew data is separately processed. Non-skew data in the s table is redistributed based on its hash values, and skew data (whose value is 0) is evenly distributed on all nodes in round-robin mode. In this way, data skew is solved.

    +

    To ensure result correctness, the t table also needs to be processed. In the t table, the data whose value is 0 (skew value in the s.x table) is broadcast and other data is redistributed based on its hash values.

    +

    In this way, data skew in JOIN operations is solved. The above result shows that the output of the Streaming operator (id being 6) is balanced and the end-to-end performance of the query is doubled.

    +

    If the stream operator type in the execution plan is HYBRID, the stream mode varies depending on the skew data. The following plan is an example:

    +
    EXPLAIN (nodes OFF, costs OFF) SELECT COUNT(*) FROM skew_scol s, skew_scol1 s1 WHERE s.b = s1.c;
    +QUERY PLAN
    +------------------------------------------------------------------------------------------------------------------------------------------------------------------
    +id |                                                                         operation
    +----+-----------------------------------------------------------------------------------------------------------------------------------------------------------
    +1 | ->  Aggregate
    +2 |    ->  Streaming (type: GATHER)
    +3 |       ->  Aggregate
    +4 |          ->  Hash Join (5,7)
    +5 |             ->  Streaming(type: HYBRID)
    +6 |                ->  Seq Scan on skew_scol s
    +7 |             ->  Hash
    +8 |                ->  Streaming(type: HYBRID)
    +9 |                   ->  Seq Scan on skew_scol1 s1
    +
    +Predicate Information (identified by plan id)
    +--------------------------------------------------------------------------------------------------------------------------------------------
    +4 --Hash Join (5,7)
    +Hash Cond: (s.b = s1.c)
    +Skew Join Optimized by Statistic
    +5 --Streaming(type: HYBRID)
    +Skew Filter: (b = 1)
    +Skew Filter: (b = 0)
    +8 --Streaming(type: HYBRID)
    +Skew Filter: (c = 0)
    +Skew Filter: (c = 1)
    +

    Value 1 is skewed in the skew_scol table: round-robin is performed on the skew data and redistribution on the non-skew data.

    +

    Value 0 is the skew value from the other side and is not skewed in the skew_scol table: broadcast is performed on the data matching that value and redistribution on the rest.

    +

    As shown in the preceding plan, the two stream modes involved are PART REDISTRIBUTE PART ROUNDROBIN and PART REDISTRIBUTE PART BROADCAST, so the stream type is displayed as HYBRID.

    +
    • Aggregate optimization
    +

    For aggregation, data on each DN is first deduplicated based on the GROUP BY key and then redistributed. After the deduplication on DNs, the global number of occurrences of each value does not exceed the number of DNs, so no serious data skew can occur. Take the following query as an example:

    +
    select c1, c2, c3, c4, c5, c6, c7, c8, c9, count(*) from t group by c1, c2, c3, c4, c5, c6, c7, c8, c9 limit 10;
    +
    + +
    +

    The command output is as follows:

    +
     id |                 operation                  |         A-time         |  A-rows  
    +----+--------------------------------------------+------------------------+----------
    +  1 | ->  Streaming (type: GATHER)               | 130621.783             |       12 
    +  2 |    ->  GroupAggregate                      | [85499.711,130432.341] |       12 
    +  3 |       ->  Sort                             | [85499.509,103145.632] | 36679237 
    +  4 |          ->  Streaming(type: REDISTRIBUTE) | [25668.897,85499.050]  | 36679237 
    +  5 |             ->  Seq Scan on public.t       | [9835.069,10416.388]   | 36679237 
    +
    +   4 --Streaming(type: REDISTRIBUTE)
    +         datanode1 (rows=36678837)
    +         datanode2 (rows=100)
    +         datanode3 (rows=100)
    +         datanode4 (rows=200)
    +

    A large amount of skew data exists. As a result, after data is redistributed based on the GROUP BY key, the data volume on datanode1 is hundreds of thousands of times that of the other DNs. After optimization, a GROUP BY operation is performed on each DN first to deduplicate data, so no data skew occurs after redistribution.

    +
     id |                 operation                  |        A-time          
    +----+--------------------------------------------+-----------------------
    +  1 | ->  Streaming (type: GATHER)               | 10961.337             
    +  2 |    ->  HashAggregate                       | [10953.014,10953.705] 
    +  3 |       ->  HashAggregate                    | [10952.957,10953.632] 
    +  4 |          ->  Streaming(type: REDISTRIBUTE) | [10952.859,10953.502] 
    +  5 |             ->  HashAggregate              | [10084.280,10947.139] 
    +  6 |                ->  Seq Scan on public.t    | [4757.031,5201.168]   
    +
    + Predicate Information (identified by plan id) 
    +-----------------------------------------------
    +   3 --HashAggregate
    +         Skew Agg Optimized by Statistic
    +
    +   4 --Streaming(type: REDISTRIBUTE)
    +         datanode1 (rows=17)
    +         datanode2 (rows=8)
    +         datanode3 (rows=8)
    +         datanode4 (rows=14)
    +

    Applicable scope

    +
    • Join operator
      • nest loop, merge join, and hash join can be optimized.
      • If the skew data is on the left side of the join, inner join, left join, semi join, and anti join are supported. If the skew data is on the right side, inner join, right join, right semi join, and right anti join are supported.
      • For an optimization plan generated based on statistics, the optimizer checks whether it is optimal by estimating its cost. Optimization plans based on hints or rules are forcibly generated.
      +
    • Aggregate operator
      • array_agg, string_agg, and subplan in agg qual cannot be optimized.
      • A plan generated based on statistics is affected by its cost, the plan_mode_seed parameter, and the best_agg_plan parameter. A plan generated based on hints or rules is not affected by them.
      +
    +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0452.html b/docs/dws/dev/dws_04_0452.html new file mode 100644 index 00000000..14171a54 --- /dev/null +++ b/docs/dws/dev/dws_04_0452.html @@ -0,0 +1,28 @@ + + +

Experience in Rewriting SQL Statements

+
Based on the database SQL execution mechanism and extensive practice, we find that rewriting SQL statements according to certain rules, while keeping the query results correct, can improve SQL execution efficiency. You can comply with these rules to greatly improve service query efficiency.
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0453.html b/docs/dws/dev/dws_04_0453.html new file mode 100644 index 00000000..f9a119f0 --- /dev/null +++ b/docs/dws/dev/dws_04_0453.html @@ -0,0 +1,101 @@ + + +

Adjusting Key Parameters During SQL Tuning

+

This section describes the key CN parameters that affect GaussDB(DWS) SQL tuning performance. For details about how to configure these parameters, see Configuring GUC Parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 CN parameters

Parameter/Reference Value

+

Description

+

enable_nestloop=on

+

Specifies how the optimizer uses Nest Loop Join. If this parameter is set to on, the optimizer preferentially uses Nest Loop Join. If it is set to off, the optimizer preferentially uses other methods, if any.

+
NOTE:

To temporarily change the value of this parameter in the current database connection (that is, the current session), run the following SQL statement:

+
SET enable_nestloop to off;
+
+ +
+
+

By default, this parameter is set to on. Change the value as required. Generally, nested loop join has the poorest performance among the three JOIN methods (nested loop join, merge join, and hash join). You are advised to set this parameter to off.

+

enable_bitmapscan=on

+

Specifies whether the optimizer uses bitmap scanning. If the value is on, bitmap scanning is used. If the value is off, it is not used.

+
NOTE:

If you only want to temporarily change the value of this parameter during the current database connection (that is, the current session), run the following SQL statements:

+
SET enable_bitmapscan to off;
+
+ +
+
+

Bitmap scanning applies only to queries with conditions such as a > 1 AND b > 1, where indexes exist on both columns a and b. During performance tuning, if the query performance is poor and bitmapscan operators appear in the execution plan, set this parameter to off and check whether the performance improves.

+

enable_fast_query_shipping=on

+

Specifies whether the optimizer uses a distribution framework. If the value is on, the execution plan is generated on both CNs and DNs. If the value is off, the distribution framework is used, that is, the execution plan is generated on the CNs and then sent to DNs for execution.

+
NOTE:

To temporarily change the value of this parameter in the current database connection (that is, the current session), run the following SQL statement:

+
SET enable_fast_query_shipping to off;
+
+ +
+
+

enable_hashagg=on

+

Specifies whether to enable the optimizer's use of Hash-aggregation plan types.

+

enable_hashjoin=on

+

Specifies whether to enable the optimizer's use of Hash-join plan types.

+

enable_mergejoin=on

+

Specifies whether to enable the optimizer's use of merge-join plan types.

+

enable_indexscan=on

+

Specifies whether to enable the optimizer's use of index-scan plan types.

+

enable_indexonlyscan=on

+

Specifies whether to enable the optimizer's use of index-only-scan plan types.

+

enable_seqscan=on

+

Specifies whether the optimizer uses sequential scanning. It is impossible to suppress sequential scans entirely, but setting this variable to off allows the optimizer to preferentially choose other methods if available.

+

enable_sort=on

+

Specifies whether the optimizer uses sort operations. It is impossible to fully suppress explicit sorts, but setting this variable to off allows the optimizer to preferentially choose other methods if available.

+

enable_broadcast=on

+

Specifies whether to enable the optimizer's use of data broadcast. In data broadcast, a large amount of data is transferred over the network. When the number of stream nodes is large and the estimation is inaccurate, set this parameter to off and check whether the performance improves.

+

rewrite_rule

+

Specifies whether the optimizer enables the LAZY_AGG and MAGIC_SET rewriting rules.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0454.html b/docs/dws/dev/dws_04_0454.html new file mode 100644 index 00000000..34bd511d --- /dev/null +++ b/docs/dws/dev/dws_04_0454.html @@ -0,0 +1,35 @@ + + +

Hint-based Tuning

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0455.html b/docs/dws/dev/dws_04_0455.html new file mode 100644 index 00000000..eed63720 --- /dev/null +++ b/docs/dws/dev/dws_04_0455.html @@ -0,0 +1,115 @@ + + +

Plan Hint Optimization

+

In plan hints, you can specify a join order, join, stream, and scan operations, the number of rows in a result, and redistribution skew information to tune an execution plan, improving query performance.

+

Function

The hint syntax must follow immediately after a SELECT keyword and is written in the following format:

+
/*+ <plan hint>*/
+
+ +
+

You can specify multiple hints for a query plan and separate them by spaces. A hint specified for a query plan does not apply to its subquery plans. To specify a hint for a subquery, add the hint following the SELECT of this subquery.

+
+

For example:

+
select /*+ <plan_hint1> <plan_hint2> */ * from t1, (select /*+ <plan_hint3> */ * from t2) where 1=1;
+
+ +
+

In the preceding command, <plan_hint1> and <plan_hint2> are the hints of a query, and <plan_hint3> is the hint of its subquery.

+

If a hint is specified in the CREATE VIEW statement, the hint will be applied each time this view is used.

+
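For example, a sketch with a hypothetical view v1 over hypothetical tables t1 and t2:

-- The hint is stored with the view definition and applied on every use of v1.
CREATE VIEW v1 AS
SELECT /*+ nestloop(t1 t2) */ t1.a, t2.b
FROM t1, t2 WHERE t1.a = t2.a;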

If the random plan function is enabled (plan_mode_seed is set to a value other than 0), the specified hint will not be used.

+
+

Supported Hints

Currently, the following hints are supported:

+
• Join order hints (leading)
• Join operation hints
• Rows hints
• Stream operation hints
• Scan operation hints
• Sublink name hints
• Skew hints
• Configuration parameter hints (GUC hints)

Precautions

+
+

Examples

The following is the original plan and is used for comparing with the optimized ones:

+
+
explain
+select i_product_name product_name
+,i_item_sk item_sk
+,s_store_name store_name
+,s_zip store_zip
+,ad2.ca_street_number c_street_number
+,ad2.ca_street_name c_street_name
+,ad2.ca_city c_city
+,ad2.ca_zip c_zip
+,count(*) cnt
+,sum(ss_wholesale_cost) s1
+,sum(ss_list_price) s2
+,sum(ss_coupon_amt) s3
+FROM   store_sales
+,store_returns
+,store
+,customer
+,promotion
+,customer_address ad2
+,item
+WHERE  ss_store_sk = s_store_sk AND
+ss_customer_sk = c_customer_sk AND
+ss_item_sk = i_item_sk and
+ss_item_sk = sr_item_sk and
+ss_ticket_number = sr_ticket_number and
+c_current_addr_sk = ad2.ca_address_sk and
+ss_promo_sk = p_promo_sk and
+i_color in ('maroon','burnished','dim','steel','navajo','chocolate') and
+i_current_price between 35 and 35 + 10 and
+i_current_price between 35 + 1 and 35 + 15
+group by i_product_name
+,i_item_sk
+,s_store_name
+,s_zip
+,ad2.ca_street_number
+,ad2.ca_street_name
+,ad2.ca_city
+,ad2.ca_zip
+;
+
+ +
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0456.html b/docs/dws/dev/dws_04_0456.html new file mode 100644 index 00000000..0ef2f8cf --- /dev/null +++ b/docs/dws/dev/dws_04_0456.html @@ -0,0 +1,47 @@ + + +

Join Order Hints

+

Function

These hints specify the join order and outer/inner tables.

+
+

Syntax

+
leading(join_table_list) 
+
+ +
+ +
leading((join_table_list)) 
+
+ +
+
+

Parameter Description

join_table_list specifies the tables to be joined. The values can be table names or table aliases. If a subquery is pulled up, the value can also be the subquery alias. Separate the values with spaces. You can add parentheses to specify the join priorities of tables.

+

A table name or alias can only be a string without a schema name.

+

An alias (if any) is used to represent a table.

+
+
+

To prevent semantic errors, tables in the list must meet the following requirements:

+ +

For example:

+

leading(t1 t2 t3 t4 t5): t1, t2, t3, t4, and t5 are joined. The join order and outer/inner tables are not specified.

+

leading((t1 t2 t3 t4 t5)): t1, t2, t3, t4, and t5 are joined in sequence. The table on the right is used as the inner table in each join.

+

leading(t1 (t2 t3 t4) t5): First, t2, t3, and t4 are joined and the outer/inner tables are not specified. Then, the result is joined with t1 and t5, and the outer/inner tables are not specified.

+

leading((t1 (t2 t3 t4) t5)): First, t2, t3, and t4 are joined and the outer/inner tables are not specified. Then, the result is joined with t1, and (t2 t3 t4) is used as the inner table. Finally, the result is joined with t5, and t5 is used as the inner table.

+

leading((t1 (t2 t3) t4 t5)) leading((t3 t2)): First, t2 and t3 are joined and t2 is used as the inner table. Then, the result is joined with t1, and (t2 t3) is used as the inner table. Finally, the result is joined with t4 and then t5, and the table on the right in each join is used as the inner table.

+

Examples

Hint the query plan in Examples as follows:

+
+
explain
+select /*+ leading((((((store_sales store) promotion) item) customer) ad2) store_returns) leading((store store_sales))*/ i_product_name product_name ...
+
+ +
+

First, store_sales and store are joined, with store_sales as the inner table. Then, the result is joined with promotion, item, customer, ad2, and store_returns in sequence. The optimized plan is as follows:

+

+

For details about the warning at the top of the plan, see Hint Errors, Conflicts, and Other Warnings.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0457.html b/docs/dws/dev/dws_04_0457.html new file mode 100644 index 00000000..224773d5 --- /dev/null +++ b/docs/dws/dev/dws_04_0457.html @@ -0,0 +1,32 @@ + + +

Join Operation Hints

+

Function

Specifies the join method. It can be nested loop join, hash join, or merge join.

+
+

Syntax

[no] nestloop|hashjoin|mergejoin(table_list)
+
+ +
+
+

Parameter Description

+
+ +

For example:

+

no nestloop(t1 t2 t3): nestloop is not used for the join involving t1, t2, and t3. The three tables may be joined in either of two orders: join t2 and t3 first and then t1, or join t1 and t2 first and then t3. This hint takes effect only for the last join, that is, the join involving all three tables. If necessary, you can hint the earlier joins as well; for example, add no nestloop(t2 t3) to forbid nestloop when t2 and t3 are joined first.

+

Examples

Hint the query plan in Examples as follows:

+
+
explain
+select /*+ nestloop(store_sales store_returns item) */ i_product_name product_name ...
+
+ +
+

nestloop is used for the last join between store_sales, store_returns, and item. The optimized plan is as follows:

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0458.html b/docs/dws/dev/dws_04_0458.html new file mode 100644 index 00000000..d5d736d1 --- /dev/null +++ b/docs/dws/dev/dws_04_0458.html @@ -0,0 +1,36 @@ + + +

Rows Hints

+

Function

These hints specify the number of rows in an intermediate result set. Both absolute values and relative values are supported.

+
+

Syntax

rows(table_list #|+|-|* const)
+
+ +
+
+

Parameter Description

+
+ +

For example:

+

rows(t1 #5): The result set of t1 is five rows.

+

rows(t1 t2 t3 *1000): Multiply the result set of joined t1, t2, and t3 by 1000.

+

Suggestion

+
+

Examples

Hint the query plan in Examples as follows:

+
+
explain
+select /*+ rows(store_sales store_returns *50) */ i_product_name product_name ...
+
+ +
+

Multiply the result set of joined store_sales and store_returns by 50. The optimized plan is as follows:

+

+

In row 11, the estimated row count after the hint is 360; the original estimate is displayed as 7 after rounding.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0459.html b/docs/dws/dev/dws_04_0459.html new file mode 100644 index 00000000..36decf9b --- /dev/null +++ b/docs/dws/dev/dws_04_0459.html @@ -0,0 +1,30 @@ + + +

Stream Operation Hints

+

Function

These hints specify a stream operation, which can be broadcast or redistribute.

+
+

Syntax

[no] broadcast|redistribute(table_list)
+
+ +
+
+

Parameter Description

+
+ +

Examples

Hint the query plan in Examples as follows:

+
+
explain
+select /*+ no redistribute(store_sales store_returns item store) leading(((store_sales store_returns item store) customer)) */ i_product_name product_name ...
+
+ +
+

In the original plan, the join result of store_sales, store_returns, item, and store is redistributed before it is joined with customer. After the hinting, the redistribution is disabled and the join order is retained. The optimized plan is as follows:

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0460.html b/docs/dws/dev/dws_04_0460.html new file mode 100644 index 00000000..9b93a976 --- /dev/null +++ b/docs/dws/dev/dws_04_0460.html @@ -0,0 +1,38 @@ + + +

Scan Operation Hints

+

Function

These hints specify a scan operation, which can be tablescan, indexscan, or indexonlyscan.

+
+

Syntax

[no] tablescan|indexscan|indexonlyscan(table [index])
+
+ +
+
+

Parameter Description

+
+ +

indexscan and indexonlyscan hints can be used only when the specified index belongs to the table.

+

Scan operation hints can be used for row-store tables, column-store tables, HDFS tables, HDFS foreign tables, OBS tables, and subquery tables. HDFS tables include primary tables and delta tables. The delta tables are invisible to users. Therefore, scan operation hints are used only for primary tables.

+
+

Examples

To specify an index-based hint for a scan, create an index named i on the i_item_sk column of the item table.

+
create index i on item(i_item_sk);
+
+ +
+
+

Hint the query plan in Examples as follows:

+
explain
+select /*+ indexscan(item i) */ i_product_name product_name ...
+
+ +
+

item is scanned based on an index. The optimized plan is as follows:

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0461.html b/docs/dws/dev/dws_04_0461.html new file mode 100644 index 00000000..6afcb593 --- /dev/null +++ b/docs/dws/dev/dws_04_0461.html @@ -0,0 +1,29 @@ + + +

Sublink Name Hints

+

Function

These hints specify the name of a sublink block.

+
+

Syntax

blockname (table)
+
+ +
+
+

Parameter Description

+
+
  • This hint is used by an outer query only when a sublink is pulled up. Currently, only the Agg equivalent join, IN, and EXISTS sublinks can be pulled up. This hint is usually used together with the hints described in the previous sections.
+
  • The subquery after the FROM keyword is hinted by using the subquery alias. In this case, blockname becomes invalid.
  • If a sublink contains multiple tables, the tables will be joined with the outer-query tables in a random sequence after the sublink is pulled up. In this case, blockname also becomes invalid.
+
+

Examples

explain select /*+nestloop(store_sales tt) */ * from store_sales where ss_item_sk in (select /*+blockname(tt)*/ i_item_sk from item group by 1);
+
+ +
+
+

tt indicates the sublink block name. After being pulled up, the sublink is joined with the outer-query table store_sales by using nestloop. The optimized plan is as follows:

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0462.html b/docs/dws/dev/dws_04_0462.html new file mode 100644 index 00000000..961b880f --- /dev/null +++ b/docs/dws/dev/dws_04_0462.html @@ -0,0 +1,200 @@ + + +

Skew Hints

+

Function

These hints specify redistribution keys containing skew data and skew values, and are used to optimize redistribution involving Join or HashAgg.

+
+

Syntax

+
+

Parameter Description

+
+
  • Skew hints are used only if redistribution is required and the specified skew information matches the redistribution information.
  • Skew hints are controlled by the GUC parameter skew_option. If the parameter is disabled, skew hints cannot be used for solving skew.
  • Currently, skew hints support only the table relationships of the ordinary table and subquery types. Hints can be specified for base tables, subqueries, and WITH ... AS clauses. Unlike other hints, a subquery can be used in skew hints regardless of whether it is pulled up.
  • Use an alias (if any) to specify a table where data skew occurs.
+
  • You can use a name or an alias to specify a skew column as long as it is not ambiguous. The columns in skew hints cannot be expressions. If data skew occurs in the redistribution that uses an expression as a redistribution key, set the redistribution key as a new column and specify the column in skew hints.
  • The number of skew values must be an integer multiple of the number of columns. Skew values must be grouped based on the column sequence, with each group containing a maximum of 10 values. You can specify duplicate values to group skew columns having different number of skew values. For example, the c1 and c2 columns of the t1 table contains skew data. The skew value of the c1 column is a1, and the skew values of the c2 column are b1 and b2. In this case, the skew hint is skew(t1 (c1 c2) ((a1 b1)(a1 b2))). (a1 b1) is a value group, where NULL is allowed as a skew value. Each hint can contain a maximum of 10 groups and the number of groups should be an integer multiple of the number of columns.
  • In the redistribution optimization of Join, a skew value must be specified for skew hints. The skew value can be left empty for HashAgg.
  • If multiple tables, columns, or values are specified, separate items of the same type with spaces.
  • The type of skew values cannot be forcibly converted in hints. To specify a string, enclose it with single quotation marks (' ').
+
+

Example:

+ + +
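As a sketch based on the computing-skew example earlier in this chapter (tables skew s and test t, with skew value 0 in column s.x), a skew hint could look like this:

-- Declare that column x of table s is skewed on value 0, so the optimizer
-- can generate a PART REDISTRIBUTE PART ROUNDROBIN plan for the join.
SELECT /*+ skew(s (x) (0)) */ *
FROM skew s, test t
WHERE s.x = t.x
ORDER BY s.a LIMIT 1;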

Suggestion

+
+

Examples

Specify single-table skew.

+ +
+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0463.html b/docs/dws/dev/dws_04_0463.html new file mode 100644 index 00000000..6d4477b6 --- /dev/null +++ b/docs/dws/dev/dws_04_0463.html @@ -0,0 +1,27 @@ + + +

Configuration Parameter Hints

+

Function

A configuration parameter hint, also called a GUC hint, specifies a configuration parameter value to be used when a plan is generated. Currently, only the following parameters are supported:

+
+ +

Syntax

set [global](guc_name guc_value)
+
+

Parameters

+ +
  • If a parameter set by hint takes effect at the statement level, the hint must be written to the top-level query instead of the subquery. For UNION, INTERSECT, EXCEPT, and MINUS statements, you can write the GUC hint at the statement level to any SELECT clause that participates in the set operation. The configuration parameters set by the GUC hint take effect on each SELECT clause that participates in the set operation.
  • When a subquery is pulled up, all GUC hints on the subquery are discarded.
  • The enable_fast_query_shipping, enable_nodegroup_debug, expected_computing_nodegroup, query_dop, and rewrite_rule parameters can be set only at the statement level.
  • If a parameter is set by both the statement-level GUC hint and the subquery-level GUC hint, the subquery-level GUC hint takes effect in the corresponding subquery, and the statement-level GUC hint takes effect in other subqueries of the statement.
+
+
+

Example

Hint the query plan in Examples as follows:

+
explain
+select /*+ set global(query_dop 0) */ i_product_name product_name
+...
+

This hint indicates that the query_dop parameter is set to 0 when the plan for a statement is generated, which means the SMP adaptation function is enabled. The generated plan is as follows:

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0464.html b/docs/dws/dev/dws_04_0464.html new file mode 100644 index 00000000..128799c6 --- /dev/null +++ b/docs/dws/dev/dws_04_0464.html @@ -0,0 +1,33 @@ + + +

Hint Errors, Conflicts, and Other Warnings

+

Plan hints change an execution plan. You can run EXPLAIN to view the changes.

+

Hints containing errors are invalid and do not affect statement execution. The errors are displayed in different ways depending on the statement type: hint errors in an EXPLAIN statement are displayed as warnings on the interface, and hint errors in other statements are recorded in debug1-level logs containing the PLANHINT keyword.

+

Hint Error Types

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0465.html b/docs/dws/dev/dws_04_0465.html new file mode 100644 index 00000000..9fb8a1a4 --- /dev/null +++ b/docs/dws/dev/dws_04_0465.html @@ -0,0 +1,148 @@ + + +

Plan Hint Cases

+

This section takes a statement in TPC-DS (Q24) as an example to describe how to optimize an execution plan by using hints in a 1000X data scale, 24-DN environment. For example:

+
select avg(netpaid) from
+(select c_last_name
+,c_first_name
+,s_store_name
+,ca_state
+,s_state
+,i_color
+,i_current_price
+,i_manager_id
+,i_units
+,i_size
+,sum(ss_sales_price) netpaid
+from store_sales
+,store_returns
+,store
+,item
+,customer
+,customer_address
+where ss_ticket_number = sr_ticket_number
+and ss_item_sk = sr_item_sk
+and ss_customer_sk = c_customer_sk
+and ss_item_sk = i_item_sk
+and ss_store_sk = s_store_sk
+and c_birth_country = upper(ca_country)
+and s_zip = ca_zip
+and s_market_id=7
+group by c_last_name
+,c_first_name
+,s_store_name
+,ca_state
+,s_state
+,i_color
+,i_current_price
+,i_manager_id
+,i_units
+,i_size);
+
+ +
+
  1. The original plan of this statement is as follows and the statement execution takes 110s:
+

+

In this plan, the performance of the layer-10 broadcast is poor because the estimate generated at layer 11 is 2140 rows, far fewer than the actual number of rows. The underestimation mainly comes from the layer-13 hash join, where store_sales and store_returns are joined (on ss_ticket_number and ss_item_sk in store_sales and sr_ticket_number and sr_item_sk in store_returns) without considering the multi-column correlation.

+

2. After the rows hint is used for optimization, the plan is as follows and the statement execution takes 318s:

+
select avg(netpaid) from
+(select /*+rows(store_sales store_returns * 11270)*/ c_last_name ...
+
+ +
+

+

The execution takes longer because the layer-9 redistribute is slow. Data skew does not occur at the layer-9 redistribute itself; it is slow because the layer-8 hash join is slow, which in turn is caused by data skew at the layer-18 redistribute.

+

3. Data skew occurs at the layer-18 redistribute because customer_address has few distinct values in its two join keys. Therefore, plan customer_address as the last table to be joined. After the hint is used for optimization, the plan is as follows and the statement execution takes 116s:

+
select avg(netpaid) from
+(select /*+rows(store_sales store_returns *11270)
+leading((store_sales store_returns store item customer) customer_address)*/
+c_last_name ...
+
+ +
+

+

Most of the time is spent on layer-6 redistribute. The plan needs to be further optimized.

+

4. Most of the time is spent on layer-6 redistribute because of data skew. To avoid the data skew, plan the item table as the last one to be joined because the number of rows is not reduced after item is joined. After the hint is used for optimization, the plan is as follows and the statement execution takes 120s:

+
select avg(netpaid) from
+(select /*+rows(store_sales store_returns *11270)
+leading((customer_address (store_sales store_returns store customer) item))*/
+c_last_name ...
+
+ +
+

+

Data skew occurs after the join of item and customer_address because item is broadcasted at layer-22. As a result, layer-6 redistribute is still slow.

+

5. Add a hint to disable broadcast for item or add a redistribute hint for the join result of item and customer_address. After the hint is used for optimization, the plan is as follows and the statement execution takes 105s:

+
select avg(netpaid) from
+(select /*+rows(store_sales store_returns *11270)
+leading((customer_address (store_sales store_returns store customer) item))
+no broadcast(item)*/
+c_last_name ...
+
+ +
+

+

6. The last layer uses a single-level Agg and the number of rows is greatly reduced. Set best_agg_plan to 3 to change the single-level Agg to a double-level Agg. The plan is as follows and the statement execution takes 94s. The optimization is complete.

+

+

+

If the query performance deteriorates due to statistics changes, you can use hints to optimize the query plan. Take TPCH-Q17 as an example. The query performance deteriorates after the value of default_statistics_target is changed from the default value to -2 for statistics collection.

+

1. If default_statistics_target is set to the default value 100, the plan is as follows:

+

+

2. If default_statistics_target is set to -2, the plan is as follows:

+

+

3. After analysis, the cause is that the stream type changed from Broadcast to Redistribute during the join of the lineitem and part tables. You can use a hint to change the stream type back to Broadcast. For example:
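A sketch of such a hint applied to the standard TPC-H Q17 text (the exact statement at the site may differ; broadcasting part is an assumption based on the analysis above):

select /*+ broadcast(part) */
       sum(l_extendedprice) / 7.0 as avg_yearly
from lineitem, part
where p_partkey = l_partkey
  and p_brand = 'Brand#23'
  and p_container = 'MED BOX'
  and l_quantity < (
      select 0.2 * avg(l_quantity)
      from lineitem
      where l_partkey = p_partkey);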

+

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0466.html b/docs/dws/dev/dws_04_0466.html new file mode 100644 index 00000000..a93ebc9c --- /dev/null +++ b/docs/dws/dev/dws_04_0466.html @@ -0,0 +1,51 @@ + + + +

Routinely Maintaining Tables

+ +

To ensure proper database running, after INSERT and DELETE operations you need to routinely run VACUUM FULL and ANALYZE, as appropriate for the customer scenario, to reclaim disk space and update statistics for better performance.

+

Related Concepts

You need to routinely run VACUUM, VACUUM FULL, and ANALYZE to maintain tables, because:

+ +
+

Procedure

  1. Run the VACUUM or VACUUM FULL command to reclaim disk space.

    • VACUUM:
      Do VACUUM to the table:
      VACUUM customer;
      +
      VACUUM
      +
      +

This command can run concurrently with ordinary database operations, including SELECT, INSERT, UPDATE, and DELETE, but not with ALTER TABLE.

      +

      Do VACUUM to the partitioned table:

      +
      VACUUM customer_par PARTITION ( P1 );
      +
      VACUUM
      +
    • VACUUM FULL:
      VACUUM FULL customer;
      +
      VACUUM
      +

VACUUM FULL takes an exclusive lock on the table it operates on, so all other operations on that table are suspended while it runs.

      +

When reclaiming disk space, you can query for the sessions corresponding to the earliest transactions in the cluster and then, as needed, end the long-running ones so that the disk space can be fully reclaimed.

      +
      1. Run the following command to query for oldestxmin on the GTM:
        select * from pgxc_gtm_snapshot_status();
        +
      2. Run the following command to query for the PID of the corresponding session on the CN. xmin is the oldestxmin obtained in the previous step.
        select * from pgxc_running_xacts() where xmin=1400202010;
        +
      +
    +

  2. Do ANALYZE to update statistical information.

    ANALYZE customer;
    +
    ANALYZE
    +

    Do ANALYZE VERBOSE to update statistics and display table information.

    +
    ANALYZE VERBOSE customer;
    +
    ANALYZE
    +

You can also run VACUUM ANALYZE to perform both operations at once.

    +
    VACUUM ANALYZE customer;
    +
    VACUUM
    +

VACUUM and ANALYZE cause a substantial increase in I/O traffic, which may degrade the performance of other active sessions. Therefore, you are advised to throttle them by setting the vacuum_cost_delay parameter.

    +
    +

  3. Delete tables.

    DROP TABLE customer;
    +DROP TABLE customer_par;
    +DROP TABLE part;
    +

    If the following output is displayed, the tables have been deleted.

    +
    DROP TABLE
    +

+
+

Maintenance Suggestion

+
+
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0467.html b/docs/dws/dev/dws_04_0467.html new file mode 100644 index 00000000..6a384ba2 --- /dev/null +++ b/docs/dws/dev/dws_04_0467.html @@ -0,0 +1,36 @@ + + + +

Routinely Recreating an Index

+ +

Context

When data deletion is repeatedly performed in the database, index keys are deleted from index pages, resulting in index bloat. Routinely recreating indexes improves query efficiency.

+

The database supports B-tree, GIN, and psort indexes.

+ +
+

Rebuilding an Index

Use either of the following two methods to recreate an index:

+ +
+

Procedure

Assume that the ordinary index areaS_idx exists on the area_id column of the imported table areaS. Use either of the following two methods to recreate the index:
  • Run the DROP INDEX statement to delete the index and run the CREATE INDEX statement to create an index.
    1. Delete an index.
      DROP INDEX areaS_idx;
      +DROP INDEX
      +
    2. Create an index.
      CREATE INDEX areaS_idx ON areaS (area_id);
      +CREATE INDEX
      +
    +
  • Run the REINDEX statement to recreate an index.
    • Run the REINDEX TABLE statement to recreate an index.
      REINDEX TABLE areaS;
      +REINDEX
      +
    • Run the REINDEX INTERNAL TABLE statement to recreate the index of a desc table (the metadata table associated with a column-store table).
      REINDEX INTERNAL TABLE areaS;
      +REINDEX
      +
    +
+
+
+
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0468.html b/docs/dws/dev/dws_04_0468.html new file mode 100644 index 00000000..03a5488e --- /dev/null +++ b/docs/dws/dev/dws_04_0468.html @@ -0,0 +1,24 @@ + + +

Configuring the SMP

+

This section describes the usage restrictions, application scenarios, and configuration guide of symmetric multiprocessing (SMP).

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0469.html b/docs/dws/dev/dws_04_0469.html new file mode 100644 index 00000000..79fbb904 --- /dev/null +++ b/docs/dws/dev/dws_04_0469.html @@ -0,0 +1,29 @@ + + + +

Application Scenarios and Restrictions

+ +

Context

The SMP feature improves performance through operator parallelism, at the cost of more system resources, including CPU, memory, network, and I/O. Essentially, SMP trades resources for time. It improves system performance in appropriate scenarios where resources are sufficient, but may deteriorate performance otherwise. In addition, compared with serial processing, SMP generates more candidate plans, which makes plan generation more time-consuming and may itself hurt performance.

+
+

Applicable Scenarios

+
+

Non-applicable Scenarios

  1. Short queries are performed, where plan generation takes a large share of the total time.
  2. Operators are processed on CNs.
  3. Statements that cannot be pushed down are executed.
  4. The subplan of a query and operators containing a subquery are executed.
+
+
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0470.html b/docs/dws/dev/dws_04_0470.html new file mode 100644 index 00000000..a672e2f6 --- /dev/null +++ b/docs/dws/dev/dws_04_0470.html @@ -0,0 +1,19 @@ + + + +

Resource Impact on SMP Performance

+ +

The SMP architecture trades resources for time. After a plan is parallelized, resource consumption increases, including CPU, memory, I/O, and network bandwidth; the higher the degree of parallelism, the higher the consumption. If any of these resources becomes a bottleneck, SMP cannot improve performance and may even degrade overall cluster performance. Adaptive SMP is provided to dynamically select the optimal degree of parallelism for each query based on resource usage and query requirements. The following describes how SMP affects these resources:

+ +
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0471.html b/docs/dws/dev/dws_04_0471.html new file mode 100644 index 00000000..581ebba2 --- /dev/null +++ b/docs/dws/dev/dws_04_0471.html @@ -0,0 +1,17 @@ + + + +

Other Factors Affecting SMP Performance

+ +

Besides resources, other factors affect SMP performance, such as uneven data distribution in a partitioned table and the system degree of parallelism.

+ +
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0472.html b/docs/dws/dev/dws_04_0472.html new file mode 100644 index 00000000..391f2d0a --- /dev/null +++ b/docs/dws/dev/dws_04_0472.html @@ -0,0 +1,21 @@ + + + +

Suggestions for SMP Parameter Settings

+ +

Starting from this version, SMP auto adaptation is enabled. For newly deployed clusters, the default value of query_dop is 0, and SMP parameters have been adjusted accordingly. To ensure forward compatibility, keep the value of query_dop unchanged after an existing cluster is upgraded.

+

For an upgraded cluster, if you want to set query_dop to 0 and enable SMP parallel processing, modify the following parameters to obtain better dop options:

+ +
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0473.html b/docs/dws/dev/dws_04_0473.html new file mode 100644 index 00000000..b7aba1cc --- /dev/null +++ b/docs/dws/dev/dws_04_0473.html @@ -0,0 +1,30 @@ + + + +

SMP Manual Optimization Suggestions

+ +

To manually optimize SMP, you need to be familiar with Suggestions for SMP Parameter Settings. This section describes how to optimize SMP.

+

Constraints

The CPU, memory, I/O, and network bandwidth resources must be sufficient. The SMP architecture trades resources for time; after a plan is parallelized, resource consumption increases, and once these resources become a bottleneck, SMP may degrade rather than improve performance. Also, generating SMP plans takes longer than generating serial plans. Therefore, for TP services that mainly involve short queries, or when resources are insufficient, you are advised to disable SMP by setting query_dop to 1.

+
+

Procedure

  1. Observe the current system load. If resources are sufficient (usage ratio below 50%), go to step 2; otherwise, stop here.
  2. Set query_dop to 1 (the default). Use EXPLAIN to generate an execution plan and check whether it matches the scenarios in Application Scenarios and Restrictions. If it does, go to the next step.
  3. Set query_dop to -value; the degree of parallelism is then chosen from the range [1, value].
  4. Set query_dop to value; the degree of parallelism is then either 1 or value.
  5. Before executing the query, set query_dop to an appropriate value; after the statement finishes, set query_dop back to 1. For example:
    SET query_dop = 0;
    +SELECT COUNT(*) FROM t1 GROUP BY a;
    +......
    +SET query_dop = 1;
    +
    + +
    +
    • If resources are sufficient, a higher degree of parallelism yields better performance.
    • The SMP degree of parallelism can be set at the session level. You are advised to enable SMP just before executing queries that meet the requirements and disable it after execution completes; otherwise, SMP may affect services during peak hours.
    • SMP adaptation (query_dop ≤ 0) depends on resource management. If resource management is disabled (use_workload_manager is off), plans with parallelism degree of only 1 or 2 are generated.
    +
    +
+
+
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_0474.html b/docs/dws/dev/dws_04_0474.html new file mode 100644 index 00000000..08861861 --- /dev/null +++ b/docs/dws/dev/dws_04_0474.html @@ -0,0 +1,45 @@ + + +

Optimization Cases

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0475.html b/docs/dws/dev/dws_04_0475.html new file mode 100644 index 00000000..49d767f8 --- /dev/null +++ b/docs/dws/dev/dws_04_0475.html @@ -0,0 +1,42 @@ + + +

Case: Selecting an Appropriate Distribution Column

+

Symptom

Tables are defined as follows:

+
CREATE TABLE t1 (a int, b int);
+CREATE TABLE t2 (a int, b int);
+
+ +
+

The following query is executed:

+
SELECT * FROM t1, t2 WHERE t1.a = t2.b;
+
+ +
+
+

Optimization Analysis

If a is the distribution column of t1 and t2:

+
CREATE TABLE t1 (a int, b int) DISTRIBUTE BY HASH (a);
+CREATE TABLE t2 (a int, b int) DISTRIBUTE BY HASH (a);
+
+ +
+

Then a Streaming operator exists in the execution plan and a large volume of data is transferred among DNs, as shown in Figure 1.

+
Figure 1 Selecting an appropriate distribution column (1)
+

If a is the distribution column of t1 and b is the distribution column of t2:

+
CREATE TABLE t1 (a int, b int) DISTRIBUTE BY HASH (a);
+CREATE TABLE t2 (a int, b int) DISTRIBUTE BY HASH (b);
+
+ +
+

Then no Streaming operator exists in the execution plan, less data is transferred among DNs, and the query performance improves, as shown in Figure 2.

+
Figure 2 Selecting an appropriate distribution column (2)
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0476.html b/docs/dws/dev/dws_04_0476.html new file mode 100644 index 00000000..cae8d6eb --- /dev/null +++ b/docs/dws/dev/dws_04_0476.html @@ -0,0 +1,33 @@ + + +

Case: Creating an Appropriate Index

+

Symptom

Query the information about all personnel in the sales department.

+
SELECT staff_id,first_name,last_name,employment_id,state_name,city 
+FROM staffs,sections,states,places 
+WHERE sections.section_name='Sales' 
+AND staffs.section_id = sections.section_id 
+AND sections.place_id = places.place_id 
+AND places.state_id = states.state_id 
+ORDER BY staff_id;
+
+ +
+
+

Optimization Analysis

The original execution plan is as follows before creating the places.place_id and states.state_id indexes:

+

+

The optimized execution plan is as follows (two indexes have been created on the places.place_id and states.state_id columns):
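The index DDL itself is not shown in the plan figures; a minimal sketch (the index names are illustrative):

CREATE INDEX places_place_id_idx ON places (place_id);
CREATE INDEX states_state_id_idx ON states (state_id);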

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0477.html b/docs/dws/dev/dws_04_0477.html new file mode 100644 index 00000000..cdba213d --- /dev/null +++ b/docs/dws/dev/dws_04_0477.html @@ -0,0 +1,171 @@ + + +

Case: Adding NOT NULL for JOIN Columns

+

Symptom

SELECT
+ * 
+FROM
+( ( SELECT
+  STARTTIME STTIME,
+  SUM(NVL(PAGE_DELAY_MSEL,0)) PAGE_DELAY_MSEL,
+  SUM(NVL(PAGE_SUCCEED_TIMES,0)) PAGE_SUCCEED_TIMES,
+  SUM(NVL(FST_PAGE_REQ_NUM,0)) FST_PAGE_REQ_NUM,
+  SUM(NVL(PAGE_AVG_SIZE,0)) PAGE_AVG_SIZE,
+  SUM(NVL(FST_PAGE_ACK_NUM,0)) FST_PAGE_ACK_NUM,
+  SUM(NVL(DATATRANS_DW_DURATION,0)) DATATRANS_DW_DURATION,
+  SUM(NVL(PAGE_SR_DELAY_MSEL,0)) PAGE_SR_DELAY_MSEL 
+ FROM
+  PS.SDR_WEB_BSCRNC_1DAY SDR
+  INNER JOIN (SELECT
+      BSCRNC_ID,
+      BSCRNC_NAME,
+      ACCESS_TYPE,
+      ACCESS_TYPE_ID 
+     FROM
+      nethouse.DIM_LOC_BSCRNC 
+     GROUP BY
+      BSCRNC_ID,
+      BSCRNC_NAME,
+      ACCESS_TYPE,
+      ACCESS_TYPE_ID) DIM 
+  ON SDR.BSCRNC_ID = DIM.BSCRNC_ID 
+  AND DIM.ACCESS_TYPE_ID IN (0,1,2) 
+  INNER JOIN nethouse.DIM_RAT_MAPPING RAT 
+  ON (RAT.RAT = SDR.RAT)
+ WHERE
+  ( (STARTTIME >= 1461340800 
+  AND STARTTIME < 1461427200) ) 
+  AND RAT.ACCESS_TYPE_ID IN (0,1,2) 
+  --and SDR.BSCRNC_ID is not null
+ GROUP BY
+  STTIME ) ) ;
+
+ +
+

Figure 1 shows the execution plan.

+
Figure 1 Adding NOT NULL for JOIN columns (1)
+
+

Optimization Analysis

  1. As shown in Figure 1, the sequential scan phase is time consuming.
  2. The JOIN performance is poor because a large number of null values exist in the JOIN column BSCRNC_ID of the PS.SDR_WEB_BSCRNC_1DAY table.

    Therefore, you are advised to manually add NOT NULL for JOIN columns in the statement, as shown below:

    +
    SELECT
    + * 
    +FROM
    +( ( SELECT
    +  STARTTIME STTIME,
    +  SUM(NVL(PAGE_DELAY_MSEL,0)) PAGE_DELAY_MSEL,
    +  SUM(NVL(PAGE_SUCCEED_TIMES,0)) PAGE_SUCCEED_TIMES,
    +  SUM(NVL(FST_PAGE_REQ_NUM,0)) FST_PAGE_REQ_NUM,
    +  SUM(NVL(PAGE_AVG_SIZE,0)) PAGE_AVG_SIZE,
    +  SUM(NVL(FST_PAGE_ACK_NUM,0)) FST_PAGE_ACK_NUM,
    +  SUM(NVL(DATATRANS_DW_DURATION,0)) DATATRANS_DW_DURATION,
    +  SUM(NVL(PAGE_SR_DELAY_MSEL,0)) PAGE_SR_DELAY_MSEL 
    + FROM
    +  PS.SDR_WEB_BSCRNC_1DAY SDR
    +  INNER JOIN (SELECT
    +      BSCRNC_ID,
    +      BSCRNC_NAME,
    +      ACCESS_TYPE,
    +      ACCESS_TYPE_ID 
    +     FROM
    +      nethouse.DIM_LOC_BSCRNC 
    +     GROUP BY
    +      BSCRNC_ID,
    +      BSCRNC_NAME,
    +      ACCESS_TYPE,
    +      ACCESS_TYPE_ID) DIM 
    +  ON SDR.BSCRNC_ID = DIM.BSCRNC_ID 
    +  AND DIM.ACCESS_TYPE_ID IN (0,1,2) 
    +  INNER JOIN nethouse.DIM_RAT_MAPPING RAT 
    +  ON (RAT.RAT = SDR.RAT)
    + WHERE
    +  ( (STARTTIME >= 1461340800 
    +  AND STARTTIME < 1461427200) ) 
    +  AND RAT.ACCESS_TYPE_ID IN (0,1,2) 
    +  and SDR.BSCRNC_ID is not null
    + GROUP BY
    +  STTIME ) ) A;
    +
    + +
    +

    Figure 2 shows the execution plan.

    +
    Figure 2 Adding NOT NULL for JOIN columns (2)

    +
    +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0478.html b/docs/dws/dev/dws_04_0478.html new file mode 100644 index 00000000..d034428a --- /dev/null +++ b/docs/dws/dev/dws_04_0478.html @@ -0,0 +1,98 @@ + + +

Case: Pushing Down Sort Operations to DNs

+

Symptom

In an execution plan, more than 95% of the execution time is spent on the window agg operator executed on the CN. The statement sums two columns separately, adds the two sums together, and then applies trunc and sorting in sequence.

+

The table structure is as follows:

+
CREATE TABLE public.test(imsi int,L4_DW_THROUGHPUT int,L4_UL_THROUGHPUT int)
+with (orientation = column) DISTRIBUTE BY hash(imsi);
+
+ +
+

The query statements are as follows:

+
SELECT COUNT(1) over() AS DATACNT,
+IMSI AS IMSI_IMSI,
+CAST(TRUNC(((SUM(L4_UL_THROUGHPUT) + SUM(L4_DW_THROUGHPUT))), 0) AS
+DECIMAL(20)) AS TOTAL_VOLOME_KPIID
+FROM public.test AS test
+GROUP BY IMSI
+order by TOTAL_VOLOME_KPIID DESC;
+
+ +
+

The execution plan is as follows:

+
Row Adapter  (cost=10.70..10.70 rows=10 width=12)
+   ->  Vector Sort  (cost=10.68..10.70 rows=10 width=12)
+         Sort Key: ((trunc((((sum(l4_ul_throughput)) + (sum(l4_dw_throughput))))::numeric, 0))::numeric(20,0))
+         ->  Vector WindowAgg  (cost=10.09..10.51 rows=10 width=12)
+               ->  Vector Streaming (type: GATHER)  (cost=242.04..246.84 rows=240 width=12)
+                     Node/s: All datanodes
+                     ->  Vector Hash Aggregate  (cost=10.09..10.29 rows=10 width=12)
+                           Group By Key: imsi
+                           ->  CStore Scan on test  (cost=0.00..10.01 rows=10 width=12)
+
+ +
+

As we can see, both the window agg and the sort are performed on the CN, which is time-consuming.

+
+

Optimization Analysis

Modify the statement to a subquery statement, as shown below:

+
+
SELECT COUNT(1) over() AS DATACNT, IMSI_IMSI, TOTAL_VOLOME_KPIID
+FROM (SELECT IMSI AS IMSI_IMSI,
+CAST(TRUNC(((SUM(L4_UL_THROUGHPUT) + SUM(L4_DW_THROUGHPUT))),
+0) AS DECIMAL(20)) AS TOTAL_VOLOME_KPIID
+FROM public.test AS test
+GROUP BY IMSI
+ORDER BY TOTAL_VOLOME_KPIID DESC);
+
+ +
+

In the rewritten statement, the sum and trunc of the two columns are computed, together with the ORDER BY, inside a subquery, and the window agg is applied to the subquery. This pushes the sort operation down to the DNs, as shown below:

+
Row Adapter  (cost=10.70..10.70 rows=10 width=24)
+   ->  Vector WindowAgg  (cost=10.45..10.70 rows=10 width=24)
+         ->  Vector Streaming (type: GATHER)  (cost=250.83..253.83 rows=240 width=24)
+               Node/s: All datanodes
+               ->  Vector Sort  (cost=10.45..10.48 rows=10 width=12)
+                     Sort Key: ((trunc(((sum(test.l4_ul_throughput) + sum(test.l4_dw_throughput)))::numeric, 0))::numeric(20,0))
+                     ->  Vector Hash Aggregate  (cost=10.09..10.29 rows=10 width=12)
+                           Group By Key: test.imsi
+                           ->  CStore Scan on test  (cost=0.00..10.01 rows=10 width=12)
+
+ +
+

The optimized SQL statement greatly improves the performance by reducing the execution time from 120s to 7s.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0479.html b/docs/dws/dev/dws_04_0479.html new file mode 100644 index 00000000..2508dda9 --- /dev/null +++ b/docs/dws/dev/dws_04_0479.html @@ -0,0 +1,237 @@ + + +

Case: Configuring cost_param for Better Query Performance

+

Symptom

If bit0 of cost_param is set to 1, an improved mechanism is used for estimating the selectivity of non-equi-joins. This method is more accurate for estimating the selectivity of joins between two identical tables. The following example describes the optimization scenario when bit0 of cost_param is 1. In V300R002C00 and later, cost_param & 1 = 0 is no longer used; the optimized formula is always selected.

+

Note: The selection rate (selectivity) indicates the proportion of row combinations that satisfy the join condition when two tables are joined.

+

The table structure is as follows:

+
CREATE TABLE LINEITEM
+(
+L_ORDERKEY BIGINT NOT NULL
+, L_PARTKEY BIGINT NOT NULL
+, L_SUPPKEY BIGINT NOT NULL
+, L_LINENUMBER BIGINT NOT NULL
+, L_QUANTITY DECIMAL(15,2) NOT NULL
+, L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL
+, L_DISCOUNT DECIMAL(15,2) NOT NULL
+, L_TAX DECIMAL(15,2) NOT NULL
+, L_RETURNFLAG CHAR(1) NOT NULL
+, L_LINESTATUS CHAR(1) NOT NULL
+, L_SHIPDATE DATE NOT NULL
+, L_COMMITDATE DATE NOT NULL
+, L_RECEIPTDATE DATE NOT NULL
+, L_SHIPINSTRUCT CHAR(25) NOT NULL
+, L_SHIPMODE CHAR(10) NOT NULL
+, L_COMMENT VARCHAR(44) NOT NULL
+) with (orientation = column, COMPRESSION = MIDDLE) distribute by hash(L_ORDERKEY);
+
+CREATE TABLE ORDERS
+(
+O_ORDERKEY BIGINT NOT NULL
+, O_CUSTKEY BIGINT NOT NULL
+, O_ORDERSTATUS CHAR(1) NOT NULL
+, O_TOTALPRICE DECIMAL(15,2) NOT NULL
+, O_ORDERDATE DATE NOT NULL
+, O_ORDERPRIORITY CHAR(15) NOT NULL
+, O_CLERK CHAR(15) NOT NULL
+, O_SHIPPRIORITY BIGINT NOT NULL
+, O_COMMENT VARCHAR(79) NOT NULL
+)with (orientation = column, COMPRESSION = MIDDLE) distribute by hash(O_ORDERKEY);
+
+ +
+

The query statements are as follows:

+
explain verbose select
+count(*) as numwait 
+from
+lineitem l1,
+orders 
+where
+o_orderkey = l1.l_orderkey
+and o_orderstatus = 'F'
+and l1.l_receiptdate > l1.l_commitdate
+and not exists (
+select
+*
+from
+lineitem l3
+where
+l3.l_orderkey = l1.l_orderkey
+and l3.l_suppkey <> l1.l_suppkey
+and l3.l_receiptdate > l3.l_commitdate
+)
+order by
+numwait desc;
+
+ +
+

The following figure shows the execution plan. (With verbose, the estimated number of distinct values is shown for the selected columns, controlled by costs off/on; it appears on the hash join rows but not on the other rows.)

+

+
+

Optimization Analysis 1

This query contains an Anti Join on the lineitem table. When cost_param & 1 is 0, the estimated number of Anti Join rows differs greatly from the actual number, so the query performance deteriorates. You can estimate the number of Anti Join rows more accurately by setting bit0 of cost_param to 1, which improves the query performance. The optimized execution plan is as follows:

+

+
+

Symptom 2

If bit1 of cost_param is set to 1 (set cost_param=2) and a join has multiple filter conditions, the lowest selectivity among all the conditions, rather than the product of the selectivities of the individual conditions, is used as the total selectivity. This method is more accurate when the filtered columns are closely correlated. The following example describes the optimization scenario when bit1 of cost_param is 1.

+

The table structure is as follows:

+
CREATE TABLE NATION
+(
+N_NATIONKEY INT NOT NULL
+, N_NAME CHAR(25) NOT NULL
+, N_REGIONKEY INT NOT NULL
+, N_COMMENT VARCHAR(152)
+) distribute by replication;
+CREATE TABLE SUPPLIER
+(
+S_SUPPKEY BIGINT NOT NULL
+, S_NAME CHAR(25) NOT NULL
+, S_ADDRESS VARCHAR(40) NOT NULL
+, S_NATIONKEY INT NOT NULL
+, S_PHONE CHAR(15) NOT NULL
+, S_ACCTBAL DECIMAL(15,2) NOT NULL
+, S_COMMENT VARCHAR(101) NOT NULL
+) distribute by hash(S_SUPPKEY);
+CREATE TABLE PARTSUPP
+(
+PS_PARTKEY BIGINT NOT NULL
+, PS_SUPPKEY BIGINT NOT NULL
+, PS_AVAILQTY BIGINT NOT NULL
+, PS_SUPPLYCOST DECIMAL(15,2) NOT NULL
+, PS_COMMENT VARCHAR(199) NOT NULL
+)distribute by hash(PS_PARTKEY);
+
+ +
+

The query statements are as follows:

+
set cost_param=2;
+explain verbose select
+nation,
+sum(amount) as sum_profit 
+from
+(
+select
+n_name as nation,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from
+supplier,
+lineitem,
+partsupp,
+nation
+where
+s_suppkey = l_suppkey
+and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey
+and s_nationkey = n_nationkey
+) as profit 
+group by nation 
+order by nation;
+
+ +
+

When bit1 of cost_param is 0, the execution plan is shown as follows:

+

+
+

Optimization Analysis 2

In the preceding query, the hash join conditions between the supplier, lineitem, and partsupp tables are lineitem.l_suppkey = supplier.s_suppkey and lineitem.l_partkey = partsupp.ps_partkey, so two filter conditions exist in the hash join. lineitem.l_suppkey in the first condition and lineitem.l_partkey in the second condition are strongly correlated columns of the lineitem table. In this situation, when estimating the selectivity of the hash join conditions with bit1 of cost_param set to 1, the lowest selectivity among all the conditions, rather than the product of the individual selectivities, is used as the total selectivity. This method is more accurate when the filtered columns are closely correlated. The optimized plan is as follows:

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0480.html b/docs/dws/dev/dws_04_0480.html new file mode 100644 index 00000000..c8f040fa --- /dev/null +++ b/docs/dws/dev/dws_04_0480.html @@ -0,0 +1,18 @@ + + +

Case: Adjusting the Distribution Key

+

Symptom

During a site test, the following information is displayed after EXPLAIN ANALYZE is executed:

+

+

According to the execution information, HashJoin is the performance bottleneck of the whole plan. The HashJoin execution time range across DNs, [2657.406, 93339.924], shows that severe skew occurs during the HashJoin operation.

+

The memory information (shown in the following figure) also shows that memory usage is skewed across nodes.

+

+
+

Optimization Analysis

The preceding two symptoms indicate that this SQL statement has serious computing skew. Drilling down below the HashJoin operator shows serious computing skew [38.885, 2940.983] in Seq Scan on s_riskrate_setting, which suggests data skew in the s_riskrate_setting table; this was later confirmed. After the distribution key was adjusted, the execution time was reduced from 94s to 50s.
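Before adjusting the distribution key, you can confirm such skew by counting rows per DN; a sketch (table_skewness() is the helper GaussDB(DWS) provides for this purpose):

SELECT table_skewness('s_riskrate_setting');

-- or, equivalently, count rows per node by hand
SELECT b.node_name, a.count
FROM (SELECT count(*) AS count, xc_node_id FROM s_riskrate_setting GROUP BY xc_node_id) a,
     pgxc_node b
WHERE a.xc_node_id = b.node_id
ORDER BY a.count DESC;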

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0481.html b/docs/dws/dev/dws_04_0481.html new file mode 100644 index 00000000..2d7f4a65 --- /dev/null +++ b/docs/dws/dev/dws_04_0481.html @@ -0,0 +1,18 @@ + + +

Case: Adjusting the Partial Clustering Key

+

Symptom

The EXPLAIN PERFORMANCE information at a site is as follows. As shown in the red boxes, the two performance bottlenecks are table scan operations.

+

+

+
+

Optimization Analysis

After further analysis, we found that the filter condition acct_id = 'A012709548'::bpchar exists on both tables.

+

+

Add a partial cluster key on the acct_id column of the two tables, and run VACUUM FULL to make the partial clustering take effect. The scan performance improves.
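A sketch of the change (the table name is illustrative; only acct_id comes from the case):

ALTER TABLE t_account ADD PARTIAL CLUSTER KEY (acct_id);
VACUUM FULL t_account;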

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0482.html b/docs/dws/dev/dws_04_0482.html new file mode 100644 index 00000000..519048af --- /dev/null +++ b/docs/dws/dev/dws_04_0482.html @@ -0,0 +1,18 @@ + + +

Case: Adjusting the Table Storage Mode in a Medium Table

+

Symptom

In the GaussDB(DWS) database, row-store tables use the row execution engine and column-store tables use the column execution engine. If a SQL statement involves both row-store and column-store tables, the system automatically falls back to the row execution engine. Because the column execution engine performs much better (except for indexscan-related operators), column-store tables are recommended. This is especially important for tables that stage medium-sized intermediate result sets, so choose their storage type carefully.

+

During a test at a site, the following execution plan was produced. The customer expected the performance to be improved and the result to be returned within 3s.

+

+
+

Optimization Analysis

Analysis shows that the row engine is used because both the temporary plan table input_acct_id_tbl and the intermediate result staging table row_unlogged_table are row-store tables.

+

After the two tables are changed into column-store tables, performance improves and the result is returned within 1.6s.
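One way to switch such a staging table to column storage is to rebuild it; a sketch using one of the tables from the case (the new table name is illustrative):

CREATE TABLE input_acct_id_tbl_col
WITH (orientation = column)
AS SELECT * FROM input_acct_id_tbl;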

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0483.html b/docs/dws/dev/dws_04_0483.html new file mode 100644 index 00000000..0cb4c9a8 --- /dev/null +++ b/docs/dws/dev/dws_04_0483.html @@ -0,0 +1,20 @@ + + +

Case: Adjusting the Local Clustering Column

+

Symptom

During a test at a site, the following execution plan was produced. The customer expected the performance to be improved and the result to be returned within 3s.

+

+
+

Optimization Analysis

The analysis shows that the performance bottleneck of this plan is the scan of lfbank.f_ev_dp_kdpl_zhmin. The scan condition of this table is as follows:

+

+

Modify the lfbank.f_ev_dp_kdpl_zhmin table to a column-store table, create a partial cluster key (PCK) on the yezdminc column, and set PARTIAL_CLUSTER_ROWS to 100000000. The execution plan after optimization is as follows:

+

+
  • This method actually sacrifices the performance during data import to improve the query performance.
  • The number of local sorting tuples is increased, and you need to increase the value of psort_work_mem to improve the sorting efficiency.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0484.html b/docs/dws/dev/dws_04_0484.html new file mode 100644 index 00000000..d754bd99 --- /dev/null +++ b/docs/dws/dev/dws_04_0484.html @@ -0,0 +1,18 @@ + + +

Case: Reconstructing Partition Tables

+

Symptom

In the following simple SQL statement, the performance bottleneck is the scan of the dwcjk table.

+

+

+
+

Optimization Analysis

The cjrq field of the table data in the service layer has clear date characteristics, which fits a partitioned table. Redesign the table definition of dwcjk: set the cjrq field as the partition key with day as the interval unit, and define the partitioned table dwcjk_part. With this change, the performance is nearly doubled.
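A sketch of such a day-partitioned definition (only cjrq and the day granularity come from the case; the other columns and names are illustrative):

CREATE TABLE dwcjk_part
(
    cjbh varchar(32),            -- hypothetical business column
    cjrq date NOT NULL           -- partition key from the case
)
DISTRIBUTE BY hash(cjbh)
PARTITION BY RANGE (cjrq)
(
    PARTITION p20161001 VALUES LESS THAN ('2016-10-02'),
    PARTITION p20161002 VALUES LESS THAN ('2016-10-03'),
    PARTITION pmax      VALUES LESS THAN (MAXVALUE)
);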

+

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0485.html b/docs/dws/dev/dws_04_0485.html new file mode 100644 index 00000000..d1817700 --- /dev/null +++ b/docs/dws/dev/dws_04_0485.html @@ -0,0 +1,109 @@ + + +

Case: Adjusting the GUC Parameter best_agg_plan

+

Symptom

The t1 table is defined as follows:

+
create table t1(a int, b int, c int) distribute by hash(a);
+
+ +
+

Assume that the distribution column of the result set produced by the operator below the agg is setA, and that the group by column of the agg operation is setB. Within the stream framework, the agg operation can then be performed in two scenarios.

+
  1. setA is a subset of setB.

    In this scenario, the aggregation result of the lower-layer result set is already correct and can be directly used by the upper-layer operator. For details, see the following example:

    +
    explain select a, count(1) from t1 group by a;
    + id |          operation           | E-rows | E-width | E-costs  
    +----+------------------------------+--------+---------+---------
    +  1 | ->  Streaming (type: GATHER) |     30 |       4 | 15.56   
    +  2 |    ->  HashAggregate         |     30 |       4 | 14.31   
    +  3 |       ->  Seq Scan on t1     |     30 |       4 | 14.14   
    +(3 rows)
    +
    + +
    +
  2. setA is not a subset of setB.

    In this scenario, the Stream execution framework is classified into the following three plans:

    +

    hashagg+gather(redistribute)+hashagg

    +

    redistribute+hashagg(+gather)

    +

    hashagg+redistribute+hashagg(+gather)

    +

    GaussDB(DWS) provides the GUC parameter best_agg_plan to intervene in plan generation and force the optimizer to generate a specific execution plan. Its value can be 0, 1, 2, or 3.

    +
    • When the value is 1, the first plan is forcibly generated.
    • When the value is 2, the second plan is forcibly generated if the group by column supports redistribution; otherwise, the first plan is generated.
    • When the value is 3, the third plan is generated if the group by column supports redistribution; otherwise, the first plan is generated.
    • When the value is 0, the query optimizer chooses the optimal plan among the three plans above based on their estimated costs.
    +

    For details, see the following example.

    +
    set best_agg_plan to 1;
    +SET
    +explain select b,count(1) from t1 group by b;
    + id |            operation            | E-rows | E-width | E-costs 
    +----+---------------------------------+--------+---------+---------
    +  1 | ->  HashAggregate               |      8 |       4 | 15.83   
    +  2 |    ->  Streaming (type: GATHER) |     25 |       4 | 15.83   
    +  3 |       ->  HashAggregate         |     25 |       4 | 14.33   
    +  4 |          ->  Seq Scan on t1     |     30 |       4 | 14.14   
    +(4 rows)
    +set best_agg_plan to 2;
    +SET
    +explain select b,count(1) from t1 group by b;
    + id |                operation                | E-rows | E-width | E-costs 
    +----+-----------------------------------------+--------+---------+---------
    +  1 | ->  Streaming (type: GATHER)            |     30 |       4 | 15.85   
    +  2 |    ->  HashAggregate                    |     30 |       4 | 14.60   
    +  3 |       ->  Streaming(type: REDISTRIBUTE) |     30 |       4 | 14.45   
    +  4 |          ->  Seq Scan on t1             |     30 |       4 | 14.14   
    +(4 rows)
    +set best_agg_plan to 3;
    +SET
    +explain select b,count(1) from t1 group by b;
    + id |                operation                | E-rows | E-width | E-costs 
    +----+-----------------------------------------+--------+---------+---------
    +  1 | ->  Streaming (type: GATHER)            |     30 |       4 | 15.84   
    +  2 |    ->  HashAggregate                    |     30 |       4 | 14.59   
    +  3 |       ->  Streaming(type: REDISTRIBUTE) |     25 |       4 | 14.59   
    +  4 |          ->  HashAggregate              |     25 |       4 | 14.33   
    +  5 |             ->  Seq Scan on t1          |     30 |       4 | 14.14   
    +(5 rows)
    +
    + +
    +
+
+

Optimization

Generally, the optimizer chooses an optimal execution plan, but cost estimation, especially for intermediate result sets, can deviate widely and lead to poor agg plans. In such cases, use best_agg_plan to adjust the agg execution model.

+

When the aggregation convergence ratio is very small, that is, when the agg operation does not significantly reduce the number of rows (a reduction factor of about 5 is the critical point), choose the redistribute+hashagg or hashagg+redistribute+hashagg plan.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0486.html b/docs/dws/dev/dws_04_0486.html new file mode 100644 index 00000000..e1814b38 --- /dev/null +++ b/docs/dws/dev/dws_04_0486.html @@ -0,0 +1,39 @@ + + +

Case: Rewriting SQL and Deleting Subqueries (Case 1)

+

Symptom

select 
+    1,
+    (select count(*) from customer_address_001 a4 where a4.ca_address_sk = a.ca_address_sk) as GZCS 
+from customer_address_001 a;
+
+ +
+

The performance of this SQL statement is poor. The execution plan contains a SubPlan:

+

+
+

Optimization

The core of this optimization is to eliminate the subquery. Service scenario analysis shows that a.ca_address_sk is never null, so in terms of SQL syntax the statement can be rewritten as follows:

+
select 
+count(*) 
+from customer_address_001 a4, customer_address_001 a
+where a4.ca_address_sk = a.ca_address_sk
+group by  a.ca_address_sk;
+
+ +
+

To ensure that the modified statement is functionally identical, NOT NULL is added to customer_address_001.ca_address_sk.
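A sketch of the accompanying constraint change:

ALTER TABLE customer_address_001 ALTER COLUMN ca_address_sk SET NOT NULL;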

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0487.html b/docs/dws/dev/dws_04_0487.html new file mode 100644 index 00000000..2a22db25 --- /dev/null +++ b/docs/dws/dev/dws_04_0487.html @@ -0,0 +1,62 @@ + + +

Case: Rewriting SQL and Deleting Subqueries (Case 2)

+

Symptom

At a site, a customer reported that the following SQL statement had been running for more than one day without finishing:

+
UPDATE calc_empfyc_c_cusr1 t1
+SET ln_rec_count =
+ (
+    SELECT CASE WHEN current_date - ln_process_date + 1 <= 12 THEN 0 ELSE t2.ln_rec_count END 
+    FROM calc_empfyc_c1_policysend_tmp t2
+    WHERE t1.ln_branch = t2.ln_branch AND t1.ls_policyno_cusr1 = t2.ls_policyno_cusr1
+)
+WHERE dsign = '1'
+AND flag = '1'
+AND EXISTS
+    (SELECT 1
+    FROM calc_empfyc_c1_policysend_tmp t2
+    WHERE t1.ln_branch = t2.ln_branch AND t1.ls_policyno_cusr1 = t2.ls_policyno_cusr1
+    );
+
+ +
+

The corresponding execution plan is as follows:

+

+
+

Optimization

A SubPlan exists in the execution plan, and its computation accounts for a large proportion of the total time; the SubPlan is the performance bottleneck.

+

Based on the SQL syntax, you can rewrite the statement to remove the SubPlan as follows:

+
UPDATE calc_empfyc_c_cusr1 t1
+SET ln_rec_count = CASE WHEN current_date - ln_process_date + 1 <= 12 THEN 0 ELSE t2.ln_rec_count END
+FROM calc_empfyc_c1_policysend_tmp t2
+WHERE 
+t1.dsign = '1' AND t1.flag = '1' 
+AND t1.ln_branch = t2.ln_branch AND t1.ls_policyno_cusr1 = t2.ls_policyno_cusr1;
+
+ +
+

The modified SQL statement completes within 50s.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0488.html b/docs/dws/dev/dws_04_0488.html new file mode 100644 index 00000000..07a6fc20 --- /dev/null +++ b/docs/dws/dev/dws_04_0488.html @@ -0,0 +1,69 @@ + + +

Case: Rewriting SQL Statements and Eliminating Prune Interference

+

Symptom

In a test at a site, ddw_f10_op_cust_asset_mon is a partitioned table whose partition key is year_mth, a string combining year and month values.

+

The following figure shows the tested SQL statements:

+
select  
+    count(1) 
+from t_ddw_f10_op_cust_asset_mon b1
+where b1.year_mth between to_char(add_months(to_date('20170222','yyyymmdd'), -11),'yyyymm') and substr('20170222',1,6);
+
+ +
+

The test result shows that scanning this table takes 135s, which is likely the performance bottleneck.

+

add_months is a locally adapted function, defined as follows:

+
CREATE OR REPLACE FUNCTION ADD_MONTHS(date, integer) RETURNS date
+    AS $$
+    SELECT
+    CASE 
+    WHEN (EXTRACT(day FROM $1) = EXTRACT(day FROM (date_trunc('month', $1) + INTERVAL '1 month - 1 day'))) THEN
+        date_trunc('month', $1) + CAST($2 + 1 || ' month - 1 day' as interval)
+    ELSE
+        $1 + CAST($2 || ' month' as interval)
+    END
+    $$
+    LANGUAGE SQL
+    IMMUTABLE;
+
+ +
+
+
+

Optimization

According to the statement execution plan, the base table filter is displayed as follows:

+
Filter: (((year_mth)::text <= '201702'::text) AND ((year_mth)::text >= to_char(add_months(to_date('20170222'::text, 'YYYYMMDD'::text), (-11)), 'YYYYMM'::text)))
+

The filter contains the expression to_char(add_months(to_date('20170222','yyyymmdd'),-11),'yyyymm'). This non-constant expression cannot be used for pruning, so the query scans all partitions of the partitioned table.

+

As recorded in pg_proc, to_date and to_char are stable functions. Under the function volatility rules described for PostgreSQL, such functions cannot be reduced to Const values in the preprocessing phase, and this is the root cause preventing partition pruning.

+

Based on the preceding analysis, rewriting the expression so that it can be evaluated for partition pruning is the key to performance optimization. The original SQL statement can be rewritten as follows:

+
select  
+    count(1) 
+from t_ddw_f10_op_cust_asset_mon b1
+where b1.year_mth between(substr(ADD_MONTHS('20170222'::date, -11), 1, 4)||substr(ADD_MONTHS('20170222'::date, -11), 6, 2)) and substr('20170222',1,6);
+
+ +
+

The execution time of modified SQL statements is reduced from 135s to 18s.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0489.html b/docs/dws/dev/dws_04_0489.html new file mode 100644 index 00000000..fc65d571 --- /dev/null +++ b/docs/dws/dev/dws_04_0489.html @@ -0,0 +1,94 @@ + + +

Case: Rewriting SQL Statements and Deleting in-clause

+

Symptom

The in-clause/any-clause is a common SQL constraint. Sometimes the clause following in or any is a list of constants. For example:

+
select 
+count(1) 
+from calc_empfyc_c1_result_tmp_t1 
+where ls_pid_cusr1 in ('20120405', '20130405');
+
+ +
+

or

+
select 
+count(1) 
+from calc_empfyc_c1_result_tmp_t1 
+where ls_pid_cusr1 = any(array['20120405', '20130405']);
+
+ +
+

Some special usages are as follows:

+
SELECT 
+ls_pid_cusr1,COALESCE(max(round((current_date-bthdate)/365)),0)
+FROM calc_empfyc_c1_result_tmp_t1 t1,p10_md_tmp_t2 t2
+WHERE t1.ls_pid_cusr1 = any(values(id),(id15))
+GROUP BY ls_pid_cusr1;
+
+ +
+

Here, id and id15 are columns of p10_md_tmp_t2. ls_pid_cusr1 = any(values(id),(id15)) is equivalent to t1.ls_pid_cusr1 = id or t1.ls_pid_cusr1 = id15.

+

Therefore, the join condition is essentially an OR of equalities rather than a single equality, so nestloop must be used for this join. The execution plan is as follows:

+

+
+

Optimization

The test result shows that both result sets are too large, so nestloop is time-consuming and takes more than one hour to return results. The key to optimization is therefore to eliminate the nestloop and use the more efficient hashjoin. Since the statement is semantically equivalent to a union of two equi-joins, it can be rewritten as follows:

+
select
+ls_pid_cusr1,COALESCE(max(round(ym/365)),0)
+from
+(
+         (
+                   SELECT 
+                            ls_pid_cusr1,(current_date-bthdate) as ym
+                   FROM calc_empfyc_c1_result_tmp_t1 t1,p10_md_tmp_t2 t2
+                   WHERE t1.ls_pid_cusr1 = t2.id and t1.ls_pid_cusr1 != t2.id15
+         )
+         union all
+         (
+                   SELECT 
+                            ls_pid_cusr1,(current_date-bthdate) as ym
+                   FROM calc_empfyc_c1_result_tmp_t1 t1,p10_md_tmp_t2 t2
+                   WHERE t1.ls_pid_cusr1 = id15
+         )
+)
+GROUP BY ls_pid_cusr1;
+
+ +
+

The optimized SQL statement consists of two equi-join subqueries, each of which can use hashjoin in this scenario. The optimized execution plan is as follows:

+

+

Before the optimization, no result is returned for more than 1 hour. After the optimization, the result is returned within 7s.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0490.html b/docs/dws/dev/dws_04_0490.html new file mode 100644 index 00000000..0884c463 --- /dev/null +++ b/docs/dws/dev/dws_04_0490.html @@ -0,0 +1,134 @@ + + +

Case: Setting Partial Cluster Keys

+

You can add PARTIAL CLUSTER KEY(column_name[,...]) to the definition of a column-store table to set one or more columns as partial cluster keys. Then, during data import, every 70 CUs (4.2 million rows) are sorted by the cluster keys by default, which narrows the value range of each of the new 70 CUs. If the where condition of a query references these columns, filtering performance improves.

+
  1. Use partial cluster keys.
    CREATE TABLE lineitem
    +(
    +L_ORDERKEY    BIGINT NOT NULL
    +, L_PARTKEY     BIGINT NOT NULL
    +, L_SUPPKEY     BIGINT NOT NULL
    +, L_LINENUMBER  BIGINT NOT NULL
    +, L_QUANTITY    DECIMAL(15,2) NOT NULL
    +, L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL
    +, L_DISCOUNT    DECIMAL(15,2) NOT NULL
    +, L_TAX         DECIMAL(15,2) NOT NULL
    +, L_RETURNFLAG  CHAR(1) NOT NULL
    +, L_LINESTATUS  CHAR(1) NOT NULL
    +, L_SHIPDATE    DATE NOT NULL
    +, L_COMMITDATE  DATE NOT NULL
    +, L_RECEIPTDATE DATE NOT NULL
    +, L_SHIPINSTRUCT CHAR(25) NOT NULL
    +, L_SHIPMODE     CHAR(10) NOT NULL
    +, L_COMMENT      VARCHAR(44) NOT NULL
    +)
    +with (orientation = column)
    +distribute by hash(L_ORDERKEY);
    +
    +select
    +sum(l_extendedprice * l_discount) as revenue
    +from
    +lineitem
    +where
    +l_shipdate >= '1994-01-01'::date
    +and l_shipdate < '1994-01-01'::date + interval '1 year'
    +and l_discount between 0.06 - 0.01 and 0.06 + 0.01
    +and l_quantity < 24;
    +
    + +
    +

    In the where condition, both the l_shipdate and l_quantity columns have a small number of distinct values, and their min/max values can be used for filtering. Therefore, modify the table definition as follows:

    +
    CREATE TABLE lineitem
    +(
    +L_ORDERKEY    BIGINT NOT NULL
    +, L_PARTKEY     BIGINT NOT NULL
    +, L_SUPPKEY     BIGINT NOT NULL
    +, L_LINENUMBER  BIGINT NOT NULL
    +, L_QUANTITY    DECIMAL(15,2) NOT NULL
    +, L_EXTENDEDPRICE  DECIMAL(15,2) NOT NULL
    +, L_DISCOUNT    DECIMAL(15,2) NOT NULL
    +, L_TAX         DECIMAL(15,2) NOT NULL
    +, L_RETURNFLAG  CHAR(1) NOT NULL
    +, L_LINESTATUS  CHAR(1) NOT NULL
    +, L_SHIPDATE    DATE NOT NULL
    +, L_COMMITDATE  DATE NOT NULL
    +, L_RECEIPTDATE DATE NOT NULL
    +, L_SHIPINSTRUCT CHAR(25) NOT NULL
    +, L_SHIPMODE     CHAR(10) NOT NULL
    +, L_COMMENT      VARCHAR(44) NOT NULL
    +, partial cluster key(l_shipdate, l_quantity)
    +)
    +with (orientation = column)
    +distribute by hash(L_ORDERKEY);
    +
    + +
    +

    Import the data again and run the query statement. Then, compare the execution time before and after partial cluster keys are used.

    +
    Figure 1 Partial cluster keys not used
    +
    Figure 2 CU loading without partial cluster keys
    +
    Figure 3 Partial cluster keys used
    +
    Figure 4 CU loading with partial cluster keys
    +

    After partial cluster keys are used, the execution time of 5-- CStore Scan on public.lineitem decreases by 1.2s because 84 CUs are filtered out.

    +
  2. Select partial cluster keys.
    • The following data types support cluster keys: character varying(n), varchar(n), character(n), char(n), text, nvarchar2, timestamp with time zone, timestamp without time zone, date, time without time zone, and time with time zone.
    • The smaller the number of distinct values in a partial cluster key, the better the filtering performance.
    • Prefer columns that can filter out a larger amount of data as partial cluster keys.
    • If multiple columns are selected as partial cluster keys, the columns are used in sequence to sort data. You are advised to select at most three columns.
    +
  3. Modify parameters to reduce the impact of partial cluster keys on the import performance.

    After partial cluster keys are used, data will be sorted when they are imported, affecting the import performance. If all the data can be sorted in the memory, the keys have little impact on import. If some data cannot be sorted in the memory and is written into a temporary file for sorting, the import performance will be greatly affected.

    +

    The memory used for sorting is specified by the psort_work_mem parameter. You can set it to a larger value so that the sorting has less impact on the import performance.

    +

    The volume of data to be sorted is specified by the PARTIAL_CLUSTER_ROWS parameter of the table. Decreasing the value of this parameter reduces the amount of data to be sorted at a time. PARTIAL_CLUSTER_ROWS is usually used along with the MAX_BATCHROW parameter. The value of PARTIAL_CLUSTER_ROWS must be an integer multiple of the MAX_BATCHROW value. MAX_BATCHROW specifies the maximum number of rows in a CU.

    +
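A sketch combining these knobs (the numbers are illustrative; partial_cluster_rows is kept an integer multiple of max_batchrow):

SET psort_work_mem = '512MB';

CREATE TABLE lineitem_pck
(
    l_orderkey bigint,
    l_shipdate date,
    partial cluster key(l_shipdate)
)
WITH (orientation = column, max_batchrow = 60000, partial_cluster_rows = 600000);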

    +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0491.html b/docs/dws/dev/dws_04_0491.html new file mode 100644 index 00000000..31b7080a --- /dev/null +++ b/docs/dws/dev/dws_04_0491.html @@ -0,0 +1,24 @@ + + +

SQL Execution Troubleshooting

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0492.html b/docs/dws/dev/dws_04_0492.html new file mode 100644 index 00000000..e564a035 --- /dev/null +++ b/docs/dws/dev/dws_04_0492.html @@ -0,0 +1,20 @@ + + +

Low Query Efficiency

+

A query that used to take a few milliseconds may now take several seconds, and one that used to take several seconds may now take half an hour. This section describes how to analyze and rectify such low-efficiency issues.

+

Procedure

Perform the following procedure to locate the cause of this fault.

+
  1. Run the analyze command to analyze the database.

    The analyze command updates data statistics, such as the sizes and attributes of all tables. It is a lightweight command and can be executed frequently. If query efficiency improves or is restored after it runs, the autovacuum process is not functioning well and requires further analysis.

    +
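    For example (the table name is illustrative):

    ANALYZE;             -- collect statistics for all tables in the current database
    ANALYZE customer;    -- or for a single table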

  2. Check whether the query statement returns unnecessary information.

    For example, if we only need the first 10 records in a table but the query statement searches all records in the table, the query efficiency is fine for a table containing only 50 records but very low for a table containing 50,000 records.

    +

    If an application requires only a part of data information but the query statement returns all information, add a LIMIT clause to the query statement to restrict the number of returned records. In this way, the database optimizer can optimize space and improve query efficiency.

    +

  3. Check whether the query statement still has a low response even when it is solely executed.

    Run the query statement when there are no or only a few other query requests in the database, and observe the query efficiency. If the efficiency is high, the previous issue is possibly caused by a heavily loaded host in the database system or an inefficient execution plan.

    +

  4. Run the same query statement repeatedly and check the query efficiency.

    One major cause that will reduce query efficiency is that the required information is not cached in the memory or is replaced by other query requests because of insufficient memory resources.

    +

    Run the same query statement repeatedly. If the query efficiency increases gradually, the previous issue might be caused by this reason.

    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0494.html b/docs/dws/dev/dws_04_0494.html new file mode 100644 index 00000000..ca801022 --- /dev/null +++ b/docs/dws/dev/dws_04_0494.html @@ -0,0 +1,17 @@ + + +

DROP TABLE Fails to Be Executed

+

Problem

DROP TABLE fails to be executed in the following scenarios:

+ +
+

Possible Causes

The table_name table exists on some nodes only.

+
+

Troubleshooting Method

In the preceding scenarios, if DROP TABLE table_name fails to be executed, run DROP TABLE IF EXISTS table_name to successfully drop table_name.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0495.html b/docs/dws/dev/dws_04_0495.html new file mode 100644 index 00000000..4c5dfbec --- /dev/null +++ b/docs/dws/dev/dws_04_0495.html @@ -0,0 +1,16 @@ + + +

Different Data Is Displayed for the Same Table Queried By Multiple Users

+

Problem

Two users log in to the same database human_resource and run the select count(*) from areas statement separately to query the areas table, but obtain different results.

+
+

Possible Causes

Check whether the two users really query the same table. In a relational database, a table is identified by three elements: database, schema, and table. Here, database is human_resource and table is areas. Next, check the schema. Logging in as dbadmin and user01 separately shows that search_path is public for dbadmin and $user for user01. By default, no schema with the same name as the cluster administrator dbadmin is created, so dbadmin's tables are created in public when no schema is specified. However, when a common user such as user01 is created, a schema with the same name (user01) is created by default, so user01's tables are created in user01 when no schema is specified. In conclusion, the two users are operating on tables with the same name in different schemas, which are not the same table.

+
+

Troubleshooting Method

Use schema.table to determine a table for query.
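For example (schema and table names follow the scenario above):

SHOW search_path;                    -- check which schema is searched first
SELECT count(*) FROM public.areas;   -- the table dbadmin queries
SELECT count(*) FROM user01.areas;   -- the table user01 queries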

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0496.html b/docs/dws/dev/dws_04_0496.html new file mode 100644 index 00000000..6f2cfa97 --- /dev/null +++ b/docs/dws/dev/dws_04_0496.html @@ -0,0 +1,17 @@ + + +

An Error Occurs During the Integer Conversion

+

Problem

The following error is reported during the integer conversion:

+
Invalid input syntax for integer: "13."
+
+

Possible Causes

Some data types cannot be converted to the target data type.

+
+

Troubleshooting

Gradually narrow down the range of SQL statements to locate the fault.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0497.html b/docs/dws/dev/dws_04_0497.html new file mode 100644 index 00000000..8091e855 --- /dev/null +++ b/docs/dws/dev/dws_04_0497.html @@ -0,0 +1,212 @@ + + +

Automatic Retry upon SQL Statement Execution Errors

+

With automatic retry (referred to as CN retry), GaussDB(DWS) retries an SQL statement when the execution of this statement fails. If an SQL statement sent from the gsql client, JDBC driver, or ODBC driver fails to be executed, the CN can automatically identify the error reported during execution and re-deliver the task to retry.

+

The restrictions of this function are as follows:

+ +

Table 1 lists the error types supported by CN retry and the corresponding error codes. You can use the GUC parameter retry_ecode_list to set the list of error types supported by CN retry. You are not advised to modify this parameter. To modify it, contact the technical support.

Table 1 Error types supported by CN retry

Error Type                                                            Error Code  Remarks
CONNECTION_RESET_BY_PEER                                              YY001       TCP communication errors: Connection reset by peer (communication between the CN and DNs)
STREAM_CONNECTION_RESET_BY_PEER                                       YY002       TCP communication errors: Stream connection reset by peer (communication between DNs)
LOCK_WAIT_TIMEOUT                                                     YY003       Lock wait timeout
CONNECTION_TIMED_OUT                                                  YY004       TCP communication errors: Connection timed out
SET_QUERY_ERROR                                                       YY005       Failed to deliver the SET command: Set query
OUT_OF_LOGICAL_MEMORY                                                 YY006       Failed to apply for memory: Out of logical memory
SCTP_MEMORY_ALLOC                                                     YY007       SCTP communication errors: Memory allocate error
SCTP_NO_DATA_IN_BUFFER                                                YY008       SCTP communication errors: SCTP no data in buffer
SCTP_RELEASE_MEMORY_CLOSE                                             YY009       SCTP communication errors: Release memory close
SCTP_TCP_DISCONNECT                                                   YY010       SCTP communication errors: TCP disconnect
SCTP_DISCONNECT                                                       YY011       SCTP communication errors: SCTP disconnect
SCTP_REMOTE_CLOSE                                                     YY012       SCTP communication errors: Stream closed by remote
SCTP_WAIT_POLL_UNKNOW                                                 YY013       Waiting for an unknown poll: SCTP wait poll unknown
SNAPSHOT_INVALID                                                      YY014       Snapshot invalid
ERRCODE_CONNECTION_RECEIVE_WRONG                                      YY015       Connection receive wrong
OUT_OF_MEMORY                                                         53200       Out of memory
CONNECTION_FAILURE                                                    08006       GTM errors: Connection failure
CONNECTION_EXCEPTION                                                  08000       Failed to communicate with DNs due to connection errors: Connection exception
ADMIN_SHUTDOWN                                                        57P01       System shutdown by administrators: Admin shutdown
STREAM_REMOTE_CLOSE_SOCKET                                            XX003       Remote socket disabled: Stream remote close socket
ERRCODE_STREAM_DUPLICATE_QUERY_ID                                     XX009       Duplicate query id
ERRCODE_STREAM_CONCURRENT_UPDATE                                      YY016       Stream concurrent update
ERRCODE_LLVM_BAD_ALLOC_ERROR                                          CG003       Memory allocation error: Allocate error
ERRCODE_LLVM_FATAL_ERROR                                              CG004       Fatal error
HashJoin Temporary File Read Error (ERRCODE_HASHJOIN_TEMP_FILE_ERROR) F0011       Temporary file read error, File error
To enable CN retry, set the following GUC parameters: +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0507.html b/docs/dws/dev/dws_04_0507.html new file mode 100644 index 00000000..ced65c75 --- /dev/null +++ b/docs/dws/dev/dws_04_0507.html @@ -0,0 +1,18 @@ + + +

User-Defined Functions

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0509.html b/docs/dws/dev/dws_04_0509.html new file mode 100644 index 00000000..f279e045 --- /dev/null +++ b/docs/dws/dev/dws_04_0509.html @@ -0,0 +1,487 @@ + + +

PL/Java Functions

+

With the GaussDB(DWS) PL/Java functions, you can choose your favorite Java IDE to write Java methods and install the JAR files containing these methods into the GaussDB(DWS) database before invoking them. GaussDB(DWS) PL/Java is developed based on open-source PL/Java 1.5.5 and uses JDK 1.8.0_292.

+

Constraints

Java UDFs are intended for lightweight Java computing logic. You are advised not to encapsulate complete business services in Java UDFs.

+ +
+

Examples

Before using PL/Java, you need to pack the implementation of Java methods into a JAR package and deploy it into the database. Then, create functions as a database administrator. For compatibility purposes, use JDK 1.8.0_262 for compilation.

+
  1. Compile a JAR package.

    Java method implementation and JAR package archiving can be done in an integrated development environment (IDE). The following is a simple example of compiling and archiving through the command line. You can create a JAR package that contains a single method in a similar way.

    +

    First, prepare an Example.java file that contains a method for converting substrings to uppercase. In the following example, Example is the class name and upperString is the method name:

    +
    public class Example 
    +{
    +    public static String upperString (String text, int beginIndex, int endIndex) 
    +    {
    +        return text.substring(beginIndex, endIndex).toUpperCase();
    +    }
    +}
    +
    + +
    +

    Then, create a manifest.txt file containing the following content:

    +
    Manifest-Version: 1.0
    +Main-Class: Example
    +Specification-Title: "Example"
    +Specification-Version: "1.0"
    +Created-By: 1.6.0_35-b10-428-11M3811
    +Build-Date: 08/14/2018 10:09 AM
    +
    + +
    +

    Manifest-Version specifies the version of the manifest file. Main-Class specifies the main class used by the .jar file. Specification-Title and Specification-Version are the extended attributes of the package. Specification-Title specifies the title of the extended specification and Specification-Version specifies the version of the extended specification. Created-By specifies the person who created the file. Build-Date specifies the date when the file was created.

    +

    Finally, archive the .java file and package it into javaudf-example.jar.

    +
    javac Example.java
    +jar cfm javaudf-example.jar manifest.txt Example.class
    +
    + +
    +

    JAR package names must comply with JDK rules. If a name contains invalid characters, an error occurs when a function is deployed or used.

    +
    +

  2. Deploy the JAR package.

    First store the JAR package on an OBS server. For details, see "Uploading a File" in Object Storage Service Console Operation Guide. Then, create the access key AK/SK. For details about how to create access keys, see "Creating an Access Key (AK and SK)" in Data Warehouse Service User Guide. After that, log in to the database, run the gs_extend_library function, and import the package to GaussDB(DWS).

    +
    SELECT gs_extend_library('addjar', 'obs://bucket/path/javaudf-example.jar accesskey=access_key_value_to_be_replaced  secretkey=secret_access_key_value_to_be_replaced  region=region_name libraryname=example');
    +
    + +
    +

    For details about how to use the gs_extend_library function, see Manage JAR packages and files. Change the values of AK and SK as needed. Replace region_name with an actual region name.

    +

  3. Use a PL/Java function.

    Log in to the database as a user who has the sysadmin permission (for example, dbadmin) and create the java_upperstring function:

    +
    CREATE FUNCTION java_upperstring(VARCHAR, INTEGER, INTEGER)
    +    RETURNS VARCHAR
    +    AS 'Example.upperString'
    +LANGUAGE JAVA;
    +
    + +
    +
    • The data types defined in the java_upperstring function must be GaussDB(DWS) types and must match the parameter and return types of the upperString method defined in step 1. For details about the mapping between GaussDB(DWS) and Java data types, see Table 1.
    • The AS clause specifies the class name and static method name of the Java method invoked by the function, in the format Class name.Method name. The class name and method name must match the Java class and method defined in step 1. In this example, no package is specified. If a package is specified, provide the fully qualified class name when using CREATE FUNCTION.
    • To use PL/Java functions, set LANGUAGE to JAVA.
    • For details about CREATE FUNCTION, see Create functions.
    +
    +

    Execute the java_upperstring function.

    +
    SELECT java_upperstring('test', 0, 1);
    +
    + +
    +

    The expected result is as follows:

    +
     java_upperstring
    +---------------------
    + T
    +(1 row)
    +
    + +
    +

  4. Authorize a common user to use the PL/Java function.

    Create a common user named udf_user.

    +
    CREATE USER udf_user PASSWORD 'password';
    +
    + +
    +

    The following commands grant user udf_user the permission to use the java_upperstring function. Note that the user can use the function only if it also has the usage permission on the schema of the function.

    +
    GRANT ALL PRIVILEGES ON SCHEMA public TO udf_user;
    +GRANT ALL PRIVILEGES ON FUNCTION java_upperstring(VARCHAR, INTEGER, INTEGER) TO udf_user;
    +
    + +
    +

    Log in to the database as user udf_user.

    +
    SET SESSION SESSION AUTHORIZATION udf_user PASSWORD 'password';
    +
    + +
    +

    Execute the java_upperstring function.

    +
    SELECT public.java_upperstring('test', 0, 1);
    +
    + +
    +

    The expected result is as follows:

    +
     java_upperstring
    +---------------------
    + T
    +(1 row)
    +
    + +
    +

  5. Delete the function.

    If you no longer need this function, delete it.
    DROP FUNCTION java_upperstring;
    +
    + +
    +
    +

  6. Uninstall the JAR package.

    Use the gs_extend_library function to uninstall the JAR package.

    +
    SELECT gs_extend_library('rmjar', 'libraryname=example');
    +
    + +
    +

+
+

SQL Definition and Usage

+
+

Mapping for Basic Data Types

+
Table 1 PL/Java mapping for default data types

GaussDB(DWS) | Java
-------------+----------------------------------------------------
BOOLEAN      | boolean
"char"       | byte
bytea        | byte[]
SMALLINT     | short
INTEGER      | int
BIGINT       | long
FLOAT4       | float
FLOAT8       | double
CHAR         | java.lang.String
VARCHAR      | java.lang.String
TEXT         | java.lang.String
name         | java.lang.String
DATE         | java.sql.Timestamp
TIME         | java.sql.Time (stored value treated as local time)
TIMETZ       | java.sql.Time
TIMESTAMP    | java.sql.Timestamp
TIMESTAMPTZ  | java.sql.Timestamp

+
+
+
+

Array Type Processing

GaussDB(DWS) can convert basic array types. You only need to append a pair of square brackets ([]) to the data type when creating a function.

+
CREATE FUNCTION java_arrayLength(INTEGER[])
+    RETURNS INTEGER
+    AS 'Example.getArrayLength'
+LANGUAGE JAVA;
+

Java code is similar to the following:

+
public class Example
+{
+    public static int getArrayLength(Integer[] intArray)
+    {
+        return intArray.length;
+    }
+}
+

Invoke the following statement:

+
SELECT java_arrayLength(ARRAY[1, 2, 3]);
+

The expected result is as follows:

+
java_arrayLength
+---------------------
+3
+(1 row)
+
+

NULL Handling

By default, GaussDB(DWS) data types that map to primitive Java types cannot carry NULL values. If you need a Java function to receive and process NULL values passed from GaussDB(DWS), specify the Java wrapper class in the AS clause as follows:

+
CREATE FUNCTION java_countnulls(INTEGER[])
+    RETURNS INTEGER
+    AS 'Example.countNulls(java.lang.Integer[])'
+LANGUAGE JAVA;
+

Java code is similar to the following:

+
public class Example
+{
+    public static int countNulls(Integer[] intArray)
+    {
+        int nullCount = 0;
+        for (int idx = 0; idx < intArray.length; ++idx)
+        {
+            if (intArray[idx] == null)
+            nullCount++;
+        }
+        return nullCount;
+    }
+}
+

Invoke the following statement:

+
SELECT java_countNulls(ARRAY[null, 1, null, 2, null]);
+

The expected result is as follows:

+
java_countNulls
+--------------------
+3
+(1 row)
+
+

Overloaded Functions

PL/Java supports overloaded functions. You can create functions with the same name or invoke overloaded functions from Java code. The procedure is as follows:

+
  1. Create overloaded functions.

    For example, create two Java methods with the same name, and specify the methods dummy(int) and dummy(String) with different parameter types.

    +
    public class Example
    +{
    +    public static int dummy(int value)
    +    {
    +        return value*2;
    +    }
    +    public static String dummy(String value)
    +    {
    +        return value;
    +    }
    +}
    +

    In addition, create two functions with the same names as the above two functions in GaussDB(DWS).

    +
    CREATE FUNCTION java_dummy(INTEGER)
    +    RETURNS INTEGER
    +    AS 'Example.dummy'
    +LANGUAGE JAVA;
    +
    +CREATE FUNCTION java_dummy(VARCHAR)
    +    RETURNS VARCHAR
    +    AS 'Example.dummy'
    +LANGUAGE JAVA;
    +

  2. Invoke the overloaded functions.

    GaussDB(DWS) invokes the functions that match the specified parameter type. The results of invoking the above two functions are as follows:

    +
    SELECT java_dummy(5);
    + java_dummy
    +-----------------
    +            10
    +(1 row)
    +
    +SELECT java_dummy('5');
    + java_dummy
    +---------------
    +5
    +(1 row)
    +

    Note that GaussDB(DWS) may implicitly convert data types. Therefore, you are advised to specify the parameter type when invoking an overloaded function.

    +
    SELECT java_dummy(5::varchar);
    + java_dummy
    +----------------
    +5
    +(1 row)
    +

    In this case, the specified parameter type is preferentially used for matching. If there is no Java method matching the specified parameter type, the system implicitly converts the parameter and searches for Java methods based on the conversion result.

    +
    SELECT java_dummy(5::INTEGER);
    + java_dummy
    +-----------------
    +10
    +(1 row)
    +
    +DROP FUNCTION java_dummy(INTEGER);
    +
    +SELECT java_dummy(5::INTEGER);
    + java_dummy
    +----------------
    +5
    +(1 row)
    +

    Data types supporting implicit conversion are as follows:

    +
    • SMALLINT: It can be converted to the INTEGER type by default.
    • SMALLINT and INTEGER: They can be converted to the BIGINT type by default.
    • TINYINT, SMALLINT, INTEGER, and BIGINT: They can be converted to the BOOL type by default.
    • CHAR, NAME, BIGINT, INTEGER, SMALLINT, TINYINT, RAW, FLOAT4, FLOAT8, BPCHAR, VARCHAR, NVARCHAR2, DATE, TIMESTAMP, TIMESTAMPTZ, NUMERIC, and SMALLDATETIME: They can be converted to the TEXT type by default.
    • TEXT, CHAR, BIGINT, INTEGER, SMALLINT, TINYINT, RAW, FLOAT4, FLOAT8, BPCHAR, DATE, NVARCHAR2, TIMESTAMP, NUMERIC, and SMALLDATETIME: They can be converted to the VARCHAR type by default.
    +
    +

  3. Delete the overloaded functions.

    To delete an overloaded function, specify the parameter type for the function. Otherwise, the function cannot be deleted.

    +
    DROP FUNCTION java_dummy(INTEGER);
    +

+
+

GUC Parameters

+ + +
+

Exception Handling

If an exception occurs in the JVM, PL/Java exports the JVM stack information at the time of the exception to the client.

+
+

Logging

PL/Java uses the standard Java Logger. Therefore, you can record logs as follows:

+
Logger.getAnonymousLogger().config( "Time is " + new 
+Date(System.currentTimeMillis()));
+

An initialized Java Logger class is set to the CONFIG level by default, corresponding to the LOG level in GaussDB(DWS). In this case, log messages generated by Java Logger are all redirected to the GaussDB(DWS) backend. Then, the log messages are written into server logs or displayed on the user interface. MPPDB server logs record information at the LOG, WARNING, and ERROR levels. The SQL user interface displays logs at the WARNING and ERROR levels. The following table lists mapping between Java Logger levels and GaussDB(DWS) log levels.

+ +
Table 2 PL/Java log levels

java.util.logging.Level | GaussDB(DWS) Log Level
------------------------+-----------------------
SEVERE                  | ERROR
WARNING                 | WARNING
CONFIG                  | LOG
INFO                    | INFO
FINE                    | DEBUG1
FINER                   | DEBUG2
FINEST                  | DEBUG3

+
+
+

You can change Java Logger levels. For example, if the Java Logger level is changed to SEVERE by the following Java code, log messages (msg) will not be recorded in GaussDB(DWS) logs during WARNING logging.

+
Logger log = Logger.getAnonymousLogger();
+log.setLevel(Level.SEVERE);
+log.log(Level.WARNING, msg);
+
+

Security Issues

In GaussDB(DWS), PL/Java is an untrusted language. Only users with the sysadmin permission can create PL/Java functions, and they can grant other users the permission to use those functions. For details, see Authorize permissions for functions.

+

In addition, PL/Java restricts user access to the file system: Java methods cannot read most system files and cannot write, delete, or execute any system files.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0511.html b/docs/dws/dev/dws_04_0511.html new file mode 100644 index 00000000..243eb8ae --- /dev/null +++ b/docs/dws/dev/dws_04_0511.html @@ -0,0 +1,18 @@ + + +

PL/pgSQL Functions

+

PL/pgSQL is similar to PL/SQL of Oracle. It is a loadable procedural language.

+

The functions created using PL/pgSQL can be used in any place where you can use built-in functions. For example, you can create calculation functions with complex conditions and use them to define operators or use them for index expressions.

+

SQL is used by most databases as a query language. It is portable and easy to learn. Each SQL statement must be executed independently by a database server.

+

In this case, when a client application sends a query to the server, it must wait for it to be processed, receive and process the results, and then perform some calculation before sending more queries to the server. If the client and server are not on the same machine, all these operations will cause inter-process communication and increase network loads.

+

PL/pgSQL allows a whole block of computation and a series of queries to be grouped and executed inside the database server. This provides the power of a procedural language while keeping SQL easy to use, and it reduces client/server communication costs.

+ +

PL/pgSQL can use all data types, operators, and functions in SQL.

+

For details about the PL/pgSQL syntax for creating functions, see CREATE FUNCTION. As mentioned earlier, PL/pgSQL is similar to PL/SQL of Oracle and is a loadable procedural language. Its usage is similar to that of stored procedures, with one difference: stored procedures have no return values, whereas functions do.
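For example, a whole computation can be wrapped in a single server-side function and invoked like a built-in function (a minimal sketch; the function name and tax rate are hypothetical):

CREATE OR REPLACE FUNCTION add_tax(price numeric) RETURNS numeric AS $$
BEGIN
    RETURN price * 1.1;   -- hypothetical flat 10% tax
END;
$$ LANGUAGE plpgsql;

SELECT add_tax(100);   -- one round trip instead of query-compute-query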

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0512.html b/docs/dws/dev/dws_04_0512.html new file mode 100644 index 00000000..de277013 --- /dev/null +++ b/docs/dws/dev/dws_04_0512.html @@ -0,0 +1,37 @@ + + +

Stored Procedures

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0513.html b/docs/dws/dev/dws_04_0513.html new file mode 100644 index 00000000..72f7998c --- /dev/null +++ b/docs/dws/dev/dws_04_0513.html @@ -0,0 +1,14 @@ + + +

Stored Procedure

+

In GaussDB(DWS), business rules and logics are saved as stored procedures.

+

A stored procedure is a combination of SQL, PL/SQL, and Java statements, enabling business rule code to be moved from applications to databases and used by multiple programs at a time.

+

For details about how to create and invoke a stored procedure, see section "CREATE PROCEDURE" in SQL Syntax.
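The general pattern, consistent with the examples later in this guide, is sketched below (the procedure name is hypothetical):

CREATE OR REPLACE PROCEDURE proc_hello
AS
BEGIN
    dbms_output.put_line('hello');
END;
/

-- Invoke and then drop the stored procedure.
CALL proc_hello();
DROP PROCEDURE proc_hello;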

+

The functions and stored procedures created by using PL/pgSQL in PL/pgSQL Functions are applicable to all the following sections.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0514.html b/docs/dws/dev/dws_04_0514.html new file mode 100644 index 00000000..db98ab22 --- /dev/null +++ b/docs/dws/dev/dws_04_0514.html @@ -0,0 +1,11 @@ + + +

Data Types

+

A data type refers to a value set and an operation set defined on the value set. A GaussDB(DWS) database consists of tables, each of which is defined by its own columns. Each column corresponds to a data type. GaussDB(DWS) uses corresponding functions to perform operations on data based on data types. For example, GaussDB(DWS) can perform addition, subtraction, multiplication, and division operations on data of numeric values.
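For example, the numeric types support the usual arithmetic operators (a minimal sketch):

SELECT 2 + 3 AS addition, 7 * 6 AS multiplication, 10.0 / 4 AS division;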

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0515.html b/docs/dws/dev/dws_04_0515.html new file mode 100644 index 00000000..4e963282 --- /dev/null +++ b/docs/dws/dev/dws_04_0515.html @@ -0,0 +1,159 @@ + + +

Data Type Conversion

+

Certain data types in the database support implicit data type conversions, such as assignments and parameters invoked by functions. For other data types, you can use the type conversion functions provided by GaussDB(DWS), such as the CAST function, to forcibly convert them.
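For example, when no implicit conversion applies, an explicit conversion can be written with CAST or the :: operator (a minimal sketch):

SELECT CAST('123' AS INTEGER);
SELECT '2023-01-01'::DATE;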

+

Table 1 lists common implicit data type conversions in GaussDB(DWS).

+

The valid value range of DATE supported by GaussDB(DWS) is from 4713 B.C. to 294276 A.D.

+
+ +
Table 1 Implicit data type conversions

Raw Data Type | Target Data Type | Remarks
--------------+------------------+----------------------------------------------
CHAR          | VARCHAR2         | -
CHAR          | NUMBER           | Raw data must consist of digits.
CHAR          | DATE             | Raw data cannot exceed the valid date range.
CHAR          | RAW              | -
CHAR          | CLOB             | -
VARCHAR2      | CHAR             | -
VARCHAR2      | NUMBER           | Raw data must consist of digits.
VARCHAR2      | DATE             | Raw data cannot exceed the valid date range.
VARCHAR2      | CLOB             | -
NUMBER        | CHAR             | -
NUMBER        | VARCHAR2         | -
DATE          | CHAR             | -
DATE          | VARCHAR2         | -
RAW           | CHAR             | -
RAW           | VARCHAR2         | -
CLOB          | CHAR             | -
CLOB          | VARCHAR2         | -
CLOB          | NUMBER           | Raw data must consist of digits.
INT4          | CHAR             | -

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0516.html b/docs/dws/dev/dws_04_0516.html new file mode 100644 index 00000000..40060c06 --- /dev/null +++ b/docs/dws/dev/dws_04_0516.html @@ -0,0 +1,18 @@ + + +

Arrays and Records

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0517.html b/docs/dws/dev/dws_04_0517.html new file mode 100644 index 00000000..391f6e1f --- /dev/null +++ b/docs/dws/dev/dws_04_0517.html @@ -0,0 +1,509 @@ + + +

Arrays

+

Use of Array Types

Before the use of arrays, an array type needs to be defined:

+
Define an array type immediately after the AS keyword in a stored procedure. Run the following statement:
TYPE array_type IS VARRAY(size) OF data_type [NOT NULL];
+
+

Its parameters are as follows:

+ +
  • In GaussDB(DWS), an array grows automatically. If an out-of-bounds read occurs, a NULL value is returned and no error message is reported. If an out-of-bounds write occurs, the message Subscript outside of limit is displayed.
  • An array type defined in a stored procedure takes effect only within that stored procedure.
  • It is recommended that you use one of the preceding methods to define an array type. If both methods are used to define the same array type, GaussDB(DWS) prefers the array type defined in the stored procedure when declaring array variables.
+
+

In GaussDB(DWS) 8.1.0 and earlier versions, the system does not verify the length of array elements and out-of-bounds write because the array can automatically increase. This version adds related constraints to be compatible with Oracle databases. If out-of-bounds write exists, you can configure varray_verification in the parameter behavior_compat_options to be compatible with previously unverified operations.
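A hedged sketch of enabling that compatibility option follows (note that SET replaces the whole option list, so include any options already configured for your cluster):

SET behavior_compat_options = 'varray_verification';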

+

Example:

+
+
-- Declare an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE array_proc
+AS 
+       TYPE ARRAY_INTEGER IS VARRAY(1024) OF INTEGER;--Define the array type.
+       TYPE ARRAY_INTEGER_NOT_NULL IS VARRAY(1024) OF INTEGER NOT NULL;-- Defines non-null array types.
+       ARRINT ARRAY_INTEGER: = ARRAY_INTEGER();  --Declare the variable of the array type. 
+BEGIN 
+       ARRINT.extend(10);  
+       FOR I IN 1..10 LOOP  
+               ARRINT(I) := I; 
+       END LOOP; 
+       DBMS_OUTPUT.PUT_LINE(ARRINT.COUNT);  
+       DBMS_OUTPUT.PUT_LINE(ARRINT(1));  
+       DBMS_OUTPUT.PUT_LINE(ARRINT(10)); 
+       DBMS_OUTPUT.PUT_LINE(ARRINT(ARRINT.FIRST)); 
+       DBMS_OUTPUT.PUT_LINE(ARRINT(ARRINT.last));
+END;  
+/
+
+-- Invoke the stored procedure.
+CALL array_proc();
+10
+1
+10
+1
+10
+
+-- Delete the stored procedure.
+DROP PROCEDURE array_proc;
+
+ +
+

Declaration and Use of Rowtype Arrays

In addition to the declaration and use of common arrays and non-null arrays in the preceding example, the array also supports the declaration and use of rowtype arrays.

+

Example:

+
+
-- Use the COUNT function on an array in a stored procedure.
+CREATE TABLE tbl (a int, b int);
+INSERT INTO tbl VALUES(1, 2),(2, 3),(3, 4);
+CREATE OR REPLACE PROCEDURE array_proc
+AS 
+    CURSOR all_tbl IS SELECT * FROM tbl ORDER BY a; 
+    TYPE tbl_array_type IS varray(50) OF tbl%rowtype; -- Defines the array of the rowtype type. tbl indicates any table.
+    tbl_array tbl_array_type;
+    tbl_item tbl%rowtype;
+    inx1 int;
+BEGIN 
+    tbl_array := tbl_array_type();
+    inx1 := 0;
+    FOR tbl_item IN all_tbl LOOP 
+        inx1 := inx1 + 1;  
+        tbl_array(inx1) := tbl_item; 
+    END LOOP; 
+    WHILE inx1 IS NOT NULL LOOP  
+        DBMS_OUTPUT.PUT_LINE('tbl_array(inx1).a=' || tbl_array(inx1).a || ' tbl_array(inx1).b=' || tbl_array(inx1).b);  
+        inx1 := tbl_array.PRIOR(inx1);
+    END LOOP; 
+END;
+/
+
+ +
+

The execution output is as follows:

+
call array_proc();
+tbl_array(inx1).a=3 tbl_array(inx1).b=4
+tbl_array(inx1).a=2 tbl_array(inx1).b=3
+tbl_array(inx1).a=1 tbl_array(inx1).b=2
+
+ +
+

Array Related Functions

GaussDB(DWS) supports Oracle-related array functions. You can use the following functions to obtain array attributes or perform operations on the array content.

+
+

COUNT

Returns the number of elements in the current array. Only the initialized elements or the elements extended by the EXTEND function are counted.

+

Use:

+

varray.COUNT or varray.COUNT()

+

Example:

+
+
-- Use the COUNT function on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3);
+    DBMS_OUTPUT.PUT_LINE('v_varray.count=' || v_varray.count); 
+    v_varray.extend;
+    DBMS_OUTPUT.PUT_LINE('v_varray.count=' || v_varray.count); 
+END; 
+/
+
+ +
+

The execution output is as follows:

+
call test_varray();
+v_varray.count=3
+v_varray.count=4
+
+ +
+

FIRST and LAST

The FIRST function can return the subscript of the first element. The LAST function can return the subscript of the last element.

+

Use:

+

varray.FIRST or varray.FIRST()

+

varray.LAST or varray.LAST()

+

Example:

+
+
-- Use the FIRST and LAST functions on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3);
+    DBMS_OUTPUT.PUT_LINE('v_varray.first=' || v_varray.first); 
+    DBMS_OUTPUT.PUT_LINE('v_varray.last=' || v_varray.last); 
+END; 
+/ 
+
+ +
+

The execution output is as follows:

+
call test_varray();
+v_varray.first=1
+v_varray.last=3
+
+ +
+

EXTEND

The EXTEND function exists for compatibility with two Oracle operations. In GaussDB(DWS), an array grows automatically, so EXTEND is not necessary; newly written stored procedures do not need to use it.

+
+

The EXTEND function can extend arrays. The EXTEND function can be invoked in either of the following ways:

+ +

Example:

+
-- Use the EXTEND function on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3);
+    v_varray.extend(3);
+    DBMS_OUTPUT.PUT_LINE('v_varray.count=' || v_varray.count); 
+    v_varray.extend(2,3);
+    DBMS_OUTPUT.PUT_LINE('v_varray.count=' || v_varray.count); 
+    DBMS_OUTPUT.PUT_LINE('v_varray(7)=' || v_varray(7)); 
+    DBMS_OUTPUT.PUT_LINE('v_varray(8)=' || v_varray(8)); 
+END; 
+/ 
+
+ +
+
+

The execution output is as follows:

+
call test_varray();
+v_varray.count=6
+v_varray.count=8
+v_varray(7)=3
+v_varray(8)=3
+
+ +
+

NEXT and PRIOR

The NEXT and PRIOR functions are used for cyclic array traversal. The NEXT function returns the subscript of the next array element based on the input parameter index. If the subscript reaches the maximum value, NULL is returned. The PRIOR function returns the subscript of the previous array element based on the input parameter index. If the minimum value of the array subscript is reached, NULL is returned.

+

Use:

+

varray.NEXT(index)

+

varray.PRIOR(index)

+

Example:

+
-- Use the NEXT and PRIOR functions on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+    i int;
+BEGIN 
+    v_varray := varray_type(1, 2, 3);
+
+    i := v_varray.COUNT;  
+    WHILE i IS NOT NULL LOOP  
+        DBMS_OUTPUT.PUT_LINE('test prior v_varray('||i||')=' || v_varray(i));  
+        i := v_varray.PRIOR(i);  
+    END LOOP; 
+
+    i := 1;  
+    WHILE i IS NOT NULL LOOP  
+        DBMS_OUTPUT.PUT_LINE('test next v_varray('||i||')=' || v_varray(i));  
+        i := v_varray.NEXT(i);  
+    END LOOP;
+END; 
+/
+
+ +
+

The execution output is as follows:

+
call test_varray();
+test prior v_varray(3)=3
+test prior v_varray(2)=2
+test prior v_varray(1)=1
+test next v_varray(1)=1
+test next v_varray(2)=2
+test next v_varray(3)=3
+
+ +
+
+

EXISTS

Determines whether an array subscript exists.

+

Use:

+

varray.EXISTS(index)

+

Example:

+
-- Use the EXISTS function on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3);
+    IF v_varray.EXISTS(1) THEN
+        DBMS_OUTPUT.PUT_LINE('v_varray.EXISTS(1)');
+    END IF;
+    IF NOT v_varray.EXISTS(10) THEN
+        DBMS_OUTPUT.PUT_LINE('NOT v_varray.EXISTS(10)');
+    END IF;
+END; 
+/ 
+
+ +
+

The execution output is as follows:

+
call test_varray();
+v_varray.EXISTS(1)
+NOT v_varray.EXISTS(10)
+
+ +
+
+

TRIM

Deletes a specified number of elements from the end of an array.

+

Use:

+

varray.TRIM(size)

+

varray.TRIM is equivalent to varray.TRIM(1), because the default input parameter is 1.

+

Example:

+
-- Use the TRIM function on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3, 4, 5);
+    v_varray.trim(3);
+    DBMS_OUTPUT.PUT_LINE('v_varray.count:' || v_varray.count);
+    v_varray.trim;
+    DBMS_OUTPUT.PUT_LINE('v_varray.count:' || v_varray.count);
+END; 
+/ 
+
+ +
+

The execution output is as follows:

+
call test_varray();
+v_varray.count:2
+v_varray.count:1
+
+ +
+
+

DELETE

Deletes all elements from an array.

+

Use:

+

varray.DELETE or varray.DELETE()

+

Example:

+
-- Use the DELETE function on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3, 4, 5);
+    v_varray.delete;
+    DBMS_OUTPUT.PUT_LINE('v_varray.count:' || v_varray.count);
+END; 
+/ 
+
+ +
+

The execution output is as follows:

+
call test_varray();
+v_varray.count:0
+
+ +
+
+

LIMIT

Returns the allowed maximum length of an array.

+

Use:

+

varray.LIMIT or varray.LIMIT()

+

Example:

+
-- Use the LIMIT function on an array in a stored procedure.
+CREATE OR REPLACE PROCEDURE test_varray
+AS 
+    TYPE varray_type IS VARRAY(20) OF INT; 
+    v_varray varray_type; 
+BEGIN 
+    v_varray := varray_type(1, 2, 3, 4, 5);
+    DBMS_OUTPUT.PUT_LINE('v_varray.limit:' || v_varray.limit);
+END; 
+/ 
+
+ +
+

The execution output is as follows:

+
call test_varray();
+v_varray.limit:20
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0518.html b/docs/dws/dev/dws_04_0518.html new file mode 100644 index 00000000..8598bd1f --- /dev/null +++ b/docs/dws/dev/dws_04_0518.html @@ -0,0 +1,213 @@ + + +

record

+

record Variables

Perform the following operations to create a record variable:

+

Define a record type and use this type to declare a variable.

+
+

Syntax

For the syntax of the record type, see Figure 1.

+
Figure 1 Syntax of the record type
+

The syntax is described as follows:

+ +
+

In GaussDB(DWS):

+
  • When assigning values to record variables, you can:
    • Declare a record type and define member variables of this type when you declare a function or stored procedure.
    • Assign the value of a record variable to another record variable.
    • Use SELECT INTO or FETCH to assign values to a record type.
    • Assign the NULL value to a record variable.
    +
  • The INSERT and UPDATE statements cannot use a record variable to insert or update data.
  • Just like a variable, a record column of the compound type does not have a default value in the declaration.
+
+

Examples

The table used in the following stored procedure is defined as follows:
+CREATE TABLE emp_rec
+(
+    empno            numeric(4,0),
+    ename            character varying(10),
+    job              character varying(9),
+    mgr              numeric(4,0),
+    hiredate         timestamp(0) without time zone,
+    sal              numeric(7,2),           
+    comm             numeric(7,2),          
+    deptno           numeric(2,0)
+)
+with (orientation = column,compression=middle)
+distribute by hash (sal);
+\d emp_rec
+                Table "public.emp_rec"
+  Column  |              Type              | Modifiers 
+----------+--------------------------------+-----------
+ empno    | numeric(4,0)                   | 
+ ename    | character varying(10)          | 
+ job      | character varying(9)           | 
+ mgr      | numeric(4,0)                   | 
+ hiredate | timestamp(0) without time zone | 
+ sal      | numeric(7,2)                   | 
+ comm     | numeric(7,2)                   | 
+ deptno   | numeric(2,0)                   | 
+
+-- Perform record operations in the stored procedure.
+CREATE OR REPLACE FUNCTION regress_record(p_w VARCHAR2)
+RETURNS
+VARCHAR2  AS $$
+DECLARE
+
+   -- Declare a record type.
+   type rec_type is record (name  varchar2(100), epno int);
+   employer rec_type;
+
+   -- Use %type to declare the record type.
+   type rec_type1 is record (name  emp_rec.ename%type, epno int not null :=10);
+   employer1 rec_type1;
+
+   -- Declare a record type with a default value.
+   type rec_type2 is record (
+         name varchar2 not null := 'SCOTT', 
+         epno int not null :=10);
+    employer2 rec_type2;
+    CURSOR C1 IS  select ename,empno from emp_rec order by 1 limit 1;
+            
+BEGIN
+      -- Assign a value to a member record variable.
+     employer.name := 'WARD';
+     employer.epno = 18;
+     raise info 'employer name: % , epno:%', employer.name, employer.epno;
+
+      -- Assign the value of a record variable to another variable.
+     employer1 := employer;
+     raise info 'employer1 name: % , epno: %',employer1.name, employer1.epno;
+         
+      -- Assign the NULL value to a record variable.
+     employer1 := NULL;
+     raise info 'employer1 name: % , epno: %',employer1.name, employer1.epno;
+
+      -- Obtain the default value of a record variable.
+     raise info 'employer2 name: % ,epno: %', employer2.name, employer2.epno;
+            
+      -- Use a record variable in the FOR loop.
+      for employer in select ename,empno from emp_rec order by 1  limit 1 
+          loop 
+               raise info 'employer name: % , epno: %', employer.name, employer.epno;
+          end loop;
+         
+      -- Use a record variable in the SELECT INTO statement.
+      select ename,empno  into employer2 from emp_rec order by 1 limit 1;
+      raise info 'employer name: % , epno: %', employer2.name, employer2.epno;
+            
+      -- Use a record variable in a cursor.
+      OPEN C1;
+      FETCH C1 INTO employer2;
+      raise info 'employer name: % , epno: %', employer2.name, employer2.epno;
+      CLOSE C1;        
+      RETURN employer.name;
+END;
+$$
+LANGUAGE plpgsql;
+
+-- Invoke the stored procedure.
+CALL regress_record('abc');
+INFO:  employer name: WARD , epno:18
+INFO:  employer1 name: WARD , epno: 18
+INFO:  employer1 name: <NULL> , epno: <NULL>
+INFO:  employer2 name: SCOTT ,epno: 10
+
+-- Delete the stored procedure.
+DROP PROCEDURE regress_record;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0519.html b/docs/dws/dev/dws_04_0519.html new file mode 100644 index 00000000..42d97986 --- /dev/null +++ b/docs/dws/dev/dws_04_0519.html @@ -0,0 +1,19 @@ + + +

Syntax

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0520.html b/docs/dws/dev/dws_04_0520.html new file mode 100644 index 00000000..af7d77c8 --- /dev/null +++ b/docs/dws/dev/dws_04_0520.html @@ -0,0 +1,26 @@ + + +

Basic Structure

+

Structure

A PL/SQL block can contain a sub-block which can be placed in any section. The following describes the architecture of a PL/SQL block:

+ +
+

Type

PL/SQL blocks are classified into the following types:

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0521.html b/docs/dws/dev/dws_04_0521.html new file mode 100644 index 00000000..9157fa42 --- /dev/null +++ b/docs/dws/dev/dws_04_0521.html @@ -0,0 +1,62 @@ + + +

Anonymous Block

+

An anonymous block is suited to scripts that are executed infrequently and to one-off activities. It is executed in a session and is not stored.

+

Syntax

Figure 1 shows the syntax diagrams for an anonymous block.

+
Figure 1 anonymous_block::=
+

Details about the syntax diagram are as follows:

+ +
+

Examples

The following lists basic anonymous block programs:

+
-- Null statement block:
+BEGIN
+     NULL; 
+END;
+/
+
+-- Print information to the console:
+BEGIN
+     dbms_output.put_line('hello world!'); 
+END; 
+/
+
+-- Print variable contents to the console:
+DECLARE      
+     my_var VARCHAR2(30);  
+BEGIN      
+     my_var :='world';     
+     dbms_output.put_line('hello'||my_var); 
+END; 
+/ 
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0522.html b/docs/dws/dev/dws_04_0522.html new file mode 100644 index 00000000..9d18f0ac --- /dev/null +++ b/docs/dws/dev/dws_04_0522.html @@ -0,0 +1,11 @@ + + +

Subprogram

+

A subprogram stores stored procedures, functions, operators, and advanced packages. A subprogram created in a database can be called by other programs.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0523.html b/docs/dws/dev/dws_04_0523.html new file mode 100644 index 00000000..f1b6acff --- /dev/null +++ b/docs/dws/dev/dws_04_0523.html @@ -0,0 +1,19 @@ + + +

Basic Statements

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0524.html b/docs/dws/dev/dws_04_0524.html new file mode 100644 index 00000000..a26c69d9 --- /dev/null +++ b/docs/dws/dev/dws_04_0524.html @@ -0,0 +1,75 @@ + + +

Variable Definition Statement

+

This section describes how to declare variables in PL/SQL and the scope of these variables in code.

+

Variable Declaration

For details about the variable declaration syntax, see Figure 1.

+
Figure 1 declare_variable::=
+

The above syntax diagram is explained as follows:

+ +

Example:

+
DECLARE
+    emp_id  INTEGER := 7788; -- Define a variable and assign a value to it.
+BEGIN
+    emp_id := 5*7784; -- Assign a value to the variable.
+END;
+/
+
+ +
+
+

In addition to the declaration of basic variable types, %TYPE and %ROWTYPE can be used to declare variables related to table columns or table structures.

+

%TYPE Attribute

%TYPE declares a variable to be of the same data type as a previously declared variable (for example, a column in a table). For example, if you want to define a my_name variable whose data type is the same as the data type of the firstname column in the employee table, you can define the variable as follows:

+
my_name employee.firstname%TYPE
+

In this way, you can declare my_name without knowing the data type of firstname in employee, and the data type of my_name is updated automatically when the data type of firstname changes.

+
+

%ROWTYPE Attribute

%ROWTYPE declares data types of a set of data. It stores a row of table data or results fetched from a cursor. For example, if you want to define a set of data with the same column names and column data types as the employee table, you can define the data as follows:

+
my_employee employee%ROWTYPE
+
+

If multiple CNs are used, the %ROWTYPE and %TYPE attributes of temporary tables cannot be declared in a stored procedure, because a temporary table is valid only in the current session and is invisible to other CNs in the compilation phase. In this case, a message is displayed indicating that the temporary table does not exist.

+
+

Scope of a Variable

The scope of a variable indicates the accessibility and availability of the variable within a code block. In other words, a variable takes effect only within its scope.

+ +

Example:

+
DECLARE
+    emp_id  INTEGER :=7788; -- Define a variable and assign a value to it.
+    outer_var  INTEGER :=6688; -- Define a variable and assign a value to it.
+BEGIN
+    DECLARE        
+        emp_id INTEGER :=7799; -- Define a variable and assign a value to it.
+        inner_var  INTEGER :=6688; -- Define a variable and assign a value to it.
+    BEGIN
+        dbms_output.put_line('inner emp_id ='||emp_id); -- Display the value as 7799.
+        dbms_output.put_line('outer_var ='||outer_var); -- Cite variables of an outer block.
+    END;
+    dbms_output.put_line('outer emp_id ='||emp_id); -- Display the value as 7788.
+END;
+/
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0525.html b/docs/dws/dev/dws_04_0525.html new file mode 100644 index 00000000..e9cc0b3f --- /dev/null +++ b/docs/dws/dev/dws_04_0525.html @@ -0,0 +1,32 @@ + + +

Assignment Statement

+

Syntax

Figure 1 shows the syntax diagram for assigning a value to a variable.

+
Figure 1 assignment_value::=
+

The above syntax diagram is explained as follows:

+ +
+

Examples

DECLARE
+    emp_id  INTEGER := 7788; --Assignment
+BEGIN
+    emp_id := 5; --Assignment
+    emp_id := 5*7784;
+END;
+/
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0526.html b/docs/dws/dev/dws_04_0526.html new file mode 100644 index 00000000..0e8c5665 --- /dev/null +++ b/docs/dws/dev/dws_04_0526.html @@ -0,0 +1,118 @@ + + +

Call Statement

+

Syntax

Figure 1 shows the syntax diagram for calling a clause.

+
Figure 1 call_clause::=
+

The above syntax diagram is explained as follows:

+ +
+

Examples

-- Create the stored procedure proc_staffs:
+CREATE OR REPLACE PROCEDURE proc_staffs
+(
+section     NUMBER(6),
+salary_sum out NUMBER(8,2),
+staffs_count out INTEGER
+)
+IS
+BEGIN
+SELECT sum(salary), count(*) INTO salary_sum, staffs_count FROM staffs where section_id = section;
+END;
+/
+
+-- Create the stored procedure proc_return:
+CREATE OR REPLACE PROCEDURE proc_return
+AS
+v_num NUMBER(8,2);
+v_sum INTEGER;
+BEGIN
+proc_staffs(30, v_sum, v_num);  --Invoke a statement
+dbms_output.put_line(v_sum||'#'||v_num);
+RETURN;   --Return a statement
+END;
+/
+
+-- Invoke a stored procedure proc_return:
+CALL proc_return();
+
+-- Delete a stored procedure:
+DROP PROCEDURE proc_staffs;
+DROP PROCEDURE proc_return;
+
+--Create the function func_return.
+CREATE OR REPLACE FUNCTION func_return returns void
+language plpgsql
+AS $$
+DECLARE
+v_num INTEGER := 1;
+BEGIN
+dbms_output.put_line(v_num);
+RETURN;   --Return a statement
+END $$;
+
+
+-- Invoke the function func_return.
+ CALL func_return();
+1
+
+-- Delete the function:
+ DROP FUNCTION func_return;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0527.html b/docs/dws/dev/dws_04_0527.html new file mode 100644 index 00000000..b71de56c --- /dev/null +++ b/docs/dws/dev/dws_04_0527.html @@ -0,0 +1,21 @@ + + +

Dynamic Statements

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0528.html b/docs/dws/dev/dws_04_0528.html new file mode 100644 index 00000000..aa82c81e --- /dev/null +++ b/docs/dws/dev/dws_04_0528.html @@ -0,0 +1,133 @@ + + +

Executing Dynamic Query Statements

+

You can perform dynamic queries in GaussDB(DWS) using EXECUTE IMMEDIATE or OPEN FOR. EXECUTE IMMEDIATE dynamically executes a SELECT statement, while OPEN FOR is used together with a cursor. If you need to store the query results in a result set, use OPEN FOR.

+

EXECUTE IMMEDIATE

Figure 1 shows the syntax diagram.

+
Figure 1 EXECUTE IMMEDIATE dynamic_select_clause::=
+

Figure 2 shows the syntax diagram for using_clause.

+
Figure 2 using_clause-1
+

The above syntax diagram is explained as follows:

+ +

Example

+
--Retrieve values from dynamic statements (INTO clause).
+DECLARE
+   staff_count  VARCHAR2(20);
+BEGIN
+   EXECUTE IMMEDIATE 'select count(*) from staffs'
+      INTO staff_count;
+   dbms_output.put_line(staff_count);
+END;
+/
+
+--Pass and retrieve values (the INTO clause is used before the USING clause).
+CREATE OR REPLACE PROCEDURE dynamic_proc
+AS
+   staff_id     NUMBER(6) := 200;
+   first_name   VARCHAR2(20);
+   salary       NUMBER(8,2);
+BEGIN
+   EXECUTE IMMEDIATE 'select first_name, salary from staffs where staff_id = :1'
+       INTO first_name, salary
+       USING IN staff_id;
+   dbms_output.put_line(first_name || ' ' || salary);
+END;
+/
+
+-- Invoke the stored procedure.
+CALL dynamic_proc();
+
+-- Delete the stored procedure.
+DROP PROCEDURE dynamic_proc;
+
+ +
+
+

OPEN FOR

Dynamic query statements can be executed by using OPEN FOR to open dynamic cursors.

+

For details about the syntax, see Figure 3.

+
Figure 3 open_for::=
+

Parameter description:

+ +

For use of cursors, see Cursors.

+

Example

+
DECLARE
+    name          VARCHAR2(20);
+    phone_number  VARCHAR2(20);
+    salary        NUMBER(8,2);
+    sqlstr        VARCHAR2(1024);
+
+    TYPE app_ref_cur_type IS REF CURSOR; -- Define the cursor type.
+    my_cur app_ref_cur_type; -- Define the cursor variable.
+    
+BEGIN
+    sqlstr := 'select first_name,phone_number,salary from staffs
+         where section_id = :1';
+    OPEN my_cur FOR sqlstr USING '30'; -- Open the cursor. using is optional.
+    FETCH my_cur INTO name, phone_number, salary; -- Retrieve the data.
+    WHILE my_cur%FOUND LOOP
+          dbms_output.put_line(name||'#'||phone_number||'#'||salary);
+          FETCH my_cur INTO name, phone_number, salary;
+    END LOOP;
+    CLOSE my_cur; -- Close the cursor.
+END;
+/
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0529.html b/docs/dws/dev/dws_04_0529.html new file mode 100644 index 00000000..2667a28c --- /dev/null +++ b/docs/dws/dev/dws_04_0529.html @@ -0,0 +1,88 @@ + + +

Executing Dynamic Non-query Statements

+

Syntax

Figure 1 shows the syntax diagram.

+
Figure 1 noselect::=
+

Figure 2 shows the syntax diagram for using_clause.

+
Figure 2 using_clause-2
+

The above syntax diagram is explained as follows:

+

USING IN bind_argument specifies the variable that passes values to the dynamic SQL statement. It is used when a placeholder exists in dynamic_noselect_string; the placeholder is replaced by the corresponding bind_argument when the dynamic SQL statement is executed. Note that bind_argument can only be a value, variable, or expression; it cannot be a database object such as a table name, column name, or data type. If a stored procedure needs to pass database objects to construct dynamic SQL statements (generally, DDL statements), you are advised to use double vertical bars (||) to concatenate the dynamic statement with the database object. In addition, a dynamic PL/SQL block allows duplicate placeholders: a placeholder that appears multiple times corresponds to a single bind_argument.

+
+

Examples

-- Create a table:
+CREATE TABLE sections_t1
+(
+   section       NUMBER(4) ,
+   section_name  VARCHAR2(30),
+   manager_id    NUMBER(6),
+   place_id      NUMBER(4) 
+)  
+DISTRIBUTE BY hash(manager_id);
+
+--Declare a variable:
+DECLARE 
+   section       NUMBER(4) := 280; 
+   section_name  VARCHAR2(30) := 'Info support'; 
+   manager_id    NUMBER(6) := 103;
+   place_id      NUMBER(4) := 1400;
+   new_colname   VARCHAR2(10) := 'sec_name';
+BEGIN 
+-- Execute the query:
+    EXECUTE IMMEDIATE 'insert into sections_t1 values(:1, :2, :3, :4)' 
+       USING section, section_name, manager_id,place_id; 
+-- Execute the query (duplicate placeholders):
+    EXECUTE IMMEDIATE 'insert into sections_t1 values(:1, :2, :3, :1)' 
+       USING section, section_name, manager_id; 
+-- Run the ALTER statement. (You are advised to use double vertical bars (||) to concatenate the dynamic DDL statement with a database object.)
+    EXECUTE IMMEDIATE 'alter table sections_t1 rename section_name to ' || new_colname;
+END; 
+/
+
+-- Query data:
+SELECT * FROM sections_t1;
+
+--Delete the table.
+DROP TABLE sections_t1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0530.html b/docs/dws/dev/dws_04_0530.html new file mode 100644 index 00000000..0d0c5738 --- /dev/null +++ b/docs/dws/dev/dws_04_0530.html @@ -0,0 +1,81 @@ + + +

Dynamically Calling Stored Procedures

+

This section describes how to dynamically call stored procedures. Package the stored procedure call or statement block in a statement string, and append IN and OUT after the EXECUTE IMMEDIATE...USING statement to pass input and output parameters.

+

Syntax

Figure 1 shows the syntax diagram.

+
Figure 1 call_procedure::=
+

Figure 2 shows the syntax diagram for using_clause.

+
Figure 2 using_clause-3
+

The above syntax diagram is explained as follows:

+ +
+

Examples

--Create the stored procedure proc_add:
+CREATE OR REPLACE PROCEDURE proc_add
+(
+    param1    in   INTEGER,
+    param2    out  INTEGER,
+    param3    in   INTEGER
+)
+AS
+BEGIN
+   param2:= param1 + param3;
+END;
+/
+
+DECLARE
+    input1 INTEGER:=1;
+    input2 INTEGER:=2;
+    statement  VARCHAR2(200);
+    param2     INTEGER;
+BEGIN
+   --Declare the call statement:
+    statement := 'call proc_add(:col_1, :col_2, :col_3)';
+   --Execute the statement:
+    EXECUTE IMMEDIATE statement
+        USING IN input1, OUT param2, IN input2;
+    dbms_output.put_line('result is: '||to_char(param2));
+END;
+/
+
+-- Delete the stored procedure.
+DROP PROCEDURE proc_add;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0531.html b/docs/dws/dev/dws_04_0531.html new file mode 100644 index 00000000..d0252b71 --- /dev/null +++ b/docs/dws/dev/dws_04_0531.html @@ -0,0 +1,59 @@ + + +

Dynamically Calling Anonymous Blocks

+

This section describes how to execute anonymous blocks in dynamic statements. Append IN and OUT after the EXECUTE IMMEDIATE...USING statement to pass input and output parameters.

+

Syntax

Figure 1 shows the syntax diagram.

+
Figure 1 call_anonymous_block::=
+

Figure 2 shows the syntax diagram for using_clause.

+
Figure 2 using_clause-4
+

The above syntax diagram is explained as follows:

+ +
+

Example

--Create the stored procedure dynamic_proc.
+CREATE OR REPLACE PROCEDURE dynamic_proc
+AS
+   staff_id     NUMBER(6) := 200;
+   first_name   VARCHAR2(20);
+   salary       NUMBER(8,2);
+BEGIN
+--Execute the anonymous block.
+    EXECUTE IMMEDIATE 'begin select first_name, salary into :first_name, :salary from staffs where staff_id= :dno; end;'
+       USING OUT first_name, OUT salary, IN staff_id;
+   dbms_output.put_line(first_name|| ' ' || salary);
+END;
+/
+
+-- Invoke the stored procedure.
+CALL dynamic_proc();
+
+-- Delete the stored procedure.
+DROP PROCEDURE dynamic_proc;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0532.html b/docs/dws/dev/dws_04_0532.html new file mode 100644 index 00000000..4c8dda19 --- /dev/null +++ b/docs/dws/dev/dws_04_0532.html @@ -0,0 +1,27 @@ + + +

Control Statements

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0533.html b/docs/dws/dev/dws_04_0533.html new file mode 100644 index 00000000..c4d0eadd --- /dev/null +++ b/docs/dws/dev/dws_04_0533.html @@ -0,0 +1,18 @@ + + +

RETURN Statements

+

In GaussDB(DWS), data can be returned in any of the following ways: RETURN, RETURN NEXT, or RETURN QUERY. RETURN NEXT and RETURN QUERY can be used only in functions, not in stored procedures.

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0534.html b/docs/dws/dev/dws_04_0534.html new file mode 100644 index 00000000..dccbdf9b --- /dev/null +++ b/docs/dws/dev/dws_04_0534.html @@ -0,0 +1,17 @@ + + +

RETURN

+

Syntax

Figure 1 shows the syntax diagram for a return statement.

+
Figure 1 return_clause::=
+

The syntax details are as follows:

+

This statement returns control from a stored procedure or function to a caller.

+
+

Examples

See Examples for call statement examples.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0535.html b/docs/dws/dev/dws_04_0535.html new file mode 100644 index 00000000..ce5e4777 --- /dev/null +++ b/docs/dws/dev/dws_04_0535.html @@ -0,0 +1,97 @@ + + +

RETURN NEXT and RETURN QUERY

+

Syntax

When creating a function, specify SETOF datatype for the return values.

+

return_next_clause::=

+

+

return_query_clause::=

+

+

The syntax details are as follows:

+

If a function needs to return a result set, use RETURN NEXT or RETURN QUERY to add results to the result set, and then continue to execute the next statement of the function. As the RETURN NEXT or RETURN QUERY statement is executed repeatedly, more and more results will be added to the result set. After the function is executed, all results are returned.

+

RETURN NEXT can be used for scalar and compound data types.

+

RETURN QUERY has a variant, RETURN QUERY EXECUTE, which executes a dynamically constructed query; parameters can be passed to the query by using USING. A sketch follows.
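The following hedged sketch shows the RETURN QUERY EXECUTE form (it assumes the table t1 created in the examples below; the function name and the $1 placeholder style are assumptions):

CREATE OR REPLACE FUNCTION fun_for_return_query_execute(val int) RETURNS SETOF t1 AS $$
BEGIN
   -- Build the query dynamically and bind val to the $1 placeholder.
   RETURN QUERY EXECUTE 'select * from t1 where a > $1' USING val;
END;
$$ LANGUAGE plpgsql;

call fun_for_return_query_execute(5);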

+
+

Examples

CREATE TABLE t1(a int);
+INSERT INTO t1 VALUES(1),(10);
+
+--RETURN NEXT
+CREATE OR REPLACE FUNCTION fun_for_return_next() RETURNS SETOF t1 AS $$
+DECLARE
+   r t1%ROWTYPE;
+BEGIN
+   FOR r IN select * from t1
+   LOOP
+      RETURN NEXT r;
+   END LOOP;
+   RETURN;
+END;
+$$ LANGUAGE PLPGSQL;
+call fun_for_return_next();
+ a
+---
+ 1
+ 10
+(2 rows)
+
+-- RETURN QUERY
+CREATE OR REPLACE FUNCTION fun_for_return_query() RETURNS SETOF t1 AS $$
+DECLARE
+   r t1%ROWTYPE;
+BEGIN
+   RETURN QUERY select * from t1;
+END;
+$$
+language plpgsql;
+call fun_for_return_query();
+ a
+---
+ 1
+ 10
+(2 rows)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0536.html b/docs/dws/dev/dws_04_0536.html new file mode 100644 index 00000000..ebcc0529 --- /dev/null +++ b/docs/dws/dev/dws_04_0536.html @@ -0,0 +1,121 @@ + + +

Conditional Statements

+

Conditional statements are used to decide whether given conditions are met. Operations are executed based on the decisions made.

+

GaussDB(DWS) supports five usages of IF; the basic IF_THEN_ELSE form is sketched below.
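For example, the IF_THEN_ELSE form can be exercised in an anonymous block (a minimal sketch; the condition and printed strings are hypothetical):

BEGIN
    IF 1 > 0 THEN
        dbms_output.put_line('condition is true');
    ELSE
        dbms_output.put_line('condition is false');
    END IF;
END;
/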

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0537.html b/docs/dws/dev/dws_04_0537.html new file mode 100644 index 00000000..80d78558 --- /dev/null +++ b/docs/dws/dev/dws_04_0537.html @@ -0,0 +1,241 @@ + + +

Loop Statements

+

Simple LOOP Statements

The syntax diagram is as follows.

+
Figure 1 loop::=
+

Example:

+
CREATE OR REPLACE PROCEDURE proc_loop(i in integer, count out integer) 
+AS 
+    BEGIN 
+        count:=0; 
+        LOOP 
+        IF count > i THEN 
+            raise info 'count is %. ', count;  
+            EXIT; 
+        ELSE 
+            count:=count+1; 
+        END IF; 
+        END LOOP; 
+    END;
+/
+
+CALL proc_loop(10,5);
+
+ +
+

A simple LOOP must be used together with EXIT; otherwise, an infinite loop occurs.

+
+
+

WHILE-LOOP Statements

The syntax diagram is as follows.

+
Figure 2 while_loop::=
+

If the conditional expression is true, the statements in the WHILE loop are executed repeatedly; the condition is evaluated before each execution of the loop body.

+

Examples

+
CREATE TABLE integertable(c1 integer) DISTRIBUTE BY hash(c1); 
+CREATE OR REPLACE PROCEDURE proc_while_loop(maxval in integer) 
+AS 
+    DECLARE 
+    i int :=1;  
+    BEGIN 
+        WHILE i < maxval LOOP 
+            INSERT INTO integertable VALUES(i); 
+            i:=i+1; 
+        END LOOP; 
+    END; 
+/
+
+-- Invoke a function:
+CALL proc_while_loop(10);
+
+-- Delete the stored procedure and table:
+DROP PROCEDURE proc_while_loop;
+DROP TABLE integertable;
+
+ +
+
+

FOR_LOOP (Integer variable) Statement

The syntax diagram is as follows.

+
Figure 3 for_loop::=
+
  • The loop variable is automatically defined as the integer type and exists only in this loop. Its value ranges from lower_bound to upper_bound.
  • When the keyword REVERSE is used, the lower bound must be greater than or equal to the upper bound; otherwise, the loop body is not executed.
+
+

Example:

+
-- Loop from 0 to 5:
+CREATE OR REPLACE PROCEDURE proc_for_loop()
+AS
+    BEGIN
+    FOR I IN 0..5 LOOP
+        DBMS_OUTPUT.PUT_LINE('It is '||to_char(I) || ' time;') ;
+    END LOOP;
+END;
+/
+
+-- Invoke a function:
+CALL proc_for_loop();
+
+-- Delete the stored procedure:
+DROP PROCEDURE proc_for_loop;
+
+ +
+
+

FOR_LOOP Query Statements

The syntax diagram is as follows.

+
Figure 4 for_loop_query::=
+

The variable target is automatically defined, its type matches that of a query result row, and it is valid only within this loop. Each iteration assigns one row of the query result to target.

+
+

Example:

+
-- Display the query result from the loop:
+CREATE OR REPLACE PROCEDURE proc_for_loop_query()
+AS 
+    record VARCHAR2(50);
+BEGIN 
+    FOR record IN SELECT spcname FROM pg_tablespace LOOP 
+    dbms_output.put_line(record); 
+    END LOOP; 
+END; 
+/
+
+-- Invoke a function.
+CALL proc_for_loop_query();
+
+-- Delete the stored procedure.
+DROP PROCEDURE proc_for_loop_query;
+
+ +
+
+

FORALL Batch Query Statements

The syntax diagram is as follows.

+
Figure 5 forall::=
+

The variable index is automatically defined as the integer type and exists only in this loop. The index value falls between lower_bound and upper_bound.

+
+

Example:

+
CREATE TABLE hdfs_t1 (
+  title NUMBER(6),
+  did VARCHAR2(20),
+  data_peroid VARCHAR2(25),
+  kind VARCHAR2(25),
+  interval VARCHAR2(20),
+  time DATE,
+  isModified VARCHAR2(10)
+) 
+DISTRIBUTE BY hash(did);
+
+INSERT INTO hdfs_t1 VALUES( 8, 'Donald', 'OConnell', 'DOCONNEL', '650.507.9833', to_date('21-06-1999', 'dd-mm-yyyy'), 'SH_CLERK' );
+
+CREATE OR REPLACE PROCEDURE proc_forall()
+AS 
+BEGIN 
+    FORALL i IN 100..120 
+        insert into hdfs_t1(title) values(i);
+END; 
+/
+
+-- Invoke a function:
+CALL proc_forall();
+
+-- Query the invocation result of the stored procedure:
+SELECT * FROM hdfs_t1 WHERE title BETWEEN 100 AND 120;
+
+-- Delete the stored procedure and table:
+DROP PROCEDURE proc_forall;
+DROP TABLE hdfs_t1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0538.html b/docs/dws/dev/dws_04_0538.html new file mode 100644 index 00000000..bad60cb3 --- /dev/null +++ b/docs/dws/dev/dws_04_0538.html @@ -0,0 +1,82 @@ + + +

Branch Statements

+

Syntax

Figure 1 shows the syntax diagram.

+
Figure 1 case_when::=
+

Figure 2 shows the syntax diagram for when_clause.

+
Figure 2 when_clause::=
+

Parameter description:

+ +
+

Examples

CREATE OR REPLACE PROCEDURE proc_case_branch(pi_result in integer, pi_return out integer)
+AS 
+    BEGIN 
+        CASE pi_result 
+            WHEN 1 THEN 
+                pi_return := 111; 
+            WHEN 2 THEN 
+                pi_return := 222; 
+            WHEN 3 THEN 
+                pi_return := 333; 
+            WHEN 6 THEN 
+                pi_return := 444; 
+            WHEN 7 THEN 
+                pi_return := 555; 
+            WHEN 8 THEN 
+                pi_return := 666; 
+            WHEN 9 THEN 
+                pi_return := 777; 
+            WHEN 10 THEN 
+                pi_return := 888; 
+            ELSE 
+                pi_return := 999; 
+        END CASE; 
+        raise info 'pi_return : %',pi_return ; 
+END; 
+/
+
+CALL proc_case_branch(3,0);
+
+-- Delete the stored procedure:
+DROP PROCEDURE proc_case_branch;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0539.html b/docs/dws/dev/dws_04_0539.html new file mode 100644 index 00000000..820e895b --- /dev/null +++ b/docs/dws/dev/dws_04_0539.html @@ -0,0 +1,33 @@ + + +

NULL Statements

+

In PL/SQL programs, NULL statements are used to indicate that "nothing should be done"; they act as placeholders. They make the intent of otherwise empty branches explicit and improve program readability.

+

Syntax

The following shows example use of NULL statements.

+
DECLARE
+    ...
+BEGIN
+    ...
+    IF v_num IS NULL THEN
+        NULL; --No data needs to be processed.
+    END IF;
+END;
+/
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0540.html b/docs/dws/dev/dws_04_0540.html new file mode 100644 index 00000000..00ed6d80 --- /dev/null +++ b/docs/dws/dev/dws_04_0540.html @@ -0,0 +1,183 @@ + + +

Error Trapping Statements

+
By default, any error occurring in a PL/SQL function aborts execution of the function, and indeed of the surrounding transaction as well. You can trap errors and recover from them by using a BEGIN block with an EXCEPTION clause. The syntax is an extension of the normal syntax for a BEGIN block:
[<<label>>]
+[DECLARE
+    declarations]
+BEGIN
+    statements
+EXCEPTION
+    WHEN condition [OR condition ...] THEN
+        handler_statements
+    [WHEN condition [OR condition ...] THEN
+        handler_statements
+    ...]
+END;
+
+ +
+
+

If no error occurs, this form of block simply executes all the statements, and then control passes to the next statement after END. But if an error occurs within the statements, further processing of the statements is abandoned, and control passes to the EXCEPTION list. The list is searched for the first condition matching the error that occurred. If a match is found, the corresponding handler_statements are executed, and then control passes to the next statement after END. If no match is found, the error propagates out as though the EXCEPTION clause were not there at all:

+

The error can be caught by an enclosing block with EXCEPTION, or if there is none it aborts processing of the function.

+

The condition names can be any of those shown in GaussDB(DWS) Error Code Reference. The special condition name OTHERS matches every error type except QUERY_CANCELED.

+

If a new error occurs within the selected handler_statements, it cannot be caught by this EXCEPTION clause, but is propagated out. A surrounding EXCEPTION clause could catch it.

+

When an error is caught by an EXCEPTION clause, the local variables of the PL/SQL function remain as they were when the error occurred, but all changes to persistent database state within the block are rolled back.

+

Example:

+
CREATE TABLE mytab(id INT,firstname VARCHAR(20),lastname VARCHAR(20)) DISTRIBUTE BY hash(id);
+
+INSERT INTO mytab(firstname, lastname) VALUES('Tom', 'Jones');
+
+CREATE FUNCTION fun_exp() RETURNS INT
+AS $$
+DECLARE
+    x INT :=0;
+    y INT;
+BEGIN
+    UPDATE mytab SET firstname = 'Joe' WHERE lastname = 'Jones';
+    x := x + 1;
+    y := x / 0;
+EXCEPTION
+    WHEN division_by_zero THEN
+        RAISE NOTICE 'caught division_by_zero';
+        RETURN x;
+END;$$
+LANGUAGE plpgsql;
+
+call fun_exp();
+NOTICE:  caught division_by_zero
+ fun_exp 
+---------
+       1
+(1 row)
+
+select * from mytab;
+ id | firstname | lastname 
+----+-----------+----------
+    | Tom       | Jones
+(1 row)
+
+DROP FUNCTION fun_exp();
+DROP TABLE mytab;
+
+ +
+

When control reaches the assignment to y, it will fail with a division_by_zero error. This will be caught by the EXCEPTION clause. The value returned in the RETURN statement will be the incremented value of x.

+

A block containing an EXCEPTION clause is more expensive to enter and exit than a block without one. Therefore, do not use EXCEPTION without need.

+

In the following scenarios, an exception cannot be caught, and the entire transaction rolls back: the threads of the nodes participating in the stored procedure exit abnormally due to a node failure or network fault, or the source data is inconsistent with the table structure of the target table during a COPY FROM operation.

+
+

Example: Exceptions with UPDATE/INSERT

+

This example uses exception handling to perform either UPDATE or INSERT, as appropriate:

+
CREATE TABLE db (a INT, b TEXT);
+
+CREATE FUNCTION merge_db(key INT, data TEXT) RETURNS VOID AS
+$$
+BEGIN
+    LOOP
+
+-- Try updating the key:
+        UPDATE db SET b = data WHERE a = key;
+        IF found THEN
+            RETURN;
+        END IF;
+-- Not there, so try to insert the key. If someone else inserts the same key concurrently, we could get a unique-key failure.
+        BEGIN
+            INSERT INTO db(a,b) VALUES (key, data);
+            RETURN;
+        EXCEPTION WHEN unique_violation THEN
+        -- Loop to try the UPDATE again:
+        END;
+     END LOOP;
+END;
+$$
+LANGUAGE plpgsql;
+
+SELECT merge_db(1, 'david');
+SELECT merge_db(1, 'dennis');
+
+-- Delete FUNCTION and TABLE:
+DROP FUNCTION merge_db;
+DROP TABLE db ;
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0541.html b/docs/dws/dev/dws_04_0541.html new file mode 100644 index 00000000..d821cbcb --- /dev/null +++ b/docs/dws/dev/dws_04_0541.html @@ -0,0 +1,179 @@ + + +

GOTO Statements

+

The GOTO statement unconditionally transfers control from the current statement to a labeled statement. Because GOTO changes the execution logic, use it only when necessary; alternatively, use the EXCEPTION mechanism to handle special cases. The target label of a GOTO statement must be unique.

+

Syntax

label declaration ::=

+
+

+

goto statement ::=

+

+

Examples

CREATE OR REPLACE PROCEDURE GOTO_test()
+AS 
+DECLARE
+    v1  int;
+BEGIN
+    v1  := 0;
+        LOOP
+        EXIT WHEN v1 > 100;
+                v1 := v1 + 2;
+                if v1 > 25 THEN
+                        GOTO pos1;
+                END IF;
+        END LOOP;
+<<pos1>>
+v1 := v1 + 10;
+raise info 'v1 is %. ', v1;
+END;
+/
+
+call GOTO_test();
+DROP PROCEDURE GOTO_test(); 
+
+ +
+
+

Constraints

The GOTO statement has the following constraints:

+
+ + + + + + +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0542.html b/docs/dws/dev/dws_04_0542.html new file mode 100644 index 00000000..61304b26 --- /dev/null +++ b/docs/dws/dev/dws_04_0542.html @@ -0,0 +1,17 @@ + + +

Other Statements

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0543.html b/docs/dws/dev/dws_04_0543.html new file mode 100644 index 00000000..c721d750 --- /dev/null +++ b/docs/dws/dev/dws_04_0543.html @@ -0,0 +1,11 @@ + + +

Lock Operations

+

GaussDB(DWS) provides multiple lock modes to control concurrent access to table data. These modes are used when Multi-Version Concurrency Control (MVCC) does not give the desired behavior. In addition, most GaussDB(DWS) commands automatically acquire appropriate locks to ensure that referenced tables are not deleted or modified in an incompatible manner while the command executes. For example, ALTER TABLE cannot run concurrently with other operations on the same table.
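For illustration, a minimal sketch of taking an explicit lock inside a transaction (mytable is a hypothetical table name):

START TRANSACTION;
LOCK TABLE mytable IN SHARE MODE;
-- Read-mostly work that must not observe concurrent row changes goes here.
COMMIT;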

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0544.html b/docs/dws/dev/dws_04_0544.html new file mode 100644 index 00000000..fd6f57e0 --- /dev/null +++ b/docs/dws/dev/dws_04_0544.html @@ -0,0 +1,13 @@ + + +

Cursor Operations

+

GaussDB(DWS) provides cursors as a data buffer in which users store the execution results of SQL statements. Each cursor area has a name. Using SQL statements, users can fetch records from a cursor one by one and assign them to host variables for further processing by the host language.

+

Cursor operations include cursor definition, open, fetch, and close operations.

+

For the complete example of cursor operations, see Explicit Cursor.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0545.html b/docs/dws/dev/dws_04_0545.html new file mode 100644 index 00000000..523e22cf --- /dev/null +++ b/docs/dws/dev/dws_04_0545.html @@ -0,0 +1,21 @@ + + +

Cursors

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0546.html b/docs/dws/dev/dws_04_0546.html new file mode 100644 index 00000000..2ce039d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0546.html @@ -0,0 +1,39 @@ + + +

Overview

+

To process SQL statements, the stored procedure process allocates a memory segment to store context information. A cursor is a handle or pointer to this context area. With cursors, stored procedures can control changes to the context area.

+

If JDBC is used to call a stored procedure whose returned value is a cursor, the returned cursor is not available.

+
+

Cursors are classified into explicit cursors and implicit cursors. Table 1 shows the usage conditions of explicit and implicit cursors for different SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 1 Cursor usage conditions

SQL Statement

+

Cursor

+

Non-query statements

+

Implicit

+

Query statements with single-line results

+

Implicit or explicit

+

Query statements with multi-line results

+

Explicit

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0547.html b/docs/dws/dev/dws_04_0547.html new file mode 100644 index 00000000..0b05a601 --- /dev/null +++ b/docs/dws/dev/dws_04_0547.html @@ -0,0 +1,243 @@ + + +

Explicit Cursor

+

An explicit cursor is used to process query statements, particularly when the query results contain multiple records.

+

Procedure

An explicit cursor performs the following six PL/SQL steps to process query statements:

+
  1. Define a static cursor: Define a cursor name and its corresponding SELECT statement.

    Figure 1 shows the syntax diagram for defining a static cursor.

    +
    Figure 1 static_cursor_define::=
    +

    Parameter description:

    +
    • cursor_name: defines a cursor name.
    • parameter: specifies cursor parameters. Only input parameters are allowed in the following format:
      parameter_name datatype
      +
    • select_statement: specifies a query statement.
    +

    The system automatically determines whether the cursor can be used for backward fetches based on the execution plan.

    +
    +

    Define a dynamic cursor: Define a ref cursor, which means that the cursor can be opened dynamically by a set of static SQL statements. Define the type of the ref cursor first and then the cursor variable of this cursor type. Dynamically bind a SELECT statement through OPEN FOR when the cursor is opened.

    +

    Figure 2 and Figure 3 show the syntax diagrams for defining a dynamic cursor.

    +
    Figure 2 cursor_typename::=
    +

    GaussDB(DWS) supports the dynamic cursor type sys_refcursor. A function or stored procedure can use the sys_refcursor parameter to pass on or pass out the cursor result set. A function can return sys_refcursor to return the cursor result set.

    +
    Figure 3 dynamic_cursor_define::=
    +

  2. Open the static cursor: Execute the SELECT statement corresponding to the cursor. The query result is placed in the work area and the pointer directs to the head of the work area to identify the cursor result set. If the cursor query statement contains the FOR UPDATE option, the OPEN statement locks the data row corresponding to the cursor result set in the database table.

    Figure 4 shows the syntax diagram for opening a static cursor.

    +
    Figure 4 open_static_cursor::=
    +

    Open the dynamic cursor: Use the OPEN FOR statement to open the dynamic cursor and the SQL statement is dynamically bound.

    +

    Figure 5 shows the syntax diagram for opening a dynamic cursor.

    +
    Figure 5 open_dynamic_cursor::=
    +

    A PL/SQL program cannot use the OPEN statement to repeatedly open a cursor.

    +

  3. Fetch cursor data: Retrieve data rows in the result set and place them in specified output variables.

    Figure 6 shows the syntax diagram for fetching cursor data.

    +
    Figure 6 fetch_cursor::=
    +

  4. Process the record.
  5. Continue to process until the active set has no record.
  6. Close the cursor: After all data in the cursor result set has been fetched, close the cursor immediately to release the system resources it uses and to invalidate its work area, so that the FETCH statement can no longer fetch data from it. A closed cursor can be reopened using the OPEN statement.

    Figure 7 shows the syntax diagram for closing a cursor.

    +
    Figure 7 close_cursor::=
    +

+
+

Attributes

Cursor attributes are used to control program procedures or learn about program status. When a DML statement is executed, the PL/SQL opens a built-in cursor and processes its result. A cursor is a memory segment for maintaining query results. It is opened when a DML statement is executed and closed when the execution is finished. An explicit cursor has the following attributes:

  • %FOUND: Boolean attribute, which returns TRUE if the most recent fetch returned a row.
  • %NOTFOUND: Boolean attribute, which works in the opposite way to %FOUND.
  • %ISOPEN: Boolean attribute, which returns TRUE if the cursor is open.
  • %ROWCOUNT: numeric attribute, which returns the number of rows fetched from the cursor so far.
+

Examples

-- Specify the method for passing cursor parameters:
+CREATE OR REPLACE PROCEDURE cursor_proc1()
+AS 
+DECLARE
+    DEPT_NAME VARCHAR(100);
+    DEPT_LOC NUMBER(4);
+    -- Define a cursor:
+    CURSOR C1 IS 
+        SELECT section_name, place_id FROM sections WHERE section_id <= 50;
+    CURSOR C2(sect_id INTEGER) IS
+        SELECT section_name, place_id FROM sections WHERE section_id <= sect_id;
+    TYPE CURSOR_TYPE IS REF CURSOR;
+    C3 CURSOR_TYPE;
+    SQL_STR VARCHAR(100);
+BEGIN
+    OPEN C1;-- Open the cursor:
+    LOOP
+        -- Fetch data from the cursor:
+        FETCH C1 INTO DEPT_NAME, DEPT_LOC;
+        EXIT WHEN C1%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(DEPT_NAME||'---'||DEPT_LOC);
+    END LOOP;
+    CLOSE C1;-- Close the cursor.
+
+    OPEN C2(10);
+    LOOP
+        FETCH C2 INTO DEPT_NAME, DEPT_LOC;
+        EXIT WHEN C2%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(DEPT_NAME||'---'||DEPT_LOC);
+    END LOOP;
+    CLOSE C2;
+    
+    SQL_STR := 'SELECT section_name, place_id FROM sections WHERE section_id <= :DEPT_NO;';
+    OPEN C3 FOR SQL_STR USING 50;
+    LOOP
+        FETCH C3 INTO DEPT_NAME, DEPT_LOC;
+        EXIT WHEN C3%NOTFOUND;
+        DBMS_OUTPUT.PUT_LINE(DEPT_NAME||'---'||DEPT_LOC);
+    END LOOP;
+    CLOSE C3;
+END;
+/
+
+CALL cursor_proc1();
+
+DROP PROCEDURE cursor_proc1;
+
+ +
+
-- Increase the salary of employees whose salary is lower than CNY3000 by CNY500:
+CREATE TABLE staffs_t1 AS TABLE staffs;
+
+CREATE OR REPLACE PROCEDURE cursor_proc2()
+AS 
+DECLARE
+   V_EMPNO  NUMBER(6);
+   V_SAL    NUMBER(8,2);
+   CURSOR C IS SELECT staff_id, salary FROM staffs_t1; 
+BEGIN
+   OPEN C;
+   LOOP
+      FETCH C INTO V_EMPNO, V_SAL;
+      EXIT WHEN C%NOTFOUND; 
+      IF V_SAL<=3000 THEN
+            UPDATE staffs_t1 SET salary =salary + 500 WHERE staff_id = V_EMPNO;
+      END IF;
+   END LOOP;
+   CLOSE C;
+END; 
+/
+
+CALL cursor_proc2();
+
+-- Drop the stored procedure:
+DROP PROCEDURE cursor_proc2;
+DROP TABLE staffs_t1;
+
+ +
+
-- Use function parameters of the SYS_REFCURSOR type:
+CREATE OR REPLACE PROCEDURE proc_sys_ref(O OUT SYS_REFCURSOR)
+IS 
+C1 SYS_REFCURSOR; 
+BEGIN 
+OPEN C1 FOR SELECT section_ID FROM sections ORDER BY section_ID; 
+O := C1; 
+END; 
+/
+
+DECLARE 
+C1 SYS_REFCURSOR; 
+TEMP NUMBER(4); 
+BEGIN 
+proc_sys_ref(C1); 
+LOOP 
+  FETCH C1 INTO TEMP; 
+  DBMS_OUTPUT.PUT_LINE(C1%ROWCOUNT);
+  EXIT WHEN C1%NOTFOUND; 
+END LOOP;  
+END; 
+/
+
+-- Drop the stored procedure:
+DROP PROCEDURE proc_sys_ref;
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0548.html b/docs/dws/dev/dws_04_0548.html new file mode 100644 index 00000000..c5088d20 --- /dev/null +++ b/docs/dws/dev/dws_04_0548.html @@ -0,0 +1,68 @@ + + +

Implicit Cursor

+

The system automatically creates implicit cursors for non-query statements, such as ALTER and DROP, and creates work areas for these statements. The implicit cursor is named SQL, and it is defined by the system.

+

Overview

Implicit cursor operations, such as definition, opening, value assignment, and closing, are automatically performed by the system. Users can use only the attributes of an implicit cursor to complete operations. The data stored in the work area of an implicit cursor is the result of the most recently executed SQL statement and is not related to user-defined explicit cursors.

+

Invocation format: SQL% followed by the attribute name.

+

INSERT, UPDATE, DROP, and SELECT statements do not require explicitly defined cursors.

+
+
+

Attributes

An implicit cursor has the following attributes:

  • SQL%FOUND: Boolean attribute, which returns TRUE if the most recent SQL statement affected at least one row.
  • SQL%NOTFOUND: Boolean attribute, which works in the opposite way to SQL%FOUND.
  • SQL%ISOPEN: Boolean attribute, which is always FALSE, because the system closes an implicit cursor immediately after its statement is executed.
  • SQL%ROWCOUNT: numeric attribute, which returns the number of rows affected by the most recent SQL statement.
+

Examples

-- Delete all employees in a department from the EMP table. If the department has no employees, delete the department from the DEPT table.
+CREATE TABLE staffs_t1 AS TABLE staffs;
+CREATE TABLE sections_t1 AS TABLE sections;
+
+CREATE OR REPLACE PROCEDURE proc_cursor3() 
+AS 
+    DECLARE
+    V_DEPTNO NUMBER(4) := 100;
+    BEGIN
+        DELETE FROM staffs WHERE section_ID = V_DEPTNO;
+        -- Proceed based on cursor status:
+        IF SQL%NOTFOUND THEN
+        DELETE FROM sections_t1 WHERE section_ID = V_DEPTNO;
+        END IF;
+    END;
+/
+
+CALL proc_cursor3();
+
+-- Drop the stored procedure and the temporary table:
+DROP PROCEDURE proc_cursor3;
+DROP TABLE staffs_t1;
+DROP TABLE sections_t1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0549.html b/docs/dws/dev/dws_04_0549.html new file mode 100644 index 00000000..99672ebf --- /dev/null +++ b/docs/dws/dev/dws_04_0549.html @@ -0,0 +1,98 @@ + + +

Cursor Loop

+

The use of cursors in WHILE and LOOP statements is called a cursor loop. Generally, OPEN, FETCH, and CLOSE statements are needed in such a loop. The FOR AS loop described below applies to static cursors and does not require the explicit define, OPEN, FETCH, and CLOSE steps of a static cursor.

+

Syntax

Figure 1 shows the syntax diagram for the FOR AS loop.

+
Figure 1 FOR_AS_loop::=
+
+

Precautions

+
+ +

Examples

BEGIN
+FOR ROW_TRANS IN 
+        SELECT first_name FROM staffs 
+    LOOP 
+        DBMS_OUTPUT.PUT_LINE (ROW_TRANS.first_name );
+    END LOOP;
+END;
+/
+
+-- Create a table:
+CREATE TABLE integerTable1( A INTEGER) DISTRIBUTE BY hash(A);
+CREATE TABLE integerTable2( B INTEGER) DISTRIBUTE BY hash(B);
+INSERT INTO integerTable2 VALUES(2);
+
+-- Multiple cursors share the parameters of cursor attributes:
+DECLARE
+   CURSOR C1 IS SELECT A FROM integerTable1;--Declare the cursor.
+   CURSOR C2 IS SELECT B FROM integerTable2;
+   PI_A INTEGER;
+   PI_B INTEGER;
+BEGIN
+    OPEN C1;-- Open the cursor.
+   OPEN C2;
+   FETCH C1 INTO PI_A; ----  The value of C1%FOUND and C2%FOUND is FALSE.
+   FETCH C2 INTO PI_B; ----  The value of C1%FOUND and C2%FOUND is TRUE.
+-- Determine the cursor status:
+   IF C1%FOUND THEN
+       IF C2%FOUND THEN
+         DBMS_OUTPUT.PUT_LINE('Dual cursor share paremeter.');
+      END IF;
+   END IF;
+    CLOSE C1;-- Close the cursor.
+   CLOSE C2;
+END;
+/
+
+-- Drop the temporary table:
+DROP TABLE integerTable1;
+DROP TABLE integerTable2;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0550.html b/docs/dws/dev/dws_04_0550.html new file mode 100644 index 00000000..e35d8f69 --- /dev/null +++ b/docs/dws/dev/dws_04_0550.html @@ -0,0 +1,25 @@ + + +

Advanced Packages

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0551.html b/docs/dws/dev/dws_04_0551.html new file mode 100644 index 00000000..1a05ce68 --- /dev/null +++ b/docs/dws/dev/dws_04_0551.html @@ -0,0 +1,870 @@ + + +

DBMS_LOB

+

Related Interfaces

Table 1 provides all interfaces supported by the DBMS_LOB package.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBMS_LOB

API

+

Description

+

DBMS_LOB.GETLENGTH

+

Obtains and returns the specified length of a LOB object.

+

DBMS_LOB.OPEN

+

Opens a LOB and returns a LOB descriptor.

+

DBMS_LOB.READ

+

Loads a part of LOB contents to BUFFER area according to the specified length and initial position offset.

+

DBMS_LOB.WRITE

+

Copies contents in BUFFER area to LOB according to the specified length and initial position offset.

+

DBMS_LOB.WRITEAPPEND

+

Copies contents in BUFFER area to the end part of LOB according to the specified length.

+

DBMS_LOB.COPY

+

Copies contents in BLOB to another BLOB according to the specified length and initial position offset.

+

DBMS_LOB.ERASE

+

Deletes contents in BLOB according to the specified length and initial position offset.

+

DBMS_LOB.CLOSE

+

Closes a LOB descriptor.

+

DBMS_LOB.INSTR

+

Returns the position of the Nth occurrence of a character string in LOB.

+

DBMS_LOB.COMPARE

+

Compares two LOBs or a certain part of two LOBs.

+

DBMS_LOB.SUBSTR

+

Reads the substring of a LOB and returns the number of read bytes or the number of characters.

+

DBMS_LOB.TRIM

+

Truncates a LOB to the specified length. After the execution is complete, the length of the LOB is set to the value specified by the newlen parameter.

+

DBMS_LOB.CREATETEMPORARY

+

Creates a temporary BLOB or CLOB.

+

DBMS_LOB.APPEND

+

Adds the content of a LOB to another LOB.

+
+
+ +
+

Examples

-- Obtain the length of the character string.
+SELECT DBMS_LOB.GETLENGTH('12345678');
+
+DECLARE
+myraw  RAW(100);
+amount INTEGER :=2;
+buffer INTEGER :=1;
+begin
+DBMS_LOB.READ('123456789012345',amount,buffer,myraw);
+dbms_output.put_line(myraw);
+end;
+/
+
+CREATE TABLE blob_Table (t1 blob) DISTRIBUTE BY REPLICATION;
+CREATE TABLE blob_Table_bak (t2 blob) DISTRIBUTE BY REPLICATION;
+INSERT INTO blob_Table VALUES('abcdef');
+INSERT INTO blob_Table_bak VALUES('22222');
+
+DECLARE
+str varchar2(100) := 'abcdef';
+source raw(100);
+dest blob;
+copyto blob;
+amount int;
+PSV_SQL varchar2(100);
+PSV_SQL1 varchar2(100);
+a int :=1;
+len int;
+BEGIN
+source := utl_raw.cast_to_raw(str);
+amount := utl_raw.length(source);
+
+PSV_SQL :='select * from blob_Table for update';
+PSV_SQL1 := 'select * from blob_Table_bak for update';
+
+EXECUTE IMMEDIATE PSV_SQL into dest;
+EXECUTE IMMEDIATE PSV_SQL1 into copyto;
+
+DBMS_LOB.WRITE(dest, amount, 1, source);
+DBMS_LOB.WRITEAPPEND(dest, amount, source);
+
+DBMS_LOB.ERASE(dest, a, 1);
+DBMS_OUTPUT.PUT_LINE(a);
+DBMS_LOB.COPY(copyto, dest, amount, 10, 1);
+DBMS_LOB.CLOSE(dest);
+RETURN;
+END;
+/
+
+--Delete the table.
+DROP TABLE blob_Table;
+DROP TABLE blob_Table_bak;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0552.html b/docs/dws/dev/dws_04_0552.html new file mode 100644 index 00000000..143d4e68 --- /dev/null +++ b/docs/dws/dev/dws_04_0552.html @@ -0,0 +1,101 @@ + + +

DBMS_RANDOM

+

Related Interfaces

Table 1 provides all interfaces supported by the DBMS_RANDOM package.

+ +
+ + + + + + + + + + +
Table 1 DBMS_RANDOM interface parameters

API

+

Description

+

DBMS_RANDOM.SEED

+

Sets a seed for a random number.

+

DBMS_RANDOM.VALUE

+

Generates a random number between a specified low and a specified high.

+
+
+ + +

The low and high bounds may be any values; the only requirement is that their parameter type is NUMERIC.

+
+
+

Examples

+
-- Generate a random number between 0 and 1:
+SELECT DBMS_RANDOM.VALUE(0,1);
+
+-- To generate a random integer in a given range, pass the bounds to VALUE and truncate the result. (The high bound itself is never returned.) For example, to generate a random integer between 0 and 99:
+SELECT TRUNC(DBMS_RANDOM.VALUE(0,100));
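
-- A further sketch (the seed value 42 is arbitrary): seeding the generator
-- with DBMS_RANDOM.SEED makes the subsequent sequence of values reproducible.
SELECT DBMS_RANDOM.SEED(42);
SELECT DBMS_RANDOM.VALUE(0,1);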
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0553.html b/docs/dws/dev/dws_04_0553.html new file mode 100644 index 00000000..219f9595 --- /dev/null +++ b/docs/dws/dev/dws_04_0553.html @@ -0,0 +1,121 @@ + + +

DBMS_OUTPUT

+

Related Interfaces

Table 1 provides all interfaces supported by the DBMS_OUTPUT package.

+ +
+ + + + + + + + + + + + + +
Table 1 DBMS_OUTPUT

API

+

Description

+

DBMS_OUTPUT.PUT_LINE

+

Outputs the specified text. The text length cannot exceed 32,767 bytes.

+

DBMS_OUTPUT.PUT

+

Outputs the specified text without appending a line break, so that subsequent output continues on the same line. The text length cannot exceed 32,767 bytes.

+

DBMS_OUTPUT.ENABLE

+

Sets the buffer area size. If this interface is not specified, the maximum buffer size is 20,000 bytes and the minimum buffer size is 2000 bytes. If the specified buffer size is less than 2000 bytes, the default minimum buffer size is applied.

+
+
+ +

The PUT_LINE procedure writes a row of text carrying a line end symbol in the buffer. The DBMS_OUTPUT.PUT_LINE function prototype is:

+
DBMS_OUTPUT.PUT_LINE (
+item IN VARCHAR2);
+
+ +
+ +
+ + + + + + + +
Table 2 DBMS_OUTPUT.PUT_LINE interface parameters

Parameter

+

Description

+

item

+

Specifies the text to be written to the buffer.

+
+
+ +

The stored procedure PUT writes the specified text to the output without appending a line break, so that subsequent output continues on the same line. The DBMS_OUTPUT.PUT function prototype is:

+
DBMS_OUTPUT.PUT (
+item IN VARCHAR2);
+
+ +
+ +
+ + + + + + + +
Table 3 DBMS_OUTPUT.PUT interface parameters

Parameter

+

Description

+

item

+

Specifies the text to be written to the output.

+
+
+ +

The stored procedure ENABLE sets the output buffer size. If the size is not specified, it contains a maximum of 20,000 bytes. The DBMS_OUTPUT.ENABLE function prototype is:

+
DBMS_OUTPUT.ENABLE (
+buf IN INTEGER);
+
+ +
+ +
+ + + + + + + +
Table 4 DBMS_OUTPUT.ENABLE interface parameters

Parameter

+

Description

+

buf

+

Sets the buffer area size.

+
+
+
+

Examples

BEGIN
+    DBMS_OUTPUT.ENABLE(50);
+    DBMS_OUTPUT.PUT ('hello, ');
+    DBMS_OUTPUT.PUT_LINE('database!');-- Displaying "hello, database!"
+END;
+/
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0554.html b/docs/dws/dev/dws_04_0554.html new file mode 100644 index 00000000..78bceb0c --- /dev/null +++ b/docs/dws/dev/dws_04_0554.html @@ -0,0 +1,198 @@ + + +

UTL_RAW

+

Related Interfaces

Table 1 provides all interfaces supported by the UTL_RAW package.

+ +
+ + + + + + + + + + + + + + + + +
Table 1 UTL_RAW

API

+

Description

+

UTL_RAW.CAST_FROM_BINARY_INTEGER

+

Converts an INTEGER type value to a binary representation (RAW type).

+

UTL_RAW.CAST_TO_BINARY_INTEGER

+

Converts a binary representation (RAW type) to an INTEGER type value.

+

UTL_RAW.LENGTH

+

Obtains the length of the RAW type object.

+

UTL_RAW.CAST_TO_RAW

+

Converts a VARCHAR2 type value to a binary expression (RAW type).

+
+
+

The external representation of RAW data is hexadecimal, and its internal storage form is binary. For example, the binary value 11001011 is represented externally as 'CB', and 'CB' is also what is passed to the type conversion functions.
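For illustration, a minimal sketch of the integer conversions (the results shown are illustrative and assume the default big-endian representation; the exact output format may vary):

-- INTEGER to RAW and back:
SELECT UTL_RAW.CAST_FROM_BINARY_INTEGER(203);        -- e.g. 000000CB
SELECT UTL_RAW.CAST_TO_BINARY_INTEGER('000000CB');   -- 203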

+
+ +
+

Examples

-- Perform operations on RAW data in a stored procedure.
+CREATE OR REPLACE PROCEDURE proc_raw
+AS
+str varchar2(100) := 'abcdef';
+source raw(100);
+amount integer;
+BEGIN
+source := utl_raw.cast_to_raw(str);--Convert the type.
+amount := utl_raw.length(source);--Obtain the length.
+dbms_output.put_line(amount);
+END;
+/
+
+-- Invoke the stored procedure.
+CALL proc_raw();
+
+-- Delete the stored procedure.
+DROP PROCEDURE proc_raw;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0555.html b/docs/dws/dev/dws_04_0555.html new file mode 100644 index 00000000..68dd7e2d --- /dev/null +++ b/docs/dws/dev/dws_04_0555.html @@ -0,0 +1,571 @@ + + +

DBMS_JOB

+

Related Interfaces

Table 1 lists all interfaces supported by the DBMS_JOB package.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBMS_JOB

Interface

+

Description

+

DBMS_JOB.SUBMIT

+

Submits a job to the job queue. The job number is automatically generated by the system.

+

DBMS_JOB.ISUBMIT

+

Submits a job to the job queue. The job number is specified by the user.

+

DBMS_JOB.REMOVE

+

Removes a job from the job queue by job number.

+

DBMS_JOB.BROKEN

+

Disables or enables job execution.

+

DBMS_JOB.CHANGE

+

Modifies user-definable attributes of a job, including the job description, next execution time, and execution interval.

+

DBMS_JOB.WHAT

+

Modifies the job description of a job.

+

DBMS_JOB.NEXT_DATE

+

Modifies the next execution time of a job.

+

DBMS_JOB.INTERVAL

+

Modifies the execution interval of a job.

+

DBMS_JOB.CHANGE_OWNER

+

Modifies the owner of a job.

+
+
+ + +
+ +

Constraints

  1. After a new job is created, this job belongs to the current coordinator only, that is, this job can be scheduled and executed only on the current coordinator. Other coordinators will not schedule or execute this job. All coordinators can query, modify, and delete jobs created on other CNs.
  2. Create, update, and delete jobs only using the procedures provided by the DBMS_JOB package. These procedures synchronize job information between different CNs and associate primary keys between the pg_jobs tables. If you use DML statements to add, delete, or modify records in the pg_jobs table, job information will become inconsistent between CNs and system catalogs may fail to be associated, compromising internal job management.
  3. Each user-created task is bound to a CN. If the automatic migration function is not enabled, task statuses cannot be updated in real time when the CN is faulty during task execution. When a CN fails, all jobs on this CN cannot be scheduled or executed until the CN is restored manually. Enable the automatic migration function on CNs, so that jobs on the faulty CN will be migrated to other CNs for scheduling.
  4. For each job, the hosting CN updates the real-time job information (including the job status, last execution start time, last execution end time, next execution start time, the number of execution failures if any) to the pg_jobs table, and synchronizes the information to other CNs, ensuring consistent job information between different CNs. In the case of CN failures, job information synchronization is reattempted by the hosting CNs, which increases job execution time. Although job information fails to be synchronized between CNs, job information can still be properly updated in the pg_jobs table on the hosting CNs, and jobs can be executed successfully. After a CN recovers, job information such as job execution time and status in its pg_jobs table may be incorrect and will be updated only after the jobs are executed again on related CNs.
  5. For each job, a thread is established to execute it. If multiple jobs are triggered concurrently as scheduled, the system will need some time to start the required threads, resulting in a latency of 0.1 ms in job execution.
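Based on the interfaces in Table 1, the following is a minimal sketch of submitting and removing a job (my_proc and the job number 101 are placeholders; the exact argument list of SUBMIT may vary by version):

-- Submit a job that runs now and then repeats daily; the job number is generated by the system:
SELECT DBMS_JOB.SUBMIT('call my_proc();', sysdate, 'sysdate + 1');

-- Remove the job by its number:
SELECT DBMS_JOB.REMOVE(101);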
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0556.html b/docs/dws/dev/dws_04_0556.html new file mode 100644 index 00000000..f125c466 --- /dev/null +++ b/docs/dws/dev/dws_04_0556.html @@ -0,0 +1,987 @@ + + +

DBMS_SQL

+

Related Interfaces

Table 1 lists interfaces supported by the DBMS_SQL package.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBMS_SQL

API

+

Description

+

DBMS_SQL.OPEN_CURSOR

+

Opens a cursor.

+

DBMS_SQL.CLOSE_CURSOR

+

Closes an open cursor.

+

DBMS_SQL.PARSE

+

Transmits a group of SQL statements to a cursor. Currently, only the SELECT statement is supported.

+

DBMS_SQL.EXECUTE

+

Performs a set of dynamically defined operations on the cursor.

+

DBMS_SQL.FETCH_ROWS

+

Reads a row of cursor data.

+

DBMS_SQL.DEFINE_COLUMN

+

Dynamically defines a column.

+

DBMS_SQL.DEFINE_COLUMN_CHAR

+

Dynamically defines a column of the CHAR type.

+

DBMS_SQL.DEFINE_COLUMN_INT

+

Dynamically defines a column of the INT type.

+

DBMS_SQL.DEFINE_COLUMN_LONG

+

Dynamically defines a column of the LONG type.

+

DBMS_SQL.DEFINE_COLUMN_RAW

+

Dynamically defines a column of the RAW type.

+

DBMS_SQL.DEFINE_COLUMN_TEXT

+

Dynamically defines a column of the TEXT type.

+

DBMS_SQL.DEFINE_COLUMN_UNKNOWN

+

Dynamically defines a column of an unknown type.

+

DBMS_SQL.COLUMN_VALUE

+

Reads a dynamically defined column value.

+

DBMS_SQL.COLUMN_VALUE_CHAR

+

Reads a dynamically defined column value of the CHAR type.

+

DBMS_SQL.COLUMN_VALUE_INT

+

Reads a dynamically defined column value of the INT type.

+

DBMS_SQL.COLUMN_VALUE_LONG

+

Reads a dynamically defined column value of the LONG type.

+

DBMS_SQL.COLUMN_VALUE_RAW

+

Reads a dynamically defined column value of the RAW type.

+

DBMS_SQL.COLUMN_VALUE_TEXT

+

Reads a dynamically defined column value of the TEXT type.

+

DBMS_SQL.COLUMN_VALUE_UNKNOWN

+

Reads a dynamically defined column value of an unknown type.

+

DBMS_SQL.IS_OPEN

+

Checks whether a cursor is opened.

+
+
+
  • You are advised to use dbms_sql.define_column and dbms_sql.column_value to define columns.
  • If the size of the result set is greater than the value of work_mem, the result set will be flushed to disk. The value of work_mem must be no greater than 512 MB.
+
+ + +

This function returns TRUE if the cursor is in the open, parse, execute, or define state. If the cursor ID is unknown, an error is reported. In all other cases, FALSE is returned.

+
The function prototype of DBMS_SQL.IS_OPEN is:
DBMS_SQL.IS_OPEN(
+cursorid                 IN    INTEGER
+)
+RETURN BOOLEAN;
+
+ +
+
+ +
+ + + + + + + +
Table 20 DBMS_SQL.IS_OPEN interface parameters

Parameter Name

+

Description

+

cursorid

+

ID of the cursor to be queried

+
+
+
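For illustration, a minimal sketch of using IS_OPEN inside a stored procedure to close a cursor only if it is still open (cursorid is assumed to come from dbms_sql.open_cursor):

IF dbms_sql.is_open(cursorid) THEN
    dbms_sql.close_cursor(cursorid);
END IF;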

Examples

-- Perform operations on RAW data in a stored procedure.
+create or replace procedure pro_dbms_sql_all_02(in_raw raw,v_in int,v_offset int)
+as 
+cursorid int;
+v_id int;
+v_info bytea :=1;
+query varchar(2000);
+execute_ret int;
+define_column_ret_raw bytea :='1';
+define_column_ret int;
+begin
+drop table if exists pro_dbms_sql_all_tb1_02 ;
+create table pro_dbms_sql_all_tb1_02(a int ,b blob);
+insert into pro_dbms_sql_all_tb1_02 values(1,HEXTORAW('DEADBEEE'));
+insert into pro_dbms_sql_all_tb1_02 values(2,in_raw);
+query := 'select * from pro_dbms_sql_all_tb1_02 order by 1';
+-- Open a cursor.
+cursorid := dbms_sql.open_cursor();
+-- Compile the cursor.
+dbms_sql.parse(cursorid, query, 1);
+-- Define a column.
+define_column_ret:= dbms_sql.define_column(cursorid,1,v_id);
+define_column_ret_raw:= dbms_sql.define_column_raw(cursorid,2,v_info,10);
+-- Execute the cursor.
+execute_ret := dbms_sql.execute(cursorid);
+loop 
+exit when (dbms_sql.fetch_rows(cursorid) <= 0);
+-- Obtain values.
+dbms_sql.column_value(cursorid,1,v_id);
+dbms_sql.column_value_raw(cursorid,2,v_info,v_in,v_offset);
+-- Output the result.
+dbms_output.put_line('id:'|| v_id || ' info:' || v_info);
+end loop;
+-- Close the cursor.
+dbms_sql.close_cursor(cursorid);
+end;
+/
+-- Invoke the stored procedure.
+call pro_dbms_sql_all_02(HEXTORAW('DEADBEEF'),0,1);
+
+-- Delete the stored procedure.
+DROP PROCEDURE pro_dbms_sql_all_02;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0558.html b/docs/dws/dev/dws_04_0558.html new file mode 100644 index 00000000..8a3d2509 --- /dev/null +++ b/docs/dws/dev/dws_04_0558.html @@ -0,0 +1,114 @@ + + +

Debugging

+

Syntax

RAISE has the following five syntax formats:

+
Figure 1 raise_format::=
+
Figure 2 raise_condition::=
+
Figure 3 raise_sqlstate::=
+
Figure 4 raise_option::=
+
Figure 5 raise::=
+

Parameter description:

+ +

If neither a condition name nor an SQLSTATE is designated in a RAISE EXCEPTION command, the RAISE EXCEPTION (P0001) is used by default. If no message text is designated, the condition name or SQLSTATE is used as the message text by default.

+

If SQLSTATE designates an error code, the code is not limited to the defined error codes: it can be any five-character code consisting of digits or uppercase ASCII letters, other than 00000. Do not use error codes that end with three zeros, because such codes are category codes and can be trapped only by trapping the whole category.

+
+

The syntax described in Figure 5 takes no parameters. This form can be used only in an EXCEPTION clause of a BEGIN block, where it re-throws the error currently being handled.

+
+
+

Examples

Display error and hint information when a transaction terminates:
CREATE OR REPLACE PROCEDURE proc_raise1(user_id in integer)
+AS 
+BEGIN 
+RAISE EXCEPTION 'Noexistence ID --> %',user_id USING HINT = 'Please check your user ID'; 
+END; 
+/
+
+call proc_raise1(300011);
+
+-- Execution result:
+ERROR:  Noexistence ID --> 300011
+HINT:  Please check your user ID
+
+ +
+
+
Two methods are available for setting SQLSTATE:
CREATE OR REPLACE PROCEDURE proc_raise2(user_id in integer)
+AS 
+BEGIN 
+RAISE 'Duplicate user ID: %',user_id USING ERRCODE = 'unique_violation'; 
+END; 
+/
+
+\set VERBOSITY verbose
+call proc_raise2(300011);
+
+-- Execution result:
+ERROR:  Duplicate user ID: 300011
+SQLSTATE: 23505
+LOCATION:  exec_stmt_raise, pl_exec.cpp:3482
+
+ +
+
+

If the main parameter is a condition name or SQLSTATE, the following applies:

+

RAISE division_by_zero;

+

RAISE SQLSTATE '22012';

+

For example:

+
CREATE OR REPLACE PROCEDURE division(div in integer, dividend in integer) 
+AS 
+DECLARE 
+res int; 
+    BEGIN 
+    IF dividend=0 THEN 
+        RAISE division_by_zero; 
+        RETURN; 
+    ELSE 
+        res := div/dividend; 
+        RAISE INFO 'division result: %', res;
+        RETURN; 
+    END IF; 
+    END; 
+/
+call division(3,0);
+
+-- Execution result:
+ERROR:  division_by_zero
+
Alternatively:
RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id;
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0559.html b/docs/dws/dev/dws_04_0559.html new file mode 100644 index 00000000..06acd69e --- /dev/null +++ b/docs/dws/dev/dws_04_0559.html @@ -0,0 +1,19 @@ + + +

System Catalogs and System Views

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0560.html b/docs/dws/dev/dws_04_0560.html new file mode 100644 index 00000000..82e0f577 --- /dev/null +++ b/docs/dws/dev/dws_04_0560.html @@ -0,0 +1,15 @@ + + +

Overview of System Catalogs and System Views

+

System catalogs are used by GaussDB(DWS) to store structure metadata. They are a core component of the GaussDB(DWS) database system and provide control information for it. These system catalogs contain cluster installation information and information about the various queries and processes in GaussDB(DWS). You can collect information about the database by querying the system catalogs.
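For example, a simple metadata query against the pg_class system catalog (my_table is a hypothetical table name):

SELECT relname, relkind FROM pg_class WHERE relname = 'my_table';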

+

System views provide ways to query system catalogs and internal database status. If some columns in one or more tables of a database are frequently queried, an administrator can define a view over these columns, and users can then access them directly without entering search criteria each time. A view differs from a base table: it is a virtual object rather than a physical one. A database stores only the definition of a view, not its data; the data remains in the original base tables. If data in a base table changes, the data visible through the view changes accordingly. In this sense, a view is like a window through which users can observe the data they are interested in and how it changes. A view is evaluated every time it is referenced.

+

When separation of duties is enabled, non-administrators have no permission to view system catalogs and views. In other scenarios, system catalogs and views are either visible only to administrators or visible to all users. Some of the system catalogs and views below are marked as requiring administrator permissions; they are accessible only to administrators.

+

Do not add, delete, or modify system catalogs or system views. Manual modification or damage to system catalogs or system views may cause system information inconsistency, system control exceptions, or even cluster unavailability.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0561.html b/docs/dws/dev/dws_04_0561.html new file mode 100644 index 00000000..d7087b87 --- /dev/null +++ b/docs/dws/dev/dws_04_0561.html @@ -0,0 +1,158 @@ + + +

System Catalogs

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0562.html b/docs/dws/dev/dws_04_0562.html new file mode 100644 index 00000000..b474db3d --- /dev/null +++ b/docs/dws/dev/dws_04_0562.html @@ -0,0 +1,106 @@ + + +

GS_OBSSCANINFO

+

GS_OBSSCANINFO defines the OBS runtime information scanned in cluster acceleration scenarios. Each record corresponds to a piece of runtime information of a foreign table on OBS in a query.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GS_OBSSCANINFO columns

Name

+

Type

+

Reference

+

Description

+

query_id

+

bigint

+

-

+

Specifies a query ID.

+

user_id

+

text

+

-

+

Specifies a database user who performs queries.

+

table_name

+

text

+

-

+

Specifies the name of a foreign table on OBS.

+

file_type

+

text

+

-

+

Specifies the format of files storing the underlying data.

+

time_stamp

+

timestamp

+

-

+

Specifies the scanning start time.

+

actual_time

+

double

+

-

+

Specifies the scanning execution time in seconds.

+

file_scanned

+

bigint

+

-

+

Specifies the number of files scanned.

+

data_size

+

double

+

-

+

Specifies the size of data scanned in bytes.

+

billing_info

+

text

+

-

+

Specifies the reserved fields.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0564.html b/docs/dws/dev/dws_04_0564.html new file mode 100644 index 00000000..f742d70d --- /dev/null +++ b/docs/dws/dev/dws_04_0564.html @@ -0,0 +1,132 @@ + + +

GS_WLM_INSTANCE_HISTORY

+

The GS_WLM_INSTANCE_HISTORY system catalog stores information about resource usage related to CN or DN instances. Each record in the system table indicates the resource usage of an instance at a specific time point, including the memory, number of CPU cores, disk I/O, physical I/O of the process, and logical I/O of the process.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GS_WLM_INSTANCE_HISTORY column

Name

+

Type

+

Description

+

instancename

+

text

+

Instance name

+

timestamp

+

timestamp with time zone

+

Timestamp

+

used_cpu

+

int

+

CPU usage of an instance

+

free_mem

+

int

+

Unused memory of an instance (unit: MB)

+

used_mem

+

int

+

Used memory of an instance (unit: MB)

+

io_await

+

real

+

Specifies the io_wait value (average value within 10 seconds) of the disk used by an instance.

+

io_util

+

real

+

Specifies the io_util value (average value within 10 seconds) of the disk used by an instance.

+

disk_read

+

real

+

Specifies the disk read rate (average value within 10 seconds) of an instance (unit: KB/s).

+

disk_write

+

real

+

Specifies the disk write rate (average value within 10 seconds) of an instance (unit: KB/s).

+

process_read

+

bigint

+

Specifies the read rate (excluding the number of bytes read from the disk pagecache) of the corresponding instance process that reads data from a disk. (Unit: KB/s)

+

process_write

+

bigint

+

Specifies the write rate (excluding the number of bytes written to the disk pagecache) of the corresponding instance process that writes data to a disk within 10 seconds. (Unit: KB/s)

+

logical_read

+

bigint

+

CN instance: N/A

+

DN instance: Specifies the logical read byte rate of the instance in the statistical interval (10 seconds). (Unit: KB/s)

+

logical_write

+

bigint

+

CN instance: N/A

+

DN instance: Specifies the logical write byte rate of the instance within the statistical interval (10 seconds). (Unit: KB/s)

+

read_counts

+

bigint

+

CN instance: N/A

+

DN instance: Specifies the total number of logical read operations of the instance in the statistical interval (10 seconds).

+

write_counts

+

bigint

+

CN instance: N/A

+

DN instance: Specifies the total number of logical write operations of the instance in the statistical interval (10 seconds).

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0565.html b/docs/dws/dev/dws_04_0565.html new file mode 100644 index 00000000..8fe4f934 --- /dev/null +++ b/docs/dws/dev/dws_04_0565.html @@ -0,0 +1,187 @@ + + +

GS_WLM_OPERATOR_INFO

+

GS_WLM_OPERATOR_INFO records operators of completed jobs. The data is dumped from the kernel to a system catalog.

+
  • This system catalog's schema is dbms_om.
  • This system catalog has a distribution column, the gaussdb column, in PostgreSQL databases only, not other databases.
  • The pg_catalog has the GS_WLM_OPERATOR_INFO view.
+
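For illustration, a sketch of finding the slowest operators of a completed query (the queryid value 12345 is a placeholder; the query uses the pg_catalog view mentioned above):

SELECT plan_node_name, duration, max_peak_memory, warning
FROM gs_wlm_operator_info
WHERE queryid = 12345
ORDER BY duration DESC;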
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GS_WLM_OPERATOR_INFO columns

Name

+

Type

+

Description

+

nodename

+

text

+

Name of the CN where the statement is executed

+

queryid

+

bigint

+

Internal query_id used for statement execution

+

pid

+

bigint

+

Thread ID of the backend

+

plan_node_id

+

integer

+

plan_node_id of the execution plan of a query

+

plan_node_name

+

text

+

Name of the operator corresponding to plan_node_id

+

start_time

+

timestamp with time zone

+

Time when an operator starts to process the first data record

+

duration

+

bigint

+

Total execution time of an operator. The unit is ms.

+

query_dop

+

integer

+

Degree of parallelism (DOP) of the current operator

+

estimated_rows

+

bigint

+

Number of rows estimated by the optimizer

+

tuple_processed

+

bigint

+

Number of elements returned by the current operator

+

min_peak_memory

+

integer

+

Minimum peak memory used by the current operator on all DNs. The unit is MB.

+

max_peak_memory

+

integer

+

Maximum peak memory used by the current operator on all DNs. The unit is MB.

+

average_peak_memory

+

integer

+

Average peak memory used by the current operator on all DNs. The unit is MB.

+

memory_skew_percent

+

integer

+

Memory usage skew of the current operator among DNs

+

min_spill_size

+

integer

+

Minimum spilled data among all DNs when a spill occurs. The unit is MB. The default value is 0.

+

max_spill_size

+

integer

+

Maximum spilled data among all DNs when a spill occurs. The unit is MB. The default value is 0.

+

average_spill_size

+

integer

+

Average spilled data among all DNs when a spill occurs. The unit is MB. The default value is 0.

+

spill_skew_percent

+

integer

+

DN spill skew when a spill occurs

+

min_cpu_time

+

bigint

+

Minimum execution time of the operator on all DNs. The unit is ms.

+

max_cpu_time

+

bigint

+

Maximum execution time of the operator on all DNs. The unit is ms.

+

total_cpu_time

+

bigint

+

Total execution time of the operator on all DNs. The unit is ms.

+

cpu_skew_percent

+

integer

+

Skew of the execution time among DNs.

+

warning

+

text

+

Warning. The following warnings are displayed:

+
  1. Sort/SetOp/HashAgg/HashJoin spill
  2. Spill file size large than 256MB
  3. Broadcast size large than 100MB
  4. Early spill
  5. Spill times is greater than 3
  6. Spill on memory adaptive
  7. Hash table conflict
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0566.html b/docs/dws/dev/dws_04_0566.html new file mode 100644 index 00000000..7efa2692 --- /dev/null +++ b/docs/dws/dev/dws_04_0566.html @@ -0,0 +1,13 @@ + + +

GS_WLM_SESSION_INFO

+

GS_WLM_SESSION_INFO records load management information about a completed job executed on all CNs. The data is dumped from the kernel to a system catalog.

+
  • This system catalog's schema is dbms_om.
  • This system catalog has a distribution column, the gaussdb column, in PostgreSQL databases only, not other databases.
  • The pg_catalog has the GS_WLM_SESSION_INFO view.
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0567.html b/docs/dws/dev/dws_04_0567.html new file mode 100644 index 00000000..dac64373 --- /dev/null +++ b/docs/dws/dev/dws_04_0567.html @@ -0,0 +1,150 @@ + + +

GS_WLM_USER_RESOURCE_HISTORY

+

The GS_WLM_USER_RESOURCE_HISTORY system table stores information about resources used by users and is valid only on CNs. Each record in the system table indicates the resource usage of a user at a time point, including the memory, number of CPU cores, storage space, temporary space, operator flushing space, logical I/O traffic, number of logical I/O times, and logical I/O rate. The memory, CPU, and I/O monitoring items record only the resource usage of complex jobs.

+

Data in the GS_WLM_USER_RESOURCE_HISTORY system table comes from the PG_TOTAL_USER_RESOURCE_INFO view.
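For illustration, a sketch of inspecting a user's most recent resource records from this system table (the quotes around "timestamp" are needed because it is a keyword):

SELECT username, "timestamp", used_memory, used_cpu, used_space
FROM gs_wlm_user_resource_history
ORDER BY "timestamp" DESC
LIMIT 10;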

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GS_WLM_USER_RESOURCE_HISTORY column

Name

+

Type

+

Description

+

username

+

text

+

Username

+

timestamp

+

timestamp with time zone

+

Timestamp

+

used_memory

+

int

+

Specifies the used memory (unit: MB)

+

total_memory

+

int

+

Available memory (unit: MB). 0 indicates that the available memory is not limited and depends on the maximum memory available in the database.

+

used_cpu

+

real

+

Number of CPU cores in use

+

total_cpu

+

int

+

Total number of CPU cores of the Cgroup associated with a user on the node

+

used_space

+

bigint

+

Used storage space (unit: KB)

+

total_space

+

bigint

+

Available storage space (unit: KB). -1 indicates that the storage space is not limited.

+

used_temp_space

+

bigint

+

Used temporary storage space (unit: KB)

+

total_temp_space

+

bigint

+

Available temporary storage space (unit: KB). -1 indicates that the maximum temporary storage space is not limited.

+

used_spill_space

+

bigint

+

Used space of operator flushing (unit: KB)

+

total_spill_space

+

bigint

+

Available storage space for operator flushing (unit: KB). The value -1 indicates that the maximum operator flushing space is not limited.

+

read_kbytes

+

bigint

+

Byte traffic of read operations in a monitoring period (unit: KB)

+

write_kbytes

+

bigint

+

Byte traffic of write operations in a monitoring period (unit: KB)

+

read_counts

+

bigint

+

Number of read operations in a monitoring period.

+

write_counts

+

bigint

+

Number of write operations in a monitoring period.

+

read_speed

+

real

+

Byte rate of read operations in a monitoring period (unit: KB)

+

write_speed

+

real

+

Byte rate of write operations in a monitoring period (unit: KB)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0568.html b/docs/dws/dev/dws_04_0568.html new file mode 100644 index 00000000..fcd2430a --- /dev/null +++ b/docs/dws/dev/dws_04_0568.html @@ -0,0 +1,97 @@ + + +

PG_AGGREGATE

+

pg_aggregate records information about aggregation functions. Each entry in pg_aggregate is an extension of an entry in pg_proc. The pg_proc entry carries the aggregate's name, input and output data types, and other information that is similar to ordinary functions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_AGGREGATE columns

Name

+

Type

+

Reference

+

Description

+

aggfnoid

+

regproc

+

PG_PROC.oid

+

PG_PROC OID of the aggregate function

+

aggtransfn

+

regproc

+

PG_PROC.oid

+

Transition function

+

aggcollectfn

+

regproc

+

PG_PROC.oid

+

Collection function

+

aggfinalfn

+

regproc

+

PG_PROC.oid

+

Final function (zero if none)

+

aggsortop

+

oid

+

PG_OPERATOR.oid

+

Associated sort operator (zero if none)

+

aggtranstype

+

oid

+

PG_TYPE.oid

+

Data type of the aggregate function's internal transition (state) data

+

agginitval

+

text

+

-

+

Initial value of the transition state. This is a text column containing the initial value in its external string representation. If this column is null, the transition state value starts out null.

+

agginitcollect

+

text

+

-

+

Initial value of the collection state. This is a text column containing the initial value in its external string representation. If this column is null, the collection state value starts out null.

+
+
+
+
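For example, to see how a built-in aggregate such as avg(integer) is assembled from its component functions, join pg_aggregate with pg_type. This is a minimal sketch; avg(integer) is only an illustrative choice.
-- Show the transition function, final function, and state type of avg(integer)
SELECT a.aggfnoid, a.aggtransfn, a.aggfinalfn, t.typname AS transition_type
FROM pg_aggregate a
JOIN pg_type t ON t.oid = a.aggtranstype
WHERE a.aggfnoid = 'avg(integer)'::regprocedure;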
+ +
+ diff --git a/docs/dws/dev/dws_04_0569.html b/docs/dws/dev/dws_04_0569.html new file mode 100644 index 00000000..60802ed5 --- /dev/null +++ b/docs/dws/dev/dws_04_0569.html @@ -0,0 +1,313 @@ + + +

PG_AM

+

PG_AM records information about index access methods. There is one row for each index access method supported by the system.

+ +
Table 1 PG_AM columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

amname

+

name

+

-

+

Name of the access method

+

amstrategies

+

smallint

+

-

+

Number of operator strategies for this access method, or zero if the access method does not have a fixed set of operator strategies

+

amsupport

+

smallint

+

-

+

Number of support routines for this access method

+

amcanorder

+

boolean

+

-

+

Whether the access method supports ordered scans sorted by the indexed column's value

+

amcanorderbyop

+

boolean

+

-

+

Whether the access method supports ordered scans sorted by the result of an operator on the indexed column

+

amcanbackward

+

boolean

+

-

+

Whether the access method supports backward scanning

+

amcanunique

+

boolean

+

-

+

Whether the access method supports unique indexes

+

amcanmulticol

+

boolean

+

-

+

Whether the access method supports multi-column indexes

+

amoptionalkey

+

boolean

+

-

+

Whether the access method supports a scan without any constraint for the first index column

+

amsearcharray

+

boolean

+

-

+

Whether the access method supports ScalarArrayOpExpr searches

+

amsearchnulls

+

boolean

+

-

+

Whether the access method supports IS NULL/NOT NULL searches

+

amstorage

+

boolean

+

-

+

Whether an index storage data type can differ from a column data type

+

amclusterable

+

boolean

+

-

+

Whether an index of this type can be clustered on

+

ampredlocks

+

boolean

+

-

+

Whether an index of this type manages fine-grained predicate locks

+

amkeytype

+

oid

+

PG_TYPE.oid

+

Type of data stored in index, or zero if not a fixed type

+

aminsert

+

regproc

+

PG_PROC.oid

+

"Insert this tuple" function

+

ambeginscan

+

regproc

+

PG_PROC.oid

+

"Prepare for index scan" function

+

amgettuple

+

regproc

+

PG_PROC.oid

+

"Next valid tuple" function, or zero if none

+

amgetbitmap

+

regproc

+

PG_PROC.oid

+

"Fetch all valid tuples" function, or zero if none

+

amrescan

+

regproc

+

PG_PROC.oid

+

"(Re)start index scan" function

+

amendscan

+

regproc

+

PG_PROC.oid

+

"Clean up after index scan" function

+

ammarkpos

+

regproc

+

PG_PROC.oid

+

"Mark current scan position" function

+

amrestrpos

+

regproc

+

PG_PROC.oid

+

"Restore marked scan position" function

+

ammerge

+

regproc

+

PG_PROC.oid

+

"Merge multiple indexes" function

+

ambuild

+

regproc

+

PG_PROC.oid

+

"Build new index" function

+

ambuildempty

+

regproc

+

PG_PROC.oid

+

"Build empty index" function

+

ambulkdelete

+

regproc

+

PG_PROC.oid

+

Bulk-delete function

+

amvacuumcleanup

+

regproc

+

PG_PROC.oid

+

Post-VACUUM cleanup function

+

amcanreturn

+

regproc

+

PG_PROC.oid

+

Function to check whether index supports index-only scans, or zero if none

+

amcostestimate

+

regproc

+

PG_PROC.oid

+

Function to estimate cost of an index scan

+

amoptions

+

regproc

+

PG_PROC.oid

+

Function to parse and validate reloptions for an index

+
+
+
+
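For example, the installed access methods and their main capabilities can be listed with a simple query (a minimal sketch based on the columns above):
-- Which access methods exist and what they support
SELECT amname, amcanorder, amcanunique, amcanmulticol, amsearchnulls
FROM pg_am
ORDER BY amname;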
+ +
+ diff --git a/docs/dws/dev/dws_04_0570.html b/docs/dws/dev/dws_04_0570.html new file mode 100644 index 00000000..76758ff0 --- /dev/null +++ b/docs/dws/dev/dws_04_0570.html @@ -0,0 +1,108 @@ + + +

PG_AMOP

+

PG_AMOP records information about operators associated with access method operator families. There is one row for each operator that is a member of an operator family. A family member can be either a search operator or an ordering operator. An operator can appear in more than one family, but cannot appear in more than one search position nor more than one ordering position within a family.

+ +
Table 1 PG_AMOP columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

amopfamily

+

oid

+

PG_OPFAMILY.oid

+

Operator family this entry is for

+

amoplefttype

+

oid

+

PG_TYPE.oid

+

Left-hand input data type of operator

+

amoprighttype

+

oid

+

PG_TYPE.oid

+

Right-hand input data type of operator

+

amopstrategy

+

smallint

+

-

+

Operator strategy number

+

amoppurpose

+

"char"

+

-

+

Operator purpose, either s for search or o for ordering

+

amopopr

+

oid

+

PG_OPERATOR.oid

+

OID of the operator

+

amopmethod

+

oid

+

PG_AM.oid

+

Index access method the operator family is for

+

amopsortfamily

+

oid

+

PG_OPFAMILY.oid

+

The btree operator family this entry sorts according to, if an ordering operator; zero if a search operator

+
+
+

A "search" operator entry indicates that an index of this operator family can be searched to find all rows satisfying WHERE indexed_column operator constant. Obviously, such an operator must return a Boolean value, and its left-hand input type must match the index's column data type.

+

An "ordering" operator entry indicates that an index of this operator family can be scanned to return rows in the order represented by ORDER BY indexed_column operator constant. Such an operator could return any sortable data type, though again its left-hand input type must match the index's column data type. The exact semantics of the ORDER BY are specified by the amopsortfamily column, which must reference a btree operator family for the operator's result type.

+
+
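For example, to list the search operators that a btree index on an integer column can use, join PG_AMOP with PG_AM and PG_OPERATOR (a minimal sketch):
-- Search operators (amoppurpose = 's') usable by btree on integer
SELECT am.amname, op.oprname, amop.amopstrategy
FROM pg_amop amop
JOIN pg_am am ON am.oid = amop.amopmethod
JOIN pg_operator op ON op.oid = amop.amopopr
WHERE am.amname = 'btree'
  AND amop.amoplefttype = 'integer'::regtype
  AND amop.amoppurpose = 's';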
+ +
+ diff --git a/docs/dws/dev/dws_04_0571.html b/docs/dws/dev/dws_04_0571.html new file mode 100644 index 00000000..e190f158 --- /dev/null +++ b/docs/dws/dev/dws_04_0571.html @@ -0,0 +1,80 @@ + + +

PG_AMPROC

+

PG_AMPROC records information about the support procedures associated with the access method operator families. There is one row for each support procedure belonging to an operator family.

+ +
Table 1 PG_AMPROC columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

amprocfamily

+

oid

+

PG_OPFAMILY.oid

+

Operator family this entry is for

+

amproclefttype

+

oid

+

PG_TYPE.oid

+

Left-hand input data type of associated operator

+

amprocrighttype

+

oid

+

PG_TYPE.oid

+

Right-hand input data type of associated operator

+

amprocnum

+

smallint

+

-

+

Support procedure number

+

amproc

+

regproc

+

PG_PROC.oid

+

OID of the procedure

+
+
+

The usual interpretation of the amproclefttype and amprocrighttype columns is that they identify the left and right input types of the operator(s) that a particular support procedure supports. For some access methods these match the input data type(s) of the support procedure itself, for others not. There is a notion of "default" support procedures for an index, which are those with amproclefttype and amprocrighttype both equal to the index opclass's opcintype.

+
+
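For example, the support procedures registered for operator families on integers can be listed as follows (a minimal sketch):
-- Support procedures whose left input type is integer
SELECT fam.opfname, amp.amprocnum, amp.amproc
FROM pg_amproc amp
JOIN pg_opfamily fam ON fam.oid = amp.amprocfamily
WHERE amp.amproclefttype = 'integer'::regtype;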
+ +
+ diff --git a/docs/dws/dev/dws_04_0572.html b/docs/dws/dev/dws_04_0572.html new file mode 100644 index 00000000..852975a3 --- /dev/null +++ b/docs/dws/dev/dws_04_0572.html @@ -0,0 +1,51 @@ + + +

PG_ATTRDEF

+

PG_ATTRDEF stores default values of columns.

+ +
Table 1 PG_ATTRDEF columns

Name

+

Type

+

Description

+

adrelid

+

oid

+

Table to which the column belongs

+

adnum

+

smallint

+

Number of the column

+

adbin

+

pg_node_tree

+

Internal representation of the default value of the column

+

adsrc

+

text

+

Internal representation of the readable default value

+
+
+
+
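Because adbin stores an internal representation, it is usually decoded with pg_get_expr() when queried. The following is a minimal sketch that lists column defaults in readable form:
-- Readable default expressions for all columns that have one
SELECT c.relname, a.attname, pg_get_expr(d.adbin, d.adrelid) AS default_value
FROM pg_attrdef d
JOIN pg_class c ON c.oid = d.adrelid
JOIN pg_attribute a ON a.attrelid = d.adrelid AND a.attnum = d.adnum;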
+ +
+ diff --git a/docs/dws/dev/dws_04_0573.html b/docs/dws/dev/dws_04_0573.html new file mode 100644 index 00000000..72431b7a --- /dev/null +++ b/docs/dws/dev/dws_04_0573.html @@ -0,0 +1,187 @@ + + +

PG_ATTRIBUTE

+

PG_ATTRIBUTE records information about table columns.

+ +
Table 1 PG_ATTRIBUTE columns

Name

+

Type

+

Description

+

attrelid

+

oid

+

Table to which the column belongs

+

attname

+

name

+

Column name

+

atttypid

+

oid

+

Column type

+

attstattarget

+

integer

+

Controls the level of detail of statistics collected for this column by ANALYZE.

+
  • A zero value indicates that no statistics should be collected.
  • A negative value says to use the system default statistics target.
  • The exact meaning of positive values is data type-dependent.
+

For scalar data types, attstattarget is both the target number of "most common values" to collect, and the target number of histogram bins to create.

+

attlen

+

smallint

+

Copy of pg_type.typlen of the column's type

+

attnum

+

smallint

+

Number of the column.

+

attndims

+

integer

+

Number of dimensions if the column is an array; otherwise, the value is 0.

+

attcacheoff

+

integer

+

This column is always -1 on disk. When it is loaded into a row descriptor in memory, it may be updated to cache the offset of the column within the row.

+

atttypmod

+

integer

+

Type-specific data supplied at table creation time (for example, the maximum length of a varchar column). It is passed as the third parameter to type-specific input functions and length coercion functions. The value will generally be -1 for types that do not need ATTTYPMOD.

+

attbyval

+

boolean

+

Copy of pg_type.typbyval of the column's type

+

attstorage

+

"char"

+

Copy of pg_type.typstorage of this column's type

+

attalign

+

"char"

+

Copy of pg_type.typalign of the column's type

+

attnotnull

+

boolean

+

A not-null constraint. It is possible to change this column to enable or disable the constraint.

+

atthasdef

+

boolean

+

Indicates that this column has a default value, in which case there will be a corresponding entry in the pg_attrdef table that actually defines the value.

+

attisdropped

+

boolean

+

Whether the column has been dropped and is no longer valid. A dropped column is still physically present in the table but is ignored by the analyzer, so it cannot be accessed through SQL.

+

attislocal

+

boolean

+

Whether the column is defined locally in the relation. Note that a column can be locally defined and inherited simultaneously.

+

attcmprmode

+

tinyint

+

Compression mode of a specific column. The available compression modes are as follows:

+
  • ATT_CMPR_NOCOMPRESS
  • ATT_CMPR_DELTA
  • ATT_CMPR_DICTIONARY
  • ATT_CMPR_PREFIX
  • ATT_CMPR_NUMSTR
+

attinhcount

+

integer

+

Number of direct ancestors this column has. A column with an ancestor cannot be dropped nor renamed.

+

attcollation

+

oid

+

Defined collation of a column

+

attacl

+

aclitem[]

+

Permissions for column-level access

+

attoptions

+

text[]

+

Property-level options

+

attfdwoptions

+

text[]

+

Property-level external data options

+

attinitdefval

+

bytea

+

attinitdefval stores the default value expression. ADD COLUMN in a row-store table must use this column.

+
+
+
+
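For example, to list the user columns of a table together with their data types (a minimal sketch; replace table_name with an actual table):
-- User columns only: skip system columns (attnum <= 0) and dropped columns
SELECT attname, atttypid::regtype AS data_type, attnotnull
FROM pg_attribute
WHERE attrelid = 'table_name'::regclass
  AND attnum > 0
  AND NOT attisdropped
ORDER BY attnum;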
+ +
+ diff --git a/docs/dws/dev/dws_04_0574.html b/docs/dws/dev/dws_04_0574.html new file mode 100644 index 00000000..05a1719d --- /dev/null +++ b/docs/dws/dev/dws_04_0574.html @@ -0,0 +1,213 @@ + + +

PG_AUTHID

+

PG_AUTHID records information about the database authentication identifiers (roles). The concept of users is contained in that of roles. A user is actually a role whose rolcanlogin flag is set. Any role, whether or not rolcanlogin is set, can have other roles as members.

+

Each cluster has only one pg_authid, which is shared by all databases rather than belonging to any single database. It is accessible only to users with system administrator rights.

+ +
Table 1 PG_AUTHID columns

Name

+

Type

+

Description

+

oid

+

oid

+

Row identifier (hidden attribute; must be explicitly selected)

+

rolname

+

name

+

Role name

+

rolsuper

+

boolean

+

Whether the role is the initial system administrator with the highest permission

+

rolinherit

+

boolean

+

Whether the role automatically inherits permissions of roles it is a member of

+

rolcreaterole

+

boolean

+

Whether the role can create more roles

+

rolcreatedb

+

boolean

+

Whether the role can create databases

+

rolcatupdate

+

boolean

+

Whether the role can directly update system catalogs. Only the initial system administrator whose usesysid is 10 has this permission. It is not available for other users.

+

rolcanlogin

+

boolean

+

Whether a role can log in, that is, whether a role can be given as the initial session authorization identifier.

+

rolreplication

+

boolean

+

Whether the role is a replication role (retained only for syntax compatibility; it has no actual effect).

+

rolauditadmin

+

boolean

+

Indicates that the role is an audit user.

+

rolsystemadmin

+

boolean

+

Indicates that the role is an administrator.

+

rolconnlimit

+

integer

+

For roles that can log in, this sets the maximum number of concurrent connections this role can make. -1 indicates no limit.

+

rolpassword

+

text

+

Password (possibly encrypted); NULL if no password.

+

rolvalidbegin

+

timestamp with time zone

+

Account validity start time; NULL if no start time

+

rolvaliduntil

+

timestamp with time zone

+

Password expiry time; NULL if no expiration

+

rolrespool

+

name

+

Resource pool that a user can use

+

roluseft

+

boolean

+

Whether the role can perform operations on foreign tables

+

rolparentid

+

oid

+

OID of a group user to which the user belongs

+

roltabspace

+

text

+

Storage space of the user permanent table

+

rolkind

+

char

+

Special type of user, including private users, logical cluster administrators, and common users.

+

rolnodegroup

+

oid

+

OID of a node group associated with a user. The node group must be a logical cluster.

+

roltempspace

+

text

+

Storage space of the user temporary table

+

rolspillspace

+

text

+

Operator disk spill space of the user

+

rolexcpdata

+

text

+

Reserved column

+

rolauthinfo

+

text

+

Additional information when LDAP authentication is used. If other authentication modes are used, the value is NULL.

+

rolpwdexpire

+

integer

+

Password expiration time. Users can change their password before it expires. After the password expires, only the administrator can change the password. The value -1 indicates that the password never expires.

+

rolpwdtime

+

timestamp with time zone

+

Time when a password is created

+
+
+
+
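For example, the roles that can log in, together with their connection limits and validity, can be listed as follows (a minimal sketch; querying pg_authid itself requires system administrator rights):
-- Login-capable roles and their connection limits
SELECT rolname, rolconnlimit, rolvaliduntil
FROM pg_authid
WHERE rolcanlogin = true;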
+ +
+ diff --git a/docs/dws/dev/dws_04_0575.html b/docs/dws/dev/dws_04_0575.html new file mode 100644 index 00000000..08e2555c --- /dev/null +++ b/docs/dws/dev/dws_04_0575.html @@ -0,0 +1,44 @@ + + +

PG_AUTH_HISTORY

+

PG_AUTH_HISTORY records the authentication history of the role. It is accessible only to users with system administrator rights.

+ +
Table 1 PG_AUTH_HISTORY columns

Name

+

Type

+

Description

+

roloid

+

oid

+

ID of the role

+

passwordtime

+

timestamp with time zone

+

Time of password creation and change

+

rolpassword

+

text

+

Role password that is encrypted using MD5 or SHA256, or that is not encrypted

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0576.html b/docs/dws/dev/dws_04_0576.html new file mode 100644 index 00000000..f2311002 --- /dev/null +++ b/docs/dws/dev/dws_04_0576.html @@ -0,0 +1,51 @@ + + +

PG_AUTH_MEMBERS

+

PG_AUTH_MEMBERS records the membership relations between roles.

+ +
Table 1 PG_AUTH_MEMBERS columns

Name

+

Type

+

Description

+

roleid

+

oid

+

ID of a role that has a member

+

member

+

oid

+

ID of a role that is a member of ROLEID

+

grantor

+

oid

+

ID of a role that grants this membership

+

admin_option

+

boolean

+

Whether a member can grant membership in ROLEID to others

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0577.html b/docs/dws/dev/dws_04_0577.html new file mode 100644 index 00000000..5b3d008f --- /dev/null +++ b/docs/dws/dev/dws_04_0577.html @@ -0,0 +1,60 @@ + + +

PG_CAST

+

PG_CAST records conversion relationships between data types.

+ +
Table 1 PG_CAST columns

Name

+

Type

+

Description

+

castsource

+

oid

+

OID of the source data type

+

casttarget

+

oid

+

OID of the target data type

+

castfunc

+

oid

+

OID of the conversion function. If the value is 0, no conversion function is required.

+

castcontext

+

"char"

+

Conversion mode between the source and target data types

+
  • e indicates that only explicit conversion can be performed (using the CAST or :: syntax).
  • i indicates that only implicit conversion can be performed.
  • a indicates that both explicit and implicit conversion can be performed between data types.
+

castmethod

+

"char"

+

Conversion method

+
  • f indicates that conversion is performed using the specified function in the castfunc column.
  • b indicates that binary forcible conversion rather than the specified function in the castfunc column is performed between data types.
+
+
+
+
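For example, to see which casts are defined from the integer type and how they are invoked (a minimal sketch):
-- Casts whose source type is integer
SELECT castsource::regtype AS source, casttarget::regtype AS target, castcontext, castmethod
FROM pg_cast
WHERE castsource = 'integer'::regtype;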
+ +
+ diff --git a/docs/dws/dev/dws_04_0578.html b/docs/dws/dev/dws_04_0578.html new file mode 100644 index 00000000..e69716a5 --- /dev/null +++ b/docs/dws/dev/dws_04_0578.html @@ -0,0 +1,401 @@ + + +

PG_CLASS

+

PG_CLASS records database objects and their relations.

+ +
Table 1 PG_CLASS columns

Name

+

Type

+

Description

+

oid

+

oid

+

Row identifier (hidden attribute; must be explicitly selected)

+

relname

+

name

+

Name of an object, such as a table, index, or view

+

relnamespace

+

oid

+

OID of the namespace that contains the relationship

+

reltype

+

oid

+

Data type that corresponds to this table's row type (the index is 0 because the index does not have pg_type record)

+

reloftype

+

oid

+

For typed tables, OID of the underlying composite type; 0 for all other relations.

+

relowner

+

oid

+

Owner of the relationship

+

relam

+

oid

+

Specifies the access method used, such as B-tree and hash, if this is an index

+

relfilenode

+

oid

+

Name of the on-disk file of this relationship. If such file does not exist, the value is 0.

+

reltablespace

+

oid

+

Tablespace in which this relationship is stored. If its value is 0, the default tablespace in this database is used. This column is meaningless if the relationship has no on-disk file.

+

relpages

+

double precision

+

Size of the on-disk representation of this table in pages (of size BLCKSZ). This is only an estimate used by the optimizer.

+

reltuples

+

double precision

+

Number of rows in the table. This is only an estimate used by the optimizer.

+

relallvisible

+

integer

+

Number of pages marked as all visible in the table. This column is used by the optimizer for optimizing SQL execution. It is updated by VACUUM, ANALYZE, and a few DDL statements such as CREATE INDEX.

+

reltoastrelid

+

oid

+

OID of the TOAST table associated with this table. The OID is 0 if no TOAST table exists.

+

The TOAST table stores large columns "offline" in a secondary table.

+

reltoastidxid

+

oid

+

OID of the index for a TOAST table. The OID is 0 for a table other than a TOAST table.

+

reldeltarelid

+

oid

+

OID of a Delta table

+

Delta tables belong to column-store tables. They store long tail data generated during data import.

+

reldeltaidx

+

oid

+

OID of the index for a Delta table

+

relcudescrelid

+

oid

+

OID of a CU description table

+

CU description tables (Desc tables) belong to column-store tables. They control whether storage data in the HDFS table directory is visible.

+

relcudescidx

+

oid

+

OID of the index for a CU description table

+

relhasindex

+

boolean

+

Its value is true if this column is a table and has (or recently had) at least one index.

+

It is set by CREATE INDEX but is not immediately cleared by DROP INDEX. If the VACUUM process detects that a table has no index, it clears the relhasindex column and sets the value to false.

+

relisshared

+

boolean

+

Its value is true if the table is shared across all databases in the cluster. Only certain system catalogs (such as pg_database) are shared.

+

relpersistence

+

"char"

+
  • p indicates a permanent table.
  • u indicates a non-log table.
  • t indicates a temporary table.
+

relkind

+

"char"

+
  • r indicates an ordinary table.
  • i indicates an index.
  • S indicates a sequence.
  • v indicates a view.
  • c indicates the composite type.
  • t indicates a TOAST table.
  • f indicates a foreign table.
+

relnatts

+

smallint

+

Number of user columns in the relation (system columns are not counted). pg_attribute has a corresponding row for each user column.

+

relchecks

+

smallint

+

Number of constraints on a table. For details, see PG_CONSTRAINT.

+

relhasoids

+

boolean

+

Its value is true if an OID is generated for each row of the relationship.

+

relhaspkey

+

boolean

+

Its value is true if the table has (or once had) a primary key.

+

relhasrules

+

boolean

+

Its value is true if the table has rules. See table PG_REWRITE to check whether it has rules.

+

relhastriggers

+

boolean

+

Its value is true if the table has (or once had) triggers. See PG_TRIGGER.

+

relhassubclass

+

boolean

+

Its value is true if the table has (or once had) any inheritance child table.

+

relcmprs

+

tinyint

+

Whether the compression feature is enabled for the table. Note that only batch insertion triggers compression; ordinary CRUD operations do not.

+
  • 0 indicates other tables that do not support compression (primarily system tables, on which the compression attribute cannot be modified).
  • 1 indicates that the compression feature of the table data is NOCOMPRESS or has no specified keyword.
  • 2 indicates that the compression feature of the table data is COMPRESS.
+

relhasclusterkey

+

boolean

+

Whether the local cluster storage is used

+

relrowmovement

+

boolean

+

Whether the row migration is allowed when the partitioned table is updated

+
  • true indicates that the row migration is allowed.
  • false indicates that the row migration is not allowed.
+

parttype

+

"char"

+

Whether the table or index has the property of a partitioned table

+
  • p indicates that the table or index has the property of a partitioned table.
  • n indicates that the table or index does not have the property of a partitioned table.
  • v indicates that the table is the value partitioned table in the HDFS.
+

relfrozenxid

+

xid32

+

All transaction IDs before this one have been replaced with a permanent ("frozen") transaction ID in this table. This column is used to track whether the table needs to be vacuumed in order to prevent transaction ID wraparound (or to allow pg_clog to be shrunk). The value is 0 (InvalidTransactionId) if the relationship is not a table.

+

To ensure forward compatibility, this column is reserved. The relfrozenxid64 column is added to record the information.

+

relacl

+

aclitem[]

+

Access permissions

+

The command output of the query is as follows:

+
rolename=xxxx/yyyy  --Assigning privileges to a role
+
+ +
+
=xxxx/yyyy --Assigning the permission to public
+
+ +
+

xxxx indicates the granted privileges, and yyyy indicates the role that granted them. For details about permission descriptions, see Table 2.

+

reloptions

+

text[]

+

Access-method-specific options, as "keyword=value" strings

+

relfrozenxid64

+

xid

+

All transaction IDs before this one have been replaced with a permanent ("frozen") transaction ID in this table. This column is used to track whether the table needs to be vacuumed in order to prevent transaction ID wraparound (or to allow pg_clog to be shrunk). The value is 0 (InvalidTransactionId) if the relationship is not a table.

+
+
+ +
Table 2 Description of privileges

Parameter

+

Description

+

r

+

SELECT (read)

+

w

+

UPDATE (write)

+

a

+

INSERT (insert)

+

d

+

DELETE

+

D

+

TRUNCATE

+

x

+

REFERENCES

+

t

+

TRIGGER

+

X

+

EXECUTE

+

U

+

USAGE

+

C

+

CREATE

+

c

+

CONNECT

+

T

+

TEMPORARY

+

A

+

ANALYZE|ANALYSE

+

arwdDxtA

+

ALL PRIVILEGES (used for tables)

+

*

+

Authorization options for preceding permissions

+
+
+

Examples

View the OID and relfilenode of a table.

+
select oid,relname,relfilenode from pg_class where relname = 'table_name';
+
+ +
+

Count row-store tables.

+
select 'row count:'||count(1) as point from pg_class where relkind = 'r' and oid > 16384 and reloptions::text not like '%column%' and reloptions::text not like '%internal_mask%';
+
+ +
+

Count column-store tables.

+
select 'column count:'||count(1) as point from pg_class where relkind = 'r' and oid > 16384 and reloptions::text like '%column%';
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0579.html b/docs/dws/dev/dws_04_0579.html new file mode 100644 index 00000000..98a98e74 --- /dev/null +++ b/docs/dws/dev/dws_04_0579.html @@ -0,0 +1,88 @@ + + +

PG_COLLATION

+

PG_COLLATION records the available collations, which are essentially mappings from an SQL name to operating system locale categories.

+ +
Table 1 PG_COLLATION columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

collname

+

name

+

-

+

Collation name (unique per namespace and encoding)

+

collnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace that contains this collation

+

collowner

+

oid

+

PG_AUTHID.oid

+

Owner of the collation

+

collencoding

+

integer

+

-

+

Encoding in which the collation is applicable, or -1 if it works for any encoding

+

collcollate

+

name

+

-

+

LC_COLLATE for this collation object

+

collctype

+

name

+

-

+

LC_CTYPE for this collation object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0580.html b/docs/dws/dev/dws_04_0580.html new file mode 100644 index 00000000..b837ab87 --- /dev/null +++ b/docs/dws/dev/dws_04_0580.html @@ -0,0 +1,210 @@ + + +

PG_CONSTRAINT

+

PG_CONSTRAINT records check, primary key, unique, and foreign key constraints on the tables.

+ +
Table 1 PG_CONSTRAINT columns

Name

+

Type

+

Description

+

conname

+

name

+

Constraint name (not necessarily unique)

+

connamespace

+

oid

+

OID of the namespace that contains the constraint

+

contype

+

"char"

+
  • c indicates check constraints.
  • f indicates foreign key constraints.
  • p indicates primary key constraints.
  • u indicates unique constraints.
  • t indicates trigger constraints.
+

condeferrable

+

boolean

+

Whether the constraint is deferrable

+

condeferred

+

boolean

+

Whether the constraint is deferred by default

+

convalidated

+

boolean

+

Whether the constraint is valid. Currently, only foreign key and check constraints can have this set to false.

+

conrelid

+

oid

+

Table containing this constraint. The value is 0 if it is not a table constraint.

+

contypid

+

oid

+

Domain containing this constraint. The value is 0 if it is not a domain constraint.

+

conindid

+

oid

+

ID of the index associated with the constraint

+

confrelid

+

oid

+

Referenced table if this constraint is a foreign key; otherwise, the value is 0.

+

confupdtype

+

"char"

+

Foreign key update action code

+
  • a indicates no action.
  • r indicates restriction.
  • c indicates cascading.
  • n indicates that the parameter is set to null.
  • d indicates that the default value is used.
+

confdeltype

+

"char"

+

Foreign key deletion action code

+
  • a indicates no action.
  • r indicates restriction.
  • c indicates cascading.
  • n indicates that the parameter is set to null.
  • d indicates that the default value is used.
+

confmatchtype

+

"char"

+

Foreign key match type

+
  • f indicates full match.
  • p indicates partial match.
  • u indicates simple match (not specified).
+

conislocal

+

boolean

+

Whether the local constraint is defined for the relationship

+

coninhcount

+

integer

+

Number of direct inheritance parent tables this constraint has. When the number is not 0, the constraint cannot be deleted or renamed.

+

connoinherit

+

boolean

+

Whether the constraint can be inherited

+

consoft

+

boolean

+

Whether the constraint is an informational constraint.

+

conopt

+

boolean

+

Whether the informational constraint can be used to optimize the execution plan.

+

conkey

+

smallint[]

+

List of the constrained columns if this is a table constraint

+

confkey

+

smallint[]

+

List of referenced columns if this column is a foreign key

+

conpfeqop

+

oid[]

+

ID list of the equality operators for PK = FK comparisons if this column is a foreign key

+

conppeqop

+

oid[]

+

ID list of the equality operators for PK = PK comparisons if this column is a foreign key

+

conffeqop

+

oid[]

+

ID list of the equality operators for FK = FK comparisons if this column is a foreign key

+

conexclop

+

oid[]

+

ID list of the per-column exclusion operators if this column is an exclusion constraint

+

conbin

+

pg_node_tree

+

Internal representation of the expression if this column is a check constraint

+

consrc

+

text

+

Human-readable representation of the expression if this column is a check constraint

+
+
+
  • consrc is not updated when referenced objects change; for example, it will not track renaming of columns. Rather than relying on this field, it's best to use pg_get_constraintdef() to extract the definition of a check constraint (see the sketch after this list).
  • pg_class.relchecks must be consistent with the number of check-constraint entries in this table for each relationship.
+
+
+
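The following is a minimal sketch of that recommendation, listing a table's constraints in readable form (replace table_name with an actual table):
-- Decode constraint definitions instead of reading conbin/consrc directly
SELECT conname, contype, pg_get_constraintdef(oid) AS definition
FROM pg_constraint
WHERE conrelid = 'table_name'::regclass;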
+ +
+ diff --git a/docs/dws/dev/dws_04_0581.html b/docs/dws/dev/dws_04_0581.html new file mode 100644 index 00000000..63ea6530 --- /dev/null +++ b/docs/dws/dev/dws_04_0581.html @@ -0,0 +1,97 @@ + + +

PG_CONVERSION

+

PG_CONVERSION records encoding conversion information.

+ +
Table 1 PG_CONVERSION columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

conname

+

name

+

-

+

Conversion name (unique in a namespace)

+

connamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace that contains this conversion

+

conowner

+

oid

+

PG_AUTHID.oid

+

Owner of the conversion

+

conforencoding

+

integer

+

-

+

Source encoding ID

+

contoencoding

+

integer

+

-

+

Destination encoding ID

+

conproc

+

regproc

+

PG_PROC.oid

+

Conversion procedure

+

condefault

+

boolean

+

-

+

Its value is true if this is the default conversion.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0582.html b/docs/dws/dev/dws_04_0582.html new file mode 100644 index 00000000..5ee6b3c5 --- /dev/null +++ b/docs/dws/dev/dws_04_0582.html @@ -0,0 +1,123 @@ + + +

PG_DATABASE

+

PG_DATABASE records information about the available databases.

+ +
Table 1 PG_DATABASE columns

Name

+

Type

+

Description

+

datname

+

name

+

Database name

+

datdba

+

oid

+

Owner of the database, usually the user who created it

+

encoding

+

integer

+

Database encoding.

+

You can use pg_encoding_to_char() to convert this number to the encoding name.

+

datcollate

+

name

+

Collation order (LC_COLLATE) used by the database

+

datctype

+

name

+

Character classification (LC_CTYPE) used by the database

+

datistemplate

+

boolean

+

Whether this column can serve as a template database

+

datallowconn

+

boolean

+

If false then no one can connect to this database. This column is used to protect the template0 database from being altered.

+

datconnlimit

+

integer

+

Maximum number of concurrent connections allowed on this database. -1 indicates no limit.

+

datlastsysoid

+

oid

+

Last system OID in the database

+

datfrozenxid

+

xid32

+

Tracks whether the database needs to be vacuumed in order to prevent transaction ID wraparound.

+

To ensure forward compatibility, this column is reserved. The datfrozenxid64 column is added to record the information.

+

dattablespace

+

oid

+

Default tablespace of the database

+

datcompatibility

+

name

+

Database compatibility mode

+

datacl

+

aclitem[]

+

Access permissions

+

datfrozenxid64

+

xid

+

Tracks whether the database needs to be vacuumed in order to prevent transaction ID wraparound.

+
+
+
+
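For example, the numeric encoding column can be decoded with pg_encoding_to_char(), as mentioned above (a minimal sketch):
-- Databases with their encoding names and compatibility modes
SELECT datname, pg_encoding_to_char(encoding) AS encoding, datcompatibility
FROM pg_database;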
+ +
+ diff --git a/docs/dws/dev/dws_04_0583.html b/docs/dws/dev/dws_04_0583.html new file mode 100644 index 00000000..d4672892 --- /dev/null +++ b/docs/dws/dev/dws_04_0583.html @@ -0,0 +1,44 @@ + + +

PG_DB_ROLE_SETTING

+

PG_DB_ROLE_SETTING records the default values of configuration items bound to each role and database when the database is running.

+ +
Table 1 PG_DB_ROLE_SETTING columns

Name

+

Type

+

Description

+

setdatabase

+

oid

+

Database corresponding to the configuration items; the value is 0 if the database is not specified

+

setrole

+

oid

+

Role corresponding to the configuration items; the value is 0 if the role is not specified

+

setconfig

+

text[]

+

Default value of configuration items when the database is running

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0584.html b/docs/dws/dev/dws_04_0584.html new file mode 100644 index 00000000..e093f0b0 --- /dev/null +++ b/docs/dws/dev/dws_04_0584.html @@ -0,0 +1,80 @@ + + +

PG_DEFAULT_ACL

+

PG_DEFAULT_ACL records the initial privileges assigned to the newly created objects.

+ +
Table 1 PG_DEFAULT_ACL columns

Name

+

Type

+

Description

+

defaclrole

+

oid

+

ID of the role associated with the permission

+

defaclnamespace

+

oid

+

Namespace associated with the permission; the value is 0 if no ID

+

defaclobjtype

+

"char"

+

Object type of the permission:

+
  • r indicates a table or view.
  • S indicates a sequence.
  • f indicates a function.
  • T indicates a type.
+

defaclacl

+

aclitem[]

+

Access permissions that this type of object should have on creation

+
+
+

Example

Run the following command to view the initial permissions of the new user role1:

+
select * from PG_DEFAULT_ACL;
+ defaclrole | defaclnamespace | defaclobjtype |    defaclacl
+------------+-----------------+---------------+-----------------
+      16820 |           16822 | r             | {role1=r/user1}
+
+ +
+

You can also run the following statement to convert the format:

+
SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS "Granter",  n.nspname AS "Schema",  CASE d.defaclobjtype WHEN 'r' THEN 'table' WHEN 'S' THEN 'sequence' WHEN 'f' THEN 'function' WHEN 'T' THEN 'type' END AS "Type",  pg_catalog.array_to_string(d.defaclacl, E', ') AS "Access privileges" FROM pg_catalog.pg_default_acl d LEFT JOIN pg_catalog.pg_namespace n ON n.oid = d.defaclnamespace ORDER BY 1, 2, 3;
+
+ +
+

The following output indicates that user1 has granted role1 the read permission on tables in schema user1.

+
 Granter | Schema | Type  | Access privileges
+---------+--------+-------+-------------------
+ user1   | user1  | table | role1=r/user1
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0585.html b/docs/dws/dev/dws_04_0585.html new file mode 100644 index 00000000..1211646c --- /dev/null +++ b/docs/dws/dev/dws_04_0585.html @@ -0,0 +1,132 @@ + + +

PG_DEPEND

+

PG_DEPEND records the dependency relationships between database objects. This information allows DROP commands to find which other objects must be dropped by DROP CASCADE or prevent dropping in the DROP RESTRICT case.

+

See also PG_SHDEPEND, which provides a similar function for dependencies involving objects that are shared across a database cluster.

+ +
Table 1 PG_DEPEND columns

Name

+

Type

+

Reference

+

Description

+

classid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog the dependent object is in

+

objid

+

oid

+

Any OID column

+

OID of the specific dependent object

+

objsubid

+

integer

+

-

+

For a table column, this is the column number (the objid and classid refer to the table itself). For all other object types, this column is 0.

+

refclassid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog the referenced object is in

+

refobjid

+

oid

+

Any OID column

+

OID of the specific referenced object

+

refobjsubid

+

integer

+

-

+

For a table column, this is the column number (the refobjid and refclassid refer to the table itself). For all other object types, this column is 0.

+

deptype

+

"char"

+

-

+

A code defining the specific semantics of this dependency relationship

+
+
+

In all cases, a pg_depend entry indicates that the referenced object cannot be dropped without also dropping the dependent object. However, there are several subflavors defined by deptype:
  • n (normal): a normal dependency between separately created objects. The dependent object can be dropped without affecting the referenced object, whereas the referenced object can be dropped only by also dropping the dependent object (or by using CASCADE).
  • a (auto): the dependent object can be dropped separately, and it is automatically dropped when the referenced object is dropped.
  • i (internal): the dependent object was created as part of the creation of the referenced object and is really only part of its internal implementation.
  • p (pin): there is no dependent object; such an entry signals that the system itself depends on the referenced object.

+ +

Examples

Query the table that depends on the database object sequence serial1.

+
  1. Query the OID of the sequence serial1 in the system catalog PG_CLASS.
    SELECT oid FROM pg_class WHERE relname ='serial1';
    +  oid
    +-------
    + 17815
    +(1 row)
    +
    + +
    +
  2. Use the system catalog PG_DEPEND and the OID of serial1 to obtain the objects that depend on serial1.
    SELECT * FROM pg_depend WHERE objid ='17815';
    + classid | objid | objsubid | refclassid | refobjid | refobjsubid | deptype
    +---------+-------+----------+------------+----------+-------------+---------
    +    1259 | 17815 |        0 |       2615 |     2200 |           0 | n
    +    1259 | 17815 |        0 |       1259 |    17812 |           1 | a
    +(2 rows)
    +
    + +
    +
  3. Obtain the OID of the table that depends on the serial1 sequence based on the refobjid field and query the table name. The result indicates that the table customer_address depends on serial1.
    SELECT relname FROM pg_class where oid='17812';
    +     relname
    +------------------
    + customer_address
    +(1 row)
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0586.html b/docs/dws/dev/dws_04_0586.html new file mode 100644 index 00000000..84ad2a86 --- /dev/null +++ b/docs/dws/dev/dws_04_0586.html @@ -0,0 +1,62 @@ + + +

PG_DESCRIPTION

+

PG_DESCRIPTION records optional descriptions (comments) for each database object. Descriptions of many built-in system objects are provided in the initial contents of PG_DESCRIPTION.

+

See also PG_SHDESCRIPTION, which performs a similar function for descriptions involving objects that are shared across a database cluster.

+ +
Table 1 PG_DESCRIPTION columns

Name

+

Type

+

Reference

+

Description

+

objoid

+

oid

+

Any OID column

+

OID of the object this description pertains to

+

classoid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog this object appears in

+

objsubid

+

integer

+

-

+

For a comment on a table column, this is the column number (the objoid and classoid refer to the table itself). For all other object types, this column is 0.

+

description

+

text

+

-

+

Arbitrary text that serves as the description of this object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0588.html b/docs/dws/dev/dws_04_0588.html new file mode 100644 index 00000000..c05487e9 --- /dev/null +++ b/docs/dws/dev/dws_04_0588.html @@ -0,0 +1,63 @@ + + +

PG_ENUM

+

PG_ENUM records entries showing the values and labels for each enum type. The internal representation of a given enum value is actually the OID of its associated row in pg_enum.

+ +
Table 1 PG_ENUM columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

enumtypid

+

oid

+

PG_TYPE.oid

+

OID of the pg_type entry that contains this enum value

+

enumsortorder

+

real

+

-

+

Sort position of this enum value within its enum type

+

enumlabel

+

name

+

-

+

Textual label for this enum value

+
+
+

The OIDs for PG_ENUM rows follow a special rule: even-numbered OIDs are guaranteed to be ordered in the same way as the sort ordering of their enum type. That is, if two even OIDs belong to the same enum type, the smaller OID must have the smaller enumsortorder value. Odd-numbered OID values need bear no relationship to the sort order. This rule allows the enum comparison routines to avoid catalog lookups in many common cases. The routines that create and alter enum types attempt to assign even OIDs to enum values whenever possible.

+

When an enum type is created, its members are assigned sort-order positions from 1 to n. But members added later might be given negative or fractional values of enumsortorder. The only requirement on these values is that they be correctly ordered and unique within each enum type.

+
+
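For example, after creating an enum type you can inspect its labels and sort positions in PG_ENUM. This is a minimal sketch; traffic_light is a hypothetical type name, and it assumes your cluster version allows creating enum types.
CREATE TYPE traffic_light AS ENUM ('red', 'yellow', 'green');
-- Labels of the new type in their defined order
SELECT t.typname, e.enumsortorder, e.enumlabel
FROM pg_enum e
JOIN pg_type t ON t.oid = e.enumtypid
WHERE t.typname = 'traffic_light'
ORDER BY e.enumsortorder;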
+ +
+ diff --git a/docs/dws/dev/dws_04_0589.html b/docs/dws/dev/dws_04_0589.html new file mode 100644 index 00000000..c9cd1b0b --- /dev/null +++ b/docs/dws/dev/dws_04_0589.html @@ -0,0 +1,72 @@ + + +

PG_EXTENSION

+

PG_EXTENSION records information about the installed extensions. By default, GaussDB(DWS) has 12 extensions, that is, PLPGSQL, DIST_FDW, FILE_FDW, HDFS_FDW, HSTORE, PLDBGAPI, DIMSEARCH, PACKAGES, GC_FDW, UUID-OSSP, LOG_FDW, and ROACH_API.

+ +
Table 1 PG_EXTENSION columns

Name

+

Type

+

Description

+

extname

+

name

+

Extension name

+

extowner

+

oid

+

Owner of the extension

+

extnamespace

+

oid

+

Namespace containing the extension's exported objects

+

extrelocatable

+

boolean

+

Its value is true if the extension can be relocated to another schema.

+

extversion

+

text

+

Version number of the extension

+

extconfig

+

oid[]

+

Configuration information about the extension

+

extcondition

+

text[]

+

Filter conditions for the extension's configuration information

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0590.html b/docs/dws/dev/dws_04_0590.html new file mode 100644 index 00000000..85bf8582 --- /dev/null +++ b/docs/dws/dev/dws_04_0590.html @@ -0,0 +1,88 @@ + + +

PG_EXTENSION_DATA_SOURCE

+

PG_EXTENSION_DATA_SOURCE records information about external data sources. An external data source contains information about an external database, such as its password encoding. It is mainly used with Extension Connector.

+ +
Table 1 PG_EXTENSION_DATA_SOURCE columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

srcname

+

name

+

-

+

Name of an external data source

+

srcowner

+

oid

+

PG_AUTHID.oid

+

Owner of an external data source

+

srctype

+

text

+

-

+

Type of an external data source. It is NULL by default.

+

srcversion

+

text

+

-

+

Version of an external data source. It is NULL by default.

+

srcacl

+

aclitem[]

+

-

+

Access permissions

+

srcoptions

+

text[]

+

-

+

Option used for foreign data sources. It is a keyword=value string.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0591.html b/docs/dws/dev/dws_04_0591.html new file mode 100644 index 00000000..debbb25f --- /dev/null +++ b/docs/dws/dev/dws_04_0591.html @@ -0,0 +1,88 @@ + + +

PG_FOREIGN_DATA_WRAPPER

+

PG_FOREIGN_DATA_WRAPPER records foreign-data wrapper definitions. A foreign-data wrapper is the mechanism by which external data, residing on foreign servers, is accessed.

+ +
Table 1 PG_FOREIGN_DATA_WRAPPER columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

fdwname

+

name

+

-

+

Name of the foreign-data wrapper

+

fdwowner

+

oid

+

PG_AUTHID.oid

+

Owner of the foreign-data wrapper

+

fdwhandler

+

oid

+

PG_PROC.oid

+

References a handler function that is responsible for supplying execution routines for the foreign-data wrapper. Its value is 0 if no handler is provided.

+

fdwvalidator

+

oid

+

PG_PROC.oid

+

References a validator function that is responsible for checking the validity of the options given to the foreign-data wrapper, as well as options for foreign servers and user mappings using the foreign-data wrapper. Its value is 0 if no validator is provided.

+

fdwacl

+

aclitem[]

+

-

+

Access permissions

+

fdwoptions

+

text[]

+

-

+

Option used for foreign data wrappers. It is a keyword=value string.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0592.html b/docs/dws/dev/dws_04_0592.html new file mode 100644 index 00000000..0af86ffe --- /dev/null +++ b/docs/dws/dev/dws_04_0592.html @@ -0,0 +1,97 @@ + + +

PG_FOREIGN_SERVER

+

PG_FOREIGN_SERVER records the foreign server definitions. A foreign server describes a source of external data, such as a remote server. Foreign servers are accessed via foreign-data wrappers.

+ +
Table 1 PG_FOREIGN_SERVER columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

srvname

+

name

+

-

+

Name of the foreign server

+

srvowner

+

oid

+

PG_AUTHID.oid

+

Owner of the foreign server

+

srvfdw

+

oid

+

PG_FOREIGN_DATA_WRAPPER.oid

+

OID of the foreign-data wrapper of this foreign server

+

srvtype

+

text

+

-

+

Type of the server (optional)

+

srvversion

+

text

+

-

+

Version of the server (optional)

+

srvacl

+

aclitem[]

+

-

+

Access permissions

+

srvoptions

+

text[]

+

-

+

Option used for foreign servers. It is a keyword=value string.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0593.html b/docs/dws/dev/dws_04_0593.html new file mode 100644 index 00000000..88bab5d3 --- /dev/null +++ b/docs/dws/dev/dws_04_0593.html @@ -0,0 +1,51 @@ + + +

PG_FOREIGN_TABLE

+

PG_FOREIGN_TABLE records auxiliary information about foreign tables.

+ +
Table 1 PG_FOREIGN_TABLE columns

Name

+

Type

+

Description

+

ftrelid

+

oid

+

OID of the foreign table

+

ftserver

+

oid

+

OID of the server where the foreign table is located

+

ftwriteonly

+

boolean

+

Whether data can be written in the foreign table

+

ftoptions

+

text[]

+

Foreign table options

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0594.html b/docs/dws/dev/dws_04_0594.html new file mode 100644 index 00000000..5b7774fd --- /dev/null +++ b/docs/dws/dev/dws_04_0594.html @@ -0,0 +1,150 @@ + + +

PG_INDEX

+

PG_INDEX records part of the information about indexes. The rest is mostly in PG_CLASS.

+ +
Table 1 PG_INDEX columns

Name

+

Type

+

Description

+

indexrelid

+

oid

+

OID of the pg_class entry for this index

+

indrelid

+

oid

+

OID of the pg_class entry for the table this index is for

+

indnatts

+

smallint

+

Number of columns in an index

+

indisunique

+

boolean

+

This index is a unique index if the value is true.

+

indisprimary

+

boolean

+

This index represents the primary key of the table if the value is true. If this value is true, the value of indisunique is true.

+

indisexclusion

+

boolean

+

This index supports exclusion constraints if the value is true.

+

indimmediate

+

boolean

+

A uniqueness check is performed upon data insertion if the value is true.

+

indisclustered

+

boolean

+

The table was last clustered on this index if the value is true.

+

indisusable

+

boolean

+

This index supports insert/select if the value is true.

+

indisvalid

+

boolean

+

This index is valid for queries if the value is true. If this column is false, this index is possibly incomplete and must still be modified by INSERT/UPDATE operations, but it cannot safely be used for queries. If it is a unique index, the uniqueness property is also not true.

+

indcheckxmin

+

boolean

+

If the value is true, queries must not use the index until the xmin of this row in pg_index is below their TransactionXmin event horizon, because the table may contain broken HOT chains with incompatible rows that they can see.

+

indisready

+

boolean

+

If the value is true, this index is ready for inserts. If the value is false, this index is ignored when data is inserted or modified.

+

indkey

+

int2vector

+

This is an array of indnatts values that indicate which table columns this index indexes. For example, a value of 1 3 means that the first and the third columns make up the index key. 0 in this array indicates that the corresponding index attribute is an expression over the table columns, rather than a simple column reference.

+

indcollation

+

oidvector

+

+

For each column in the index key, the OID of the collation to use for the index

+

indclass

+

oidvector

+

For each column in the index key, this column contains the OID of the operator class to use. For details, see PG_OPCLASS.

+

indoption

+

int2vector

+

Array of values that store per-column flag bits. The meaning of the bits is defined by the index's access method.

+

indexprs

+

pg_node_tree

+

Expression trees (in nodeToString() representation) for index attributes that are not simple column references. It is a list with one element for each zero entry in INDKEY. NULL if all index attributes are simple references.

+

indpred

+

pg_node_tree

+

Expression tree (in nodeToString() representation) for partial index predicate. If the index is not a partial index, the value is null.

+
+
+
+
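For example, to list the indexes defined on a table along with their key columns (a minimal sketch; replace table_name with an actual table):
-- Indexes on a table; indkey holds the indexed column numbers
SELECT indexrelid::regclass AS index_name, indisunique, indisprimary, indkey
FROM pg_index
WHERE indrelid = 'table_name'::regclass;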
+ +
+ diff --git a/docs/dws/dev/dws_04_0595.html b/docs/dws/dev/dws_04_0595.html new file mode 100644 index 00000000..414bd949 --- /dev/null +++ b/docs/dws/dev/dws_04_0595.html @@ -0,0 +1,52 @@ + + +

PG_INHERITS

+

PG_INHERITS records information about table inheritance hierarchies. There is one entry for each direct child table in the database. Indirect inheritance can be determined by following chains of entries.

+ +
Table 1 PG_INHERITS columns

Name

+

Type

+

Reference

+

Description

+

inhrelid

+

oid

+

PG_CLASS.oid

+

OID of the child table

+

inhparent

+

oid

+

PG_CLASS.oid

+

OID of the parent table

+

inhseqno

+

integer

+

-

+

If there is more than one direct parent for a child table (multiple inheritances), this number tells the order in which the inherited columns are to be arranged. The count starts at 1.

+
+
+
+
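For example, direct parent-child inheritance relationships can be listed as follows (a minimal sketch):
-- Child tables with their direct parents
SELECT inhrelid::regclass AS child, inhparent::regclass AS parent, inhseqno
FROM pg_inherits
ORDER BY inhparent, inhseqno;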
+ +
+ diff --git a/docs/dws/dev/dws_04_0596.html b/docs/dws/dev/dws_04_0596.html new file mode 100644 index 00000000..56aa95fc --- /dev/null +++ b/docs/dws/dev/dws_04_0596.html @@ -0,0 +1,135 @@ + + +

PG_JOBS

+

PG_JOBS records detailed information about jobs created by users. Dedicated threads poll the pg_jobs table and trigger jobs based on scheduled job execution time. This table belongs to the Shared Relation category. All job records are visible to all databases.

+ +
Table 1 PG_JOBS columns

Name

+

Type

+

Description

+

job_id

+

integer

+

Job ID, primary key, unique (with a unique index)

+

what

+

text

+

Job content

+

log_user

+

oid

+

Username of the job creator

+

priv_user

+

oid

+

User ID of the job executor

+

job_db

+

oid

+

OID of the database where the job is executed

+

job_nsp

+

oid

+

OID of the namespace where a job is running

+

job_node

+

oid

+

CN node on which the job will be created and executed

+

is_broken

+

boolean

+

Whether the job is invalid. If a job fails to execute 16 consecutive times, is_broken is automatically set to true and the job is no longer executed.

+

start_date

+

timestamp without time zone

+

Start time of the first job execution, accurate to millisecond

+

next_run_date

+

timestamp without time zone

+

Scheduled time of the next job execution, accurate to millisecond

+

failure_count

+

smallint

+

Number of times the job has started and failed. If a job fails to be executed for 16 consecutive times, no more attempt will be made on it.

+

interval

+

text

+

Job execution interval

+

last_start_date

+

timestamp without time zone

+

Start time of the last job execution, accurate to millisecond

+

last_end_date

+

timestamp without time zone

+

End time of the last job execution, accurate to millisecond

+

last_suc_date

+

timestamp without time zone

+

Start time of the last successful job execution, accurate to millisecond

+

this_run_date

+

timestamp without time zone

+

Start time of the ongoing job execution, accurate to millisecond

+
+
+
+
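For example, to check which jobs are scheduled to run next and whether any have been invalidated (a minimal sketch):
-- Upcoming jobs, including broken ones
SELECT job_id, what, next_run_date, failure_count, is_broken
FROM pg_jobs
ORDER BY next_run_date;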
+ +
+ diff --git a/docs/dws/dev/dws_04_0597.html b/docs/dws/dev/dws_04_0597.html new file mode 100644 index 00000000..bccce061 --- /dev/null +++ b/docs/dws/dev/dws_04_0597.html @@ -0,0 +1,106 @@ + + +

PG_LANGUAGE

+

PG_LANGUAGE records programming languages. You can use them and interfaces to write functions or stored procedures.

+ +
Table 1 PG_LANGUAGE columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

lanname

+

name

+

-

+

Name of the language

+

lanowner

+

oid

+

PG_AUTHID.oid

+

Owner of the language

+

lanispl

+

boolean

+

-

+

The value is false for internal languages (such as SQL) and true for user-defined languages. Currently, gs_dump still uses this to determine which languages need to be dumped, but this might be replaced by a different mechanism in the future.

+

lanpltrusted

+

boolean

+

-

+

Its value is true if this is a trusted language, which means that it is believed not to grant access to anything outside the normal SQL execution environment. Only the initial user can create functions in untrusted languages.

+

lanplcallfoid

+

oid

+

PG_PROC.oid

+

For external languages, this references the language handler, which is a special function that is responsible for executing all functions that are written in the particular language.

+

laninline

+

oid

+

PG_PROC.oid

+

This references a function that is responsible for executing "inline" anonymous code blocks (DO blocks). The value is 0 if inline blocks are not supported.

+

lanvalidator

+

oid

+

PG_PROC.oid

+

This references a language validator function that is responsible for checking the syntax and validity of new functions when they are created. The value is 0 if no validator is provided.

+

lanacl

+

aclitem[]

+

-

+

Access permissions

+
+
+
+
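For example, the following minimal sketch lists each language with its trust flag, resolving lanowner through the PG_ROLES view (an assumption made here so the query does not require read access to PG_AUTHID):

-- Show each language, whether it is a user-defined PL, and its owner.
SELECT l.lanname, l.lanispl, l.lanpltrusted, r.rolname AS owner
FROM pg_language l
JOIN pg_roles r ON r.oid = l.lanowner;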
+ +
+ diff --git a/docs/dws/dev/dws_04_0598.html b/docs/dws/dev/dws_04_0598.html new file mode 100644 index 00000000..22503c9a --- /dev/null +++ b/docs/dws/dev/dws_04_0598.html @@ -0,0 +1,54 @@ + + +

PG_LARGEOBJECT

+

PG_LARGEOBJECT records the data making up large objects. A large object is identified by an OID assigned when it is created. Each large object is broken into segments or "pages" small enough to be conveniently stored as rows in pg_largeobject. The amount of data per page is defined to be LOBLKSIZE (which is currently BLCKSZ/4, or typically 2 kB).

+

It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_LARGEOBJECT columns

Name

+

Type

+

Reference

+

Description

+

loid

+

oid

+

PG_LARGEOBJECT_METADATA.oid

+

Identifier of the large object that includes this page

+

pageno

+

integer

+

-

+

Page number of this page within its large object (counting from zero)

+

data

+

bytea

+

-

+

Actual data stored in the large object. This will never be more than LOBLKSIZE bytes and might be less.

+
+
+

Each row of pg_largeobject holds data for one page of a large object, beginning at byte offset (pageno * LOBLKSIZE) within the object. The implementation allows sparse storage: pages might be missing, and might be shorter than LOBLKSIZE bytes even if they are not the last page of the object. Missing regions within a large object are read as zeroes.

+
+
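The page layout described above can be inspected directly. A minimal sketch, assuming the typical LOBLKSIZE of 2048 bytes and system administrator rights:

-- List each page of every large object, the byte offset at which the page
-- begins (pageno * LOBLKSIZE), and the bytes actually stored on the page.
SELECT loid, pageno,
       pageno * 2048 AS byte_offset,   -- assumes LOBLKSIZE = 2048 (BLCKSZ/4)
       octet_length(data) AS page_bytes
FROM pg_largeobject
ORDER BY loid, pageno;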
+ +
+ diff --git a/docs/dws/dev/dws_04_0599.html b/docs/dws/dev/dws_04_0599.html new file mode 100644 index 00000000..11394d59 --- /dev/null +++ b/docs/dws/dev/dws_04_0599.html @@ -0,0 +1,52 @@ + + +

PG_LARGEOBJECT_METADATA

+

PG_LARGEOBJECT_METADATA records metadata associated with large objects. The actual large object data is stored in PG_LARGEOBJECT.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_LARGEOBJECT_METADATA columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

lomowner

+

oid

+

PG_AUTHID.oid

+

Owner of the large object

+

lomacl

+

aclitem[]

+

-

+

Access permissions

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0600.html b/docs/dws/dev/dws_04_0600.html new file mode 100644 index 00000000..e83aa5c0 --- /dev/null +++ b/docs/dws/dev/dws_04_0600.html @@ -0,0 +1,65 @@ + + +

PG_NAMESPACE

+

PG_NAMESPACE records the namespaces, that is, schema-related information.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_NAMESPACE columns

Name

+

Type

+

Description

+

nspname

+

name

+

Name of the namespace

+

nspowner

+

oid

+

Owner of the namespace

+

nsptimeline

+

bigint

+

Timeline when the namespace is created on the DN. This column is for internal use and is valid only on the DN.

+

nspacl

+

aclitem[]

+

Access permissions. For details, see GRANT and REVOKE.

+

permspace

+

bigint

+

Quota of a schema's permanent tablespace

+

usedspace

+

bigint

+

Used size of a schema's permanent tablespace

+
+
+
+
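For example, the permspace and usedspace columns can be used to review schema space quotas. A minimal sketch, assuming both columns report sizes in bytes:

-- Report each schema's permanent-space quota and current usage, in MB.
SELECT nspname,
       permspace / 1024 / 1024 AS quota_mb,
       usedspace / 1024 / 1024 AS used_mb
FROM pg_namespace
ORDER BY usedspace DESC;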
+ +
+ diff --git a/docs/dws/dev/dws_04_0601.html b/docs/dws/dev/dws_04_0601.html new file mode 100644 index 00000000..b79665f9 --- /dev/null +++ b/docs/dws/dev/dws_04_0601.html @@ -0,0 +1,69 @@ + + +

PG_OBJECT

+

PG_OBJECT records the creator, creation time, last modification time, and last analysis time of objects of the specified types (the types listed in object_type).

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_OBJECT columns

Name

+

Type

+

Description

+

object_oid

+

oid

+

Object identifier.

+

object_type

+

"char"

+

Object type:

+
  • r indicates a table, which can be an ordinary table or a temporary table.
  • i indicates an index.
  • s indicates a sequence.
  • v indicates a view.
  • p indicates a stored procedure or function.
+

creator

+

oid

+

ID of the creator.

+

ctime

+

timestamp with time zone

+

Object creation time.

+

mtime

+

timestamp with time zone

+

Time when the object was last modified. By default, the ALTER, COMMENT, GRANT, REVOKE, and TRUNCATE operations are recorded.

+

If light_object_mtime is configured for behavior_compat_options, the GRANT, REVOKE, and TRUNCATE operations are not recorded.

+

last_analyze_time

+

timestamp with time zone

+

Time when the object was last analyzed.

+
+
+
  • Only normal user operations are recorded. Operations before the object upgrade and during the initdb process cannot be recorded.
  • ctime and mtime are the start time of the transaction.
  • The time of object modification due to capacity expansion is also recorded.
+
+
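For example, recently modified tables can be found by joining PG_OBJECT to PG_CLASS for readable names. A minimal sketch:

-- Tables (object_type = 'r') modified within the last 7 days.
SELECT c.relname, o.ctime, o.mtime, o.last_analyze_time
FROM pg_object o
JOIN pg_class c ON c.oid = o.object_oid
WHERE o.object_type = 'r'
  AND o.mtime > now() - interval '7 days'
ORDER BY o.mtime DESC;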
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0602.html b/docs/dws/dev/dws_04_0602.html new file mode 100644 index 00000000..b4ced81c --- /dev/null +++ b/docs/dws/dev/dws_04_0602.html @@ -0,0 +1,106 @@ + + +

PG_OBSSCANINFO

+

PG_OBSSCANINFO records runtime information about OBS scans in cluster acceleration scenarios. Each record corresponds to the runtime information of one OBS foreign table in a query.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_OBSSCANINFO columns

Name

+

Type

+

Reference

+

Description

+

query_id

+

bigint

+

-

+

Query ID

+

user_id

+

text

+

-

+

Database user who performs queries

+

table_name

+

text

+

-

+

Name of a foreign table on OBS

+

file_type

+

text

+

-

+

Format of files storing the underlying data

+

time_stamp

+

timestamp

+

-

+

Scanning start time

+

actual_time

+

double

+

-

+

Scanning execution time, in seconds

+

file_scanned

+

bigint

+

-

+

Number of files scanned

+

data_size

+

double

+

-

+

Size of data scanned, in bytes

+

billing_info

+

text

+

-

+

Reserved columns

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0603.html b/docs/dws/dev/dws_04_0603.html new file mode 100644 index 00000000..cd4cc287 --- /dev/null +++ b/docs/dws/dev/dws_04_0603.html @@ -0,0 +1,108 @@ + + +

PG_OPCLASS

+

PG_OPCLASS defines index access method operator classes.

+

Each operator class defines semantics for index columns of a particular data type and a particular index access method. An operator class essentially specifies that a particular operator family is applicable to a particular indexable column data type. The set of operators from the family that are actually usable with the indexed column are whichever ones accept the column's data type as their lefthand input.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_OPCLASS columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

opcmethod

+

oid

+

PG_AM.oid

+

Index access method the operator class is for

+

opcname

+

name

+

-

+

Name of the operator class

+

opcnamespace

+

oid

+

PG_NAMESPACE.oid

+

Namespace to which the operator class belongs

+

opcowner

+

oid

+

PG_AUTHID.oid

+

Owner of the operator class

+

opcfamily

+

oid

+

PG_OPFAMILY.oid

+

Operator family containing the operator class

+

opcintype

+

oid

+

PG_TYPE.oid

+

Data type that the operator class indexes

+

opcdefault

+

boolean

+

-

+

Whether the operator class is the default for opcintype. If it is, its value is true.

+

opckeytype

+

oid

+

PG_TYPE.oid

+

Type of data stored in index, or zero if same as opcintype

+
+
+

An operator class's opcmethod must match the opfmethod of its containing operator family. Also, there must be no more than one pg_opclass row having opcdefault true for any given combination of opcmethod and opcintype.

+
+
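The uniqueness rule above can be checked, and the default operator classes browsed, with a query along these lines (a minimal sketch):

-- Default operator class per (access method, input type) combination;
-- at most one row may have opcdefault = true for each pair.
SELECT am.amname, opc.opcname, opc.opcintype::regtype AS input_type
FROM pg_opclass opc
JOIN pg_am am ON am.oid = opc.opcmethod
WHERE opc.opcdefault
ORDER BY am.amname, input_type;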
+ +
+ diff --git a/docs/dws/dev/dws_04_0604.html b/docs/dws/dev/dws_04_0604.html new file mode 100644 index 00000000..86e03b3b --- /dev/null +++ b/docs/dws/dev/dws_04_0604.html @@ -0,0 +1,160 @@ + + +

PG_OPERATOR

+

PG_OPERATOR records information about operators.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_OPERATOR columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

oprname

+

name

+

-

+

Name of the operator

+

oprnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace that contains this operator

+

oprowner

+

oid

+

PG_AUTHID.oid

+

Owner of the operator

+

oprkind

+

"char"

+

-

+
Operator kind:
  • b: infix ("both")
  • l: prefix ("left")
  • r: postfix ("right")
+

oprcanmerge

+

boolean

+

-

+

Whether the operator supports merge joins

+

oprcanhash

+

boolean

+

-

+

Whether the operator supports hash joins

+

oprleft

+

oid

+

PG_TYPE.oid

+

Type of the left operand

+

oprright

+

oid

+

PG_TYPE.oid

+

Type of the right operand

+

oprresult

+

oid

+

PG_TYPE.oid

+

Type of the result

+

oprcom

+

oid

+

PG_OPERATOR.oid

+

Commutator of this operator, if any

+

oprnegate

+

oid

+

PG_OPERATOR.oid

+

Negator of this operator, if any

+

oprcode

+

regproc

+

PG_PROC.oid

+

Function that implements this operator

+

oprrest

+

regproc

+

PG_PROC.oid

+

Restriction selectivity estimation function for this operator

+

oprjoin

+

regproc

+

PG_PROC.oid

+

Join selectivity estimation function for this operator

+
+
+
+
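For example, every definition of the + operator can be listed with its operand and result types; the reg* casts render OIDs as readable names. A minimal sketch:

-- All "+" operators with operand/result types and the implementing function.
SELECT oprname,
       oprleft::regtype   AS left_type,
       oprright::regtype  AS right_type,
       oprresult::regtype AS result_type,
       oprcode
FROM pg_operator
WHERE oprname = '+';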
+ +
+ diff --git a/docs/dws/dev/dws_04_0605.html b/docs/dws/dev/dws_04_0605.html new file mode 100644 index 00000000..be1f81b2 --- /dev/null +++ b/docs/dws/dev/dws_04_0605.html @@ -0,0 +1,72 @@ + + +

PG_OPFAMILY

+

PG_OPFAMILY defines operator families.

+

Each operator family is a collection of operators and associated support routines that implement the semantics specified for a particular index access method. Furthermore, the operators in a family are all "compatible", in a way that is specified by the access method. The operator family concept allows cross-data-type operators to be used with indexes and to be reasoned about using knowledge of access method semantics.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_OPFAMILY columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

opfmethod

+

oid

+

PG_AM.oid

+

Index access method the operator family is for

+

opfname

+

name

+

-

+

Name of the operator family

+

opfnamespace

+

oid

+

PG_NAMESPACE.oid

+

Namespace of the operator family

+

opfowner

+

oid

+

PG_AUTHID.oid

+

Owner of the operator family

+
+
+

The majority of the information defining an operator family is not in PG_OPFAMILY, but in the associated PG_AMOP, PG_AMPROC, and PG_OPCLASS.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0606.html b/docs/dws/dev/dws_04_0606.html new file mode 100644 index 00000000..ea19b305 --- /dev/null +++ b/docs/dws/dev/dws_04_0606.html @@ -0,0 +1,224 @@ + + +

PG_PARTITION

+

PG_PARTITION records all partitioned tables, table partitions, TOAST tables on table partitions, and index partitions in the database. Information about the partitioned indexes themselves (as opposed to their index partitions) is not stored in the PG_PARTITION system catalog.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_PARTITION columns

Name

+

Type

+

Description

+

relname

+

name

+

Names of the partitioned tables, table partitions, TOAST tables on table partitions, and index partitions

+

parttype

+

"char"

+

Object type

+
  • r indicates a partitioned table.
  • p indicates a table partition.
  • x indicates an index partition.
  • t indicates a TOAST table.
+

parentid

+

oid

+

OID of the partitioned table in PG_CLASS when the object is a partitioned table or table partition

+

OID of the partitioned index when the object is an index partition

+

rangenum

+

integer

+

Reserved field.

+

intervalnum

+

integer

+

Reserved field.

+

partstrategy

+

"char"

+

Partition policy of the partitioned table. The following policies are supported:

+

r indicates the range partition.

+

v indicates the numeric partition.

+

relfilenode

+

oid

+

Physical storage locations of the table partition, index partition, and TOAST table on the table partition.

+

reltablespace

+

oid

+

OID of the tablespace containing the table partition, index partition, and TOAST table on the table partition

+

relpages

+

double precision

+

Statistics: numbers of data pages of the table partition and index partition

+

reltuples

+

double precision

+

Statistics: numbers of tuples of the table partition and index partition.

+

relallvisible

+

integer

+

Statistics: number of visible data pages of the table partition and index partition.

+

reltoastrelid

+

oid

+

OID of the TOAST table corresponding to the table partition

+

reltoastidxid

+

oid

+

OID of the TOAST table index corresponding to the table partition

+

indextblid

+

oid

+

OID of the table partition corresponding to the index partition

+

indisusable

+

boolean

+

Whether the index partition is available

+

reldeltarelid

+

oid

+

OID of a Delta table

+

reldeltaidx

+

oid

+

OID of the index for a Delta table

+

relcudescrelid

+

oid

+

OID of a CU description table

+

relcudescidx

+

oid

+

OID of the index for a CU description table

+

relfrozenxid

+

xid32

+

Frozen transaction ID

+

To ensure forward compatibility, this column is reserved. The relfrozenxid64 column is added to record the information.

+

intspnum

+

integer

+

Number of tablespaces that the interval partition belongs to

+

partkey

+

int2vector

+

Column number of the partition key

+

intervaltablespace

+

oidvector

+

Tablespaces that the interval partition belongs to. Interval partitions are assigned to these tablespaces in a round-robin manner.

+

interval

+

text[]

+

Interval value of the interval partition

+

boundaries

+

text[]

+

Upper boundary of the range partition and interval partition

+

transit

+

text[]

+

Transit of the interval partition

+

reloptions

+

text[]

+

Storage property of a partition used for collecting online scale-out information. Same as pg_class.reloptions, it is a keyword=value string.

+

relfrozenxid64

+

xid

+

Frozen transaction ID

+
+
+
+
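For example, the partitions of one partitioned table can be listed with their upper boundaries by filtering on parentid and parttype. A minimal sketch, where sales is a hypothetical partitioned table:

-- Partitions (parttype = 'p') of the hypothetical table "sales".
SELECT p.relname, p.partstrategy, p.boundaries
FROM pg_partition p
WHERE p.parentid = (SELECT oid FROM pg_class WHERE relname = 'sales')
  AND p.parttype = 'p';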
+ +
+ diff --git a/docs/dws/dev/dws_04_0607.html b/docs/dws/dev/dws_04_0607.html new file mode 100644 index 00000000..0b2ff893 --- /dev/null +++ b/docs/dws/dev/dws_04_0607.html @@ -0,0 +1,79 @@ + + +

PG_PLTEMPLATE

+

PG_PLTEMPLATE records template information for procedural languages.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_PLTEMPLATE columns

Name

+

Type

+

Description

+

tmplname

+

name

+

Name of the language for which this template is used

+

tmpltrusted

+

boolean

+

The value is true if the language is considered trusted.

+

tmpldbacreate

+

boolean

+

The value is true if the language can be created by a database owner.

+

tmplhandler

+

text

+

Name of the call handler function

+

tmplinline

+

text

+

Name of the anonymous block handler. If no name of the block handler exists, the value is null.

+

tmplvalidator

+

text

+

Name of the verification function. If no verification function is available, the value is null.

+

tmpllibrary

+

text

+

Path of the shared library that implements the language

+

tmplacl

+

aclitem[]

+

Access permissions for template (not yet used)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0608.html b/docs/dws/dev/dws_04_0608.html new file mode 100644 index 00000000..13f6172a --- /dev/null +++ b/docs/dws/dev/dws_04_0608.html @@ -0,0 +1,271 @@ + + +

PG_PROC

+

PG_PROC records information about functions or procedures.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_PROC columns

Name

+

Type

+

Description

+

proname

+

name

+

Name of the function

+

pronamespace

+

oid

+

OID of the namespace that contains the function

+

proowner

+

oid

+

Owner of the function

+

prolang

+

oid

+

Implementation language or call interface of the function

+

procost

+

real

+

Estimated execution cost

+

prorows

+

real

+

Estimated number of result rows

+

provariadic

+

oid

+

Data type of the variadic array parameter's elements. The value is 0 if the function has no variadic parameter.

+

protransform

+

regproc

+

Simplified call method for this function

+

proisagg

+

boolean

+

Whether this function is an aggregate function

+

proiswindow

+

boolean

+

Whether this function is a window function

+

prosecdef

+

boolean

+

Whether this function is a security definer (such as a "setuid" function)

+

proleakproof

+

boolean

+

Whether the function is leakproof, that is, it has no side effects and reveals no information about its arguments other than through its return value. A function that may throw errors depending on its argument values is not leakproof.

+

proisstrict

+

boolean

+

The function returns null if any call parameter is null. In that case the function is not actually called at all. Functions that are not "strict" must be prepared to handle null inputs.

+

proretset

+

boolean

+

Whether the function returns a set, that is, multiple values of the specified data type.

+

provolatile

+

"char"

+

Whether the function's result depends only on its input parameters, or is affected by outside factors

+
  • It is i for "immutable" functions, which always deliver the same result for the same inputs.
  • It is s for "stable" functions, whose results (for fixed inputs) do not change within a scan.
  • It is v for "volatile" functions, whose results may change at any time.
+

pronargs

+

smallint

+

Number of parameters

+

pronargdefaults

+

smallint

+

Number of parameters that have default values

+

prorettype

+

oid

+

OID of the returned parameter type

+

proargtypes

+

oidvector

+

Array with the data types of the function parameters. This array includes only input parameters (including INOUT parameters) and thus represents the call signature of the function.

+

proallargtypes

+

oid[]

+

Array with the data types of the function parameters. This array includes all parameter types (including OUT and INOUT parameters); however, if all the parameters are IN parameters, this column is null. Note that subscripting of this array is 1-based, whereas for historical reasons proargtypes is subscripted from 0.

+

proargmodes

+

"char"[]

+

Array with the modes of the function parameters.

+
  • i indicates IN parameters.
  • o indicates OUT parameters.
  • b indicates INOUT parameters.
+

If all the parameters are IN parameters, this column is null. Note that subscripts of this array correspond to positions of proallargtypes not proargtypes.

+

proargnames

+

text[]

+

Array that stores the names of the function parameters. Parameters without a name are set to empty strings in the array. If none of the parameters have a name, this column is null. Note that subscripts correspond to positions of proallargtypes not proargtypes.

+

proargdefaults

+

pg_node_tree

+

Expression trees of the default values. This is a list with pronargdefaults elements.

+

prosrc

+

text

+

A definition that describes a function or stored procedure. In an interpreted language, it is the function source code; it may also be a link symbol, a file name, or any other body content specified when the function or stored procedure is created, depending on the language and calling convention.

+

probin

+

text

+

Additional information about how to call the function. Again, the interpretation is language-specific.

+

proconfig

+

text[]

+

Function's local settings for run-time configuration variables.

+

proacl

+

aclitem[]

+

Access permissions. For details, see GRANT and REVOKE.

+

prodefaultargpos

+

int2vector

+

Positions of the function parameters that have default values. Default values are not restricted to the last few parameters.

+

fencedmode

+

boolean

+

Execution mode of the function, indicating whether the function is executed in fence or not-fence mode. In fence mode, the function is executed in a separate forked worker process. The default value is fence.

+

proshippable

+

boolean

+

Whether a function can be pushed down to DNs. The default value is false.

+
  • Functions of the IMMUTABLE type can always be pushed down to the DNs.
  • Functions of the STABLE or VOLATILE type can be pushed down to DNs only if their attribute is SHIPPABLE.
+

propackage

+

boolean

+

Whether the function supports overloading, mainly used for Oracle-style functions. The default value is false.

+
+
+

Examples

Query the OID of a specified function. For example, obtain the OID 1295 of the justify_days function.

+
1
+2
+3
+4
+5
SELECT oid FROM pg_proc where proname ='justify_days';
+ oid
+------
+ 1295
+(1 row)
+
+ +
+

Query whether a function is an aggregate function. For example, the justify_days function is a non-aggregate function.

+
1
+2
+3
+4
+5
SELECT proisagg FROM pg_proc where proname ='justify_days';
+ proisagg
+----------
+ f
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0609.html b/docs/dws/dev/dws_04_0609.html new file mode 100644 index 00000000..1ea2439e --- /dev/null +++ b/docs/dws/dev/dws_04_0609.html @@ -0,0 +1,81 @@ + + +

PG_RANGE

+

PG_RANGE records information about range types.

+

This is in addition to the types' entries in PG_TYPE.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_RANGE columns

Name

+

Type

+

Reference

+

Description

+

rngtypid

+

oid

+

PG_TYPE.oid

+

OID of the range type

+

rngsubtype

+

oid

+

PG_TYPE.oid

+

OID of the element type (subtype) of this range type

+

rngcollation

+

oid

+

PG_COLLATION.oid

+

OID of the collation used for range comparisons, or 0 if none

+

rngsubopc

+

oid

+

PG_OPCLASS.oid

+

OID of the subtype's operator class used for range comparisons

+

rngcanonical

+

regproc

+

PG_PROC.oid

+

OID of the function to convert a range value into canonical form, or 0 if none

+

rngsubdiff

+

regproc

+

PG_PROC.oid

+

OID of the function to return the difference between two element values as double precision, or 0 if none

+
+
+

rngsubopc (plus rngcollation, if the element type is collatable) determines the sort ordering used by the range type. rngcanonical is used when the element type is discrete.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0610.html b/docs/dws/dev/dws_04_0610.html new file mode 100644 index 00000000..99d3135b --- /dev/null +++ b/docs/dws/dev/dws_04_0610.html @@ -0,0 +1,102 @@ + + +

PG_REDACTION_COLUMN

+

PG_REDACTION_COLUMN records the information about the redacted columns.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_REDACTION_COLUMN columns

Name

+

Type

+

Description

+

object_oid

+

oid

+

OID of the object to be redacted.

+

column_attrno

+

smallint

+

attrno of the redacted column.

+

function_type

+

integer

+

Redaction type.

+
NOTE:

This column is reserved. It is used only for forward compatibility of redacted column information in earlier versions. The value can be 0 (NONE) or 1 (FULL).

+
+

function_parameters

+

text

+

Parameters used when the redaction type is partial (reserved).

+

regexp_pattern

+

text

+

Pattern string when the redaction type is regexp (reserved).

+

regexp_replace_string

+

text

+

Replacement string when the redaction type is regexp (reserved).

+

regexp_position

+

integer

+

Start and end replacement positions when the redaction type is regexp (reserved).

+

regexp_occurrence

+

integer

+

Replacement times when the redaction type is regexp (reserved).

+

regexp_match_parameter

+

text

+

Regular control parameter used when the redaction type is regexp (reserved).

+

column_description

+

text

+

Description of the redacted column.

+

function_expr

+

pg_node_tree

+

Internal representation of the redaction function.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0611.html b/docs/dws/dev/dws_04_0611.html new file mode 100644 index 00000000..258dd82a --- /dev/null +++ b/docs/dws/dev/dws_04_0611.html @@ -0,0 +1,61 @@ + + +

PG_REDACTION_POLICY

+

PG_REDACTION_POLICY records information about the object to be redacted.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_REDACTION_POLICY columns

Name

+

Type

+

Description

+

object_oid

+

oid

+

OID of the object to be redacted.

+

policy_name

+

name

+

Name of the redaction policy.

+

enable

+

boolean

+

Policy status (enabled or disabled).

+
NOTE:

The value can be:

+
  • true: enabled.
  • false: disabled.
+
+

expression

+

pg_node_tree

+

Expression that determines when the policy takes effect for users.

+

policy_description

+

text

+

Description of a policy.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0612.html b/docs/dws/dev/dws_04_0612.html new file mode 100644 index 00000000..2735a6ef --- /dev/null +++ b/docs/dws/dev/dws_04_0612.html @@ -0,0 +1,68 @@ + + +

PG_RLSPOLICY

+

PG_RLSPOLICY displays the information about row-level access control policies.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_RLSPOLICY columns

Name

+

Type

+

Description

+

polname

+

name

+

Name of a row-level access control policy

+

polrelid

+

oid

+

Table OID of a row-level access control policy

+

polcmd

+

char

+

SQL operations affected by a row-level access control policy. The options are * (ALL), r (SELECT), w (UPDATE), and d (DELETE).

+

polpermissive

+

boolean

+

Type of a row-level access control policy

+
NOTE:

Values of polpermissive:

+
  • true: The row-level access control policy is a permissive policy.
  • false: The row-level access control policy is a restrictive policy.
+
+

polroles

+

oid[]

+

OIDs of the database users affected by a row-level access control policy

+

polqual

+

pg_node_tree

+

SQL condition expression of a row-level access control policy

+
+
+
+
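For example, the policies in force can be listed together with the tables they protect by joining to PG_CLASS. A minimal sketch:

-- Row-level access control policies and the tables they apply to.
SELECT c.relname, p.polname, p.polcmd, p.polpermissive
FROM pg_rlspolicy p
JOIN pg_class c ON c.oid = p.polrelid
ORDER BY c.relname;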
+ +
+ diff --git a/docs/dws/dev/dws_04_0613.html b/docs/dws/dev/dws_04_0613.html new file mode 100644 index 00000000..0533db56 --- /dev/null +++ b/docs/dws/dev/dws_04_0613.html @@ -0,0 +1,100 @@ + + +

PG_RESOURCE_POOL

+

PG_RESOURCE_POOL records the information about database resource pool.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_RESOURCE_POOL columns

Name

+

Type

+

Description

+

respool_name

+

name

+

Name of the resource pool

+

mem_percent

+

integer

+

Percentage of the memory configuration

+

cpu_affinity

+

bigint

+

Value indicating the CPU cores to which the resource pool is bound

+

control_group

+

name

+

Name of the Cgroup where the resource pool is located

+

active_statements

+

integer

+

Maximum number of concurrent statements in the resource pool

+

max_dop

+

integer

+

Maximum concurrency. This is a reserved parameter.

+

memory_limit

+

name

+

Maximum memory of resource pool

+

parentid

+

oid

+

OID of the parent resource pool

+

io_limits

+

integer

+

Upper limit of IOPS. It is counted in ones for column storage and in tens of thousands for row storage.

+

io_priority

+

text

+

I/O priority set for jobs that consume many I/O resources. It takes effect when the I/O usage reaches 90%.

+

is_foreign

+

boolean

+

Indicates whether the resource pool can be used for users outside the logical cluster. If it is set to true, the resource pool controls the resources of common users who do not belong to the current resource pool.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0614.html b/docs/dws/dev/dws_04_0614.html new file mode 100644 index 00000000..3bb28ce1 --- /dev/null +++ b/docs/dws/dev/dws_04_0614.html @@ -0,0 +1,81 @@ + + +

PG_REWRITE

+

PG_REWRITE records rewrite rules defined for tables and views.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_REWRITE columns

Name

+

Type

+

Description

+

rulename

+

name

+

Rule name

+

ev_class

+

oid

+

OID of the table that uses the rule

+

ev_attr

+

smallint

+

Column this rule is for (always 0 to indicate the entire table)

+

ev_type

+

"char"

+

Event type for this rule:

+
  • 1 = SELECT
  • 2 = UPDATE
  • 3 = INSERT
  • 4 = DELETE
+

ev_enabled

+

"char"

+

Controls in which mode the rule fires

+
  • O: The rule fires in "origin" and "local" modes.
  • D: The rule is disabled.
  • R: The rule fires in "replica" mode.
  • A: The rule always fires.
+

is_instead

+

boolean

+

Its value is true if the rule is an INSTEAD rule.

+

ev_qual

+

pg_node_tree

+

Expression tree (in the form of a nodeToString() representation) for the rule's qualifying condition

+

ev_action

+

pg_node_tree

+

Query tree (in the form of a nodeToString() representation) for the rule's action

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0615.html b/docs/dws/dev/dws_04_0615.html new file mode 100644 index 00000000..8e4818a5 --- /dev/null +++ b/docs/dws/dev/dws_04_0615.html @@ -0,0 +1,71 @@ + + +

PG_SECLABEL

+

PG_SECLABEL records security labels on database objects.

+

See also PG_SHSECLABEL, which performs a similar function for security labels of database objects that are shared across a database cluster.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SECLABEL columns

Name

+

Type

+

Reference

+

Description

+

objoid

+

oid

+

Any OID column

+

OID of the object this security label pertains to

+

classoid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog that contains the object

+

objsubid

+

integer

+

-

+

For a security label on a table column, this is the column number. For all other object types, this column is 0.

+

provider

+

text

+

-

+

Label provider associated with this label

+

label

+

text

+

-

+

Security label applied to this object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0616.html b/docs/dws/dev/dws_04_0616.html new file mode 100644 index 00000000..fa48519b --- /dev/null +++ b/docs/dws/dev/dws_04_0616.html @@ -0,0 +1,104 @@ + + +

PG_SHDEPEND

+

PG_SHDEPEND records the dependency relationships between database objects and shared objects, such as roles. This information allows GaussDB(DWS) to ensure that those objects are unreferenced before attempting to delete them.

+

See also PG_DEPEND, which performs a similar function for dependencies involving objects within a single database.

+

Unlike most system catalogs, PG_SHDEPEND is shared across all databases of a cluster: there is only one copy of PG_SHDEPEND per cluster, not one per database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SHDEPEND columns

Name

+

Type

+

Reference

+

Description

+

dbid

+

oid

+

PG_DATABASE.oid

+

OID of the database the dependent object is in. The value is 0 for a shared object.

+

classid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog the dependent object is in.

+

objid

+

oid

+

Any OID column

+

OID of the specific dependent object

+

objsubid

+

integer

+

-

+

For a table column, this is the column number (the objid and classid refer to the table itself). For all other object types, this column is 0.

+

refclassid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog the referenced object is in (must be a shared catalog)

+

refobjid

+

oid

+

Any OID column

+

OID of the specific referenced object

+

deptype

+

"char"

+

-

+

A code defining the specific semantics of this dependency relationship; see the text below for details.

+

objfile

+

text

+

-

+

Path of the user-defined C function library file.

+
+
+

In all cases, a pg_shdepend entry indicates that the referenced object cannot be dropped without also dropping the dependent object. However, there are several subflavors defined by deptype:
  • o (SHARED_DEPENDENCY_OWNER): The referenced object (which must be a role) is the owner of the dependent object.
  • a (SHARED_DEPENDENCY_ACL): The referenced object (which must be a role) is mentioned in the ACL (access control list) of the dependent object.
  • p (SHARED_DEPENDENCY_PIN): There is no dependent object. This type of entry indicates that the system itself depends on the referenced object, so the object must never be deleted. Entries of this type are created only during database initialization.

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0617.html b/docs/dws/dev/dws_04_0617.html new file mode 100644 index 00000000..66b91d9e --- /dev/null +++ b/docs/dws/dev/dws_04_0617.html @@ -0,0 +1,54 @@ + + +

PG_SHDESCRIPTION

+

PG_SHDESCRIPTION records optional comments for shared database objects. Descriptions can be manipulated with the COMMENT command and viewed with psql's \d commands.

+

See also PG_DESCRIPTION, which performs a similar function for descriptions involving objects within a single database.

+

Unlike most system catalogs, PG_SHDESCRIPTION is shared across all databases of a cluster. There is only one copy of PG_SHDESCRIPTION per cluster, not one per database.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SHDESCRIPTION columns

Name

+

Type

+

Reference

+

Description

+

objoid

+

oid

+

Any OID column

+

OID of the object this description pertains to

+

classoid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog where the object resides

+

description

+

text

+

-

+

Arbitrary text that serves as the description of this object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0618.html b/docs/dws/dev/dws_04_0618.html new file mode 100644 index 00000000..f21c0368 --- /dev/null +++ b/docs/dws/dev/dws_04_0618.html @@ -0,0 +1,64 @@ + + +

PG_SHSECLABEL

+

PG_SHSECLABEL records security labels on shared database objects. Security labels can be manipulated with the SECURITY LABEL command.

+

For an easier way to view security labels, see PG_SECLABELS.

+

See also PG_SECLABEL, which performs a similar function for security labels involving objects within a single database.

+

Unlike most system catalogs, PG_SHSECLABEL is shared across all databases of a cluster. There is only one copy of PG_SHSECLABEL per cluster, not one per database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SHSECLABEL columns

Name

+

Type

+

Reference

+

Description

+

objoid

+

oid

+

Any OID column

+

OID of the object this security label pertains to

+

classoid

+

oid

+

PG_CLASS.oid

+

OID of the system catalog where the object resides

+

provider

+

text

+

-

+

Label provider associated with this label

+

label

+

text

+

-

+

Security label applied to this object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0619.html b/docs/dws/dev/dws_04_0619.html new file mode 100644 index 00000000..4888d10d --- /dev/null +++ b/docs/dws/dev/dws_04_0619.html @@ -0,0 +1,120 @@ + + +

PG_STATISTIC

+

PG_STATISTIC records statistics about tables and index columns in a database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATISTIC columns

Name

+

Type

+

Description

+

starelid

+

oid

+

Table or index which the described column belongs to

+

starelkind

+

"char"

+

Type of an object

+

staattnum

+

smallint

+

Number of the described column in the table, starting from 1

+

stainherit

+

boolean

+

Whether to collect statistics for objects that have an inheritance relationship

+

stanullfrac

+

real

+

Percentage of column entries that are null

+

stawidth

+

integer

+

Average stored width, in bytes, of non-null entries

+

stadistinct

+

real

+

Number of distinct, not-null data values in the column for all DNs

+
  • A value greater than zero is the actual number of distinct values.
  • A value less than zero is the negative of a multiplier for the number of rows in the table. (For example, stadistinct=-0.5 indicates that values in a column appear twice on average.)
  • 0 indicates that the number of distinct values is unknown.
+

stakindN

+

smallint

+

Code number indicating the kind of statistics stored in Slot N of the pg_statistic row.

+

Value range: 1 to 5

+

staopN

+

oid

+

Operator used to generate the statistics stored in Slot N. For example, a histogram slot shows the < operator that defines the sort order of the data.

+

Value range: 1 to 5

+

stanumbersN

+

real[]

+

Numerical statistics of the appropriate type for Slot N. The value is null if the slot kind does not involve numerical values.

+

Value range: 1 to 5

+

stavaluesN

+

anyarray

+

Column data values of the appropriate type for Slot N. The value is null if the slot type does not store any data values. Each array's element values are actually of the specific column's data type so there is no way to define these columns' type more specifically than anyarray.

+

Value range: 1 to 5

+

stadndistinct

+

real

+

Number of distinct, non-null data values in the column on DN1

+
  • A value greater than zero is the actual number of distinct values.
  • A value less than zero is the negative of a multiplier for the number of rows in the table. (For example, stadistinct=-0.5 indicates that values in a column appear twice on average.)
  • 0 indicates that the number of distinct values is unknown.
+

staextinfo

+

text

+

Information about extension statistics (reserved)

+
+
+
+
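The stadistinct encoding above can be turned into an estimated distinct count by combining it with pg_class.reltuples. A minimal sketch, where my_table is a hypothetical table name; system administrator rights are required:

-- Estimated number of distinct values per analyzed column of "my_table".
SELECT c.relname, s.staattnum, s.stanullfrac,
       CASE WHEN s.stadistinct >= 0 THEN s.stadistinct
            ELSE -s.stadistinct * c.reltuples   -- negative: per-row multiplier
       END AS est_distinct
FROM pg_statistic s
JOIN pg_class c ON c.oid = s.starelid
WHERE c.relname = 'my_table';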
+ +
+ diff --git a/docs/dws/dev/dws_04_0620.html b/docs/dws/dev/dws_04_0620.html new file mode 100644 index 00000000..2b6649d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0620.html @@ -0,0 +1,120 @@ + + +

PG_STATISTIC_EXT

+

PG_STATISTIC_EXT records the extended statistics of tables in a database, such as statistics of multiple columns. Statistics of expressions will be supported later. You can specify the extended statistics to be collected. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATISTIC_EXT columns

Parameter

+

Type

+

Description

+

starelid

+

oid

+

Table or index which the described column belongs to

+

starelkind

+

"char"

+

Type of an object

+

stainherit

+

boolean

+

Whether to collect statistics for objects that have an inheritance relationship

+

stanullfrac

+

real

+

Percentage of column entries that are null

+

stawidth

+

integer

+

Average stored width, in bytes, of non-null entries

+

stadistinct

+

real

+

Number of distinct, not-null data values in the column for all DNs

+
  • A value greater than zero is the actual number of distinct values.
  • A value less than zero is the negative of a multiplier for the number of rows in the table. (For example, stadistinct=-0.5 indicates that values in a column appear twice on average.)
  • 0 indicates that the number of distinct values is unknown.
+

stadndistinct

+

real

+

Number of distinct, non-null data values in the column on DN1

+
  • A value greater than zero is the actual number of distinct values.
  • A value less than zero is the negative of a multiplier for the number of rows in the table. (For example, stadistinct=-0.5 indicates that values in a column appear twice on average.)
  • 0 indicates that the number of distinct values is unknown.
+

stakindN

+

smallint

+

Code number indicating the kind of statistics stored in Slot N of the pg_statistic row.

+

Value range: 1 to 5

+

staopN

+

oid

+

Operator used to generate the statistics stored in Slot N. For example, a histogram slot shows the < operator that defines the sort order of the data.

+

Value range: 1 to 5

+

stakey

+

int2vector

+

Array of column IDs

+

stanumbersN

+

real[]

+

Numerical statistics of the appropriate type for Slot N. The value is null if the slot kind does not involve numerical values.

+

Value range: 1 to 5

+

stavaluesN

+

anyarray

+

Column data values of the appropriate type for Slot N. The value is null if the slot type does not store any data values. Each array's element values are actually of the specific column's data type so there is no way to define these columns' type more specifically than anyarray.

+

Value range: 1 to 5

+

staexprs

+

pg_node_tree

+

Expression corresponding to the extended statistics information.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0621.html b/docs/dws/dev/dws_04_0621.html new file mode 100644 index 00000000..17cdc533 --- /dev/null +++ b/docs/dws/dev/dws_04_0621.html @@ -0,0 +1,58 @@ + + +

PG_SYNONYM

+

PG_SYNONYM records the mapping between synonym object names and other database object names.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SYNONYM columns

Name

+

Type

+

Description

+

synname

+

name

+

Synonym name.

+

synnamespace

+

oid

+

OID of the namespace where the synonym is located.

+

synowner

+

oid

+

Owner of a synonym, usually the OID of the user who created it.

+

synobjschema

+

name

+

Schema name specified by the associated object.

+

synobjname

+

name

+

Name of the associated object.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0622.html b/docs/dws/dev/dws_04_0622.html new file mode 100644 index 00000000..4bf622cf --- /dev/null +++ b/docs/dws/dev/dws_04_0622.html @@ -0,0 +1,58 @@ + + +

PG_TABLESPACE

+

PG_TABLESPACE records tablespace information.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TABLESPACE columns

Name

+

Type

+

Description

+

spcname

+

name

+

Name of the tablespace

+

spcowner

+

oid

+

Owner of the tablespace, usually the user who created it

+

spcacl

+

aclitem[]

+

Access permissions. For details, see GRANT and REVOKE.

+

spcoptions

+

text[]

+

Specifies options of the tablespace.

+

spcmaxsize

+

text

+

Maximum size of the available disk space, in bytes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0623.html b/docs/dws/dev/dws_04_0623.html new file mode 100644 index 00000000..103d55e9 --- /dev/null +++ b/docs/dws/dev/dws_04_0623.html @@ -0,0 +1,131 @@ + + +

PG_TRIGGER

+

PG_TRIGGER records the trigger information.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TRIGGER columns

Name

+

Type

+

Description

+

tgrelid

+

oid

+

OID of the table where the trigger is located.

+

tgname

+

name

+

Trigger name.

+

tgfoid

+

oid

+

OID of the function called by the trigger.

+

tgtype

+

smallint

+

Trigger type

+

tgenabled

+

"char"

+

O: The trigger fires in "origin" or "local" mode.

+

D: The trigger is disabled.

+

R: The trigger fires in "replica" mode.

+

A: The trigger always fires.

+

tgisinternal

+

boolean

+

Whether the trigger is internal. If the value is true, it indicates an internal trigger.

+

tgconstrrelid

+

oid

+

The table referenced by the integrity constraint

+

tgconstrindid

+

oid

+

Index of the integrity constraint

+

tgconstraint

+

oid

+

OID of the constraint trigger in the pg_constraint

+

tgdeferrable

+

boolean

+

Whether the constraint trigger is of the DEFERRABLE type.

+

tginitdeferred

+

boolean

+

Whether the trigger is of the INITIALLY DEFERRED type.

+

tgnargs

+

smallint

+

Number of input parameters of the trigger function

+

tgattr

+

int2vector

+

Column ID specified by the trigger. If no column is specified, an empty array is used.

+

tgargs

+

bytea

+

Parameters passed to the trigger

+

tgqual

+

pg_node_tree

+

Indicates the WHEN condition of the trigger. If the WHEN condition does not exist, the value is null.

+
+
+
+
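For example, the triggers on a table and the functions they call can be listed by joining tgfoid to PG_PROC. A minimal sketch, where orders is a hypothetical table:

-- Triggers on the hypothetical table "orders", with their firing mode.
SELECT t.tgname, t.tgenabled, p.proname AS trigger_function
FROM pg_trigger t
JOIN pg_proc p ON p.oid = t.tgfoid
WHERE t.tgrelid = (SELECT oid FROM pg_class WHERE relname = 'orders');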
+ +
+ diff --git a/docs/dws/dev/dws_04_0624.html b/docs/dws/dev/dws_04_0624.html new file mode 100644 index 00000000..131504a7 --- /dev/null +++ b/docs/dws/dev/dws_04_0624.html @@ -0,0 +1,80 @@ + + +

PG_TS_CONFIG

+

PG_TS_CONFIG records entries representing text search configurations. A configuration specifies a particular text search parser and a list of dictionaries to use for each of the parser's output token types.

+

The parser is shown in the PG_TS_CONFIG entry, but the token-to-dictionary mapping is defined by subsidiary entries in PG_TS_CONFIG_MAP.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TS_CONFIG columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

cfgname

+

name

+

-

+

Text search configuration name

+

cfgnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace where the configuration resides

+

cfgowner

+

oid

+

PG_AUTHID.oid

+

Owner of the configuration

+

cfgparser

+

oid

+

PG_TS_PARSER.oid

+

OID of the text search parser for this configuration

+

cfoptions

+

text[]

+

-

+

Configuration options

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0625.html b/docs/dws/dev/dws_04_0625.html new file mode 100644 index 00000000..7eebc0d4 --- /dev/null +++ b/docs/dws/dev/dws_04_0625.html @@ -0,0 +1,61 @@ + + +

PG_TS_CONFIG_MAP

+

PG_TS_CONFIG_MAP records entries showing which text search dictionaries should be consulted, and in what order, for each output token type of each text search configuration's parser.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TS_CONFIG_MAP columns

Name

+

Type

+

Reference

+

Description

+

mapcfg

+

oid

+

PG_TS_CONFIG.oid

+

OID of the PG_TS_CONFIG entry owning this map entry

+

maptokentype

+

integer

+

-

+

A token type emitted by the configuration's parser

+

mapseqno

+

integer

+

-

+

Order in which to consult this entry

+

mapdict

+

oid

+

PG_TS_DICT.oid

+

OID of the text search dictionary to consult

+
+
+
+
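For example, the consultation order for one configuration can be reconstructed by joining PG_TS_CONFIG_MAP to PG_TS_CONFIG and PG_TS_DICT. A minimal sketch for the built-in english configuration:

-- Dictionary lookup order per token type of the "english" configuration.
SELECT m.maptokentype, m.mapseqno, d.dictname
FROM pg_ts_config_map m
JOIN pg_ts_config cfg ON cfg.oid = m.mapcfg
JOIN pg_ts_dict d ON d.oid = m.mapdict
WHERE cfg.cfgname = 'english'
ORDER BY m.maptokentype, m.mapseqno;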
+ +
+ diff --git a/docs/dws/dev/dws_04_0626.html b/docs/dws/dev/dws_04_0626.html new file mode 100644 index 00000000..35445ac6 --- /dev/null +++ b/docs/dws/dev/dws_04_0626.html @@ -0,0 +1,80 @@ + + +

PG_TS_DICT

+

PG_TS_DICT records entries that define text search dictionaries. A dictionary depends on a text search template, which specifies all the implementation functions needed. The dictionary itself provides values for the user-settable parameters supported by the template.

+

This division of labor allows dictionaries to be created by unprivileged users. The parameters are specified by a text string dictinitoption, whose format and meaning vary depending on the template.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TS_DICT columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

dictname

+

name

+

-

+

Text search dictionary name

+

dictnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace that contains the dictionary

+

dictowner

+

oid

+

PG_AUTHID.oid

+

Owner of the dictionary

+

dicttemplate

+

oid

+

PG_TS_TEMPLATE.oid

+

OID of the text search template for this dictionary

+

dictinitoption

+

text

+

-

+

Initialization option string for the template

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0627.html b/docs/dws/dev/dws_04_0627.html new file mode 100644 index 00000000..c07d7e23 --- /dev/null +++ b/docs/dws/dev/dws_04_0627.html @@ -0,0 +1,97 @@ + + +

PG_TS_PARSER

+

PG_TS_PARSER records entries defining text search parsers. A parser splits input text into lexemes and assigns a token type to each lexeme. Since a parser must be implemented by C functions, parsers can be created only by database administrators.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TS_PARSER columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

prsname

+

name

+

-

+

Text search parser name

+

prsnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace that contains the parser

+

prsstart

+

regproc

+

PG_PROC.oid

+

OID of the parser's startup function

+

prstoken

+

regproc

+

PG_PROC.oid

+

OID of the parser's next-token function

+

prsend

+

regproc

+

PG_PROC.oid

+

OID of the parser's shutdown function

+

prsheadline

+

regproc

+

PG_PROC.oid

+

OID of the parser's headline function

+

prslextype

+

regproc

+

PG_PROC.oid

+

OID of the parser's lextype function

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0628.html b/docs/dws/dev/dws_04_0628.html new file mode 100644 index 00000000..b90ddd5a --- /dev/null +++ b/docs/dws/dev/dws_04_0628.html @@ -0,0 +1,70 @@ + + +

PG_TS_TEMPLATE

+

PG_TS_TEMPLATE records entries defining text search templates. A template provides a framework for text search dictionaries. Since a template must be implemented by C functions, templates can be created only by database administrators.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TS_TEMPLATE columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

tmplname

+

name

+

-

+

Text search template name

+

tmplnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace that contains the template

+

tmplinit

+

regproc

+

PG_PROC.oid

+

OID of the template's initialization function

+

tmpllexize

+

regproc

+

PG_PROC.oid

+

OID of the template's lexize function

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0629.html b/docs/dws/dev/dws_04_0629.html new file mode 100644 index 00000000..096ffed1 --- /dev/null +++ b/docs/dws/dev/dws_04_0629.html @@ -0,0 +1,241 @@ + + +

PG_TYPE

+

PG_TYPE records the information about data types.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TYPE columns

Name

+

Type

+

Description

+

typname

+

name

+

Data type name

+

typnamespace

+

oid

+

OID of the namespace that contains this type

+

typowner

+

oid

+

Owner of this type

+

typlen

+

smallint

+

Number of bytes in the internal representation of the type for a fixed-size type. But for a variable-length type, typlen is negative.

+
  • -1 indicates a "varlena" type (one that has a length word).
  • -2 indicates a null-terminated C string.
+

typbyval

+

boolean

+

Whether values of this type are passed by value or by reference. typbyval must be false if typlen is not 1, 2, 4, or 8, because values of such types are always passed by reference. typbyval can be false even if the length would allow pass-by-value.

+

typtype

+

char

+
  • b indicates a basic type.
  • c indicates a composite type, for example, a table's row type.
  • e indicates an enumeration type.
  • p indicates a pseudo type.
+

For details, see typrelid and typbasetype.

+

typcategory

+

char

+

typcategory is an arbitrary classification of data types that is used by the parser to determine which implicit casts should be "preferred".

+

typispreferred

+

boolean

+

Whether the type is a preferred cast target within its typcategory. If true, implicit casts to this type are preferred when the conversion rules specified by typcategory are met.

+

typisdefined

+

boolean

+

The value is true if the type is defined. The value is false if this is a placeholder entry for a not-yet-defined type. When it is false, the type name, namespace, and OID are the only reliable fields.

+

typdelim

+

char

+

Character that separates two values of this type when parsing array input. Note that the delimiter is associated with the array element data type, not the array data type.

+

typrelid

+

oid

+

If this is a composite type (see typtype), then this column points to the pg_class entry that defines the corresponding table. For a free-standing composite type, the pg_class entry does not represent a table, but it is required for the type's pg_attribute entries to link to. The value is 0 for non-composite types.

+

typelem

+

oid

+

If typelem is not 0, it identifies another row in pg_type. The current type can then be subscripted like an array yielding values of type typelem. A "true" array type is variable length (typlen = -1), but some fixed-length (typlen > 0) types also have nonzero typelem, for example name and point. If a fixed-length type has a typelem, its internal representation must be some number of values of the typelem data type with no other data. Variable-length array types have a header defined by the array subroutines.

+

typarray

+

oid

+

If the value is not 0, it identifies another row in pg_type: the array type that has this type as its element.

+

typinput

+

regproc

+

Input conversion function (text format)

+

typoutput

+

regproc

+

Output conversion function (text format)

+

typreceive

+

regproc

+

Input conversion function (binary format). The value is 0 if there is no such function.

+

typsend

+

regproc

+

Output conversion function (binary format). The value is 0 if there is no such function.

+

typmodin

+

regproc

+

Type modifier input function. The value is 0 if the type does not support modifiers.

+

typmodout

+

regproc

+

Type modifier output function. The value is 0 if the type does not support modifiers.

+

typanalyze

+

regproc

+

Custom ANALYZE function. The value is 0 if the standard function is used.

+

typalign

+

char

+

Alignment required when storing a value of this type. It applies to storage on disk as well as most representations of the value inside PostgreSQL. When multiple values are stored consecutively, such as in the representation of a complete row on disk, padding is inserted before a data of this type so that it begins on the specified boundary. The alignment reference is the beginning of the first datum in the sequence. Possible values are:

+
  • c: char alignment, that is, no alignment needed
  • s: short alignment (2 bytes on most machines)
  • i: int alignment (4 bytes on most machines).
  • d: double alignment (8 bytes on many machines, but by no means all)
+
NOTICE:

For types used in system catalogs, the size and alignment defined in pg_type must agree with the way that the compiler lays out the column in a structure representing a table row.

+
+

typstorage

+

char

+

typstorage tells for varlena types (those with typlen = -1) if the type is prepared for toasting and what the default strategy for attributes of this type should be. Possible values are:

+
  • p indicates that values are always stored plain.
  • e: Value can be stored in a "secondary" relationship (if the relation has one, see pg_class.reltoastrelid).
  • m: Values can be stored compressed inline.
  • x: Values can be stored compressed inline or stored in secondary storage.
+
NOTICE:

m domains can also be moved out to secondary storage, but only as a last resort (e and x domains are moved first).

+
+

typnotnull

+

boolean

+

Represents a NOT NULL constraint on a type. Currently, it is used for domains only.

+

typbasetype

+

oid

+

If this is a domain (see typtype), then typbasetype identifies the type that this one is based on. The value is 0 if this type is not a derived type.

+

typtypmod

+

integer

+

Domains use typtypmod to record the typmod to be applied to their base type (-1 if the base type does not use typmod). The value is -1 if this type is not a domain.

+

typndims

+

integer

+

Number of array dimensions for a domain that is an array (that is, typbasetype is an array type; the domain's typelem matches the base type's typelem). The value is 0 for types other than domains over array types.

+

typcollation

+

oid

+

Collation of the type. The value is 0 if the type does not support collation.

+

typdefaultbin

+

pg_node_tree

+

nodeToString() representation of a default expression for the type if the value is non-null. Currently, this column is only used for domains.

+

typdefault

+

text

+

The value is null if a type has no associated default value. If typdefaultbin is not null, typdefault must contain a human-readable version of the default expression represented by typdefaultbin. If typdefaultbin is null and typdefault is not, then typdefault is the external representation of the type's default value, which can be fed to the type's input converter to produce a constant.

+

typacl

+

aclitem[]

+

Access permissions

+
+
+
+
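Examples

A sample query (a sketch; output may vary by version) that shows the length, alignment, and storage strategy of a few built-in types, using the typlen, typalign, and typstorage columns described above:

 -- 'int4', 'name', and 'text' are ordinary built-in types, used here only for illustration.
 select typname, typlen, typalign, typstorage
 from pg_type
 where typname in ('int4', 'name', 'text');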
+ +
+ diff --git a/docs/dws/dev/dws_04_0630.html b/docs/dws/dev/dws_04_0630.html new file mode 100644 index 00000000..bcf85772 --- /dev/null +++ b/docs/dws/dev/dws_04_0630.html @@ -0,0 +1,62 @@ + + +

PG_USER_MAPPING

+

PG_USER_MAPPING records the mappings from local users to remote users.

+

It is accessible only to users with system administrator rights. Common users can query the PG_USER_MAPPINGS view instead.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_USER_MAPPING columns

Name

+

Type

+

Reference

+

Description

+

oid

+

oid

+

-

+

Row identifier (hidden attribute; must be explicitly selected)

+

umuser

+

oid

+

PG_AUTHID.oid

+

OID of the local role being mapped, 0 if the user mapping is public

+

umserver

+

oid

+

PG_FOREIGN_SERVER.oid

+

OID of the foreign server that contains this mapping

+

umoptions

+

text[]

+

-

+

Option used for user mapping. It is a keyword=value string.

+
+
+
+
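Examples

A minimal sketch of querying this catalog as an administrator (output depends on the foreign servers and user mappings defined in your cluster):

 -- umoptions stores the mapping options as keyword=value strings.
 select umuser, umserver, umoptions from pg_user_mapping;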
+ +
+ diff --git a/docs/dws/dev/dws_04_0631.html b/docs/dws/dev/dws_04_0631.html new file mode 100644 index 00000000..8f09fec1 --- /dev/null +++ b/docs/dws/dev/dws_04_0631.html @@ -0,0 +1,66 @@ + + +

PG_USER_STATUS

+

PG_USER_STATUS records the states of users that access the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_USER_STATUS columns

Name

+

Type

+

Description

+

roloid

+

oid

+

ID of the role

+

failcount

+

integer

+

Specifies the number of failed attempts.

+

locktime

+

timestamp with time zone

+

Time at which the role is locked

+

rolstatus

+

smallint

+

Role state

+
  • 0: normal
  • 1: locked for some time because the number of failed login attempts exceeds the threshold
  • 2: locked by the administrator
+

permspace

+

bigint

+

Size of the permanent table storage space used by a role in the current instance.

+

tempspace

+

bigint

+

Size of the temporary table storage space used by a role in the current instance.

+
+
+
+
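Examples

A sample query (requires system administrator rights) that lists roles in an abnormal state, based on the rolstatus values described above:

 -- rolstatus 1: locked after too many failed logins; 2: locked by the administrator.
 select roloid, failcount, locktime, rolstatus
 from pg_user_status
 where rolstatus <> 0;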
+ +
+ diff --git a/docs/dws/dev/dws_04_0632.html b/docs/dws/dev/dws_04_0632.html new file mode 100644 index 00000000..966c57e7 --- /dev/null +++ b/docs/dws/dev/dws_04_0632.html @@ -0,0 +1,51 @@ + + +

PG_WORKLOAD_ACTION

+

PG_WORKLOAD_ACTION records information about query_band.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_WORKLOAD_ACTION columns

Name

+

Type

+

Description

+

qband

+

name

+

query_band key-value pairs

+

class

+

name

+

Class of the object associated with query_band

+

object

+

name

+

Object associated with query_band

+

action

+

name

+

Action of the object associated with query_band

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0633.html b/docs/dws/dev/dws_04_0633.html new file mode 100644 index 00000000..900b8250 --- /dev/null +++ b/docs/dws/dev/dws_04_0633.html @@ -0,0 +1,94 @@ + + +

PGXC_CLASS

+

PGXC_CLASS records the replicated or distributed information for each table.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_CLASS columns

Name

+

Type

+

Description

+

pcrelid

+

oid

+

Table OID

+

pclocatortype

+

"char"

+

Locator type

+
  • H: hash
  • M: modulo
  • N: round robin
  • R: replication
+

pchashalgorithm

+

smallint

+

Hash algorithm used to distribute tuples

+

pchashbuckets

+

smallint

+

Number of hash buckets

+

pgroup

+

name

+

Name of the node group

+

redistributed

+

"char"

+

Whether the table has been redistributed

+

redis_order

+

integer

+

Redistribution sequence

+

pcattnum

+

int2vector

+

Column number used as a distribution key

+

nodeoids

+

oidvector_extend

+

List of distributed table node OIDs

+

options

+

text

+

Extension status information. This is a reserved column in the system.

+
+
+
+
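Examples

A sample query that shows the locator type and node group of each distributed table (the regclass cast resolves pcrelid to a table name; output depends on your schema):

 select pcrelid::regclass as table_name, pclocatortype, pgroup
 from pgxc_class;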
+ +
+ diff --git a/docs/dws/dev/dws_04_0634.html b/docs/dws/dev/dws_04_0634.html new file mode 100644 index 00000000..b30aebe9 --- /dev/null +++ b/docs/dws/dev/dws_04_0634.html @@ -0,0 +1,74 @@ + + +

PGXC_GROUP

+

PGXC_GROUP records information about node groups.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_GROUP columns

Name

+

Type

+

Description

+

group_name

+

name

+

Name of the node group

+

in_redistribution

+

"char"

+

Whether redistribution is required

+
  • n indicates that the Node Group is not in redistribution.
  • y indicates that the Node Group is the source Node Group in redistribution.
  • t indicates that the Node Group is the destination Node Group in redistribution.
+

group_members

+

oidvector_extend

+

Node OID list of the node group

+

group_buckets

+

text

+

Distributed data bucket group

+

is_installation

+

boolean

+

Whether to install a sub-cluster

+

group_acl

+

aclitem[]

+

Access permissions

+

group_kind

+

"char"

+

Node Group type

+
  • i indicates an installation Node Group.
  • n indicates a Node Group in a common, non-logical cluster.
  • v indicates a Node Group in a logical cluster.
  • e indicates an elastic cluster.
+
+
+
+
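Examples

A sample query of the node groups in a cluster and their redistribution states, using the group_kind values described above:

 select group_name, in_redistribution, is_installation, group_kind
 from pgxc_group;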
+ +
+ diff --git a/docs/dws/dev/dws_04_0635.html b/docs/dws/dev/dws_04_0635.html new file mode 100644 index 00000000..482f5050 --- /dev/null +++ b/docs/dws/dev/dws_04_0635.html @@ -0,0 +1,150 @@ + + +

PGXC_NODE

+

PGXC_NODE records information about cluster nodes.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_NODE columns

Name

+

Type

+

Description

+

node_name

+

name

+

Node name

+

node_type

+

"char"

+

Node type

+

C: CN

+

D: DN

+

node_port

+

integer

+

Port ID of the node

+

node_host

+

name

+

Host name or IP address of a node. (If a virtual IP address is configured, its value is a virtual IP address.)

+

node_port1

+

integer

+

Port number of a replication node

+

node_host1

+

name

+

Host name or IP address of a replication node. (If a virtual IP address is configured, its value is a virtual IP address.)

+

hostis_primary

+

boolean

+

Whether a switchover occurs between the primary and the standby server on the current node

+

nodeis_primary

+

boolean

+

Whether the current node is preferred to execute non-query operations in the replication table

+

nodeis_preferred

+

boolean

+

Whether the current node is preferred to execute queries in the replication table

+

node_id

+

integer

+

Node identifier

+

sctp_port

+

integer

+

Specifies the port used by the TCP proxy communication library or SCTP communication library of the primary node to listen to the data channel.

+

control_port

+

integer

+

Specifies the port used by the TCP proxy communication library or SCTP communication library of the primary node to listen to the control channel.

+

sctp_port1

+

integer

+

Specifies the port used by the TCP proxy communication library or SCTP communication library of the standby node to listen to the data channel.

+

control_port1

+

integer

+

Specifies the port used by the TCP proxy communication library or SCTP communication library of the standby node to listen to the control channel.

+

nodeis_central

+

boolean

+

Whether the current node is the central node

+
+
+

Examples

Query the CN and DN information of the cluster:

+
 select * from pgxc_node;
+  node_name   | node_type | node_port |   node_host    | node_port1 |   node_host1   | hostis_primary | nodeis_primary | nodeis_preferred
+ |   node_id   | sctp_port | control_port | sctp_port1 | control_port1 | nodeis_central
+--------------+-----------+-----------+----------------+------------+----------------+----------------+----------------+-----------------
+-+-------------+-----------+--------------+------------+---------------+----------------
+ dn_6001_6002 | D         |     40000 | 172.**.***.**1 |      45000 | 172.**.**.**2 | t              | f              | f
+ |  1644780306 |     40002 |        40003 |      45002 |         45003 | f
+ dn_6003_6004 | D         |     40000 | 172.**.**.**2  |      45000 | 172.**.**.**3  | t              | f              | f
+ |  -966646068 |     40002 |        40003 |      45002 |         45003 | f
+ dn_6005_6006 | D         |     40000 | 172.**.**.**3  |      45000 | 172.**.***.**1 | t              | f              | f
+ |   868850011 |     40002 |        40003 |      45002 |         45003 | f
+ cn_5001      | C         |      8000 | 172.**.***.**1 |       8000 | 172.**.***.**1 | t              | f              | f
+ |  1120683504 |      8002 |         8003 |          0 |             0 | f
+ cn_5002      | C         |      8000 | 172.**.**.**2  |       8000 | 172.**.**.**2  | t              | f              | f
+ | -1736975100 |      8002 |         8003 |          0 |             0 | f
+ cn_5003      | C         |      8000 | localhost      |       8000 | localhost      | t              | f              | f
+ |  -125853378 |      8002 |         8003 |          0 |             0 | t
+(6 rows)
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0639.html b/docs/dws/dev/dws_04_0639.html new file mode 100644 index 00000000..26030778 --- /dev/null +++ b/docs/dws/dev/dws_04_0639.html @@ -0,0 +1,484 @@ + + +

System Views

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0640.html b/docs/dws/dev/dws_04_0640.html new file mode 100644 index 00000000..58399fe5 --- /dev/null +++ b/docs/dws/dev/dws_04_0640.html @@ -0,0 +1,44 @@ + + +

ALL_ALL_TABLES

+

ALL_ALL_TABLES displays the tables or views accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 ALL_ALL_TABLES columns

Name

+

Type

+

Description

+

owner

+

name

+

Owner of the table or the view

+

table_name

+

name

+

Name of the table or the view

+

tablespace_name

+

name

+

Tablespace where the table or view is located

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0641.html b/docs/dws/dev/dws_04_0641.html new file mode 100644 index 00000000..604d89de --- /dev/null +++ b/docs/dws/dev/dws_04_0641.html @@ -0,0 +1,59 @@ + + +

ALL_CONSTRAINTS

+

ALL_CONSTRAINTS displays information about constraints accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_CONSTRAINTS columns

Name

+

Type

+

Description

+

constraint_name

+

character varying(64)

+

Constraint name

+

constraint_type

+

text

+

Constraint type

+
  • C: check constraint
  • F: foreign key constraint
  • P: primary key constraint
  • U: unique constraint
+

table_name

+

character varying(64)

+

Name of constraint-related table

+

index_owner

+

character varying(64)

+

Owner of constraint-related index (only for the unique constraint and primary key constraint)

+

index_name

+

character varying(64)

+

Name of constraint-related index (only for the unique constraint and primary key constraint)

+
+
+
+
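Examples

A sample query that lists the constraints on a single table ('my_table' is a placeholder; unquoted object names are stored in lowercase):

 select constraint_name, constraint_type, index_name
 from all_constraints
 where table_name = 'my_table';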
+ +
+ diff --git a/docs/dws/dev/dws_04_0642.html b/docs/dws/dev/dws_04_0642.html new file mode 100644 index 00000000..c16de3c7 --- /dev/null +++ b/docs/dws/dev/dws_04_0642.html @@ -0,0 +1,51 @@ + + +

ALL_CONS_COLUMNS

+

ALL_CONS_COLUMNS displays information about constraint columns accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_CONS_COLUMNS columns

Name

+

Type

+

Description

+

table_name

+

character varying(64)

+

Name of constraint-related table

+

column_name

+

character varying(64)

+

Name of constraint-related column

+

constraint_name

+

character varying(64)

+

Constraint name

+

position

+

smallint

+

Position of the column in the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0643.html b/docs/dws/dev/dws_04_0643.html new file mode 100644 index 00000000..b2b841cb --- /dev/null +++ b/docs/dws/dev/dws_04_0643.html @@ -0,0 +1,51 @@ + + +

ALL_COL_COMMENTS

+

ALL_COL_COMMENTS displays the comment information about table columns accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_COL_COMMENTS columns

Name

+

Type

+

Description

+

column_name

+

character varying(64)

+

Column name

+

table_name

+

character varying(64)

+

Table name

+

owner

+

character varying(64)

+

Table owner

+

comments

+

text

+

Comments

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0644.html b/docs/dws/dev/dws_04_0644.html new file mode 100644 index 00000000..86e1434a --- /dev/null +++ b/docs/dws/dev/dws_04_0644.html @@ -0,0 +1,88 @@ + + +

ALL_DEPENDENCIES

+

ALL_DEPENDENCIES displays dependencies between functions and advanced packages accessible to the current user.

+

Currently, in GaussDB(DWS), this view is empty due to information constraints.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_DEPENDENCIES columns

Name

+

Type

+

Description

+

owner

+

character varying(30)

+

Owner of the object

+

name

+

character varying(30)

+

Object name

+

type

+

character varying(17)

+

Type of the object

+

referenced_owner

+

character varying(30)

+

Owner of the referenced object

+

referenced_name

+

character varying(64)

+

Name of the referenced object

+

referenced_type

+

character varying(17)

+

Type of the referenced object

+

referenced_link_name

+

character varying(128)

+

Name of the link to the referenced object

+

schemaid

+

numeric

+

ID of the current schema

+

dependency_type

+

character varying(4)

+

Dependency type (REF or HARD)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0645.html b/docs/dws/dev/dws_04_0645.html new file mode 100644 index 00000000..cd952967 --- /dev/null +++ b/docs/dws/dev/dws_04_0645.html @@ -0,0 +1,65 @@ + + +

ALL_IND_COLUMNS

+

ALL_IND_COLUMNS displays all index columns accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_IND_COLUMNS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_name

+

name

+

Column name

+

column_position

+

smallint

+

Position of column in the index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0646.html b/docs/dws/dev/dws_04_0646.html new file mode 100644 index 00000000..13c72676 --- /dev/null +++ b/docs/dws/dev/dws_04_0646.html @@ -0,0 +1,65 @@ + + +

ALL_IND_EXPRESSIONS

+

ALL_IND_EXPRESSIONS displays information about the expression indexes accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_IND_EXPRESSIONS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_expression

+

text

+

Function-based index expression of a specified column

+

column_position

+

smallint

+

Position of a column in the index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0647.html b/docs/dws/dev/dws_04_0647.html new file mode 100644 index 00000000..cf1223c6 --- /dev/null +++ b/docs/dws/dev/dws_04_0647.html @@ -0,0 +1,65 @@ + + +

ALL_INDEXES

+

ALL_INDEXES displays information about indexes accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_INDEXES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_name

+

character varying(64)

+

Name of the table corresponding to the index.

+

uniqueness

+

text

+

Whether the index is a unique index

+

generated

+

character varying(1)

+

Whether the index name is generated by the system

+

partitioned

+

character(3)

+

Whether the index has the property of the partition table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0648.html b/docs/dws/dev/dws_04_0648.html new file mode 100644 index 00000000..edc939c6 --- /dev/null +++ b/docs/dws/dev/dws_04_0648.html @@ -0,0 +1,74 @@ + + +

ALL_OBJECTS

+

ALL_OBJECTS displays all database objects accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_OBJECTS columns

Name

+

Type

+

Description

+

owner

+

name

+

Owner of the object

+

object_name

+

name

+

Object name

+

object_id

+

oid

+

OID of the object

+

object_type

+

name

+

Type of the object

+

namespace

+

oid

+

ID of the namespace where the object resides

+

created

+

timestamp with time zone

+

Object creation time

+

last_ddl_time

+

timestamp with time zone

+

The last time when an object was modified.

+
+
+

For details about the value ranges of created and last_ddl_time, see PG_OBJECT.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0649.html b/docs/dws/dev/dws_04_0649.html new file mode 100644 index 00000000..046540ad --- /dev/null +++ b/docs/dws/dev/dws_04_0649.html @@ -0,0 +1,37 @@ + + +

ALL_PROCEDURES

+

ALL_PROCEDURES displays information about all stored procedures or functions accessible to the current user.

+ +
+ + + + + + + + + + + + + +
Table 1 ALL_PROCEDURES columns

Name

+

Type

+

Description

+

owner

+

name

+

Owner of the object

+

object_name

+

name

+

Object name

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0650.html b/docs/dws/dev/dws_04_0650.html new file mode 100644 index 00000000..05b2fccd --- /dev/null +++ b/docs/dws/dev/dws_04_0650.html @@ -0,0 +1,66 @@ + + +

ALL_SEQUENCES

+

ALL_SEQUENCES displays all sequences accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_SEQUENCES columns

Name

+

Type

+

Description

+

sequence_owner

+

name

+

Owner of the sequence

+

sequence_name

+

name

+

Name of the sequence

+

min_value

+

bigint

+

Minimum value of the sequence

+

max_value

+

bigint

+

Maximum value of the sequence

+

increment_by

+

bigint

+

Value by which the sequence is incremented

+

cycle_flag

+

character(1)

+

Whether the sequence is a cycle sequence. The value can be Y or N.

+
  • Y indicates that it is a cycle sequence.
  • N indicates that it is not a cycle sequence.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0651.html b/docs/dws/dev/dws_04_0651.html new file mode 100644 index 00000000..261ac95a --- /dev/null +++ b/docs/dws/dev/dws_04_0651.html @@ -0,0 +1,51 @@ + + +

ALL_SOURCE

+

ALL_SOURCE displays information about stored procedures or functions accessible to the current user, and provides the columns defined by the stored procedures and functions.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_SOURCE columns

Name

+

Type

+

Description

+

owner

+

name

+

Owner of the object

+

name

+

name

+

Object name

+

type

+

name

+

Type of the object

+

text

+

text

+

Definition of the object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0652.html b/docs/dws/dev/dws_04_0652.html new file mode 100644 index 00000000..79dafa56 --- /dev/null +++ b/docs/dws/dev/dws_04_0652.html @@ -0,0 +1,65 @@ + + +

ALL_SYNONYMS

+

ALL_SYNONYMS displays all synonyms accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_SYNONYMS columns

Name

+

Type

+

Description

+

owner

+

text

+

Owner of a synonym.

+

schema_name

+

text

+

Name of the schema to which the synonym belongs.

+

synonym_name

+

text

+

Synonym name.

+

table_owner

+

text

+

Owner of the associated object.

+

table_schema_name

+

text

+

Schema name of the associated object.

+

table_name

+

text

+

Name of the associated object.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0653.html b/docs/dws/dev/dws_04_0653.html new file mode 100644 index 00000000..af5ed6f3 --- /dev/null +++ b/docs/dws/dev/dws_04_0653.html @@ -0,0 +1,100 @@ + + +

ALL_TAB_COLUMNS

+

ALL_TAB_COLUMNS displays description information about columns of the tables accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_TAB_COLUMNS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the table

+

table_name

+

character varying(64)

+

Table name

+

column_name

+

character varying(64)

+

Column name

+

data_type

+

character varying(128)

+

Data type of the column

+

column_id

+

integer

+

Column ID generated when the object is created or column is added

+

data_length

+

integer

+

Length of the column in the unit of bytes

+

avg_col_len

+

numeric

+

Average length of a column in the unit of bytes

+

nullable

+

bpchar

+

Whether the column can be empty. For the primary key constraint and non-null constraint, the value is n.

+

data_precision

+

integer

+

Indicates the precision of the data type. This parameter is valid for the numeric data type and NULL for other types.

+

data_scale

+

integer

+

Number of decimal places. This parameter is valid for the numeric data type. For other data types, the value of this parameter is 0.

+

char_length

+

numeric

+

Column length in the unit of bytes. This parameter is valid only for the varchar, nvarchar2, bpchar, and char types.

+
+
+
+
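Examples

A sample query that describes the columns of a single table in creation order ('my_table' is a placeholder name):

 select column_name, data_type, data_length, nullable
 from all_tab_columns
 where table_name = 'my_table'
 order by column_id;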
+ +
+ diff --git a/docs/dws/dev/dws_04_0654.html b/docs/dws/dev/dws_04_0654.html new file mode 100644 index 00000000..54a857b2 --- /dev/null +++ b/docs/dws/dev/dws_04_0654.html @@ -0,0 +1,44 @@ + + +

ALL_TAB_COMMENTS

+

ALL_TAB_COMMENTS displays comments about all tables and views accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 ALL_TAB_COMMENTS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the table or the view

+

table_name

+

character varying(64)

+

Name of the table or the view

+

comments

+

text

+

Comments

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0655.html b/docs/dws/dev/dws_04_0655.html new file mode 100644 index 00000000..1a87fca5 --- /dev/null +++ b/docs/dws/dev/dws_04_0655.html @@ -0,0 +1,74 @@ + + +

ALL_TABLES

+

ALL_TABLES displays all the tables accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_TABLES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

tablespace_name

+

character varying(64)

+

Name of the tablespace that contains the table

+

status

+

character varying(8)

+

Whether the current record is valid

+

temporary

+

character(1)

+

Whether the table is a temporary table

+
  • Y indicates that it is a temporary table.
  • N indicates that it is not a temporary table.
+

dropped

+

character varying

+

Whether the current record is deleted

+
  • YES indicates that it is deleted.
  • NO indicates that it is not deleted.
+

num_rows

+

numeric

+

The estimated number of rows in the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0656.html b/docs/dws/dev/dws_04_0656.html new file mode 100644 index 00000000..5e32c4f0 --- /dev/null +++ b/docs/dws/dev/dws_04_0656.html @@ -0,0 +1,37 @@ + + +

ALL_USERS

+

ALL_USERS displays all users of the database visible to the current user. However, it does not describe the users.

+ +
+ + + + + + + + + + + + + +
Table 1 ALL_USERS columns

Name

+

Type

+

Description

+

username

+

name

+

User name

+

user_id

+

oid

+

OID of the user

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0657.html b/docs/dws/dev/dws_04_0657.html new file mode 100644 index 00000000..298fb6ee --- /dev/null +++ b/docs/dws/dev/dws_04_0657.html @@ -0,0 +1,51 @@ + + +

ALL_VIEWS

+

ALL_VIEWS displays the description about all views accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 ALL_VIEWS columns

Name

+

Type

+

Description

+

owner

+

name

+

Owner of the view

+

view_name

+

name

+

Name of the view

+

text_length

+

integer

+

Text length of the view

+

text

+

text

+

Text in the view

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0658.html b/docs/dws/dev/dws_04_0658.html new file mode 100644 index 00000000..9905bf46 --- /dev/null +++ b/docs/dws/dev/dws_04_0658.html @@ -0,0 +1,37 @@ + + +

DBA_DATA_FILES

+

DBA_DATA_FILES displays the description of database files. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + +
Table 1 DBA_DATA_FILES columns

Name

+

Type

+

Description

+

tablespace_name

+

name

+

Name of the tablespace to which the file belongs

+

bytes

+

double precision

+

Length of the file in bytes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0659.html b/docs/dws/dev/dws_04_0659.html new file mode 100644 index 00000000..36c8a297 --- /dev/null +++ b/docs/dws/dev/dws_04_0659.html @@ -0,0 +1,30 @@ + + +

DBA_USERS

+

DBA_USERS displays all user names in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + +
Table 1 DBA_USERS columns

Name

+

Type

+

Description

+

username

+

character varying(64)

+

User name

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0660.html b/docs/dws/dev/dws_04_0660.html new file mode 100644 index 00000000..26315a5e --- /dev/null +++ b/docs/dws/dev/dws_04_0660.html @@ -0,0 +1,51 @@ + + +

DBA_COL_COMMENTS

+

DBA_COL_COMMENTS displays information about table column comments in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_COL_COMMENTS columns

Name

+

Type

+

Description

+

column_name

+

character varying(64)

+

Column name

+

table_name

+

character varying(64)

+

Table name

+

owner

+

character varying(64)

+

Table owner

+

comments

+

text

+

Comments

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0661.html b/docs/dws/dev/dws_04_0661.html new file mode 100644 index 00000000..90ff069e --- /dev/null +++ b/docs/dws/dev/dws_04_0661.html @@ -0,0 +1,59 @@ + + +

DBA_CONSTRAINTS

+

DBA_CONSTRAINTS displays information about table constraints in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_CONSTRAINTS columns

Name

+

Type

+

Description

+

constraint_name

+

character varying(64)

+

Constraint name

+

constraint_type

+

text

+

Constraint type

+
  • C: check constraint
  • F: foreign key constraint
  • P: primary key constraint
  • U: unique constraint
+

table_name

+

character varying(64)

+

Name of constraint-related table

+

index_owner

+

character varying(64)

+

Owner of constraint-related index (only for the unique constraint and primary key constraint)

+

index_name

+

character varying(64)

+

Name of constraint-related index (only for the unique constraint and primary key constraint)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0662.html b/docs/dws/dev/dws_04_0662.html new file mode 100644 index 00000000..32fc582c --- /dev/null +++ b/docs/dws/dev/dws_04_0662.html @@ -0,0 +1,51 @@ + + +

DBA_CONS_COLUMNS

+

DBA_CONS_COLUMNS displays information about constraint columns in database tables. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_CONS_COLUMNS columns

Name

+

Type

+

Description

+

table_name

+

character varying(64)

+

Name of constraint-related table

+

column_name

+

character varying(64)

+

Name of constraint-related column

+

constraint_name

+

character varying(64)

+

Constraint name

+

position

+

smallint

+

Position of the column in the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0663.html b/docs/dws/dev/dws_04_0663.html new file mode 100644 index 00000000..6179f011 --- /dev/null +++ b/docs/dws/dev/dws_04_0663.html @@ -0,0 +1,65 @@ + + +

DBA_IND_COLUMNS

+

DBA_IND_COLUMNS displays column information about all indexes in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_IND_COLUMNS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_name

+

name

+

Column name

+

column_position

+

smallint

+

Position of column in the index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0664.html b/docs/dws/dev/dws_04_0664.html new file mode 100644 index 00000000..a50d0fa8 --- /dev/null +++ b/docs/dws/dev/dws_04_0664.html @@ -0,0 +1,65 @@ + + +

DBA_IND_EXPRESSIONS

+

DBA_IND_EXPRESSIONS displays the information about expression indexes in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_IND_EXPRESSIONS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_expression

+

text

+

The function-based index expression of a specified column

+

column_position

+

smallint

+

Position of column in the index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0665.html b/docs/dws/dev/dws_04_0665.html new file mode 100644 index 00000000..70588728 --- /dev/null +++ b/docs/dws/dev/dws_04_0665.html @@ -0,0 +1,72 @@ + + +

DBA_IND_PARTITIONS

+

DBA_IND_PARTITIONS displays information about all index partitions in the database. Each index partition of a partitioned table in the database, if present, has a row of records in DBA_IND_PARTITIONS. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_IND_PARTITIONS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Name of the owner of the partitioned index to which the index partition belongs

+

schema

+

character varying(64)

+

Schema of the partitioned index to which the index partition belongs

+

index_name

+

character varying(64)

+

Index name of the partitioned table to which the index partition belongs

+

partition_name

+

character varying(64)

+

Name of the index partition

+

index_partition_usable

+

boolean

+

Whether the index partition is available

+

high_value

+

text

+

Upper boundary of the partition corresponding to the index partition

+

def_tablespace_name

+

name

+

Tablespace name of the index partition

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0666.html b/docs/dws/dev/dws_04_0666.html new file mode 100644 index 00000000..121b8159 --- /dev/null +++ b/docs/dws/dev/dws_04_0666.html @@ -0,0 +1,65 @@ + + +

DBA_INDEXES

+

DBA_INDEXES displays all indexes in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_INDEXES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the index

+

index_name

+

character varying(64)

+

Index name

+

table_name

+

character varying(64)

+

Name of the table corresponding to the index

+

uniqueness

+

text

+

Whether the index is a unique index

+

generated

+

character varying(1)

+

Whether the index name is generated by the system

+

partitioned

+

character(3)

+

Whether the index has the property of the partition table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0667.html b/docs/dws/dev/dws_04_0667.html new file mode 100644 index 00000000..275c5979 --- /dev/null +++ b/docs/dws/dev/dws_04_0667.html @@ -0,0 +1,75 @@ + + +

DBA_OBJECTS

+

DBA_OBJECTS displays all database objects in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_OBJECTS columns

Name

+

Type

+

Description

+

owner

+

name

+

Owner of the object

+

object_name

+

name

+

Object name

+

object_id

+

oid

+

OID of the object

+

object_type

+

name

+

Type of the object

+

namespace

+

oid

+

Namespace containing the object

+

created

+

timestamp with time zone

+

Object creation time

+

last_ddl_time

+

timestamp with time zone

+

The last time when an object was modified.

+
+
+

For details about the value ranges of created and last_ddl_time, see PG_OBJECT.

+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0668.html b/docs/dws/dev/dws_04_0668.html new file mode 100644 index 00000000..efde4960 --- /dev/null +++ b/docs/dws/dev/dws_04_0668.html @@ -0,0 +1,81 @@ + + +

DBA_PART_INDEXES

+

DBA_PART_INDEXES displays information about all partitioned table indexes in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_PART_INDEXES columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Name of the owner of the partitioned table index

+

schema

+

character varying(64)

+

Schema of the partitioned table index

+

index_name

+

character varying(64)

+

Name of the partitioned table index

+

table_name

+

character varying(64)

+

Name of the partitioned table to which the partitioned table index belongs

+

partitioning_type

+

text

+

Partition policy of the partitioned table

+
NOTE:

Currently, only range partitioning is supported.

+
+

partition_count

+

bigint

+

Number of index partitions of the partitioned table index

+

def_tablespace_name

+

name

+

Tablespace name of the partitioned table index

+

partitioning_key_count

+

integer

+

Number of partition keys of the partitioned table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0669.html b/docs/dws/dev/dws_04_0669.html new file mode 100644 index 00000000..81b09f75 --- /dev/null +++ b/docs/dws/dev/dws_04_0669.html @@ -0,0 +1,74 @@ + + +

DBA_PART_TABLES

+

DBA_PART_TABLES displays information about all partitioned tables in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_PART_TABLES columns

Name

+

Type

+

Description

+

table_owner

+

character varying(64)

+

Name of the owner of the partitioned table

+

schema

+

character varying(64)

+

Schema of the partitioned table

+

table_name

+

character varying(64)

+

Name of the partitioned table

+

partitioning_type

+

text

+

Partition policy of the partitioned table

+
NOTE:

Currently, only range partitioning is supported.

+
+

partition_count

+

bigint

+

Number of partitions of the partitioned table

+

def_tablespace_name

+

name

+

Tablespace name of the partitioned table

+

partitioning_key_count

+

integer

+

Number of partition keys of the partitioned table

+
+
+
+
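Examples

A sample query (requires system administrator rights) that lists the partitioned tables in one schema with their partition counts ('public' is a placeholder schema name):

 select table_name, partitioning_type, partition_count
 from dba_part_tables
 where schema = 'public';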
+ +
+ diff --git a/docs/dws/dev/dws_04_0670.html b/docs/dws/dev/dws_04_0670.html new file mode 100644 index 00000000..e3aa17fd --- /dev/null +++ b/docs/dws/dev/dws_04_0670.html @@ -0,0 +1,44 @@ + + +

DBA_PROCEDURES

+

DBA_PROCEDURES displays information about all stored procedures and functions in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 DBA_PROCEDURES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the stored procedure or the function

+

object_name

+

character varying(64)

+

Name of the stored procedure or the function

+

argument_number

+

smallint

+

Number of the input parameters in the stored procedure

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0671.html b/docs/dws/dev/dws_04_0671.html new file mode 100644 index 00000000..4f2b051c --- /dev/null +++ b/docs/dws/dev/dws_04_0671.html @@ -0,0 +1,37 @@ + + +

DBA_SEQUENCES

+

DBA_SEQUENCES displays information about all sequences in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + +
Table 1 DBA_SEQUENCES columns

Name

+

Type

+

Description

+

sequence_owner

+

character varying(64)

+

Owner of the sequence

+

sequence_name

+

character varying(64)

+

Name of the sequence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0672.html b/docs/dws/dev/dws_04_0672.html new file mode 100644 index 00000000..73825ce1 --- /dev/null +++ b/docs/dws/dev/dws_04_0672.html @@ -0,0 +1,44 @@ + + +

DBA_SOURCE

+

DBA_SOURCE displays all stored procedures or functions in the database, and it provides the columns defined by the stored procedures or functions. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 DBA_SOURCE columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the stored procedure or the function

+

name

+

character varying(64)

+

Name of the stored procedure or the function

+

text

+

text

+

Definition of the stored procedure or the function

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0673.html b/docs/dws/dev/dws_04_0673.html new file mode 100644 index 00000000..dc06d88e --- /dev/null +++ b/docs/dws/dev/dws_04_0673.html @@ -0,0 +1,65 @@ + + +

DBA_SYNONYMS

+

DBA_SYNONYMS displays all synonyms in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_SYNONYMS columns

Name

+

Type

+

Description

+

owner

+

text

+

Owner of a synonym.

+

schema_name

+

text

+

Name of the schema to which the synonym belongs.

+

synonym_name

+

text

+

Synonym name.

+

table_owner

+

text

+

Owner of the associated object.

+

table_schema_name

+

text

+

Schema name of the associated object.

+

table_name

+

text

+

Name of the associated object.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0674.html b/docs/dws/dev/dws_04_0674.html new file mode 100644 index 00000000..f64a1278 --- /dev/null +++ b/docs/dws/dev/dws_04_0674.html @@ -0,0 +1,107 @@ + + +

DBA_TAB_COLUMNS

+

DBA_TAB_COLUMNS displays the columns of tables. Each column of a table in the database has a row in DBA_TAB_COLUMNS. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_TAB_COLUMNS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_name

+

character varying(64)

+

Column name

+

data_type

+

character varying(128)

+

Data type of the column

+

column_id

+

integer

+

Sequence number of the column when the table is created

+

data_length

+

integer

+

Length of the column in the unit of bytes

+

comments

+

text

+

Comments

+

avg_col_len

+

numeric

+

Average length of a column in the unit of bytes

+

nullable

+

bpchar

+

Whether the column can be empty. For the primary key constraint and non-null constraint, the value is n.

+

data_precision

+

integer

+

Indicates the precision of the data type. This parameter is valid for the numeric data type; its value is NULL for other types.

+

data_scale

+

integer

+

Number of decimal places. This parameter is valid for the numeric data type. For other data types, the value of this parameter is 0.

+

char_length

+

numeric

+

Column length (in the unit of bytes) which is valid only for varchar, nvarchar2, bpchar, and char types.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0675.html b/docs/dws/dev/dws_04_0675.html new file mode 100644 index 00000000..fc686125 --- /dev/null +++ b/docs/dws/dev/dws_04_0675.html @@ -0,0 +1,44 @@ + + +

DBA_TAB_COMMENTS

+

DBA_TAB_COMMENTS displays comments about all tables and views in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 DBA_TAB_COMMENTS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the table or the view

+

table_name

+

character varying(64)

+

Name of the table or the view

+

comments

+

text

+

Comments

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0676.html b/docs/dws/dev/dws_04_0676.html new file mode 100644 index 00000000..49dca363 --- /dev/null +++ b/docs/dws/dev/dws_04_0676.html @@ -0,0 +1,65 @@ + + +

DBA_TAB_PARTITIONS

+

DBA_TAB_PARTITIONS displays information about all partitions in the database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_TAB_PARTITIONS columns

Name

+

Type

+

Description

+

table_owner

+

character varying(64)

+

Owner of the table that contains the partition

+

schema

+

character varying(64)

+

Schema of the partitioned table

+

table_name

+

character varying(64)

+

Table name

+

partition_name

+

character varying(64)

+

Name of the partition

+

high_value

+

text

+

Upper boundary of the range partition and interval partition

+

tablespace_name

+

name

+

Name of the tablespace that contains the partition

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0677.html b/docs/dws/dev/dws_04_0677.html new file mode 100644 index 00000000..4bfeae6e --- /dev/null +++ b/docs/dws/dev/dws_04_0677.html @@ -0,0 +1,74 @@ + + +

DBA_TABLES

+

DBA_TABLES displays all tables in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 DBA_TABLES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

tablespace_name

+

character varying(64)

+

Name of the tablespace that contains the table

+

status

+

character varying(8)

+

Whether the current record is valid

+

temporary

+

character(1)

+

Whether the table is a temporary table

+
  • Y indicates that it is a temporary table.
  • N indicates that it is not a temporary table.
+

dropped

+

character varying

+

Whether the current record is deleted

+
  • YES indicates that it is deleted.
  • NO indicates that it is not deleted.
+

num_rows

+

numeric

+

The estimated number of rows in the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0678.html b/docs/dws/dev/dws_04_0678.html new file mode 100644 index 00000000..475d5595 --- /dev/null +++ b/docs/dws/dev/dws_04_0678.html @@ -0,0 +1,30 @@ + + +

DBA_TABLESPACES

+

DBA_TABLESPACES displays information about available tablespaces. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + +
Table 1 DBA_TABLESPACES columns

Name

+

Type

+

Description

+

tablespace_name

+

character varying(64)

+

Name of the tablespace

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0679.html b/docs/dws/dev/dws_04_0679.html new file mode 100644 index 00000000..16a25f67 --- /dev/null +++ b/docs/dws/dev/dws_04_0679.html @@ -0,0 +1,44 @@ + + +

DBA_TRIGGERS

+

DBA_TRIGGERS displays information about triggers in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 DBA_TRIGGERS columns

Name

+

Type

+

Description

+

trigger_name

+

character varying(64)

+

Trigger name

+

table_name

+

character varying(64)

+

Name of the table that defines the trigger

+

table_owner

+

character varying(64)

+

Owner of the table that defines the trigger

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0680.html b/docs/dws/dev/dws_04_0680.html new file mode 100644 index 00000000..22b99286 --- /dev/null +++ b/docs/dws/dev/dws_04_0680.html @@ -0,0 +1,37 @@ + + +

DBA_VIEWS

+

DBA_VIEWS displays views in the database. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + +
Table 1 DBA_VIEWS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the view

+

view_name

+

character varying(64)

+

View name

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0681.html b/docs/dws/dev/dws_04_0681.html new file mode 100644 index 00000000..0bfed48c --- /dev/null +++ b/docs/dws/dev/dws_04_0681.html @@ -0,0 +1,30 @@ + + +

DUAL

+

DUAL is automatically created by the database based on the data dictionary. It has only one text column in only one row for storing expression calculation results. It is accessible to all users.

+ +
+ + + + + + + + + +
Table 1 DUAL columns

Name

+

Type

+

Description

+

dummy

+

text

+

Expression calculation result

+
+
+
+
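Examples

A sample expression evaluated through DUAL:

 select 2 + 3 as result from dual;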
+ +
+ diff --git a/docs/dws/dev/dws_04_0682.html b/docs/dws/dev/dws_04_0682.html new file mode 100644 index 00000000..3b2fa849 --- /dev/null +++ b/docs/dws/dev/dws_04_0682.html @@ -0,0 +1,12 @@ + + +

GLOBAL_REDO_STAT

+

GLOBAL_REDO_STAT displays the total statistics of XLOG redo operations on all nodes in a cluster. Except for the avgiotim column (which indicates the average redo write time across all nodes), each column in this view has the same name as its counterpart in the PV_REDO_STAT view, and its value is the sum of the values of that column in the PV_REDO_STAT view on each node.

+

This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0683.html b/docs/dws/dev/dws_04_0683.html new file mode 100644 index 00000000..094fb7d8 --- /dev/null +++ b/docs/dws/dev/dws_04_0683.html @@ -0,0 +1,11 @@ + + +

GLOBAL_REL_IOSTAT

+

GLOBAL_REL_IOSTAT displays the total disk I/O statistics of all nodes in a cluster. Each column in this view has the same name as its counterpart in the GS_REL_IOSTAT view, and its value is the sum of the values of that column in the GS_REL_IOSTAT view on each node. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0684.html b/docs/dws/dev/dws_04_0684.html new file mode 100644 index 00000000..272ce0ba --- /dev/null +++ b/docs/dws/dev/dws_04_0684.html @@ -0,0 +1,197 @@ + + +

GLOBAL_STAT_DATABASE

+

GLOBAL_STAT_DATABASE displays the status and statistics of databases on all nodes in a cluster.

+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GLOBAL_STAT_DATABASE columns

Name

+

Type

+

Description

+

Sum Range

+

datid

+

oid

+

Database OID

+

-

+

datname

+

name

+

Database name

+

-

+

numbackends

+

integer

+

Number of backends currently connected to this database on the current node. This is the only column in this view that reflects the current state; all other columns return values accumulated since the last reset.

+

CN

+

xact_commit

+

bigint

+

Number of transactions in this database that have been committed on the current node

+

CN

+

xact_rollback

+

bigint

+

Number of transactions in this database that have been rolled back on the current node

+

CN

+

blks_read

+

bigint

+

Number of disk blocks read in this database on the current node

+

DN

+

blks_hit

+

bigint

+

Number of disk blocks found in the buffer cache on the current node, that is, the number of blocks hit in the cache. (This only includes hits in the GaussDB(DWS) buffer cache, not in the file system cache.)

+

DN

+

tup_returned

+

bigint

+

Number of rows returned by queries in this database on the current node

+

DN

+

tup_fetched

+

bigint

+

Number of rows fetched by queries in this database on the current node

+

DN

+

tup_inserted

+

bigint

+

Number of rows inserted in this database on the current node

+

DN

+

tup_updated

+

bigint

+

Number of rows updated in this database on the current node

+

DN

+

tup_deleted

+

bigint

+

Number of rows deleted from this database on the current node

+

DN

+

conflicts

+

bigint

+

Number of queries canceled due to database recovery conflicts on the current node (conflicts occurring only on the standby server). For details, see PG_STAT_DATABASE_CONFLICTS.

+

CN and DN

+

temp_files

+

bigint

+

Number of temporary files created by this database on the current node. All temporary files are counted, regardless of why the temporary file was created (for example, sorting or hashing), and regardless of the log_temp_files setting.

+

DN

+

temp_bytes

+

bigint

+

Size of temporary files written to this database on the current node. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.

+

DN

+

deadlocks

+

bigint

+

Number of deadlocks in this database on the current node

+

CN and DN

+

blk_read_time

+

double precision

+

Time spent reading data file blocks by backends in this database on the current node, in milliseconds

+

DN

+

blk_write_time

+

double precision

+

Time spent writing into data file blocks by backends in this database on the current node, in milliseconds

+

DN

+

stats_reset

+

timestamp with time zone

+

Time when the database statistics are reset on the current node

+

-

+
+
+
+
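Examples

Because blks_hit counts only hits in the GaussDB(DWS) buffer cache, a per-database cache hit ratio can be derived as in the following sketch (nullif guards against division by zero on idle databases):

 select datname,
        round(blks_hit::numeric * 100 / nullif(blks_hit + blks_read, 0), 2) as cache_hit_ratio
 from global_stat_database;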
+ +
+ diff --git a/docs/dws/dev/dws_04_0685.html b/docs/dws/dev/dws_04_0685.html new file mode 100644 index 00000000..5368ebde --- /dev/null +++ b/docs/dws/dev/dws_04_0685.html @@ -0,0 +1,79 @@ + + +

GLOBAL_WORKLOAD_SQL_COUNT

+

GLOBAL_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in all workload Cgroups in a cluster, including the number of SELECT, UPDATE, INSERT, and DELETE statements and the number of DDL, DML, and DCL statements.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GLOBAL_WORKLOAD_SQL_COUNT columns

Name

+

Type

+

Description

+

workload

+

name

+

Workload Cgroup name

+

select_count

+

bigint

+

Number of SELECT statements

+

update_count

+

bigint

+

Number of UPDATE statements

+

insert_count

+

bigint

+

Number of INSERT statements

+

delete_count

+

bigint

+

Number of DELETE statements

+

ddl_count

+

bigint

+

Number of DDL statements

+

dml_count

+

bigint

+

Number of DML statements

+

dcl_count

+

bigint

+

Number of DCL statements

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0686.html b/docs/dws/dev/dws_04_0686.html new file mode 100644 index 00000000..a948b70b --- /dev/null +++ b/docs/dws/dev/dws_04_0686.html @@ -0,0 +1,142 @@ + + +

GLOBAL_WORKLOAD_SQL_ELAPSE_TIME

+

GLOBAL_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in all workload Cgroups in a cluster, including the maximum, minimum, average, and total response time of SELECT, UPDATE, INSERT, and DELETE statements. The unit is microsecond.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GLOBAL_WORKLOAD_SQL_ELAPSE_TIME columns

Name

+

Type

+

Description

+

workload

+

name

+

Workload Cgroup name

+

total_select_elapse

+

bigint

+

Total response time of SELECT

+

max_select_elapse

+

bigint

+

Maximum response time of SELECT

+

min_select_elapse

+

bigint

+

Minimum response time of SELECT

+

avg_select_elapse

+

bigint

+

Average response time of SELECT

+

total_update_elapse

+

bigint

+

Total response time of UPDATE

+

max_update_elapse

+

bigint

+

Maximum response time of UPDATE

+

min_update_elapse

+

bigint

+

Minimum response time of UPDATE

+

avg_update_elapse

+

bigint

+

Average response time of UPDATE

+

total_insert_elapse

+

bigint

+

Total response time of INSERT

+

max_insert_elapse

+

bigint

+

Maximum response time of INSERT

+

min_insert_elapse

+

bigint

+

Minimum response time of INSERT

+

avg_insert_elapse

+

bigint

+

Average response time of INSERT

+

total_delete_elapse

+

bigint

+

Total response time of DELETE

+

max_delete_elapse

+

bigint

+

Maximum response time of DELETE

+

min_delete_elapse

+

bigint

+

Minimum response time of DELETE

+

avg_delete_elapse

+

bigint

+

Average response time of DELETE

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0687.html b/docs/dws/dev/dws_04_0687.html new file mode 100644 index 00000000..c6359c24 --- /dev/null +++ b/docs/dws/dev/dws_04_0687.html @@ -0,0 +1,72 @@ + + +

GLOBAL_WORKLOAD_TRANSACTION

+

GLOBAL_WORKLOAD_TRANSACTION provides the total transaction information about workload Cgroups on all CNs in the cluster. This view is accessible only to users with system administrator rights. It is valid only when the real-time resource monitoring function is enabled, that is, enable_resource_track is on.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GLOBAL_WORKLOAD_TRANSACTION columns

Name

+

Type

+

Description

+

workload

+

name

+

Workload Cgroup name

+

commit_counter

+

bigint

+

Total number of commits across all CNs

+

rollback_counter

+

bigint

+

Total number of rollbacks across all CNs

+

resp_min

+

bigint

+

Minimum response time of the cluster

+

resp_max

+

bigint

+

Maximum response time of the cluster

+

resp_avg

+

bigint

+

Average response time on each CN

+

resp_total

+

bigint

+

Total response time on each CN

+
+
+
+
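Examples

A sample query of commit and rollback counts and the average response time per workload Cgroup (as noted above, enable_resource_track must be on):

 select workload, commit_counter, rollback_counter, resp_avg
 from global_workload_transaction;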
+ +
+ diff --git a/docs/dws/dev/dws_04_0688.html b/docs/dws/dev/dws_04_0688.html new file mode 100644 index 00000000..2aaaf173 --- /dev/null +++ b/docs/dws/dev/dws_04_0688.html @@ -0,0 +1,93 @@ + + +

GS_ALL_CONTROL_GROUP_INFO

+

GS_ALL_CONTROL_GROUP_INFO displays all Cgroup information in a database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GS_ALL_CONTROL_GROUP_INFO columns

Name

+

Type

+

Description

+

name

+

text

+

Name of the Cgroup

+

type

+

text

+

Type of the Cgroup

+

gid

+

bigint

+

Cgroup ID

+

classgid

+

bigint

+

ID of the Class Cgroup to which a Workload belongs

+

class

+

text

+

Class Cgroup

+

workload

+

text

+

Workload Cgroup

+

shares

+

bigint

+

CPU quota allocated to a Cgroup

+

limits

+

bigint

+

Limit of CPUs allocated to a Cgroup

+

wdlevel

+

bigint

+

Workload Cgroup level

+

cpucores

+

text

+

Usage of CPU cores in a Cgroup

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0689.html b/docs/dws/dev/dws_04_0689.html new file mode 100644 index 00000000..9c19e3f4 --- /dev/null +++ b/docs/dws/dev/dws_04_0689.html @@ -0,0 +1,72 @@ + + +

GS_CLUSTER_RESOURCE_INFO

+

GS_CLUSTER_RESOURCE_INFO displays a DN resource summary.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 GS_CLUSTER_RESOURCE_INFO columns

Name

+

Type

+

Description

+

min_mem_util

+

integer

+

Minimum memory usage of a DN

+

max_mem_util

+

integer

+

Maximum memory usage of a DN

+

min_cpu_util

+

integer

+

Minimum CPU usage of a DN

+

max_cpu_util

+

integer

+

Maximum CPU usage of a DN

+

min_io_util

+

integer

+

Minimum I/O usage of a DN

+

max_io_util

+

integer

+

Maximum I/O usage of a DN

+

used_mem_rate

+

integer

+

Maximum physical memory usage

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0690.html b/docs/dws/dev/dws_04_0690.html new file mode 100644 index 00000000..e0a0d4bc --- /dev/null +++ b/docs/dws/dev/dws_04_0690.html @@ -0,0 +1,246 @@ + + +

GS_INSTR_UNIQUE_SQL

+

Unique SQL Definition

The database parses each received SQL text string into an internal parse tree. It then traverses the parse tree, ignoring the constant values in it, and computes an integer from the tree using a specific algorithm. This integer is used as the Unique SQL ID to uniquely identify this type of SQL. SQL statements with the same Unique SQL ID are called Unique SQLs.

+
+

Examples

Assume that the user enters the following SQL statements in sequence:

select * from t1 where id = 1;
select * from t1 where id = 2;

The statistics of the two SQL statements are aggregated to the same Unique SQL statement:

select * from t1 where id = ?;

GS_INSTR_UNIQUE_SQL View

The GS_INSTR_UNIQUE_SQL view displays the execution information about the Unique SQL statements collected by the current node; the columns are described in Table 1.

The Unique SQL statistics function is subject to several restrictions.

When a common user accesses the GS_INSTR_UNIQUE_SQL view, only the Unique SQL information about that user is displayed. When an administrator accesses the view, all Unique SQL information about the current node is displayed. The view can be queried on both CNs and DNs. A DN displays the Unique SQL statistics of the local node. A CN displays the complete Unique SQL statistics of the local node; that is, the CN collects, from other CNs and DNs, the Unique SQL execution information belonging to this CN and displays it. You can query the GS_INSTR_UNIQUE_SQL view to locate the top SQL statements that consume different resources, providing a basis for cluster performance optimization and maintenance.
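For example, an administrator could list the statements that consumed the most total time on the current CN. This is a minimal sketch based on the columns in Table 1; the sort key can be swapped for cpu_time, n_blocks_fetched, or any other resource column:

-- top 10 Unique SQL statements by total elapsed time
select node_name, user_name, unique_sql_id, n_calls,
       total_elapse_time,
       total_elapse_time / nullif(n_calls, 0) as avg_elapse_time,
       query
  from gs_instr_unique_sql
 order by total_elapse_time desc
 limit 10;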
Table 1 GS_INSTR_UNIQUE_SQL columns

Name | Type | Description
node_name | name | Name of the CN that receives the SQL statement
node_id | integer | Node ID, which is the same as the value of node_id in the pgxc_node table
user_name | name | Username
user_id | oid | User ID
unique_sql_id | bigint | Normalized Unique SQL ID
query | text | Normalized SQL text. The maximum length equals the value of the GUC parameter track_activity_query_size.
n_calls | bigint | Number of successful executions
min_elapse_time | bigint | Minimum running time of the SQL statement in the database (unit: μs)
max_elapse_time | bigint | Maximum running time of the SQL statement in the database (unit: μs)
total_elapse_time | bigint | Total running time of the SQL statement in the database (unit: μs)
n_returned_rows | bigint | Row activity: number of rows in the result set returned by a SELECT statement
n_tuples_fetched | bigint | Row activity: rows fetched by random scans (column-store tables and foreign tables are not counted)
n_tuples_returned | bigint | Row activity: rows returned by sequential scans (column-store tables and foreign tables are not counted)
n_tuples_inserted | bigint | Row activity: inserted rows
n_tuples_updated | bigint | Row activity: updated rows
n_tuples_deleted | bigint | Row activity: deleted rows
n_blocks_fetched | bigint | Number of buffer block accesses, that is, physical reads (I/O)
n_blocks_hit | bigint | Number of buffer block hits, that is, logical reads (cache)
n_soft_parse | bigint | Number of soft parses (cached plan)
n_hard_parse | bigint | Number of hard parses (generated plan)
db_time | bigint | Valid DB execution time, including the waiting time and network sending time. If multiple threads are involved in query execution, db_time is the sum of the db_time of all threads (unit: μs).
cpu_time | bigint | CPU execution time, excluding the sleep time (unit: μs)
execution_time | bigint | Execution time in the query executor. DDL statements and statements not executed by the executor (such as COPY) are not counted (unit: μs).
parse_time | bigint | SQL parsing time (unit: μs)
plan_time | bigint | SQL plan generation time (unit: μs)
rewrite_time | bigint | SQL rewriting time (unit: μs)
pl_execution_time | bigint | Execution time of plpgsql procedural language functions (unit: μs)
pl_compilation_time | bigint | Compilation time of plpgsql procedural language functions (unit: μs)
net_send_time | bigint | Network time, including the time spent by the CN in sending data to the client and the time spent by the DN in sending data to the CN (unit: μs)
data_io_time | bigint | File I/O time (unit: μs)
+ diff --git a/docs/dws/dev/dws_04_0691.html b/docs/dws/dev/dws_04_0691.html new file mode 100644 index 00000000..3144bd9b --- /dev/null +++ b/docs/dws/dev/dws_04_0691.html @@ -0,0 +1,51 @@ + + +

GS_REL_IOSTAT

GS_REL_IOSTAT displays disk I/O statistics on the current node. In the current version, each read or write operation reads or writes exactly one page, so the number of read/write operations equals the number of pages.

Table 1 GS_REL_IOSTAT columns

Name | Type | Description
phyrds | bigint | Number of disk reads
phywrts | bigint | Number of disk writes
phyblkrd | bigint | Number of pages read
phyblkwrt | bigint | Number of pages written
+ diff --git a/docs/dws/dev/dws_04_0692.html b/docs/dws/dev/dws_04_0692.html new file mode 100644 index 00000000..9a27281b --- /dev/null +++ b/docs/dws/dev/dws_04_0692.html @@ -0,0 +1,11 @@ + + +

GS_NODE_STAT_RESET_TIME

The GS_NODE_STAT_RESET_TIME view provides the reset time of statistics on the current node and returns a timestamp with time zone. For details, see the get_node_stat_reset_time() function in "Functions and Operators > System Administration Functions > Other Functions" in SQL Syntax.
+ diff --git a/docs/dws/dev/dws_04_0693.html b/docs/dws/dev/dws_04_0693.html new file mode 100644 index 00000000..9b0494f8 --- /dev/null +++ b/docs/dws/dev/dws_04_0693.html @@ -0,0 +1,86 @@ + + +

GS_SESSION_CPU_STATISTICS

GS_SESSION_CPU_STATISTICS displays load management information about the CPU usage of ongoing complex jobs executed by the current user.

Table 1 GS_SESSION_CPU_STATISTICS columns

Name | Type | Description
datid | oid | OID of the database this backend is connected to
usename | name | Name of the user logged in to the backend
pid | bigint | ID of the backend process
start_time | timestamp with time zone | Time when the statement starts to be executed
min_cpu_time | bigint | Minimum CPU time of the statement across all DNs, in ms
max_cpu_time | bigint | Maximum CPU time of the statement across all DNs, in ms
total_cpu_time | bigint | Total CPU time of the statement across all DNs, in ms
query | text | Statement that is being executed
node_group | text | Logical cluster of the user running the statement
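For example, the current user's running complex jobs can be ranked by CPU consumption. A minimal sketch based on the columns in Table 1:

-- running jobs ordered by total CPU time across DNs
select pid, usename, start_time, total_cpu_time, query
  from gs_session_cpu_statistics
 order by total_cpu_time desc;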
+ diff --git a/docs/dws/dev/dws_04_0694.html b/docs/dws/dev/dws_04_0694.html new file mode 100644 index 00000000..5c808644 --- /dev/null +++ b/docs/dws/dev/dws_04_0694.html @@ -0,0 +1,89 @@ + + +

GS_SESSION_MEMORY_STATISTICS

GS_SESSION_MEMORY_STATISTICS displays load management information about the memory usage of ongoing complex jobs executed by the current user.

Table 1 GS_SESSION_MEMORY_STATISTICS columns

Name | Type | Description
datid | oid | OID of the database this backend is connected to
usename | name | Name of the user logged in to the backend
pid | bigint | ID of the backend process
start_time | timestamp with time zone | Time when the statement starts to be executed
min_peak_memory | integer | Minimum memory peak of the statement across all DNs, in MB
max_peak_memory | integer | Maximum memory peak of the statement across all DNs, in MB
spill_info | text | Information about statement spills to disk on DNs:
  • None indicates that the statement has not spilled to disk on any DN.
  • All indicates that the statement has spilled to disk on every DN.
  • [a:b] indicates that the statement has spilled to disk on a of b DNs.
query | text | Statement that is being executed
node_group | text | Logical cluster of the user running the statement
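For example, the running jobs with the largest memory peaks, and whether they spilled to disk, can be listed as follows (a sketch based on the columns in Table 1):

-- running jobs ordered by peak memory on the busiest DN
select pid, usename, max_peak_memory, spill_info, query
  from gs_session_memory_statistics
 order by max_peak_memory desc;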
+ diff --git a/docs/dws/dev/dws_04_0695.html b/docs/dws/dev/dws_04_0695.html new file mode 100644 index 00000000..73e7e5f1 --- /dev/null +++ b/docs/dws/dev/dws_04_0695.html @@ -0,0 +1,209 @@ + + +

GS_SQL_COUNT

GS_SQL_COUNT displays statistics about the five types of statements (SELECT, INSERT, UPDATE, DELETE, and MERGE INTO) executed on the current node of the database: the number of executions and, for all types except MERGE INTO, the maximum, minimum, average, and total response time in microseconds. It also counts the number of DDL, DML, and DCL statements executed.

The classification of DDL, DML, and DCL statements in the GS_SQL_COUNT view differs slightly from the classification in the SQL syntax. The classification of other statements is similar to the definition in the SQL syntax.

When a common user queries the GS_SQL_COUNT view, only the statistics of this user on the current node are visible. When a user with administrator permissions queries the view, the statistics of all users on the current node are visible. When the cluster or the node is restarted, the statistics are cleared and counting restarts. Counting is based on the number of queries received by the node, including queries performed inside the cluster. Statistics for the GS_SQL_COUNT view are collected only on CNs, and SQL statements sent from other CNs are not counted. No result is returned when you query the view on a DN.

Table 1 GS_SQL_COUNT columns

Name | Type | Description
node_name | name | Node name
user_name | name | Username
select_count | bigint | Number of SELECT statements
update_count | bigint | Number of UPDATE statements
insert_count | bigint | Number of INSERT statements
delete_count | bigint | Number of DELETE statements
mergeinto_count | bigint | Number of MERGE INTO statements
ddl_count | bigint | Number of DDL statements
dml_count | bigint | Number of DML statements
dcl_count | bigint | Number of DCL statements
total_select_elapse | bigint | Total response time of SELECT statements
avg_select_elapse | bigint | Average response time of SELECT statements
max_select_elapse | bigint | Maximum response time of SELECT statements
min_select_elapse | bigint | Minimum response time of SELECT statements
total_update_elapse | bigint | Total response time of UPDATE statements
avg_update_elapse | bigint | Average response time of UPDATE statements
max_update_elapse | bigint | Maximum response time of UPDATE statements
min_update_elapse | bigint | Minimum response time of UPDATE statements
total_delete_elapse | bigint | Total response time of DELETE statements
avg_delete_elapse | bigint | Average response time of DELETE statements
max_delete_elapse | bigint | Maximum response time of DELETE statements
min_delete_elapse | bigint | Minimum response time of DELETE statements
total_insert_elapse | bigint | Total response time of INSERT statements
avg_insert_elapse | bigint | Average response time of INSERT statements
max_insert_elapse | bigint | Maximum response time of INSERT statements
min_insert_elapse | bigint | Minimum response time of INSERT statements
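As a quick health check, the per-user counters can be read straight out of the view on a CN. A minimal sketch:

-- statement mix and average SELECT latency per user on this CN
select node_name, user_name,
       select_count, insert_count, update_count, delete_count,
       avg_select_elapse
  from gs_sql_count
 order by select_count desc;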
+ diff --git a/docs/dws/dev/dws_04_0696.html b/docs/dws/dev/dws_04_0696.html new file mode 100644 index 00000000..81c55458 --- /dev/null +++ b/docs/dws/dev/dws_04_0696.html @@ -0,0 +1,90 @@ + + +

GS_WAIT_EVENTS

GS_WAIT_EVENTS displays statistics about waiting statuses and events on the current node.

The values of the statistical columns in this view are accumulated only when the GUC parameter enable_track_wait_event is set to on. If enable_track_wait_event is set to off during statistics collection, the statistics stop accumulating, but the existing values are not affected. If enable_track_wait_event is off, no rows are returned when this view is queried.

Table 1 GS_WAIT_EVENTS columns

Name | Type | Description
nodename | name | Node name
type | text | Event type, which can be STATUS, LOCK_EVENT, LWLOCK_EVENT, or IO_EVENT
event | text | Event name. For details, see PG_THREAD_WAIT_STATUS.
wait | bigint | Number of times the event occurs. This column and all the columns below are values accumulated during process running.
failed_wait | bigint | Number of waiting failures. In the current version, this column is used only for counting timeout errors and waiting failures of locks such as LOCK and LWLOCK.
total_wait_time | bigint | Total duration of the event
avg_wait_time | bigint | Average duration of the event
max_wait_time | bigint | Maximum wait time of the event
min_wait_time | bigint | Minimum wait time of the event

In the current version, for events whose type is LOCK_EVENT, LWLOCK_EVENT, or IO_EVENT, the display scope of GS_WAIT_EVENTS is the same as that of the corresponding events in the PG_THREAD_WAIT_STATUS view.

For events whose type is STATUS, GS_WAIT_EVENTS displays a set of waiting status columns. For details, see the PG_THREAD_WAIT_STATUS view.
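For example, with enable_track_wait_event set to on, the heaviest wait events on the node can be ranked as follows (a sketch using the columns in Table 1):

-- top 10 wait events by accumulated wait time
select nodename, type, event, wait, total_wait_time, avg_wait_time
  from gs_wait_events
 order by total_wait_time desc
 limit 10;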
+ diff --git a/docs/dws/dev/dws_04_0701.html b/docs/dws/dev/dws_04_0701.html new file mode 100644 index 00000000..355e0b02 --- /dev/null +++ b/docs/dws/dev/dws_04_0701.html @@ -0,0 +1,11 @@ + + +

GS_WLM_OPERATOR_INFO

This view displays the execution information about operators in the query statements that have been executed on the current CN. The information comes from the system catalog dbms_om.gs_wlm_operator_info.
+ diff --git a/docs/dws/dev/dws_04_0702.html b/docs/dws/dev/dws_04_0702.html new file mode 100644 index 00000000..406f6251 --- /dev/null +++ b/docs/dws/dev/dws_04_0702.html @@ -0,0 +1,12 @@ + + +

GS_WLM_OPERATOR_HISTORY

This view displays the records of operators in jobs that have been executed by the current user on the current CN.

This view is used by Database Manager to query data from the kernel. Data in the kernel is cleared every 3 minutes.
+ diff --git a/docs/dws/dev/dws_04_0703.html b/docs/dws/dev/dws_04_0703.html new file mode 100644 index 00000000..37b20eb6 --- /dev/null +++ b/docs/dws/dev/dws_04_0703.html @@ -0,0 +1,185 @@ + + +

GS_WLM_OPERATOR_STATISTICS

GS_WLM_OPERATOR_STATISTICS displays the operators of the jobs that are being executed by the current user.

Table 1 GS_WLM_OPERATOR_STATISTICS columns

Name | Type | Description
queryid | bigint | Internal query_id used for statement execution
pid | bigint | ID of the backend thread
plan_node_id | integer | plan_node_id of the execution plan of the query
plan_node_name | text | Name of the operator corresponding to plan_node_id
start_time | timestamp with time zone | Time when the operator starts to process its first data record
duration | bigint | Total execution time of the operator, in ms
status | text | Execution status of the current operator: finished or running
query_dop | integer | DOP of the current operator
estimated_rows | bigint | Number of rows estimated by the optimizer
tuple_processed | bigint | Number of tuples returned by the current operator
min_peak_memory | integer | Minimum peak memory used by the current operator on all DNs, in MB
max_peak_memory | integer | Maximum peak memory used by the current operator on all DNs, in MB
average_peak_memory | integer | Average peak memory used by the current operator on all DNs, in MB
memory_skew_percent | integer | Memory usage skew of the current operator among DNs
min_spill_size | integer | Minimum spilled data among all DNs when a spill occurs, in MB. The default value is 0.
max_spill_size | integer | Maximum spilled data among all DNs when a spill occurs, in MB. The default value is 0.
average_spill_size | integer | Average spilled data among all DNs when a spill occurs, in MB. The default value is 0.
spill_skew_percent | integer | DN spill skew when a spill occurs
min_cpu_time | bigint | Minimum execution time of the operator on all DNs, in ms
max_cpu_time | bigint | Maximum execution time of the operator on all DNs, in ms
total_cpu_time | bigint | Total execution time of the operator on all DNs, in ms
cpu_skew_percent | integer | Skew of the execution time among DNs
warning | text | Warning. The following warnings are displayed:
  1. Sort/SetOp/HashAgg/HashJoin spill
  2. Spill file size large than 256MB
  3. Broadcast size large than 100MB
  4. Early spill
  5. Spill times is greater than 3
  6. Spill on memory adaptive
  7. Hash table conflict
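For example, to spot operators of running jobs whose work is unevenly distributed across DNs, the skew columns can be sorted directly. A minimal sketch:

-- running operators with the worst CPU-time skew among DNs
select queryid, plan_node_id, plan_node_name, query_dop,
       cpu_skew_percent, memory_skew_percent
  from gs_wlm_operator_statistics
 where status = 'running'
 order by cpu_skew_percent desc
 limit 10;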
+ diff --git a/docs/dws/dev/dws_04_0704.html b/docs/dws/dev/dws_04_0704.html new file mode 100644 index 00000000..d8df7453 --- /dev/null +++ b/docs/dws/dev/dws_04_0704.html @@ -0,0 +1,11 @@ + + +

GS_WLM_SESSION_INFO

This view displays the execution information about the query statements that have been executed on the current CN. The information comes from the system catalog dbms_om.gs_wlm_session_info.
+ diff --git a/docs/dws/dev/dws_04_0705.html b/docs/dws/dev/dws_04_0705.html new file mode 100644 index 00000000..6ab5bcfd --- /dev/null +++ b/docs/dws/dev/dws_04_0705.html @@ -0,0 +1,342 @@ + + +

GS_WLM_SESSION_HISTORY

GS_WLM_SESSION_HISTORY displays load management information about completed jobs executed by the current user on the current CN. This view is used by Database Manager to query data from GaussDB(DWS). Data in GaussDB(DWS) is cleared every 3 minutes.

Table 1 GS_WLM_SESSION_HISTORY columns

Name | Type | Description
datid | oid | OID of the database this backend is connected to
dbname | text | Name of the database the backend is connected to
schemaname | text | Schema name
nodename | text | Name of the CN where the statement is run
username | text | Username used for connecting to the backend
application_name | text | Name of the application that is connected to the backend
client_addr | inet | IP address of the client connected to this backend. If this column is null, the client is connected via a Unix socket on the server machine or this is an internal process such as autovacuum.
client_hostname | text | Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column is non-null only for IP connections, and only when log_hostname is enabled.
client_port | integer | TCP port number that the client uses for communication with this backend, or -1 if a Unix socket is used
query_band | text | Job type, which is specified by the query_band parameter. The default value is an empty string.
block_time | bigint | Duration that the statement is blocked before being executed, including the statement parsing and optimization duration, in ms
start_time | timestamp with time zone | Time when the statement starts to be run
finish_time | timestamp with time zone | Time when the statement execution ends
duration | bigint | Execution time of the statement, in ms
estimate_total_time | bigint | Estimated execution time of the statement, in ms
status | text | Final statement execution status: finished (normal) or aborted (abnormal)
abort_info | text | Exception information displayed if the final statement execution status is aborted
resource_pool | text | Resource pool used by the user
control_group | text | Cgroup used by the statement
min_peak_memory | integer | Minimum memory peak of the statement across all DNs, in MB
max_peak_memory | integer | Maximum memory peak of the statement across all DNs, in MB
average_peak_memory | integer | Average memory usage during statement execution, in MB
memory_skew_percent | integer | Memory usage skew of the statement among DNs
spill_info | text | Statement spill information on all DNs:
  • None indicates that the statement has not spilled to disk on any DN.
  • All indicates that the statement has spilled to disk on every DN.
  • [a:b] indicates that the statement has spilled to disk on a of b DNs.
min_spill_size | integer | Minimum spilled data among all DNs when a spill occurs, in MB. The default value is 0.
max_spill_size | integer | Maximum spilled data among all DNs when a spill occurs, in MB. The default value is 0.
average_spill_size | integer | Average spilled data among all DNs when a spill occurs, in MB. The default value is 0.
spill_skew_percent | integer | DN spill skew when a spill occurs
min_dn_time | bigint | Minimum execution time of the statement across all DNs, in ms
max_dn_time | bigint | Maximum execution time of the statement across all DNs, in ms
average_dn_time | bigint | Average execution time of the statement across all DNs, in ms
dntime_skew_percent | integer | Execution time skew of the statement among DNs
min_cpu_time | bigint | Minimum CPU time of the statement across all DNs, in ms
max_cpu_time | bigint | Maximum CPU time of the statement across all DNs, in ms
total_cpu_time | bigint | Total CPU time of the statement across all DNs, in ms
cpu_skew_percent | integer | CPU time skew of the statement among DNs
min_peak_iops | integer | Minimum IOPS peak of the statement across all DNs. It is counted by ones for column-store tables and by ten thousands for row-store tables.
max_peak_iops | integer | Maximum IOPS peak of the statement across all DNs. It is counted by ones for column-store tables and by ten thousands for row-store tables.
average_peak_iops | integer | Average IOPS peak of the statement across all DNs. It is counted by ones for column-store tables and by ten thousands for row-store tables.
iops_skew_percent | integer | I/O skew across DNs
warning | text | Warning. The following warnings and warnings related to SQL self-diagnosis tuning are displayed:
  1. Spill file size large than 256MB
  2. Broadcast size large than 100MB
  3. Early spill
  4. Spill times is greater than 3
  5. Spill on memory adaptive
  6. Hash table conflict
queryid | bigint | Internal query ID used for statement execution
query | text | Statement executed
query_plan | text | Execution plan of the statement
node_group | text | Logical cluster of the user running the statement
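For example, recently failed statements and their exception information can be pulled from this view before the 3-minute cleanup removes them. A minimal sketch:

-- recently aborted statements with their abort reasons
select finish_time, username, duration, abort_info, query
  from gs_wlm_session_history
 where status = 'aborted'
 order by finish_time desc;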
+ diff --git a/docs/dws/dev/dws_04_0706.html b/docs/dws/dev/dws_04_0706.html new file mode 100644 index 00000000..4a7cc58c --- /dev/null +++ b/docs/dws/dev/dws_04_0706.html @@ -0,0 +1,349 @@ + + +

GS_WLM_SESSION_STATISTICS

GS_WLM_SESSION_STATISTICS displays load management information about jobs being executed by the current user on the current CN.

Table 1 GS_WLM_SESSION_STATISTICS columns

Name | Type | Description
datid | oid | OID of the database this backend is connected to
dbname | name | Name of the database the backend is connected to
schemaname | text | Schema name
nodename | text | Name of the CN where the statement is executed
username | name | Username used for connecting to the backend
application_name | text | Name of the application that is connected to the backend
client_addr | inet | IP address of the client connected to this backend. If this column is null, the client is connected via a Unix socket on the server machine or this is an internal process such as autovacuum.
client_hostname | text | Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column is non-null only for IP connections, and only when log_hostname is enabled.
client_port | integer | TCP port number that the client uses for communication with this backend, or -1 if a Unix socket is used
query_band | text | Job type, which is specified by the GUC parameter query_band. The default value is an empty string.
pid | bigint | Process ID of the backend
block_time | bigint | Block time before the statement is executed, in ms
start_time | timestamp with time zone | Time when the statement starts to be executed
duration | bigint | For how long the statement has been executing, in ms
estimate_total_time | bigint | Estimated execution time of the statement, in ms
estimate_left_time | bigint | Estimated remaining time of statement execution, in ms
enqueue | text | Workload management resource status
resource_pool | name | Resource pool used by the user
control_group | text | Cgroup used by the statement
estimate_memory | integer | Estimated memory used by the statement, in MB
min_peak_memory | integer | Minimum memory peak of the statement across all DNs, in MB
max_peak_memory | integer | Maximum memory peak of the statement across all DNs, in MB
average_peak_memory | integer | Average memory usage during statement execution, in MB
memory_skew_percent | integer | Memory usage skew of the statement among DNs
spill_info | text | Statement spill information on all DNs:
  • None indicates that the statement has not spilled to disk on any DN.
  • All indicates that the statement has spilled to disk on every DN.
  • [a:b] indicates that the statement has spilled to disk on a of b DNs.
min_spill_size | integer | Minimum spilled data among all DNs when a spill occurs, in MB. The default value is 0.
max_spill_size | integer | Maximum spilled data among all DNs when a spill occurs, in MB. The default value is 0.
average_spill_size | integer | Average spilled data among all DNs when a spill occurs, in MB. The default value is 0.
spill_skew_percent | integer | DN spill skew when a spill occurs
min_dn_time | bigint | Minimum execution time of the statement across all DNs, in ms
max_dn_time | bigint | Maximum execution time of the statement across all DNs, in ms
average_dn_time | bigint | Average execution time of the statement across all DNs, in ms
dntime_skew_percent | bigint | Execution time skew of the statement among DNs
min_cpu_time | bigint | Minimum CPU time of the statement across all DNs, in ms
max_cpu_time | bigint | Maximum CPU time of the statement across all DNs, in ms
total_cpu_time | bigint | Total CPU time of the statement across all DNs, in ms
cpu_skew_percent | integer | CPU time skew of the statement among DNs
min_peak_iops | integer | Minimum IOPS peak of the statement across all DNs. It is counted by ones for column-store tables and by ten thousands for row-store tables.
max_peak_iops | integer | Maximum IOPS peak of the statement across all DNs. It is counted by ones for column-store tables and by ten thousands for row-store tables.
average_peak_iops | integer | Average IOPS peak of the statement across all DNs. It is counted by ones for column-store tables and by ten thousands for row-store tables.
iops_skew_percent | integer | I/O skew across DNs
warning | text | Warning. The following warnings and warnings related to SQL self-diagnosis tuning are displayed:
  1. Spill file size large than 256MB
  2. Broadcast size large than 100MB
  3. Early spill
  4. Spill times is greater than 3
  5. Spill on memory adaptive
  6. Hash table conflict
queryid | bigint | Internal query ID used for statement execution
query | text | Statement that is being executed
query_plan | text | Execution plan of the statement
node_group | text | Logical cluster of the user running the statement
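For example, the longest-running jobs of the current user, together with their estimated remaining time, can be listed as follows (a sketch based on the columns in Table 1):

-- current jobs ordered by elapsed execution time
select username, nodename, duration, estimate_left_time,
       max_peak_memory, total_cpu_time, query
  from gs_wlm_session_statistics
 order by duration desc;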
+ diff --git a/docs/dws/dev/dws_04_0708.html b/docs/dws/dev/dws_04_0708.html new file mode 100644 index 00000000..30731104 --- /dev/null +++ b/docs/dws/dev/dws_04_0708.html @@ -0,0 +1,11 @@ + + +

GS_WLM_SQL_ALLOW

GS_WLM_SQL_ALLOW displays the configured resource management SQL whitelist, including the default SQL whitelist and the SQL whitelist configured using the GUC parameter wlm_sql_allow_list.
+ diff --git a/docs/dws/dev/dws_04_0709.html b/docs/dws/dev/dws_04_0709.html new file mode 100644 index 00000000..ccab85d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0709.html @@ -0,0 +1,79 @@ + + +

GS_WORKLOAD_SQL_COUNT

GS_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in workload Cgroups on the current node, including the number of SELECT, UPDATE, INSERT, and DELETE statements and the number of DDL, DML, and DCL statements.

Table 1 GS_WORKLOAD_SQL_COUNT columns

Name | Type | Description
workload | name | Workload Cgroup name
select_count | bigint | Number of SELECT statements
update_count | bigint | Number of UPDATE statements
insert_count | bigint | Number of INSERT statements
delete_count | bigint | Number of DELETE statements
ddl_count | bigint | Number of DDL statements
dml_count | bigint | Number of DML statements
dcl_count | bigint | Number of DCL statements
+ diff --git a/docs/dws/dev/dws_04_0710.html b/docs/dws/dev/dws_04_0710.html new file mode 100644 index 00000000..85692e15 --- /dev/null +++ b/docs/dws/dev/dws_04_0710.html @@ -0,0 +1,142 @@ + + +

GS_WORKLOAD_SQL_ELAPSE_TIME

GS_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in workload Cgroups on the current node, including the maximum, minimum, average, and total response time of SELECT, UPDATE, INSERT, and DELETE statements, in microseconds.

Table 1 GS_WORKLOAD_SQL_ELAPSE_TIME columns

Name | Type | Description
workload | name | Workload Cgroup name
total_select_elapse | bigint | Total response time of SELECT statements
max_select_elapse | bigint | Maximum response time of SELECT statements
min_select_elapse | bigint | Minimum response time of SELECT statements
avg_select_elapse | bigint | Average response time of SELECT statements
total_update_elapse | bigint | Total response time of UPDATE statements
max_update_elapse | bigint | Maximum response time of UPDATE statements
min_update_elapse | bigint | Minimum response time of UPDATE statements
avg_update_elapse | bigint | Average response time of UPDATE statements
total_insert_elapse | bigint | Total response time of INSERT statements
max_insert_elapse | bigint | Maximum response time of INSERT statements
min_insert_elapse | bigint | Minimum response time of INSERT statements
avg_insert_elapse | bigint | Average response time of INSERT statements
total_delete_elapse | bigint | Total response time of DELETE statements
max_delete_elapse | bigint | Maximum response time of DELETE statements
min_delete_elapse | bigint | Minimum response time of DELETE statements
avg_delete_elapse | bigint | Average response time of DELETE statements
+ diff --git a/docs/dws/dev/dws_04_0711.html b/docs/dws/dev/dws_04_0711.html new file mode 100644 index 00000000..37ded879 --- /dev/null +++ b/docs/dws/dev/dws_04_0711.html @@ -0,0 +1,72 @@ + + +

GS_WORKLOAD_TRANSACTION

GS_WORKLOAD_TRANSACTION provides transaction information about workload Cgroups on a single CN. The database records the number of times each workload Cgroup commits and rolls back transactions, and the response time of transaction commit and rollback, in microseconds.

Table 1 GS_WORKLOAD_TRANSACTION columns

Name | Type | Description
workload | name | Workload Cgroup name
commit_counter | bigint | Number of commits
rollback_counter | bigint | Number of rollbacks
resp_min | bigint | Minimum response time
resp_max | bigint | Maximum response time
resp_avg | bigint | Average response time
resp_total | bigint | Total response time
+ diff --git a/docs/dws/dev/dws_04_0712.html b/docs/dws/dev/dws_04_0712.html new file mode 100644 index 00000000..76f1b107 --- /dev/null +++ b/docs/dws/dev/dws_04_0712.html @@ -0,0 +1,58 @@ + + +

GS_STAT_DB_CU

GS_STAT_DB_CU displays CU hits in a database on each node in a cluster. You can clear it using gs_stat_reset().

Table 1 GS_STAT_DB_CU columns

Name | Type | Description
node_name | text | Node name
db_name | text | Database name
mem_hit | integer | Number of memory hits
hdd_sync_read | integer | Number of synchronous hard disk reads
hdd_asyn_read | integer | Number of asynchronous hard disk reads
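For example, a rough CU cache hit ratio per database and node can be derived from the three counters. A minimal sketch; databases with no CU activity yield NULL:

-- CU hit ratio = memory hits / all CU accesses
select node_name, db_name,
       round(mem_hit::numeric / nullif(mem_hit + hdd_sync_read + hdd_asyn_read, 0), 4) as cu_hit_ratio
  from gs_stat_db_cu
 order by cu_hit_ratio nulls last;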
+ diff --git a/docs/dws/dev/dws_04_0713.html b/docs/dws/dev/dws_04_0713.html new file mode 100644 index 00000000..3c549906 --- /dev/null +++ b/docs/dws/dev/dws_04_0713.html @@ -0,0 +1,51 @@ + + +

GS_STAT_SESSION_CU

GS_STAT_SESSION_CU displays the CU hit rate of running sessions on each node in a cluster. The data for a session is cleared when the session exits or the cluster is restarted.

Table 1 GS_STAT_SESSION_CU columns

Name | Type | Description
node_name | text | Node name
mem_hit | integer | Number of memory hits
hdd_sync_read | integer | Number of synchronous hard disk reads
hdd_asyn_read | integer | Number of asynchronous hard disk reads
+ diff --git a/docs/dws/dev/dws_04_0714.html b/docs/dws/dev/dws_04_0714.html new file mode 100644 index 00000000..24e3db2a --- /dev/null +++ b/docs/dws/dev/dws_04_0714.html @@ -0,0 +1,45 @@ + + +

GS_TOTAL_NODEGROUP_MEMORY_DETAIL

GS_TOTAL_NODEGROUP_MEMORY_DETAIL displays statistics about the memory usage of the logical cluster that the current database belongs to, in MB.

Table 1 GS_TOTAL_NODEGROUP_MEMORY_DETAIL columns

Name | Type | Description
ngname | text | Name of the logical cluster
memorytype | text | Memory type. Its value can be:
  • ng_total_memory: total memory of the logical cluster
  • ng_used_memory: memory usage of the logical cluster
  • ng_estimate_memory: estimated memory usage of the logical cluster
  • ng_foreignrp_memsize: total memory of the external resource pool of the logical cluster
  • ng_foreignrp_usedsize: memory usage of the external resource pool of the logical cluster
  • ng_foreignrp_peaksize: peak memory usage of the external resource pool of the logical cluster
  • ng_foreignrp_mempct: percentage of the external resource pool of the logical cluster to the total memory of the logical cluster
  • ng_foreignrp_estmsize: estimated memory usage of the external resource pool of the logical cluster
memorymbytes | integer | Size of the memory of this type, in MB
+ diff --git a/docs/dws/dev/dws_04_0715.html b/docs/dws/dev/dws_04_0715.html new file mode 100644 index 00000000..76a1b80b --- /dev/null +++ b/docs/dws/dev/dws_04_0715.html @@ -0,0 +1,72 @@ + + +

GS_USER_TRANSACTION

GS_USER_TRANSACTION provides transaction information about users on a single CN. The database records the number of times each user commits and rolls back transactions, and the response time of transaction commit and rollback, in microseconds.

Table 1 GS_USER_TRANSACTION columns

Name | Type | Description
usename | name | Username
commit_counter | bigint | Number of commits
rollback_counter | bigint | Number of rollbacks
resp_min | bigint | Minimum response time
resp_max | bigint | Maximum response time
resp_avg | bigint | Average response time
resp_total | bigint | Total response time
+ diff --git a/docs/dws/dev/dws_04_0716.html b/docs/dws/dev/dws_04_0716.html new file mode 100644 index 00000000..af96090f --- /dev/null +++ b/docs/dws/dev/dws_04_0716.html @@ -0,0 +1,59 @@ + + +

GS_VIEW_DEPENDENCY

GS_VIEW_DEPENDENCY allows you to query the direct dependencies of all views visible to the current user.

Table 1 GS_VIEW_DEPENDENCY columns

Column | Type | Description
objschema | name | Schema name of the view
objname | name | View name
refobjschema | name | Schema name of the object that the view depends on
refobjname | name | Name of the object that the view depends on
relobjkind | char | Type of the object that the view depends on:
  • r: table
  • v: view
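For example, before altering a table you can check which views directly depend on it. A minimal sketch; public.t1 is a hypothetical schema and table name:

-- views that directly depend on table public.t1
select objschema, objname
  from gs_view_dependency
 where refobjschema = 'public'
   and refobjname = 't1'
   and relobjkind = 'r';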
+ diff --git a/docs/dws/dev/dws_04_0717.html b/docs/dws/dev/dws_04_0717.html new file mode 100644 index 00000000..6534f714 --- /dev/null +++ b/docs/dws/dev/dws_04_0717.html @@ -0,0 +1,65 @@ + + +

GS_VIEW_INVALID

GS_VIEW_INVALID queries all unavailable views visible to the current user. If a base table, function, or synonym that a view depends on is abnormal, the validtype column of that view displays "invalid".

Table 1 GS_VIEW_INVALID columns

Column | Type | Description
oid | oid | OID of the view
schemaname | name | Schema name of the view
viewname | name | Name of the view
viewowner | name | Owner of the view
definition | text | Definition of the view
validtype | text | View validity flag
+ diff --git a/docs/dws/dev/dws_04_0718.html b/docs/dws/dev/dws_04_0718.html new file mode 100644 index 00000000..146a2b78 --- /dev/null +++ b/docs/dws/dev/dws_04_0718.html @@ -0,0 +1,79 @@ + + +

PG_AVAILABLE_EXTENSION_VERSIONS

PG_AVAILABLE_EXTENSION_VERSIONS displays the extension versions of certain database features.

Table 1 PG_AVAILABLE_EXTENSION_VERSIONS columns

Name | Type | Description
name | name | Extension name
version | text | Version name
installed | boolean | The value is true if this version of this extension is currently installed.
superuser | boolean | The value is true if only system administrators are allowed to install this extension.
relocatable | boolean | The value is true if the extension can be relocated to another schema.
schema | name | Name of the schema that the extension must be installed into. The value is null if the extension is partially or fully relocatable.
requires | name[] | Names of prerequisite extensions. The value is null if there are no prerequisite extensions.
comment | text | Comment string from the extension's control file
+ diff --git a/docs/dws/dev/dws_04_0719.html b/docs/dws/dev/dws_04_0719.html new file mode 100644 index 00000000..c33fb1d2 --- /dev/null +++ b/docs/dws/dev/dws_04_0719.html @@ -0,0 +1,51 @@ + + +

PG_AVAILABLE_EXTENSIONS

PG_AVAILABLE_EXTENSIONS displays the extension information about certain database features.

Table 1 PG_AVAILABLE_EXTENSIONS columns

Name | Type | Description
name | name | Extension name
default_version | text | Name of the default version. The value is NULL if none is specified.
installed_version | text | Currently installed version of the extension. The value is NULL if no version is installed.
comment | text | Comment string from the extension's control file
+ diff --git a/docs/dws/dev/dws_04_0720.html b/docs/dws/dev/dws_04_0720.html new file mode 100644 index 00000000..99a3768f --- /dev/null +++ b/docs/dws/dev/dws_04_0720.html @@ -0,0 +1,121 @@ + + +

PG_BULKLOAD_STATISTICS

On any normal node in a cluster, PG_BULKLOAD_STATISTICS displays the execution status of the import and export services. Each import or export service corresponds to one record. This view is accessible only to users with system administrator rights.

Table 1 PG_BULKLOAD_STATISTICS columns

Name | Type | Description
node_name | text | Node name
db_name | text | Database name
query_id | bigint | Query ID. It is equivalent to debug_query_id.
tid | bigint | ID of the current thread
lwtid | integer | Lightweight thread ID
session_id | bigint | GDS session ID
direction | text | Service type. The options are gds to file, gds from file, gds to pipe, gds from pipe, copy from, and copy to.
query | text | Query statement
address | text | Location of the foreign table used for data import or export
query_start | timestamp with time zone | Start time of data import or export
total_bytes | bigint | Total size of the data to be processed. This column is populated only when a GDS common file is being imported and the record in the row comes from a CN; otherwise, it is left unspecified.
phase | text | Execution phase of the current import or export service. The options are INITIALIZING, TRANSFER_DATA, and RELEASE_RESOURCE.
done_lines | bigint | Number of lines that have been transferred
done_bytes | bigint | Number of bytes that have been transferred
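For example, the progress of ongoing imports and exports can be estimated from done_bytes and total_bytes. A minimal sketch, requiring system administrator rights; pct_done is NULL whenever total_bytes is not populated, as noted above:

-- progress of ongoing import/export services
select node_name, direction, phase, query_start, done_lines, done_bytes,
       round(100 * done_bytes::numeric / nullif(total_bytes, 0), 2) as pct_done
  from pg_bulkload_statistics;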
+ diff --git a/docs/dws/dev/dws_04_0721.html b/docs/dws/dev/dws_04_0721.html new file mode 100644 index 00000000..280c24e2 --- /dev/null +++ b/docs/dws/dev/dws_04_0721.html @@ -0,0 +1,86 @@ + + +

PG_COMM_CLIENT_INFO

PG_COMM_CLIENT_INFO stores the client connection information of a single node. (You can query this view on a DN to view the information about the connection between the CN and DN.)

Table 1 PG_COMM_CLIENT_INFO columns

Name | Type | Description
node_name | text | Name of the current node
app | text | Client application name
tid | bigint | Thread ID of the current thread
lwtid | integer | Lightweight thread ID of the current thread
query_id | bigint | Query ID. It is equivalent to debug_query_id.
socket | integer | Socket identifier. It is displayed if the connection is a physical connection.
remote_ip | text | IP address of the peer node
remote_port | text | Port of the peer node
logic_id | integer | If the connection is a logical connection, sid is displayed. If -1 is displayed, the current connection is a physical connection.
+ diff --git a/docs/dws/dev/dws_04_0722.html b/docs/dws/dev/dws_04_0722.html new file mode 100644 index 00000000..77511557 --- /dev/null +++ b/docs/dws/dev/dws_04_0722.html @@ -0,0 +1,74 @@ + + +

PG_COMM_DELAY

PG_COMM_DELAY displays the communication library delay status for a single DN.

Table 1 PG_COMM_DELAY columns

Name | Type | Description
node_name | text | Node name
remote_name | text | Name of the peer node
remote_host | text | IP address of the peer node
stream_num | integer | Number of logical stream connections used by the current physical connection
min_delay | integer | Minimum delay of the current physical connection within 1 minute, in microseconds. NOTE: A negative result is invalid. Wait until the delay status is updated and query again.
average | integer | Average delay of the current physical connection within 1 minute, in microseconds
max_delay | integer | Maximum delay of the current physical connection within 1 minute, in microseconds
+ diff --git a/docs/dws/dev/dws_04_0723.html b/docs/dws/dev/dws_04_0723.html new file mode 100644 index 00000000..a55ad0aa --- /dev/null +++ b/docs/dws/dev/dws_04_0723.html @@ -0,0 +1,114 @@ + + +

PG_COMM_STATUS

PG_COMM_STATUS displays the communication library status for a single DN.

Table 1 PG_COMM_STATUS columns

Name | Type | Description
node_name | text | Node name
rxpck/s | integer | Receiving rate of the communication library on the node, in packets/s
txpck/s | integer | Sending rate of the communication library on the node, in packets/s
rxkB/s | bigint | Receiving rate of the communication library on the node, in KB/s
txkB/s | bigint | Sending rate of the communication library on the node, in KB/s
buffer | bigint | Size of the buffer of the Cmailbox
memKB(libcomm) | bigint | Communication memory size of the libcomm process, in KB
memKB(libpq) | bigint | Communication memory size of the libpq process, in KB
%USED(PM) | integer | Real-time usage of the postmaster thread
%USED (sflow) | integer | Real-time usage of the gs_sender_flow_controller thread
%USED (rflow) | integer | Real-time usage of the gs_receiver_flow_controller thread
%USED (rloop) | integer | Highest real-time usage among multiple gs_receivers_loop threads
stream | integer | Total number of used logical connections
+ diff --git a/docs/dws/dev/dws_04_0724.html b/docs/dws/dev/dws_04_0724.html new file mode 100644 index 00000000..62d9c1d9 --- /dev/null +++ b/docs/dws/dev/dws_04_0724.html @@ -0,0 +1,143 @@ + + +

PG_COMM_RECV_STREAM

PG_COMM_RECV_STREAM displays the receiving stream status of all the communication libraries for a single DN.

Table 1 PG_COMM_RECV_STREAM columns

Name | Type | Description
node_name | text | Node name
local_tid | bigint | ID of the thread using this stream
remote_name | text | Name of the peer node
remote_tid | bigint | Peer thread ID
idx | integer | ID of the peer DN on the local DN
sid | integer | Stream ID in the physical connection
tcp_sock | integer | TCP socket used by the stream
state | text | Current status of the stream:
  • UNKNOWN: The logical connection is unknown.
  • READY: The logical connection is ready.
  • RUN: The logical connection receives packets normally.
  • HOLD: The logical connection is waiting to receive packets.
  • CLOSED: The logical connection is closed.
  • TO_CLOSED: The logical connection is to be closed.
query_id | bigint | debug_query_id corresponding to the stream
pn_id | integer | plan_node_id of the query executed by the stream
send_smp | integer | smpid of the sender of the query executed by the stream
recv_smp | integer | smpid of the receiver of the query executed by the stream
recv_bytes | bigint | Total data volume received from the stream, in bytes
time | bigint | Current lifecycle duration of the stream, in ms
speed | bigint | Average receiving rate of the stream, in bytes/s
quota | bigint | Current communication quota value of the stream, in bytes
buff_usize | bigint | Current size of the data cache of the stream, in bytes
+ diff --git a/docs/dws/dev/dws_04_0725.html b/docs/dws/dev/dws_04_0725.html new file mode 100644 index 00000000..7a171fca --- /dev/null +++ b/docs/dws/dev/dws_04_0725.html @@ -0,0 +1,143 @@ + + +

PG_COMM_SEND_STREAM

PG_COMM_SEND_STREAM displays the sending stream status of all the communication libraries for a single DN.

Table 1 PG_COMM_SEND_STREAM columns

Name | Type | Description
node_name | text | Node name
local_tid | bigint | ID of the thread using this stream
remote_name | text | Name of the peer node
remote_tid | bigint | Peer thread ID
idx | integer | ID of the peer DN on the local DN
sid | integer | Stream ID in the physical connection
tcp_sock | integer | TCP socket used by the stream
state | text | Current status of the stream:
  • UNKNOWN: The logical connection is unknown.
  • READY: The logical connection is ready.
  • RUN: The logical connection sends packets normally.
  • HOLD: The logical connection is waiting to send packets.
  • CLOSED: The logical connection is closed.
  • TO_CLOSED: The logical connection is to be closed.
query_id | bigint | debug_query_id corresponding to the stream
pn_id | integer | plan_node_id of the query executed by the stream
send_smp | integer | smpid of the sender of the query executed by the stream
recv_smp | integer | smpid of the receiver of the query executed by the stream
send_bytes | bigint | Total data volume sent by the stream, in bytes
time | bigint | Current lifecycle duration of the stream, in ms
speed | bigint | Average sending rate of the stream, in bytes/s
quota | bigint | Current communication quota value of the stream, in bytes
wait_quota | bigint | Extra time generated while the stream waits for the quota value, in ms
+ diff --git a/docs/dws/dev/dws_04_0726.html b/docs/dws/dev/dws_04_0726.html new file mode 100644 index 00000000..a3163188 --- /dev/null +++ b/docs/dws/dev/dws_04_0726.html @@ -0,0 +1,30 @@ + + +

PG_CONTROL_GROUP_CONFIG

PG_CONTROL_GROUP_CONFIG displays the Cgroup configuration information in the system.

Table 1 PG_CONTROL_GROUP_CONFIG columns

Name | Type | Description
pg_control_group_config | text | Configuration information of the Cgroup
+ diff --git a/docs/dws/dev/dws_04_0727.html b/docs/dws/dev/dws_04_0727.html new file mode 100644 index 00000000..4502b5fb --- /dev/null +++ b/docs/dws/dev/dws_04_0727.html @@ -0,0 +1,65 @@ + + +

PG_CURSORS

PG_CURSORS displays the cursors that are currently available.

Table 1 PG_CURSORS columns

Name | Type | Description
name | text | Cursor name
statement | text | Query statement submitted to declare the cursor
is_holdable | boolean | Whether the cursor is holdable (that is, it can be accessed after the transaction that declared it has committed). If it is, its value is true.
is_binary | boolean | Whether the cursor was declared BINARY. If it was, its value is true.
is_scrollable | boolean | Whether the cursor is scrollable (that is, it allows rows to be retrieved in a nonsequential manner). If it is, its value is true.
creation_time | timestamp with time zone | Timestamp at which the cursor was declared
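A cursor appears in this view for as long as it exists, so a quick way to see the columns in action is to declare a cursor inside a transaction and query the view before closing it. A minimal sketch; my_cursor is a hypothetical cursor name:

begin;
declare my_cursor cursor for select relname from pg_class;
-- inspect the cursor just declared
select name, statement, is_holdable, is_scrollable from pg_cursors;
close my_cursor;
end;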
+ diff --git a/docs/dws/dev/dws_04_0728.html b/docs/dws/dev/dws_04_0728.html new file mode 100644 index 00000000..b56ac924 --- /dev/null +++ b/docs/dws/dev/dws_04_0728.html @@ -0,0 +1,137 @@ + + +

PG_EXT_STATS

PG_EXT_STATS displays the extended statistics stored in the PG_STATISTIC_EXT table. Extended statistics are statistics collected over multiple columns.

Table 1 PG_EXT_STATS columns

Name | Type | Reference | Description
schemaname | name | PG_NAMESPACE.nspname | Name of the schema that contains the table
tablename | name | PG_CLASS.relname | Name of the table
attname | int2vector | PG_STATISTIC_EXT.stakey | Columns to be combined for collecting statistics
inherited | boolean | - | Includes inherited sub-columns if the value is true; otherwise, indicates the column combination in the specified table
null_frac | real | - | Percentage of column combinations that are null to all records
avg_width | integer | - | Average width of the column combination, in bytes
n_distinct | real | - | Number of distinct values in the column combination:
  • Estimated number of distinct values if the value is greater than 0
  • Negative of the number of distinct values divided by the number of rows if the value is less than 0. The negated form is used when ANALYZE believes that the number of distinct values is likely to increase as the table grows. The positive form is used when the column combination seems to have a fixed number of possible values. For example, -1 indicates that the number of distinct values is the same as the number of rows for the column combination.
  • The number of distinct values is unknown if the value is 0.
n_dndistinct | real | - | Number of distinct non-null data values of the column combination on the first DN (dn1):
  • Exact number of distinct values if the value is greater than 0
  • Negative of the number of distinct values divided by the number of rows if the value is less than 0. For example, if a value in the column combination appears twice on average, n_dndistinct equals -0.5.
  • The number of distinct values is unknown if the value is 0.
most_common_vals | anyarray | - | List of the most common values in the column combination. If this combination does not have most common values, most_common_vals will be NULL. None of the most common values in most_common_vals is NULL.
most_common_freqs | real[] | - | List of the frequencies of the most common values, that is, the number of occurrences of each value divided by the total number of rows (NULL if most_common_vals is NULL)
most_common_vals_null | anyarray | - | List of the most common values in the column combination. If this combination does not have such most common values, most_common_vals_null will be NULL. At least one of the common values in most_common_vals_null is NULL.
most_common_freqs_null | real[] | - | List of the frequencies of the most common values, that is, the number of occurrences of each value divided by the total number of rows (NULL if most_common_vals_null is NULL)
+ diff --git a/docs/dws/dev/dws_04_0729.html b/docs/dws/dev/dws_04_0729.html new file mode 100644 index 00000000..b4e3214c --- /dev/null +++ b/docs/dws/dev/dws_04_0729.html @@ -0,0 +1,58 @@ + + +

PG_GET_INVALID_BACKENDS

+

PG_GET_INVALID_BACKENDS displays the information about backend threads on the CN that are connected to the current standby DN.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_GET_INVALID_BACKENDS columns

Name

+

Type

+

Description

+

pid

+

bigint

+

Thread ID

+

node_name

+

text

+

Node information connected to the backend thread

+

dbname

+

name

+

Name of the connected database

+

backend_start

+

timestamp with time zone

+

Backend thread startup time

+

query

+

text

+

Query statement performed by the backend thread

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0730.html b/docs/dws/dev/dws_04_0730.html new file mode 100644 index 00000000..36fff8c3 --- /dev/null +++ b/docs/dws/dev/dws_04_0730.html @@ -0,0 +1,121 @@ + + +

PG_GET_SENDERS_CATCHUP_TIME

+

PG_GET_SENDERS_CATCHUP_TIME displays the catchup information of the currently active primary/standby instance sending thread on a single DN.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_GET_SENDERS_CATCHUP_TIME columns

Name

+

Type

+

Description

+

pid

+

bigint

+

Current sender thread ID

+

lwpid

+

integer

+

Current sender lwpid

+

local_role

+

text

+

Local role

+

peer_role

+

text

+

Peer role

+

state

+

text

+

Current sender's replication status

+

type

+

text

+

Current sender type

+

catchup_start

+

timestamp with time zone

+

Startup time of a catchup task

+

catchup_end

+

timestamp with time zone

+

End time of a catchup task

+

catchup_type

+

text

+

Catchup task type, full or incremental

+

catchup_bcm_filename

+

text

+

BCM file executed by the current catchup task

+

catchup_bcm_finished

+

integer

+

Number of BCM files completed by a catchup task

+

catchup_bcm_total

+

integer

+

Total number of BCM files to be operated by a catchup task

+

catchup_percent

+

text

+

Completion percentage of a catchup task

+

catchup_remaining_time

+

text

+

Estimated remaining time of a catchup task

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0731.html b/docs/dws/dev/dws_04_0731.html new file mode 100644 index 00000000..ce63d261 --- /dev/null +++ b/docs/dws/dev/dws_04_0731.html @@ -0,0 +1,44 @@ + + +

PG_GROUP

+

PG_GROUP displays the database role authentication and the relationship between roles.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 PG_GROUP columns

Name

+

Type

+

Description

+

groname

+

name

+

Group name

+

grosysid

+

oid

+

Group ID

+

grolist

+

oid[]

+

An array, including all the role IDs in this group

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0732.html b/docs/dws/dev/dws_04_0732.html new file mode 100644 index 00000000..3b9e196e --- /dev/null +++ b/docs/dws/dev/dws_04_0732.html @@ -0,0 +1,70 @@ + + +

PG_INDEXES

+

PG_INDEXES provides access to useful information about each index in the database.
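For example, the following query lists the indexes defined on a table (the table name customer_t1 is illustrative):
SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'customer_t1';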

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_INDEXES columns

Name

+

Type

+

Reference

+

Description

+

schemaname

+

name

+

PG_NAMESPACE.nspname

+

Name of the schema that contains tables and indexes

+

tablename

+

name

+

PG_CLASS.relname

+

Name of the table that the index serves

+

indexname

+

name

+

PG_CLASS.relname

+

Index name

+

tablespace

+

name

+

PG_TABLESPACE.spcname

+

Name of the tablespace that contains the index

+

indexdef

+

text

+

-

+

Index definition (a reconstructed CREATE INDEX command)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0733.html b/docs/dws/dev/dws_04_0733.html new file mode 100644 index 00000000..ea436df0 --- /dev/null +++ b/docs/dws/dev/dws_04_0733.html @@ -0,0 +1,146 @@ + + +

PG_JOB

+

The PG_JOB view replaces the PG_JOB system catalog in earlier versions and provides forward compatibility with earlier versions. The original PG_JOB system catalog is changed to the PG_JOBS system catalog. For details about PG_JOBS, see PG_JOBS.
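For example, the following query shows the status, failure count, and next scheduled run of each job visible in the view:
SELECT job_id, job_status, failure_count, next_run_date FROM pg_job;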

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_JOB columns

Name

+

Type

+

Description

+

job_id

+

bigint

+

Job ID

+

current_postgres_pid

+

bigint

+

If the current job is being executed, the PostgreSQL thread ID of this job is recorded. The default value is -1, indicating that the job is not being executed.

+

log_user

+

name

+

User name of the job creator

+

priv_user

+

name

+

User name of the job executor

+

dbname

+

name

+

Name of the database where the job is executed

+

node_name

+

name

+

CN node on which the job will be created and executed

+

job_status

+

text

+

Status of the current job. The value range is r, s, f, or d. The default value is s. The indications are as follows:

+
  • r=running
  • s=successfully finished
  • f=job failed
  • d=disable
+

If a job fails to be executed for 16 consecutive times, job_status is automatically set to d, and no more attempt will be made on this job.

+
NOTE:
  • When you disable scheduled tasks (by setting job_queue_processes to 0), the thread that monitors job execution is not started, so job_status is not updated and can be ignored.
  • Only when scheduled tasks are enabled (that is, when job_queue_processes is not 0) does the system update the value of job_status based on the real-time job status.
+
+

start_date

+

timestamp without time zone

+

Start time of the first job execution, precise to millisecond

+

next_run_date

+

timestamp without time zone

+

Scheduled time of the next job execution, accurate to millisecond

+

failure_count

+

smallint

+

Number of times the job has started and failed. If a job fails to be executed for 16 consecutive times, no more attempt will be made on it.

+

interval

+

text

+

Job execution interval

+

last_start_date

+

timestamp without time zone

+

Start time of the last job execution, accurate to millisecond

+

last_end_date

+

timestamp without time zone

+

End time of the last job execution, accurate to millisecond

+

last_suc_date

+

timestamp without time zone

+

Start time of the last successful job execution, accurate to millisecond

+

this_run_date

+

timestamp without time zone

+

Start time of the ongoing job execution, accurate to millisecond

+

nspname

+

name

+

Name of the namespace where a job is running

+

what

+

text

+

Job content

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0734.html b/docs/dws/dev/dws_04_0734.html new file mode 100644 index 00000000..854e3a72 --- /dev/null +++ b/docs/dws/dev/dws_04_0734.html @@ -0,0 +1,37 @@ + + +

PG_JOB_PROC

+

The PG_JOB_PROC view replaces the PG_JOB_PROC system catalog in earlier versions and provides forward compatibility with earlier versions. The original PG_JOB_PROC and PG_JOB system catalogs are merged into the PG_JOBS system catalog in the current version. For details about the PG_JOBS system catalog, see PG_JOBS.

+ +
+ + + + + + + + + + + + + +
Table 1 PG_JOB_PROC columns

Name

+

Type

+

Description

+

job_id

+

bigint

+

Job ID

+

what

+

text

+

Job content

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0735.html b/docs/dws/dev/dws_04_0735.html new file mode 100644 index 00000000..7f426c1d --- /dev/null +++ b/docs/dws/dev/dws_04_0735.html @@ -0,0 +1,146 @@ + + +

PG_JOB_SINGLE

+

PG_JOB_SINGLE displays job information about the current node.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_JOB_SINGLE columns

Name

+

Type

+

Description

+

job_id

+

bigint

+

Job ID

+

current_postgres_pid

+

bigint

+

If the current job is being executed, the PostgreSQL thread ID of this job is recorded. The default value is -1, indicating that the job is not being executed.

+

log_user

+

name

+

User name of the job creator

+

priv_user

+

name

+

User name of the job executor

+

dbname

+

name

+

Name of the database where the job is executed

+

node_name

+

name

+

CN node on which the job will be created and executed

+

job_status

+

text

+

Status of the current job. The value range is r, s, f, or d. The default value is s. The indications are as follows:

+
  • r=running
  • s=successfully finished
  • f=job failed
  • d=disable
+

If a job fails to be executed for 16 consecutive times, job_status is automatically set to d, and no more attempt will be made on this job.

+
NOTE:
  • When you disable scheduled tasks (by setting job_queue_processes to 0), the thread that monitors job execution is not started, so job_status is not updated and can be ignored.
  • Only when scheduled tasks are enabled (that is, when job_queue_processes is not 0) does the system update the value of job_status based on the real-time job status.
+
+

start_date

+

timestamp without time zone

+

Start time of the first job execution, precise to millisecond

+

next_run_date

+

timestamp without time zone

+

Scheduled time of the next job execution, accurate to millisecond

+

failure_count

+

smallint

+

Number of times the job has started and failed. If a job fails to be executed for 16 consecutive times, no more attempt will be made on it.

+

interval

+

text

+

Job execution interval

+

last_start_date

+

timestamp without time zone

+

Start time of the last job execution, accurate to millisecond

+

last_end_date

+

timestamp without time zone

+

End time of the last job execution, accurate to millisecond

+

last_suc_date

+

timestamp without time zone

+

Start time of the last successful job execution, accurate to millisecond

+

this_run_date

+

timestamp without time zone

+

Start time of the ongoing job execution, accurate to millisecond

+

nspname

+

name

+

Name of the namespace where a job is running

+

what

+

text

+

Job content

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0736.html b/docs/dws/dev/dws_04_0736.html new file mode 100644 index 00000000..c27cb42a --- /dev/null +++ b/docs/dws/dev/dws_04_0736.html @@ -0,0 +1,86 @@ + + +

PG_LIFECYCLE_DATA_DISTRIBUTE

+

PG_LIFECYCLE_DATA_DISTRIBUTE displays the distribution of cold and hot data in a multi-temperature table of OBS.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_LIFECYCLE_DATA_DISTRIBUTE columns

Name

+

Type

+

Description

+

schemaname

+

name

+

Schema name

+

tablename

+

name

+

Current table name

+

nodename

+

name

+

Node name

+

hotpartition

+

text

+

Hot partition on the DN

+

coldpartition

+

text

+

Cold partition on the DN

+

switchablepartition

+

text

+

Switchable partition on the DN

+

hotdatasize

+

text

+

Data size of the hot partition on the DN

+

colddatasize

+

text

+

Data size of the cold partition on the DN

+

switchabledatasize

+

text

+

Data size of the switchable partition on the DN

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0737.html b/docs/dws/dev/dws_04_0737.html new file mode 100644 index 00000000..e9a0eac4 --- /dev/null +++ b/docs/dws/dev/dws_04_0737.html @@ -0,0 +1,162 @@ + + +

PG_LOCKS

+

PG_LOCKS displays information about the locks held by open transactions.
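For example, the following query lists lock requests that have not yet been granted, a common starting point when diagnosing blocking:
SELECT locktype, database, relation, pid, mode FROM pg_locks WHERE granted = false;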

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_LOCKS columns

Name

+

Type

+

Reference

+

Description

+

locktype

+

text

+

-

+

Type of the locked object: relation, extend, page, tuple, transactionid, virtualxid, object, userlock, and advisory

+

database

+

oid

+

PG_DATABASE.oid

+

OID of the database in which the locked target exists

+
  • The OID is 0 if the target is a shared object.
  • The OID is NULL if the locked target is a transaction.
+

relation

+

oid

+

PG_CLASS.oid

+

OID of the relationship targeted by the lock. The value is NULL if the object is neither a relationship nor part of a relationship.

+

page

+

integer

+

-

+

Page number targeted by the lock within the relationship. If the object is neither a relation page nor row page, the value is NULL.

+

tuple

+

smallint

+

-

+

Row number targeted by the lock within the page. If the object is not a row, the value is NULL.

+

virtualxid

+

text

+

-

+

Virtual ID of the transaction targeted by the lock. If the object is not a virtual transaction ID, the value is NULL.

+

transactionid

+

xid

+

-

+

ID of the transaction targeted by the lock. If the object is not a transaction ID, the value is NULL.

+

classid

+

oid

+

PG_CLASS.oid

+

OID of the system table that contains the object. If the object is not a general database object, the value is NULL.

+

objid

+

oid

+

-

+

OID of the lock target within its system table. If the target is not a general database object, the value is NULL.

+

objsubid

+

smallint

+

-

+

Column number for a column in the table. The value is 0 if the target is some other object type. If the object is not a general database object, the value is NULL.

+

virtualtransaction

+

text

+

-

+

Virtual ID of the transaction holding or awaiting this lock

+

pid

+

bigint

+

-

+

Logical ID of the server thread holding or awaiting this lock. This is NULL if the lock is held by a prepared transaction.

+

mode

+

text

+

-

+

Lock mode held or desired by this thread. For more information about lock modes, see "LOCK" in GaussDB(DWS) SQL Syntax Reference.

+

+

granted

+

boolean

+

-

+
  • The value is true if the lock is a held lock.
  • The value is false if the lock is an awaited lock.
+

fastpath

+

boolean

+

-

+

Whether the lock is obtained through fast-path (true) or main lock table (false)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0738.html b/docs/dws/dev/dws_04_0738.html new file mode 100644 index 00000000..140d43a8 --- /dev/null +++ b/docs/dws/dev/dws_04_0738.html @@ -0,0 +1,72 @@ + + +

PG_NODE_ENV

+

PG_NODE_ENV displays the environment variable information about the current node.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_NODE_ENV columns

Name

+

Type

+

Description

+

node_name

+

text

+

Name of the current node

+

host

+

text

+

Host name of the current node

+

process

+

integer

+

Process ID of the current node

+

port

+

integer

+

Port ID of the current node

+

installpath

+

text

+

Installation directory of current node

+

datapath

+

text

+

Data directory of the current node

+

log_directory

+

text

+

Log directory of the current node

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0739.html b/docs/dws/dev/dws_04_0739.html new file mode 100644 index 00000000..4267ee72 --- /dev/null +++ b/docs/dws/dev/dws_04_0739.html @@ -0,0 +1,58 @@ + + +

PG_OS_THREADS

+

PG_OS_THREADS displays the status information about all the threads under the current node.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_OS_THREADS columns

Name

+

Type

+

Description

+

node_name

+

text

+

Name of the current node

+

pid

+

bigint

+

Thread number running under the current node process

+

lwpid

+

integer

+

Lightweight thread ID corresponding to the PID

+

thread_name

+

text

+

Thread name corresponding to the PID

+

creation_time

+

timestamp with time zone

+

Thread creation time corresponding to the PID

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0740.html b/docs/dws/dev/dws_04_0740.html new file mode 100644 index 00000000..7546f7a8 --- /dev/null +++ b/docs/dws/dev/dws_04_0740.html @@ -0,0 +1,87 @@ + + +

PG_POOLER_STATUS

+

PG_POOLER_STATUS displays the cached connection status in the pooler. PG_POOLER_STATUS can be queried only on the CN, and it displays the connection cache information about the pooler module.
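For example, the following query, run on a CN, counts the cached connections to each node and whether they are in use:
SELECT node_name, in_use, count(*) FROM pg_pooler_status GROUP BY node_name, in_use;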

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_POOLER_STATUS columns

Name

+

Type

+

Description

+

database

+

text

+

Database name

+

user_name

+

text

+

User name

+

tid

+

bigint

+

ID of a thread connected to the CN

+

node_oid

+

bigint

+

OID of the node connected

+

node_name

+

name

+

Name of the node connected

+

in_use

+

boolean

+

Whether the connection is in use

+
  • t (true): indicates that the connection is in use.
  • f (false): indicates that the connection is not in use.
+

fdsock

+

bigint

+

Peer socket.

+

remote_pid

+

bigint

+

Peer thread ID.

+

session_params

+

text

+

GUC session parameter delivered by the connection.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0741.html b/docs/dws/dev/dws_04_0741.html new file mode 100644 index 00000000..96c85da7 --- /dev/null +++ b/docs/dws/dev/dws_04_0741.html @@ -0,0 +1,59 @@ + + +

PG_PREPARED_STATEMENTS

+

PG_PREPARED_STATEMENTS displays all prepared statements that are available in the current session.
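For example, after preparing a statement in the current session (stmt_demo is an illustrative name), you can inspect it in the view:
PREPARE stmt_demo(int) AS SELECT $1;
SELECT name, statement, parameter_types, from_sql FROM pg_prepared_statements;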

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_PREPARED_STATEMENTS columns

Name

+

Type

+

Description

+

name

+

text

+

Identifier of the prepared statement

+

statement

+

text

+

Query string used to create this prepared statement. For prepared statements created through SQL, this is the PREPARE statement submitted by the client. For prepared statements created through the frontend/backend protocol, this is the text of the prepared statement itself.

+

prepare_time

+

timestamp with time zone

+

Timestamp when the prepared statement is created

+

parameter_types

+

regtype[]

+

Expected parameter types for the prepared statement in the form of an array of regtype. The OID corresponding to an element of this array can be obtained by casting the regtype value to oid.

+

from_sql

+

boolean

+

How a prepared statement was created

+
  • true: The prepared statement was created through the PREPARE statement.
  • false: The statement was prepared through the frontend/backend protocol.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0742.html b/docs/dws/dev/dws_04_0742.html new file mode 100644 index 00000000..17ff4930 --- /dev/null +++ b/docs/dws/dev/dws_04_0742.html @@ -0,0 +1,70 @@ + + +

PG_PREPARED_XACTS

+

PG_PREPARED_XACTS displays information about transactions that are currently prepared for two-phase commit.
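For example, the following query lists transactions that are still awaiting the second phase of a two-phase commit:
SELECT gid, prepared, owner, database FROM pg_prepared_xacts;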

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_PREPARED_XACTS columns

Name

+

Type

+

Reference

+

Description

+

transaction

+

xid

+

-

+

Numeric transaction identifier of the prepared transaction

+

gid

+

text

+

-

+

Global transaction identifier that was assigned to the transaction

+

prepared

+

timestamp with time zone

+

-

+

Time at which the transaction is prepared for commit

+

owner

+

name

+

PG_AUTHID.rolname

+

Name of the user that executes the transaction

+

database

+

name

+

PG_DATABASE.datname

+

Name of the database in which the transaction is executed

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0743.html b/docs/dws/dev/dws_04_0743.html new file mode 100644 index 00000000..b617c779 --- /dev/null +++ b/docs/dws/dev/dws_04_0743.html @@ -0,0 +1,58 @@ + + +

PG_QUERYBAND_ACTION

+

PG_QUERYBAND_ACTION displays information about the object associated with query_band and the query_band query order.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_QUERYBAND_ACTION columns

Name

+

Type

+

Description

+

qband

+

text

+

query_band key-value pairs

+

respool_id

+

oid

+

OID of the resource pool associated with query_band

+

respool

+

text

+

Name of the resource pool associated with query_band

+

priority

+

text

+

Intra-queue priority associated with query_band

+

qborder

+

integer

+

query_band query order

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0744.html b/docs/dws/dev/dws_04_0744.html new file mode 100644 index 00000000..493b7a39 --- /dev/null +++ b/docs/dws/dev/dws_04_0744.html @@ -0,0 +1,93 @@ + + +

PG_REPLICATION_SLOTS

+

PG_REPLICATION_SLOTS displays the replication node information.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_REPLICATION_SLOTS columns

Name

+

Type

+

Description

+

slot_name

+

text

+

Name of a replication node

+

plugin

+

name

+

Name of the output plug-in of the logical replication slot

+

slot_type

+

text

+

Type of a replication node

+

datoid

+

oid

+

OID of the database on the replication node

+

database

+

name

+

Name of the database on the replication node

+

active

+

boolean

+

Whether the replication node is active

+

xmin

+

xid

+

Transaction ID of the replication node

+

catalog_xmin

+

text

+

ID of the earliest-decoded transaction corresponding to the logical replication slot

+

restart_lsn

+

text

+

Xlog file information on the replication node

+

dummy_standby

+

boolean

+

Whether the replication node is the dummy standby node

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0745.html b/docs/dws/dev/dws_04_0745.html new file mode 100644 index 00000000..166e93c2 --- /dev/null +++ b/docs/dws/dev/dws_04_0745.html @@ -0,0 +1,232 @@ + + +

PG_ROLES

+

PG_ROLES displays information about database roles.
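For example, the following query lists the roles that can log in, together with their administrator flags and connection limits:
SELECT rolname, rolsystemadmin, rolconnlimit FROM pg_roles WHERE rolcanlogin = true;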

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_ROLES columns

Name

+

Type

+

Reference

+

Description

+

rolname

+

name

+

-

+

Role name

+

rolsuper

+

boolean

+

-

+

Whether the role is the initial system administrator with the highest permission

+

rolinherit

+

boolean

+

-

+

Whether the role automatically inherits the permissions of the roles it is a member of

+

rolcreaterole

+

boolean

+

-

+

Whether the role can create other roles

+

rolcreatedb

+

boolean

+

-

+

Whether the role can create databases

+

rolcatupdate

+

boolean

+

-

+

Whether the role can update system tables directly. Only the initial system administrator whose usesysid is 10 has this permission. It is not available for other users.

+

rolcanlogin

+

boolean

+

-

+

Whether the role can log in to the database

+

rolreplication

+

boolean

+

-

+

Whether the role can be replicated

+

rolauditadmin

+

boolean

+

-

+

Whether the role is an audit system administrator

+

rolsystemadmin

+

boolean

+

-

+

Whether the role is a system administrator

+

rolconnlimit

+

integer

+

-

+

Maximum number of concurrent connections this role can make if it can log in. The value -1 indicates no limit.

+

rolpassword

+

text

+

-

+

Not the password (always reads as ********)

+

rolvalidbegin

+

timestamp with time zone

+

-

+

Account validity start time; null if no start time

+

rolvaliduntil

+

timestamp with time zone

+

-

+

Password expiry time; null if no expiration

+

rolrespool

+

name

+

-

+

Resource pool that a user can use

+

rolparentid

+

oid

+

PG_AUTHID.rolparentid

+

OID of a group user to which the user belongs

+

roltabspace

+

text

+

-

+

Storage space of the user's permanent tables.

+

roltempspace

+

text

+

-

+

Storage space of the user's temporary tables.

+

rolspillspace

+

text

+

-

+

Operator disk flushing (spill) space of the user.

+

rolconfig

+

text[]

+

-

+

Session defaults for runtime configuration variables

+

oid

+

oid

+

PG_AUTHID.oid

+

ID of the role

+

roluseft

+

boolean

+

PG_AUTHID.roluseft

+

Whether the role can perform operations on foreign tables

+

nodegroup

+

name

+

-

+

Name of the logical cluster associated with the role. If no logical cluster is associated, this column is left empty.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0746.html b/docs/dws/dev/dws_04_0746.html new file mode 100644 index 00000000..e10f5448 --- /dev/null +++ b/docs/dws/dev/dws_04_0746.html @@ -0,0 +1,51 @@ + + +

PG_RULES

+

PG_RULES displays information about rewrite rules.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_RULES columns

Name

+

Type

+

Description

+

schemaname

+

name

+

Name of the schema that contains the table

+

tablename

+

name

+

Name of the table the rule is for

+

rulename

+

name

+

Rule name

+

definition

+

text

+

Rule definition (a reconstructed creation command)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0747.html b/docs/dws/dev/dws_04_0747.html new file mode 100644 index 00000000..9e4b2d96 --- /dev/null +++ b/docs/dws/dev/dws_04_0747.html @@ -0,0 +1,93 @@ + + +

PG_RUNNING_XACTS

+

PG_RUNNING_XACTS displays the running transaction information on the current node.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_RUNNING_XACTS columns

Name

+

Type

+

Description

+

handle

+

integer

+

Handle corresponding to the transaction in GTM

+

gxid

+

xid

+

Transaction ID

+

state

+

tinyint

+

Transaction status (3: prepared; 0: starting)

+

node

+

text

+

Node name

+

xmin

+

xid

+

Minimum transaction ID xmin on the node

+

vacuum

+

boolean

+

Whether the current transaction is lazy vacuum

+

timeline

+

bigint

+

Number of database restarts

+

prepare_xid

+

xid

+

Transaction ID in the prepared status. If the status is not prepared, the value is 0.

+

pid

+

bigint

+

Thread ID corresponding to the transaction

+

next_xid

+

xid

+

Transaction ID sent from a CN to a DN

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0748.html b/docs/dws/dev/dws_04_0748.html new file mode 100644 index 00000000..9cb3414d --- /dev/null +++ b/docs/dws/dev/dws_04_0748.html @@ -0,0 +1,97 @@ + + +

PG_SECLABELS

+

PG_SECLABELS displays information about security labels.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SECLABEL columns

Name

+

Type

+

Reference

+

Description

+

objoid

+

oid

+

Any OID column

+

OID of the object this security label pertains to

+

classoid

+

oid

+

PG_CLASS.oid

+

OID of the system table that contains the object

+

objsubid

+

integer

+

-

+

For a security label on a table column, this is the column number (the objoid and classoid refer to the table itself). For all other object types, this column is 0.

+

objtype

+

text

+

-

+

Type of the object to which this label applies

+

objnamespace

+

oid

+

PG_NAMESPACE.oid

+

OID of the namespace for this object, if applicable; otherwise NULL.

+

objname

+

text

+

-

+

Name of the object to which the label applies

+

provider

+

text

+

PG_SECLABEL.provider

+

Label provider associated with this label

+

label

+

text

+

PG_SECLABEL.label

+

Security label applied to this object

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0749.html b/docs/dws/dev/dws_04_0749.html new file mode 100644 index 00000000..e1b68a5d --- /dev/null +++ b/docs/dws/dev/dws_04_0749.html @@ -0,0 +1,196 @@ + + +

PG_SESSION_WLMSTAT

+

PG_SESSION_WLMSTAT displays the corresponding load management information about the task currently executed by the user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SESSION_WLMSTAT columns

Column

+

Type

+

Description

+

datid

+

oid

+

OID of the database this backend is connected to

+

datname

+

name

+

Name of the database the backend is connected to

+

threadid

+

bigint

+

ID of the backend thread

+

processid

+

integer

+

Thread PID of the backend

+

usesysid

+

oid

+

OID of the user who logged into the backend

+

appname

+

text

+

Name of the application that is connected to the backend

+

usename

+

name

+

Name of the user logged in to the backend

+

priority

+

bigint

+

Priority of Cgroup where the statement is located

+

attribute

+

text

+

Statement attributes

+
  • Ordinary: default attribute of a statement before it is parsed by the database
+
  • Simple: simple statements
  • Complicated: complicated statements
  • Internal: internal statement of the database
+

block_time

+

bigint

+

Pending duration of the statements by now (unit: s)

+

elapsed_time

+

bigint

+

Actual execution duration of the statements by now (unit: s)

+

total_cpu_time

+

bigint

+

Total CPU usage duration of the statement on the DN in the last period (unit: s)

+

cpu_skew_percent

+

integer

+

CPU usage skew ratio of the statement on the DNs in the last period

+

statement_mem

+

integer

+

Estimated memory required for statement execution. This column is reserved.

+

active_points

+

integer

+

Number of concurrently active points occupied by the statement in the resource pool

+

dop_value

+

integer

+

DOP value obtained by the statement from the resource pool

+

control_group

+

text

+

Cgroup currently used by the statement

+

status

+

text

+

Status of a statement, including:

+
  • pending
  • running
  • finished (if enqueue is set to StoredProc or Transaction, this state indicates that only some of the jobs in the statement have been executed; the state persists until the statement finishes)
  • aborted: terminated unexpectedly
  • active: normal status except for those above
  • unknown: unknown status
+

enqueue

+

text

+

Current queuing status of the statements, including:

+
  • Global: queuing in the global queue
  • Respool: queuing in the resource pool queue
  • CentralQueue: queuing on the CCN
  • Transaction: being in a transaction block
  • StoredProc: being in a stored procedure
  • None: not in a queue
  • Forced None: being forcibly executed (as a transaction block statement or stored procedure statement) because the statement's waiting time exceeds the specified value
+

resource_pool

+

name

+

Current resource pool where the statements are located.

+

query

+

text

+

Text of this backend's most recent query. If state is active, this column shows the query being executed. In all other states, it shows the last query that was executed.

+

isplana

+

bool

+

In logical cluster mode, indicates whether a statement occupies the resources of other logical clusters. The default value is f (does not occupy).

+

node_group

+

text

+

Logical cluster of the user running the statement

+

lane

+

text

+

Fast or slow lane for statement queries.

+
  • fast: fast lane
  • slow: slow lane
  • none: not controlled
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0750.html b/docs/dws/dev/dws_04_0750.html new file mode 100644 index 00000000..2b9dac16 --- /dev/null +++ b/docs/dws/dev/dws_04_0750.html @@ -0,0 +1,87 @@ + + +

PG_SESSION_IOSTAT

+

PG_SESSION_IOSTAT displays the I/O load management information about the task currently executed by the user.

+

IOPS is counted by ones for column storage and by thousands for row storage.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SESSION_IOSTAT columns

Name

+

Type

+

Description

+

query_id

+

bigint

+

Job ID

+

mincurriops

+

integer

+

Minimum I/O of the current job across DNs

+

maxcurriops

+

integer

+

Maximum I/O of the current job across DNs

+

minpeakiops

+

integer

+

Minimum peak I/O of the current job across DNs

+

maxpeakiops

+

integer

+

Maximum peak I/O of the current job across DNs

+

io_limits

+

integer

+

io_limits set for the job

+

io_priority

+

text

+

io_priority set for the job

+

query

+

text

+

Job

+

node_group

+

text

+

Logical cluster of the user running the job

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0751.html b/docs/dws/dev/dws_04_0751.html new file mode 100644 index 00000000..7ee63d89 --- /dev/null +++ b/docs/dws/dev/dws_04_0751.html @@ -0,0 +1,135 @@ + + +

PG_SETTINGS

+

PG_SETTINGS displays information about parameters of the running database.
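For example, the following query shows the current value, unit, and allowed range of a single parameter (work_mem is used for illustration):
SELECT name, setting, unit, min_val, max_val FROM pg_settings WHERE name = 'work_mem';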

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SETTINGS columns

Name

+

Type

+

Description

+

name

+

text

+

Parameter name

+

setting

+

text

+

Current value of the parameter

+

unit

+

text

+

Implicit unit of the parameter

+

category

+

text

+

Logical group of the parameter

+

short_desc

+

text

+

Brief description of the parameter

+

extra_desc

+

text

+

Detailed description of the parameter

+

context

+

text

+

Context of parameter values including internal, backend, superuser, and user

+

vartype

+

text

+

Parameter type. It can be bool, enum, integer, real, or string.

+

source

+

text

+

Method of assigning the parameter value

+

min_val

+

text

+

Minimum value of the parameter. If the parameter type is not numeric data, the value of this column is null.

+

max_val

+

text

+

Maximum value of the parameter. If the parameter type is not numeric data, the value of this column is null.

+

enumvals

+

text[]

+

Valid values of an enum-typed parameter. If the parameter type is not enum, the value of this column is null.

+

boot_val

+

text

+

Default parameter value used upon the database startup

+

reset_val

+

text

+

Default parameter value used upon the database reset

+

sourcefile

+

text

+

Configuration file used to set parameter values. If parameter values are not configured using the configuration file, the value of this column is null.

+

sourceline

+

integer

+

Row number of the configuration file for setting parameter values. If parameter values are not configured using the configuration file, the value of this column is null.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0752.html b/docs/dws/dev/dws_04_0752.html new file mode 100644 index 00000000..37cb0694 --- /dev/null +++ b/docs/dws/dev/dws_04_0752.html @@ -0,0 +1,161 @@ + + +

PG_SHADOW

+

PG_SHADOW displays properties of all roles that are marked as rolcanlogin in PG_AUTHID.

+

The name stems from the fact that this table should not be readable by the public since it contains passwords. PG_USER is a publicly readable view on PG_SHADOW that blanks out the password column.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SHADOW columns

Name

+

Type

+

Reference

+

Description

+

usename

+

name

+

PG_AUTHID.rolname

+

User name

+

usesysid

+

oid

+

PG_AUTHID.oid

+

ID of a user

+

usecreatedb

+

boolean

+

-

+

Indicates that the user can create databases.

+

usesuper

+

boolean

+

-

+

Indicates that the user is an administrator.

+

usecatupd

+

boolean

+

-

+

Indicates that the user can update system catalogs. Even the system administrator cannot do this unless this column is true.

+

userepl

+

boolean

+

-

+

User can initiate streaming replication and put the system in and out of backup mode.

+

passwd

+

text

+

-

+

Password (possibly encrypted); null if none. See PG_AUTHID for details about how encrypted passwords are stored.

+

valbegin

+

timestamp with time zone

+

-

+

Account validity start time; null if no start time

+

valuntil

+

timestamp with time zone

+

-

+

Password expiry time; null if no expiration

+

respool

+

name

+

-

+

Resource pool used by the user

+

parent

+

oid

+

-

+

Parent resource pool

+

spacelimit

+

text

+

-

+

Storage space of the user's permanent tables.

+

tempspacelimit

+

text

+

-

+

Storage space of the user's temporary tables.

+

spillspacelimit

+

text

+

-

+

Operator disk flushing (spill) space of the user.

+

useconfig

+

text[ ]

+

-

+

Session defaults for runtime configuration variables

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0753.html b/docs/dws/dev/dws_04_0753.html new file mode 100644 index 00000000..173307d9 --- /dev/null +++ b/docs/dws/dev/dws_04_0753.html @@ -0,0 +1,65 @@ + + +

PG_SHARED_MEMORY_DETAIL

+

PG_SHARED_MEMORY_DETAIL displays usage information about all the shared memory contexts.
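For example, the following query lists the ten shared memory contexts with the largest total size:
SELECT contextname, totalsize, freesize, usedsize FROM pg_shared_memory_detail ORDER BY totalsize DESC LIMIT 10;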

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_SHARED_MEMORY_DETAIL columns

Name

+

Type

+

Description

+

contextname

+

text

+

Name of the context in the memory

+

level

+

smallint

+

Hierarchy of the memory context

+

parent

+

text

+

Context of the parent memory

+

totalsize

+

bigint

+

Total size of the shared memory, in bytes

+

freesize

+

bigint

+

Remaining size of the shared memory, in bytes

+

usedsize

+

bigint

+

Used size of the shared memory, in bytes

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0754.html b/docs/dws/dev/dws_04_0754.html new file mode 100644 index 00000000..5de680df --- /dev/null +++ b/docs/dws/dev/dws_04_0754.html @@ -0,0 +1,163 @@ + + +

PG_STATS

+

PG_STATS displays the single-column statistics stored in the pg_statistic table.
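For example, the following query shows the key single-column statistics collected for a table (the table name customer_t1 is illustrative):
SELECT attname, null_frac, avg_width, n_distinct FROM pg_stats WHERE tablename = 'customer_t1';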

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATS columns

Name

+

Type

+

Reference

+

Description

+

schemaname

+

name

+

PG_NAMESPACE.nspname

+

Name of the schema that contains the table

+

tablename

+

name

+

PG_CLASS.relname

+

Name of the table

+

attname

+

name

+

PG_ATTRIBUTE.attname

+

Column name

+

inherited

+

boolean

+

-

+

If the value is true, the statistics include inherited sub-columns; otherwise, only the column in the specified table is described.

+

null_frac

+

real

+

-

+

Percentage of column entries that are null

+

avg_width

+

integer

+

-

+

Average width in bytes of column's entries

+

n_distinct

+

real

+

-

+
  • Estimated number of distinct values in the column if the value is greater than 0
  • Negative of the number of distinct values divided by the number of rows if the value is less than 0
+

The negated form is used when ANALYZE believes that the number of distinct values is likely to increase as the table grows.

+

The positive form is used when the column seems to have a fixed number of possible values. For example, -1 indicates a unique column in which the number of distinct values is the same as the number of rows.

+

n_dndistinct

+

real

+

-

+

Number of unique non-null data values in the dn1 column

+
  • Exact number of distinct values if the value is greater than 0
  • Negative of the number of distinct values divided by the number of rows if the value is less than 0. (For example, if each value in a column appears twice on average, n_dndistinct is -0.5.)
  • The number of distinct values is unknown if the value is 0.
+

most_common_vals

+

anyarray

+

-

+

List of the most common values in a column. If the column does not have most common values, this column will be NULL.

+

most_common_freqs

+

real[]

+

-

+

List of the frequencies of the most common values, that is, the number of occurrences of each value divided by the total number of rows. (NULL if most_common_vals is NULL)

+

histogram_bounds

+

anyarray

+

-

+

List of values that divide the column's values into groups of equal proportion. The values in most_common_vals, if present, are omitted from this histogram calculation. This field is null if the field data type does not have a < operator or if the most_common_vals list accounts for the entire population.

+

correlation

+

real

+

-

+

Statistical correlation between physical row ordering and logical ordering of the column values. It ranges from -1 to +1. When the value is near to -1 or +1, an index scan on the column is estimated to be cheaper than when it is near to zero, due to reduction of random access to the disk. This column is null if the column data type does not have a < operator.

+

most_common_elems

+

anyarray

+

-

+

Specifies a list of the most frequently appearing non-null element values.

+

most_common_elem_freqs

+

real[]

+

-

+

Specifies a list of the frequencies of the most common element values.

+

elem_count_histogram

+

real[]

+

-

+

Specifies a histogram of the counts of distinct non-null element values.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0755.html b/docs/dws/dev/dws_04_0755.html new file mode 100644 index 00000000..3610206e --- /dev/null +++ b/docs/dws/dev/dws_04_0755.html @@ -0,0 +1,177 @@ + + +

PG_STAT_ACTIVITY

+

PG_STAT_ACTIVITY displays information about the current user's queries.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_ACTIVITY columns

Name

+

Type

+

Description

+

datid

+

oid

+

OID of the database that the user session connects to in the backend

+

datname

+

name

+

Name of the database that the user session connects to in the backend

+

pid

+

bigint

+

Process ID of the backend

+

usesysid

+

oid

+

OID of the user logging in to the backend

+

usename

+

name

+

Name of the user logging in to the backend

+

application_name

+

text

+

Name of the application connected to the backend

+

client_addr

+

inet

+

IP address of the client connected to the backend. If this column is null, it indicates either that the client is connected via a Unix socket on the server machine or that this is an internal process such as autovacuum.

+

client_hostname

+

text

+

Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column will only be non-null for IP connections, and only when log_hostname is enabled.

+

client_port

+

integer

+

TCP port number that the client uses for communication with this backend, or -1 if a Unix socket is used

+

backend_start

+

timestamp with time zone

+

Startup time of the backend process, that is, the time when the client connects to the server.

+

xact_start

+

timestamp with time zone

+

Time when the current transaction was started, or NULL if no transaction is active. If the current query is the first of its transaction, this column is equal to the query_start column.

+

query_start

+

timestamp with time zone

+

Time when the currently active query was started, or if state is not active, when the last query was started

+

state_change

+

timestamp with time zone

+

Time for the last status change

+

waiting

+

boolean

+

Whether the backend is currently waiting on a lock. If it is, its value is true.

+

enqueue

+

text

+

Queuing status of a statement. Its value can be:

+
  • waiting in queue: The statement is in the queue.
  • waiting in global queue: The statement is in the global queue.
  • waiting in respool queue: The statement is in the resource pool queue.
  • waiting in ccn queue: The job is in the CCN queue.
  • Empty: The statement is running.
+

state

+

text

+

Current overall state of this backend. Its value can be:

+
  • active: The backend is executing queries.
  • idle: The backend is waiting for new client commands.
  • idle in transaction: The backend is in a transaction, but there is no statement being executed in the transaction.
  • idle in transaction (aborted): The backend is in a transaction, but there are statements failed in the transaction.
  • fastpath function call: The backend is executing a fast-path function.
  • disabled: This state is reported if track_activities is disabled in this backend.
+
NOTE:

Common users can view only their own session status. The state information of other accounts is empty. For example, after user judy is connected to the database, the state information of user joe and the initial user omm in pg_stat_activity is empty.

+
SELECT datname, usename, usesysid, state, pid FROM pg_stat_activity;
 datname  | usename | usesysid | state  |       pid
----------+---------+----------+--------+-----------------
 gaussdb  | dbadmin |       10 |        | 139968752121616
 gaussdb  | dbadmin |       10 |        | 139968903116560
 db_tpcds | judy    |    16398 | active | 139968391403280
 gaussdb  | dbadmin |       10 |        | 139968643069712
 gaussdb  | dbadmin |       10 |        | 139968680818448
 gaussdb  | joe     |    16390 |        | 139968563377936
(6 rows)
+
+

resource_pool

+

name

+

Resource pool used by the user

+

query_id

+

bigint

+

ID of a query

+

query

+

text

+

Text of this backend's most recent query. If state is active, this column shows the query being executed. In all other states, it shows the last query that was executed.

+

connection_info

+

text

+

A string in JSON format recording the driver type, driver version, driver deployment path, and process owner of the connected database (for details, see connection_info)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0757.html b/docs/dws/dev/dws_04_0757.html new file mode 100644 index 00000000..715570ff --- /dev/null +++ b/docs/dws/dev/dws_04_0757.html @@ -0,0 +1,80 @@ + + +

PG_STAT_ALL_INDEXES

+

PG_STAT_ALL_INDEXES displays access information about all indexes in the database, with information about each index displayed in a row.

+

Indexes can be used via either simple index scans or "bitmap" index scans. In a bitmap scan the output of several indexes can be combined via AND or OR rules, so it is difficult to associate individual heap row fetches with specific indexes when a bitmap scan is used. Therefore, a bitmap scan increments the pg_stat_all_indexes.idx_tup_read count(s) for the index(es) it uses, and it increments the pg_stat_all_tables.idx_tup_fetch count for the table, but it does not affect pg_stat_all_indexes.idx_tup_fetch.
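For example, the following query finds indexes on which no scan has been initiated, which are candidates for review:
SELECT schemaname, relname, indexrelname FROM pg_stat_all_indexes WHERE idx_scan = 0;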

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_ALL_INDEXES columns

Name

+

Type

+

Description

+

relid

+

oid

+

OID of the table for this index

+

indexrelid

+

oid

+

OID of this index

+

schemaname

+

name

+

Name of the schema this index is in

+

relname

+

name

+

Name of the table for this index

+

indexrelname

+

name

+

Name of this index

+

idx_scan

+

bigint

+

Number of index scans initiated on this index

+

idx_tup_read

+

bigint

+

Number of index entries returned by scans on this index

+

idx_tup_fetch

+

bigint

+

Number of live table rows fetched by simple index scans using this index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0758.html b/docs/dws/dev/dws_04_0758.html new file mode 100644 index 00000000..72862ed1 --- /dev/null +++ b/docs/dws/dev/dws_04_0758.html @@ -0,0 +1,177 @@ + + +

PG_STAT_ALL_TABLES

+

PG_STAT_ALL_TABLES displays access information about all rows in all tables (including TOAST tables) in the database.
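For example, the following query lists the tables with the largest estimated number of dead rows, which may benefit from a vacuum:
SELECT schemaname, relname, n_live_tup, n_dead_tup, last_vacuum FROM pg_stat_all_tables ORDER BY n_dead_tup DESC LIMIT 10;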

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_ALL_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Name of the table

+

seq_scan

+

bigint

+

Number of sequential scans started on the table

+

seq_tup_read

+

bigint

+

Number of rows that have live data fetched by sequential scans

+

idx_scan

+

bigint

+

Number of index scans

+

idx_tup_fetch

+

bigint

+

Number of rows that have live data fetched by index scans

+

n_tup_ins

+

bigint

+

Number of rows inserted

+

n_tup_upd

+

bigint

+

Number of rows updated

+

n_tup_del

+

bigint

+

Number of rows deleted

+

n_tup_hot_upd

+

bigint

+

Number of rows updated by HOT (no separate index update is required)

+

n_live_tup

+

bigint

+

Estimated number of live rows

+

n_dead_tup

+

bigint

+

Estimated number of dead rows

+

last_vacuum

+

timestamp with time zone

+

Last time at which this table was manually vacuumed (excluding VACUUM FULL)

+

last_autovacuum

+

timestamp with time zone

+

Last time at which this table was automatically vacuumed

+

last_analyze

+

timestamp with time zone

+

Last time at which this table was analyzed

+

last_autoanalyze

+

timestamp with time zone

+

Last time at which this table was automatically analyzed

+

vacuum_count

+

bigint

+

Number of vacuum operations (excluding VACUUM FULL)

+

autovacuum_count

+

bigint

+

Number of autovacuum operations

+

analyze_count

+

bigint

+

Number of analyze operations

+

autoanalyze_count

+

bigint

+

Number of autoanalyze operations

+

last_data_changed

+

timestamp with time zone

+

Last time at which this table was updated (by INSERT/UPDATE/DELETE or EXCHANGE/TRUNCATE/DROP partition). This column is recorded only on the local CN.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0759.html b/docs/dws/dev/dws_04_0759.html new file mode 100644 index 00000000..4d4bb639 --- /dev/null +++ b/docs/dws/dev/dws_04_0759.html @@ -0,0 +1,79 @@ + + +

PG_STAT_BAD_BLOCK

+

PG_STAT_BAD_BLOCK displays statistics about page or CU verification failures after a node is started.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_BAD_BLOCK columns

Name

+

Type

+

Description

+

nodename

+

text

+

Node name

+

databaseid

+

integer

+

Database OID

+

tablespaceid

+

integer

+

Tablespace OID

+

relfilenode

+

integer

+

File object ID

+

forknum

+

integer

+

File type

+

error_count

+

integer

+

Number of verification failures

+

first_time

+

timestamp with time zone

+

Time of the first occurrence

+

last_time

+

timestamp with time zone

+

Time of the latest occurrence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0760.html b/docs/dws/dev/dws_04_0760.html new file mode 100644 index 00000000..c925b1ae --- /dev/null +++ b/docs/dws/dev/dws_04_0760.html @@ -0,0 +1,100 @@ + + +

PG_STAT_BGWRITER

+

PG_STAT_BGWRITER displays statistics about the background writer process's activity.
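For example, the following query compares scheduled checkpoints with requested ones and shows how many buffers were written by checkpoints, the background writer, and backends:
SELECT checkpoints_timed, checkpoints_req, buffers_checkpoint, buffers_clean, buffers_backend FROM pg_stat_bgwriter;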

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_BGWRITER columns

Name

+

Type

+

Description

+

checkpoints_timed

+

bigint

+

Number of scheduled checkpoints that have been performed

+

checkpoints_req

+

bigint

+

Number of requested checkpoints that have been performed

+

checkpoint_write_time

+

double precision

+

Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds

+

checkpoint_sync_time

+

double precision

+

Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds

+

buffers_checkpoint

+

bigint

+

Number of buffers written during checkpoints

+

buffers_clean

+

bigint

+

Number of buffers written by the background writer

+

maxwritten_clean

+

bigint

+

Number of times the background writer stopped a cleaning scan because it had written too many buffers

+

buffers_backend

+

bigint

+

Number of buffers written directly by a backend

+

buffers_backend_fsync

+

bigint

+

Number of times that a backend has to execute fsync

+

buffers_alloc

+

bigint

+

Number of buffers allocated

+

stats_reset

+

timestamp with time zone

+

Time at which these statistics were reset

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0761.html b/docs/dws/dev/dws_04_0761.html new file mode 100644 index 00000000..d48a5f5a --- /dev/null +++ b/docs/dws/dev/dws_04_0761.html @@ -0,0 +1,156 @@ + + +

PG_STAT_DATABASE

+

PG_STAT_DATABASE displays the status and statistics of each database on the current node.
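For example, the buffer cache hit ratio of each database on the current node can be derived from the blks_hit and blks_read counters described below:
SELECT datname, blks_hit::numeric / nullif(blks_hit + blks_read, 0) AS cache_hit_ratio FROM pg_stat_database;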

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_DATABASE columns

Name

+

Type

+

Description

+

datid

+

oid

+

Database OID

+

datname

+

name

+

Database name

+

numbackends

+

integer

+

Number of backends currently connected to this database on the current node. This is the only column in this view that reflects the current state value. All columns return the accumulated value since the last reset.

+

xact_commit

+

bigint

+

Number of transactions in this database that have been committed on the current node

+

xact_rollback

+

bigint

+

Number of transactions in this database that have been rolled back on the current node

+

blks_read

+

bigint

+

Number of disk blocks read in this database on the current node

+

blks_hit

+

bigint

+

Number of disk blocks found in the buffer cache on the current node, that is, the number of blocks hit in the cache. (This only includes hits in the GaussDB(DWS) buffer cache, not in the file system cache.)

+

tup_returned

+

bigint

+

Number of rows returned by queries in this database on the current node

+

tup_fetched

+

bigint

+

Number of rows fetched by queries in this database on the current node

+

tup_inserted

+

bigint

+

Number of rows inserted in this database on the current node

+

tup_updated

+

bigint

+

Number of rows updated in this database on the current node

+

tup_deleted

+

bigint

+

Number of rows deleted from this database on the current node

+

conflicts

+

bigint

+

Number of queries canceled due to database recovery conflicts on the current node (conflicts occurring only on the standby server). For details, see PG_STAT_DATABASE_CONFLICTS.

+

temp_files

+

bigint

+

Number of temporary files created by this database on the current node. All temporary files are counted, regardless of why the temporary file was created (for example, sorting or hashing), and regardless of the log_temp_files setting.

+

temp_bytes

+

bigint

+

Size of temporary files written to this database on the current node. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting.

+

deadlocks

+

bigint

+

Number of deadlocks in this database on the current node

+

blk_read_time

+

double precision

+

Time spent reading data file blocks by backends in this database on the current node, in milliseconds

+

blk_write_time

+

double precision

+

Time spent writing into data file blocks by backends in this database on the current node, in milliseconds

+

stats_reset

+

timestamp with time zone

+

Time when the database statistics are reset on the current node

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0762.html b/docs/dws/dev/dws_04_0762.html new file mode 100644 index 00000000..d7fe28ff --- /dev/null +++ b/docs/dws/dev/dws_04_0762.html @@ -0,0 +1,72 @@ + + +

PG_STAT_DATABASE_CONFLICTS

+

PG_STAT_DATABASE_CONFLICTS displays statistics about database conflicts.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_DATABASE_CONFLICTS columns

Name

+

Type

+

Description

+

datid

+

oid

+

Database OID

+

datname

+

name

+

Database name

+

confl_tablespace

+

bigint

+

Number of conflicting tablespaces

+

confl_lock

+

bigint

+

Number of conflicting locks

+

confl_snapshot

+

bigint

+

Number of conflicting snapshots

+

confl_bufferpin

+

bigint

+

Number of conflicting buffers

+

confl_deadlock

+

bigint

+

Number of conflicting deadlocks

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0763.html b/docs/dws/dev/dws_04_0763.html new file mode 100644 index 00000000..c0fc4df2 --- /dev/null +++ b/docs/dws/dev/dws_04_0763.html @@ -0,0 +1,80 @@ + + +

PG_STAT_GET_MEM_MBYTES_RESERVED

+

PG_STAT_GET_MEM_MBYTES_RESERVED displays the current activity information of a thread stored in memory. You need to specify the thread ID (pid in PG_STAT_ACTIVITY) in the query. If the thread ID is set to 0, the current thread ID is used. For example:

+
SELECT pg_stat_get_mem_mbytes_reserved(0);
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_GET_MEM_MBYTES_RESERVED columns

Parameter

+

Description

+

ConnectInfo

+

Connection information

+

ParctlManager

+

Concurrency management information

+

GeneralParams

+

Basic parameter information

+

GeneralParams RPDATA

+

Basic resource pool information

+

ExceptionManager

+

Exception management information

+

CollectInfo

+

Collection information

+

GeneralInfo

+

Basic information

+

ParctlState

+

Concurrency status information

+

CPU INFO

+

CPU information

+

ControlGroup

+

Cgroup information

+

IOSTATE

+

I/O status information

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0764.html b/docs/dws/dev/dws_04_0764.html new file mode 100644 index 00000000..5a1cab2d --- /dev/null +++ b/docs/dws/dev/dws_04_0764.html @@ -0,0 +1,65 @@ + + +

PG_STAT_USER_FUNCTIONS

+

PG_STAT_USER_FUNCTIONS displays status information about user-defined functions in the namespace (only functions written in non-internal languages are tracked).

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_USER_FUNCTIONS columns

Name

+

Type

+

Description

+

funcid

+

oid

+

Function OID

+

schemaname

+

name

+

Schema name

+

funcname

+

name

+

Function name

+

calls

+

bigint

+

Number of times this function has been called

+

total_time

+

double precision

+

Total time spent in this function and all other functions called by it

+

self_time

+

double precision

+

Total time spent in this function itself, excluding other functions called by it

+
+
+
+
+ +
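For example, a sketch that ranks user-defined functions by the time spent in the function body itself, using only the columns in Table 1:
-- Sketch: the ten most expensive user-defined functions by self time.
SELECT schemaname, funcname, calls, total_time, self_time
  FROM pg_stat_user_functions
 ORDER BY self_time DESC
 LIMIT 10;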
+ diff --git a/docs/dws/dev/dws_04_0765.html b/docs/dws/dev/dws_04_0765.html new file mode 100644 index 00000000..f260adf2 --- /dev/null +++ b/docs/dws/dev/dws_04_0765.html @@ -0,0 +1,79 @@ + + +

PG_STAT_USER_INDEXES

+

PG_STAT_USER_INDEXES displays information about the index status of user-defined ordinary tables and TOAST tables.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_USER_INDEXES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID for the index

+

indexrelid

+

oid

+

Index OID

+

schemaname

+

name

+

Schema name for the index

+

relname

+

name

+

Table name for the index

+

indexrelname

+

name

+

Index name

+

idx_scan

+

bigint

+

Number of index scans

+

idx_tup_read

+

bigint

+

Number of index entries returned by scans on this index

+

idx_tup_fetch

+

bigint

+

Number of rows that have live data fetched by index scans

+
+
+
+
+ +
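A common use of this view is finding indexes that are never scanned; a minimal sketch:
-- Sketch: indexes with no recorded scans (candidates for review).
SELECT schemaname, relname, indexrelname
  FROM pg_stat_user_indexes
 WHERE idx_scan = 0;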
+ diff --git a/docs/dws/dev/dws_04_0766.html b/docs/dws/dev/dws_04_0766.html new file mode 100644 index 00000000..fd41abd3 --- /dev/null +++ b/docs/dws/dev/dws_04_0766.html @@ -0,0 +1,170 @@ + + +

PG_STAT_USER_TABLES

+

PG_STAT_USER_TABLES displays status information about user-defined ordinary tables and TOAST tables in all namespaces.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_USER_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

seq_scan

+

bigint

+

Number of sequential scans started on the table. Data in this column is valid only for non-system catalogs on the local CN.

+

seq_tup_read

+

bigint

+

Number of rows that have live data fetched by sequential scans

+

idx_scan

+

bigint

+

Number of index scans

+

idx_tup_fetch

+

bigint

+

Number of rows that have live data fetched by index scans

+

n_tup_ins

+

bigint

+

Number of rows inserted

+

n_tup_upd

+

bigint

+

Number of rows updated

+

n_tup_del

+

bigint

+

Number of rows deleted

+

n_tup_hot_upd

+

bigint

+

Number of rows updated by HOT (no separate index update is required)

+

n_live_tup

+

bigint

+

Estimated number of live rows

+

n_dead_tup

+

bigint

+

Estimated number of dead rows

+

last_vacuum

+

timestamp with time zone

+

Last time at which this table was manually vacuumed (excluding VACUUM FULL)

+

last_autovacuum

+

timestamp with time zone

+

Last time at which this table was automatically vacuumed

+

last_analyze

+

timestamp with time zone

+

Last time at which this table was analyzed

+

last_autoanalyze

+

timestamp with time zone

+

Last time at which this table was automatically analyzed

+

vacuum_count

+

bigint

+

Number of vacuum operations (excluding VACUUM FULL)

+

autovacuum_count

+

bigint

+

Number of autovacuum operations

+

analyze_count

+

bigint

+

Number of analyze operations

+

autoanalyze_count

+

bigint

+

Number of autoanalyze operations

+
+
+
+
+ +
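A sketch that flags tables whose estimated dead-row share is high; the 10% threshold is an arbitrary illustration, not a product recommendation:
-- Sketch: tables with many dead rows relative to live rows (possible VACUUM candidates).
SELECT schemaname, relname, n_live_tup, n_dead_tup, last_vacuum, last_autovacuum
  FROM pg_stat_user_tables
 WHERE n_dead_tup * 10 > n_live_tup
 ORDER BY n_dead_tup DESC;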
+ diff --git a/docs/dws/dev/dws_04_0767.html b/docs/dws/dev/dws_04_0767.html new file mode 100644 index 00000000..cd93900d --- /dev/null +++ b/docs/dws/dev/dws_04_0767.html @@ -0,0 +1,128 @@ + + +

PG_STAT_REPLICATION

+

PG_STAT_REPLICATION displays information about log synchronization status, such as the locations of the sender sending logs and the receiver receiving logs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_REPLICATION columns

Name

+

Type

+

Description

+

pid

+

bigint

+

PID of the thread

+

usesysid

+

oid

+

User system ID

+

usename

+

name

+

Username

+

application_name

+

text

+

Application name

+

client_addr

+

inet

+

Client address

+

client_hostname

+

text

+

Client name

+

client_port

+

integer

+

Client port number

+

backend_start

+

timestamp with time zone

+

Start time of the program

+

state

+

text

+

Log replication state (catch-up or consistent streaming)

+

sender_sent_location

+

text

+

Location where the sender sends logs

+

receiver_write_location

+

text

+

Location where the receiver writes logs

+

receiver_flush_location

+

text

+

Location where the receiver flushes logs

+

receiver_replay_location

+

text

+

Location where the receiver replays logs

+

sync_priority

+

integer

+

Priority of synchronous duplication (0 indicates asynchronization)

+

sync_state

+

text

+

Synchronization state (asynchronous duplication, synchronous duplication, or potential synchronization)

+
+
+
+
+ +
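For example, a sketch showing per-standby replication progress and synchronization mode, using only the columns in Table 1:
-- Sketch: replication progress per connected standby.
SELECT application_name, client_addr, state,
       sender_sent_location, receiver_replay_location, sync_state
  FROM pg_stat_replication;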
+ diff --git a/docs/dws/dev/dws_04_0768.html b/docs/dws/dev/dws_04_0768.html new file mode 100644 index 00000000..09a5d43b --- /dev/null +++ b/docs/dws/dev/dws_04_0768.html @@ -0,0 +1,79 @@ + + +

PG_STAT_SYS_INDEXES

+

PG_STAT_SYS_INDEXES displays the index status information about all the system catalogs in the pg_catalog and information_schema schemas.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_SYS_INDEXES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID for the index

+

indexrelid

+

oid

+

Index OID

+

schemaname

+

name

+

Schema name for the index

+

relname

+

name

+

Table name for the index

+

indexrelname

+

name

+

Index name

+

idx_scan

+

bigint

+

Number of index scans

+

idx_tup_read

+

bigint

+

Number of index entries returned by scans on this index

+

idx_tup_fetch

+

bigint

+

Number of rows that have live data fetched by index scans

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0769.html b/docs/dws/dev/dws_04_0769.html new file mode 100644 index 00000000..3fbe8b3e --- /dev/null +++ b/docs/dws/dev/dws_04_0769.html @@ -0,0 +1,170 @@ + + +

PG_STAT_SYS_TABLES

+

PG_STAT_SYS_TABLES displays the statistics about the system catalogs of all the namespaces in pg_catalog and information_schema schemas.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_SYS_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

seq_scan

+

bigint

+

Number of sequential scans started on the table

+

seq_tup_read

+

bigint

+

Number of rows that have live data fetched by sequential scans

+

idx_scan

+

bigint

+

Number of index scans

+

idx_tup_fetch

+

bigint

+

Number of rows that have live data fetched by index scans

+

n_tup_ins

+

bigint

+

Number of rows inserted

+

n_tup_upd

+

bigint

+

Number of rows updated

+

n_tup_del

+

bigint

+

Number of rows deleted

+

n_tup_hot_upd

+

bigint

+

Number of rows HOT updated (that is, with no separate index update required)

+

n_live_tup

+

bigint

+

Estimated number of live rows

+

n_dead_tup

+

bigint

+

Estimated number of dead rows

+

last_vacuum

+

timestamp with time zone

+

Last time at which this table was manually vacuumed (excluding VACUUM FULL)

+

last_autovacuum

+

timestamp with time zone

+

Last time at which this table was automatically vacuumed

+

last_analyze

+

timestamp with time zone

+

Last time at which this table was analyzed

+

last_autoanalyze

+

timestamp with time zone

+

Last time at which this table was automatically analyzed

+

vacuum_count

+

bigint

+

Number of vacuum operations (excluding VACUUM FULL)

+

autovacuum_count

+

bigint

+

Number of autovacuum operations

+

analyze_count

+

bigint

+

Number of analyze operations

+

autoanalyze_count

+

bigint

+

Number of autoanalyze operations

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0770.html b/docs/dws/dev/dws_04_0770.html new file mode 100644 index 00000000..8b0aff26 --- /dev/null +++ b/docs/dws/dev/dws_04_0770.html @@ -0,0 +1,100 @@ + + +

PG_STAT_XACT_ALL_TABLES

+

PG_STAT_XACT_ALL_TABLES displays the transaction status information about all ordinary tables and TOAST tables in the namespaces.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_XACT_ALL_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

seq_scan

+

bigint

+

Number of sequential scans started on the table

+

seq_tup_read

+

bigint

+

Number of live rows fetched by sequential scans

+

idx_scan

+

bigint

+

Number of index scans started on the table

+

idx_tup_fetch

+

bigint

+

Number of live rows fetched by index scans

+

n_tup_ins

+

bigint

+

Number of rows inserted

+

n_tup_upd

+

bigint

+

Number of rows updated

+

n_tup_del

+

bigint

+

Number of rows deleted

+

n_tup_hot_upd

+

bigint

+

Number of rows HOT updated (that is, with no separate index update required)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0771.html b/docs/dws/dev/dws_04_0771.html new file mode 100644 index 00000000..8b18ae84 --- /dev/null +++ b/docs/dws/dev/dws_04_0771.html @@ -0,0 +1,100 @@ + + +

PG_STAT_XACT_SYS_TABLES

+

PG_STAT_XACT_SYS_TABLES displays the transaction status information of the system catalog in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_XACT_SYS_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

seq_scan

+

bigint

+

Number of sequential scans started on the table

+

seq_tup_read

+

bigint

+

Number of live rows fetched by sequential scans

+

idx_scan

+

bigint

+

Number of index scans started on the table

+

idx_tup_fetch

+

bigint

+

Number of live rows fetched by index scans

+

n_tup_ins

+

bigint

+

Number of rows inserted

+

n_tup_upd

+

bigint

+

Number of rows updated

+

n_tup_del

+

bigint

+

Number of rows deleted

+

n_tup_hot_upd

+

bigint

+

Number of rows HOT updated (that is, with no separate index update required)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0772.html b/docs/dws/dev/dws_04_0772.html new file mode 100644 index 00000000..88409a11 --- /dev/null +++ b/docs/dws/dev/dws_04_0772.html @@ -0,0 +1,65 @@ + + +

PG_STAT_XACT_USER_FUNCTIONS

+

PG_STAT_XACT_USER_FUNCTIONS displays statistics about function executions within the current transaction, with one row for each tracked function.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_XACT_USER_FUNCTIONS columns

Name

+

Type

+

Description

+

funcid

+

oid

+

Function OID

+

schemaname

+

name

+

Schema name

+

funcname

+

name

+

Function name

+

calls

+

bigint

+

Number of times this function has been called

+

total_time

+

double precision

+

Total time spent in this function and all other functions called by it

+

self_time

+

double precision

+

Total time spent in this function itself, excluding other functions called by it

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0773.html b/docs/dws/dev/dws_04_0773.html new file mode 100644 index 00000000..5c66362f --- /dev/null +++ b/docs/dws/dev/dws_04_0773.html @@ -0,0 +1,100 @@ + + +

PG_STAT_XACT_USER_TABLES

+

PG_STAT_XACT_USER_TABLES displays the transaction status information of the user table in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STAT_XACT_USER_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

seq_scan

+

bigint

+

Number of sequential scans started on the table

+

seq_tup_read

+

bigint

+

Number of live rows fetched by sequential scans

+

idx_scan

+

bigint

+

Number of index scans started on the table

+

idx_tup_fetch

+

bigint

+

Number of live rows fetched by index scans

+

n_tup_ins

+

bigint

+

Number of rows inserted

+

n_tup_upd

+

bigint

+

Number of rows updated

+

n_tup_del

+

bigint

+

Number of rows deleted

+

n_tup_hot_upd

+

bigint

+

Number of rows HOT updated (that is, with no separate index update required)

+
+
+
+
+ +
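A minimal sketch listing the user tables already modified by the current transaction, using the counters in Table 1:
-- Sketch: rows changed so far in this transaction, per user table.
SELECT schemaname, relname, n_tup_ins, n_tup_upd, n_tup_del
  FROM pg_stat_xact_user_tables
 WHERE n_tup_ins + n_tup_upd + n_tup_del > 0;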
+ diff --git a/docs/dws/dev/dws_04_0774.html b/docs/dws/dev/dws_04_0774.html new file mode 100644 index 00000000..a88cb864 --- /dev/null +++ b/docs/dws/dev/dws_04_0774.html @@ -0,0 +1,72 @@ + + +

PG_STATIO_ALL_INDEXES

+

PG_STATIO_ALL_INDEXES contains one row for each index in the current database, showing I/O statistics about accesses to that specific index.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_ALL_INDEXES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID for the index

+

indexrelid

+

oid

+

Index OID

+

schemaname

+

name

+

Schema name for the index

+

relname

+

name

+

Table name for the index

+

indexrelname

+

name

+

Index name

+

idx_blks_read

+

bigint

+

Number of disk blocks read from this index

+

idx_blks_hit

+

bigint

+

Number of buffer hits in this index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0775.html b/docs/dws/dev/dws_04_0775.html new file mode 100644 index 00000000..4c892a52 --- /dev/null +++ b/docs/dws/dev/dws_04_0775.html @@ -0,0 +1,58 @@ + + +

PG_STATIO_ALL_SEQUENCES

+

PG_STATIO_ALL_SEQUENCES contains one row for each sequence in the current database, showing I/O statistics about accesses to that specific sequence.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_ALL_SEQUENCES columns

Name

+

Type

+

Description

+

relid

+

oid

+

OID of this sequence

+

schemaname

+

name

+

Name of the schema this sequence is in

+

relname

+

name

+

Name of the sequence

+

blks_read

+

bigint

+

Number of disk blocks read from this sequence

+

blks_hit

+

bigint

+

Number of buffer hits in this sequence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0776.html b/docs/dws/dev/dws_04_0776.html new file mode 100644 index 00000000..07079c6c --- /dev/null +++ b/docs/dws/dev/dws_04_0776.html @@ -0,0 +1,100 @@ + + +

PG_STATIO_ALL_TABLES

+

PG_STATIO_ALL_TABLES contains one row for each table in the current database (including TOAST tables), showing I/O statistics about accesses to that specific table.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_ALL_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

heap_blks_read

+

bigint

+

Number of disk blocks read from this table

+

heap_blks_hit

+

bigint

+

Number of buffer hits in this table

+

idx_blks_read

+

bigint

+

Number of disk blocks read from the index in this table

+

idx_blks_hit

+

bigint

+

Number of buffer hits in all indexes on this table

+

toast_blks_read

+

bigint

+

Number of disk blocks read from the TOAST table (if any) in this table

+

toast_blks_hit

+

bigint

+

Number of buffer hits in the TOAST table (if any) in this table

+

tidx_blks_read

+

bigint

+

Number of disk blocks read from the TOAST table index (if any) in this table

+

tidx_blks_hit

+

bigint

+

Number of buffer hits in the TOAST table index (if any) in this table

+
+
+
+
+ +
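A typical use is computing a per-table buffer hit ratio; a sketch over the columns in Table 1 (NULLIF guards against division by zero on never-read tables):
-- Sketch: per-table buffer cache hit ratio, worst readers first.
SELECT schemaname, relname,
       heap_blks_hit::numeric / NULLIF(heap_blks_hit + heap_blks_read, 0) AS heap_hit_ratio
  FROM pg_statio_all_tables
 ORDER BY heap_blks_read DESC;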
+ diff --git a/docs/dws/dev/dws_04_0777.html b/docs/dws/dev/dws_04_0777.html new file mode 100644 index 00000000..082f6593 --- /dev/null +++ b/docs/dws/dev/dws_04_0777.html @@ -0,0 +1,72 @@ + + +

PG_STATIO_SYS_INDEXES

+

PG_STATIO_SYS_INDEXES displays the I/O status information about all system catalog indexes in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_SYS_INDEXES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID for the index

+

indexrelid

+

oid

+

Index OID

+

schemaname

+

name

+

Schema name for the index

+

relname

+

name

+

Table name for the index

+

indexrelname

+

name

+

Index name

+

idx_blks_read

+

bigint

+

Number of disk blocks read from this index

+

idx_blks_hit

+

bigint

+

Number of buffer hits in this index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0778.html b/docs/dws/dev/dws_04_0778.html new file mode 100644 index 00000000..3655a95c --- /dev/null +++ b/docs/dws/dev/dws_04_0778.html @@ -0,0 +1,58 @@ + + +

PG_STATIO_SYS_SEQUENCES

+

PG_STATIO_SYS_SEQUENCES displays the I/O status information about all the system sequences in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_SYS_SEQUENCES columns

Name

+

Type

+

Description

+

relid

+

oid

+

OID of this sequence

+

schemaname

+

name

+

Name of the schema this sequence is in

+

relname

+

name

+

Name of the sequence

+

blks_read

+

bigint

+

Number of disk blocks read from this sequence

+

blks_hit

+

bigint

+

Number of buffer hits in this sequence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0779.html b/docs/dws/dev/dws_04_0779.html new file mode 100644 index 00000000..b9806a51 --- /dev/null +++ b/docs/dws/dev/dws_04_0779.html @@ -0,0 +1,100 @@ + + +

PG_STATIO_SYS_TABLES

+

PG_STATIO_SYS_TABLES displays the I/O status information about all the system catalogs in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_SYS_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

heap_blks_read

+

bigint

+

Number of disk blocks read from this table

+

heap_blks_hit

+

bigint

+

Number of buffer hits in this table

+

idx_blks_read

+

bigint

+

Number of disk blocks read from all indexes in this table

+

idx_blks_hit

+

bigint

+

Number of buffer hits in all indexes on this table

+

toast_blks_read

+

bigint

+

Number of disk blocks read from the TOAST table (if any) in this table

+

toast_blks_hit

+

bigint

+

Number of buffer hits in the TOAST table (if any) in this table

+

tidx_blks_read

+

bigint

+

Number of disk blocks read from the TOAST table index (if any) in this table

+

tidx_blks_hit

+

bigint

+

Number of buffer hits in the TOAST table index (if any) in this table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0780.html b/docs/dws/dev/dws_04_0780.html new file mode 100644 index 00000000..b9fba8d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0780.html @@ -0,0 +1,73 @@ + + +

PG_STATIO_USER_INDEXES

+

PG_STATIO_USER_INDEXES displays the I/O status information about all the user relation table indexes in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_USER_INDEXES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID for the index

+

indexrelid

+

oid

+

Index OID

+

schemaname

+

name

+

Schema name for the index

+

relname

+

name

+

Table name for the index

+

indexrelname

+

name

+

Index name

+

idx_blks_read

+

bigint

+

Number of disk blocks read from this index

+

idx_blks_hit

+

bigint

+

Number of buffer hits in this index

+
+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0781.html b/docs/dws/dev/dws_04_0781.html new file mode 100644 index 00000000..1f90087b --- /dev/null +++ b/docs/dws/dev/dws_04_0781.html @@ -0,0 +1,58 @@ + + +

PG_STATIO_USER_SEQUENCES

+

PG_STATIO_USER_SEQUENCES displays the I/O status information about all the user relation table sequences in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_USER_SEQUENCES columns

Name

+

Type

+

Description

+

relid

+

oid

+

OID of this sequence

+

schemaname

+

name

+

Name of the schema this sequence is in

+

relname

+

name

+

Name of this sequence

+

blks_read

+

bigint

+

Number of disk blocks read from this sequence

+

blks_hit

+

bigint

+

Number of buffer hits in this sequence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0782.html b/docs/dws/dev/dws_04_0782.html new file mode 100644 index 00000000..be7843c0 --- /dev/null +++ b/docs/dws/dev/dws_04_0782.html @@ -0,0 +1,100 @@ + + +

PG_STATIO_USER_TABLES

+

PG_STATIO_USER_TABLES displays the I/O status information about all the user relation tables in the namespace.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_STATIO_USER_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

schemaname

+

name

+

Schema name of the table

+

relname

+

name

+

Table name

+

heap_blks_read

+

bigint

+

Number of disk blocks read from this table

+

heap_blks_hit

+

bigint

+

Number of buffer hits in this table

+

idx_blks_read

+

bigint

+

Number of disk blocks read from the index in this table

+

idx_blks_hit

+

bigint

+

Number of buffer hits in all indexes on this table

+

toast_blks_read

+

bigint

+

Number of disk blocks read from the TOAST table (if any) in this table

+

toast_blks_hit

+

bigint

+

Number of buffer hits in the TOAST table (if any) in this table

+

tidx_blks_read

+

bigint

+

Number of disk blocks read from the TOAST table index (if any) in this table

+

tidx_blks_hit

+

bigint

+

Number of buffer hits in the TOAST table index (if any) in this table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0783.html b/docs/dws/dev/dws_04_0783.html new file mode 100644 index 00000000..9cf31bf5 --- /dev/null +++ b/docs/dws/dev/dws_04_0783.html @@ -0,0 +1,1074 @@ + + +

PG_THREAD_WAIT_STATUS

+

PG_THREAD_WAIT_STATUS allows you to view the blocking and waiting status of the backend threads and auxiliary threads on the current instance.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_THREAD_WAIT_STATUS columns

Name

+

Type

+

Description

+

node_name

+

text

+

Current node name

+

db_name

+

text

+

Database name

+

thread_name

+

text

+

Thread name

+

query_id

+

bigint

+

Query ID. It is equivalent to debug_query_id.

+

tid

+

bigint

+

Thread ID of the current thread

+

lwtid

+

integer

+

Lightweight thread ID of the current thread

+

ptid

+

integer

+

Parent thread of the streaming thread

+

tlevel

+

integer

+

Level of the streaming thread

+

smpid

+

integer

+

Concurrent thread ID

+

wait_status

+

text

+

Waiting status of the current thread. For details about the waiting status, see Table 2.

+

wait_event

+

text

+

If wait_status is acquire lock, acquire lwlock, or wait io, this column describes the lock, lightweight lock, and I/O information, respectively. If wait_status is not any of the three values, this column is empty.

+
+
+

The waiting statuses in the wait_status column are as follows:

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Waiting status list

Value

+

Description

+

none

+

Waiting for no event

+

acquire lock

+

Waiting for locking until the locking succeeds or times out

+

acquire lwlock

+

Waiting for a lightweight lock

+

wait io

+

Waiting for I/O completion

+

wait cmd

+

Waiting for network communication packet read to complete

+

wait pooler get conn

+

Waiting for pooler to obtain the connection

+

wait pooler abort conn

+

Waiting for pooler to terminate the connection

+

wait pooler clean conn

+

Waiting for pooler to clear connections

+

pooler create conn: [nodename], total N

+

Waiting for the pooler to set up a connection. The connection is being established with the node specified by nodename, and there are N connections waiting to be set up.

+

get conn

+

Obtaining the connection to other nodes

+

set cmd: [nodename]

+

Waiting for running the SET, RESET, TRANSACTION BLOCK LEVEL PARA SET, or SESSION LEVEL PARA SET statement on the connection. The statement is being executed on the node specified by nodename.

+

cancel query

+

Canceling the SQL statement that is being executed through the connection

+

stop query

+

Stopping the query that is being executed through the connection

+

wait node: [nodename](plevel), total N, [phase]

+

Waiting for receiving the data from a connected node. The thread is waiting for the data from the plevel thread of the node specified by nodename. The data of N connections is waiting to be returned. If phase is included, the possible phases are as follows:

+
  • begin: The transaction is being started.
  • commit: The transaction is being committed.
  • rollback: The transaction is being rolled back.
+

wait transaction sync: xid

+

Waiting for synchronizing the transaction specified by xid

+

wait wal sync

+

Waiting for the WAL logs from the specified LSN to finish synchronizing to the standby instance

+

wait data sync

+

Waiting for the completion of data page synchronization to the standby instance

+

wait data sync queue

+

Waiting to put row-store data pages or column-store CUs into the synchronization queue

+

flush data: [nodename](plevel), [phase]

+

Waiting for sending data to the plevel thread of the node specified by nodename. If phase is included, the possible phase is wait quota, indicating that the current communication flow is waiting for the quota value.

+

stream get conn: [nodename], total N

+

Waiting for connecting to the consumer object of the node specified by nodename when the stream flow is initialized. There are N consumers waiting to be connected.

+

wait producer ready: [nodename](plevel), total N

+

Waiting for each producer to be ready when the stream flow is initialized. The thread is waiting for the procedure of the plevel thread on the nodename node to be ready. There are N producers waiting to be ready.

+

synchronize quit

+

Waiting for the threads in the stream thread group to quit when the stream plan ends

+

nodegroup destroy

+

Waiting for destroying the stream node group when the stream plan ends

+

wait active statement

+

Waiting for job execution under resource and load control.

+

wait global queue

+

Waiting for job execution. The job is queuing in the global queue.

+

wait respool queue

+

Waiting for job execution. The job is queuing in the resource pool.

+

wait ccn queue

+

Waiting for job execution. The job is queuing on the central coordinator node (CCN).

+

gtm connect

+

Waiting for connecting to GTM.

+

gtm get gxid

+

Waiting for obtaining XIDs from GTM.

+

gtm get snapshot

+

Waiting for obtaining transaction snapshots from GTM.

+

gtm begin trans

+

Waiting for GTM to start a transaction.

+

gtm commit trans

+

Waiting for GTM to commit a transaction.

+

gtm rollback trans

+

Waiting for GTM to roll back a transaction.

+

gtm create sequence

+

Waiting for GTM to create a sequence.

+

gtm alter sequence

+

Waiting for GTM to modify a sequence.

+

gtm get sequence val

+

Waiting for obtaining the next value of a sequence from GTM.

+

gtm set sequence val

+

Waiting for GTM to set a sequence value.

+

gtm drop sequence

+

Waiting for GTM to delete a sequence.

+

gtm rename sequence

+

Waiting for GTM to rename a sequence.

+

analyze: [relname], [phase]

+

The thread is doing ANALYZE to the relname table. If phase is included, the possible phase is autovacuum, indicating that the database automatically enables the AutoVacuum thread to execute ANALYZE.

+

vacuum: [relname], [phase]

+

The thread is doing VACUUM to the relname table. If phase is included, the possible phase is autovacuum, indicating that the database automatically enables the AutoVacuum thread to execute VACUUM.

+

vacuum full: [relname]

+

The thread is doing VACUUM FULL to the relname table.

+

create index

+

An index is being created.

+

HashJoin - [ build hash | write file ]

+

The HashJoin operator is being executed. Pay attention to how long this phase takes.

+
  • build hash: The HashJoin operator is creating a hash table.
  • write file: The HashJoin operator is writing data to disks.
+

HashAgg - [ build hash | write file ]

+

The HashAgg operator is being executed. Pay attention to how long this phase takes.

+
  • build hash: The HashAgg operator is creating a hash table.
  • write file: The HashAgg operator is writing data to disks.
+

HashSetop - [ build hash | write file ]

+

The HashSetop operator is being executed. Pay attention to how long this phase takes.

+
  • build hash: The HashSetop operator is creating a hash table.
  • write file: The HashSetop operator is writing data to disks.
+

Sort | Sort - write file

+

The Sort operator is being executed. write file indicates that the Sort operator is writing data to disks.

+

Material | Material - write file

+

The Material operator is being executed. write file indicates that the Material operator is writing data to disks.

+

wait sync consumer next step

+

The consumer (receive end) synchronously waits for the next iteration.

+

wait sync producer next step

+

The producer (transmit end) synchronously waits for the next iteration.

+
+
+

If wait_status is acquire lwlock, acquire lock, or wait io, the thread is performing an I/O operation or waiting to obtain the corresponding lightweight lock or transaction lock.

+

The following table describes the corresponding wait events when wait_status is acquire lwlock. (If wait_event is extension, the lightweight lock is dynamically allocated and is not monitored.)

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 List of wait events corresponding to lightweight locks

wait_event

+

Description

+

ShmemIndexLock

+

Used to protect the primary index table, a hash table, in shared memory

+

OidGenLock

+

Used to prevent different threads from generating the same OID

+

XidGenLock

+

Used to prevent two transactions from obtaining the same XID

+

ProcArrayLock

+

Used to prevent concurrent access to or concurrent modification on the ProcArray shared array

+

SInvalReadLock

+

Used to prevent concurrent execution with invalid message deletion

+

SInvalWriteLock

+

Used to prevent concurrent execution with invalid message write and deletion

+

WALInsertLock

+

Used to prevent concurrent execution with WAL insertion

+

WALWriteLock

+

Used to prevent concurrent write from a WAL buffer to a disk

+

ControlFileLock

+

Used to prevent concurrent read/write or concurrent write/write on the pg_control file

+

CheckpointLock

+

Used to prevent multi-checkpoint concurrent execution

+

CLogControlLock

+

Used to prevent concurrent access to or concurrent modification on the Clog control data structure

+

MultiXactGenLock

+

Used to allocate a unique MultiXact ID in serial mode

+

MultiXactOffsetControlLock

+

Used to prevent concurrent read/write or concurrent write/write on pg_multixact/offset

+

MultiXactMemberControlLock

+

Used to prevent concurrent read/write or concurrent write/write on pg_multixact/members

+

RelCacheInitLock

+

Used to add a lock before any operations are performed on the init file when messages are invalid

+

CheckpointerCommLock

+

Used to send file flush requests to a checkpointer. The request structure needs to be inserted to a request queue in serial mode.

+

TwoPhaseStateLock

+

Used to prevent concurrent access to or modification on two-phase information sharing arrays

+

TablespaceCreateLock

+

Used to check whether a tablespace already exists

+

BtreeVacuumLock

+

Used to prevent VACUUM from clearing pages that are being used by B-tree indexes

+

AutovacuumLock

+

Used to access the autovacuum worker array in serial mode

+

AutovacuumScheduleLock

+

Used to distribute tables requiring VACUUM in serial mode

+

SyncScanLock

+

Used to determine the start position of a relfilenode during heap scanning

+

NodeTableLock

+

Used to protect a shared structure that stores CN and DN information

+

PoolerLock

+

Used to prevent two threads from simultaneously obtaining the same connection from a connection pool

+

RelationMappingLock

+

Used to wait for the mapping file between system catalogs and storage locations to be updated

+

AsyncCtlLock

+

Used to prevent concurrent access to or concurrent modification on the sharing notification status

+

AsyncQueueLock

+

Used to prevent concurrent access to or concurrent modification on the sharing notification queue

+

SerializableXactHashLock

+

Used to prevent concurrent read/write or concurrent write/write on a sharing structure for serializable transactions

+

SerializableFinishedListLock

+

Used to prevent concurrent read/write or concurrent write/write on a shared linked list for completed serial transactions

+

SerializablePredicateLockListLock

+

Used to protect a linked list of serializable transactions that have locks

+

OldSerXidLock

+

Used to protect a structure that records serializable transactions that have conflicts

+

FileStatLock

+

Used to protect a data structure that stores statistics file information

+

SyncRepLock

+

Used to protect Xlog synchronization information during primary-standby replication

+

DataSyncRepLock

+

Used to protect data page synchronization information during primary-standby replication

+

CStoreColspaceCacheLock

+

Used to add a lock when CU space is allocated for a column-store table

+

CStoreCUCacheSweepLock

+

Used to add a lock when CU caches used by a column-store table are cyclically washed out

+

MetaCacheSweepLock

+

Used to add a lock when metadata is cyclically washed out

+

DfsConnectorCacheLock

+

Used to protect a global hash table where HDFS connection handles are cached

+

dummyServerInfoCacheLock

+

Used to protect a global hash table where the information about computing Node Group connections is cached

+

ExtensionConnectorLibLock

+

Used to add a lock when a specific dynamic library is loaded or uninstalled in ODBC connection initialization scenarios

+

SearchServerLibLock

+

Used to add a lock on the file read operation when a specific dynamic library is initially loaded in GPU-accelerated scenarios

+

DfsUserLoginLock

+

Used to protect a global linked table where HDFS user information is stored

+

DfsSpaceCacheLock

+

Used to ensure that the IDs of files to be imported to an HDFS table increase monotonically

+

LsnXlogChkFileLock

+

Used to serially update the Xlog flush points for primary and standby servers recorded in a specific structure

+

GTMHostInfoLock

+

Used to prevent concurrent access to or concurrent modification on GTM host information

+

ReplicationSlotAllocationLock

+

Used to add a lock when a primary server allocates stream replication slots during primary-standby replication

+

ReplicationSlotControlLock

+

Used to prevent concurrent update of replication slot status during primary-standby replication

+

ResourcePoolHashLock

+

Used to prevent concurrent access to or concurrent modification on a resource pool table, a hash table

+

WorkloadStatHashLock

+

Used to prevent concurrent access to or concurrent modification on a hash table that contains SQL requests from the CN side

+

WorkloadIoStatHashLock

+

Used to prevent concurrent access to or concurrent modification on a hash table that contains the I/O information of the current DN

+

WorkloadCGroupHashLock

+

Used to prevent concurrent access to or concurrent modification on a hash table that contains Cgroup information

+

OBSGetPathLock

+

Used to prevent concurrent read/write or concurrent write/write on an OBS path

+

WorkloadUserInfoLock

+

Used to prevent concurrent access to or concurrent modification on a hash table that contains user information about load management

+

WorkloadRecordLock

+

Used to prevent concurrent access to or concurrent modification on a hash table that contains requests received by CNs during adaptive memory management

+

WorkloadIOUtilLock

+

Used to protect a structure that records iostat and CPU load information

+

WorkloadNodeGroupLock

+

Used to prevent concurrent access to or concurrent modification on a hash table that contains Node Group information in memory

+

JobShmemLock

+

Used to protect global variables in the shared memory that is periodically read during a scheduled task where MPP is compatible with Oracle

+

OBSRuntimeLock

+

Used to obtain environment variables, for example, GAUSSHOME

+

LLVMDumpIRLock

+

Used to export the assembly language for dynamically generating functions

+

LLVMParseIRLock

+

Used to compile and parse a finished IR function from the IR file at the start position of a query

+

RPNumberLock

+

Used by a DN on a computing Node Group to count the number of threads for a task where plans are being executed

+

ClusterRPLock

+

Used to control concurrent access on cluster load data maintained in a CCN of the cluster

+

CriticalCacheBuildLock

+

Used to load caches from a shared or local cache initialization file

+

WaitCountHashLock

+

Used to protect a shared structure in user statement counting scenarios

+

BufMappingLock

+

Used to protect operations on a table mapped to shared buffer

+

LockMgrLock

+

Used to protect a common lock structure

+

PredicateLockMgrLock

+

Used to protect a lock structure that has serializable transactions

+

OperatorRealTLock

+

Used to prevent concurrent access to or concurrent modification on a global structure that contains real-time data at the operator level

+

OperatorHistLock

+

Used to prevent concurrent access to or concurrent modification on a global structure that contains historical data at the operator level

+

SessionRealTLock

+

Used to prevent concurrent access to or concurrent modification on a global structure that contains real-time data at the query level

+

SessionHistLock

+

Used to prevent concurrent access to or concurrent modification on a global structure that contains historical data at the query level

+

CacheSlotMappingLock

+

Used to protect global CU cache information

+

BarrierLock

+

Used to ensure that only one thread is creating a barrier at a time

+
+
+

The following table describes the corresponding wait events when wait_status is wait io.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 List of wait events corresponding to I/Os

wait_event

+

Description

+

BufFileRead

+

Reads data from a temporary file to a specified buffer.

+

BufFileWrite

+

Writes the content of a specified buffer to a temporary file.

+

ControlFileRead

+

Reads the pg_control file, mainly during database startup, checkpoint execution, and primary/standby verification.

+

ControlFileSync

+

Flushes the pg_control file to a disk, mainly during database initialization.

+

ControlFileSyncUpdate

+

Flushes the pg_control file to a disk, mainly during database startup, checkpoint execution, and primary/standby verification.

+

ControlFileWrite

+

Writes to the pg_control file, mainly during database initialization.

+

ControlFileWriteUpdate

+

Updates the pg_control file, mainly during database startup, checkpoint execution, and primary/standby verification.

+

CopyFileRead

+

Reads a file during file copying.

+

CopyFileWrite

+

Writes a file during file copying.

+

DataFileExtend

+

Writes a file during file extension.

+

DataFileFlush

+

Flushes a table data file to a disk.

+

DataFileImmediateSync

+

Flushes a table data file to a disk immediately.

+

DataFilePrefetch

+

Reads a table data file asynchronously.

+

DataFileRead

+

Reads a table data file synchronously.

+

DataFileSync

+

Flushes table data file modifications to a disk.

+

DataFileTruncate

+

Truncates a table data file.

+

DataFileWrite

+

Writes a table data file.

+

LockFileAddToDataDirRead

+

Reads the postmaster.pid file.

+

LockFileAddToDataDirSync

+

Flushes the postmaster.pid file to a disk.

+

LockFileAddToDataDirWrite

+

Writes the PID information into the postmaster.pid file.

+

LockFileCreateRead

+

Reads the LockFile file %s.lock.

+

LockFileCreateSync

+

Flushes the LockFile file %s.lock to a disk.

+

LockFileCreateWRITE

+

Writes the PID information into the LockFile file %s.lock.

+

RelationMapRead

+

Reads the mapping file between system catalogs and storage locations.

+

RelationMapSync

+

Flushes the mapping file between system catalogs and storage locations to a disk.

+

RelationMapWrite

+

Writes the mapping file between system catalogs and storage locations.

+

ReplicationSlotRead

+

Reads a stream replication slot file during a restart.

+

ReplicationSlotRestoreSync

+

Flushes a stream replication slot file to a disk during a restart.

+

ReplicationSlotSync

+

Flushes a temporary stream replication slot file to a disk during checkpoint execution.

+

ReplicationSlotWrite

+

Writes a temporary stream replication slot file during checkpoint execution.

+

SLRUFlushSync

+

Flushes the pg_clog, pg_subtrans, and pg_multixact files to a disk, mainly during checkpoint execution and database shutdown.

+

SLRURead

+

Reads the pg_clog, pg_subtrans, and pg_multixact files.

+

SLRUSync

+

Writes dirty pages into the pg_clog, pg_subtrans, and pg_multixact files, and flushes the files to a disk, mainly during checkpoint execution and database shutdown.

+

SLRUWrite

+

Writes the pg_clog, pg_subtrans, and pg_multixact files.

+

TimelineHistoryRead

+

Reads the timeline history file during database startup.

+

TimelineHistorySync

+

Flushes the timeline history file to a disk during database startup.

+

TimelineHistoryWrite

+

Writes to the timeline history file during database startup.

+

TwophaseFileRead

+

Reads the pg_twophase file, mainly during two-phase transaction submission and restoration.

+

TwophaseFileSync

+

Flushes the pg_twophase file to a disk, mainly during two-phase transaction submission and restoration.

+

TwophaseFileWrite

+

Writes the pg_twophase file, mainly during two-phase transaction submission and restoration.

+

WALBootstrapSync

+

Flushes an initialized WAL file to a disk during database initialization.

+

WALBootstrapWrite

+

Writes an initialized WAL file during database initialization.

+

WALCopyRead

+

Read operation generated when an existing WAL file is read for replication after archiving and restoration.

+

WALCopySync

+

Flushes a replicated WAL file to a disk after archiving and restoration.

+

WALCopyWrite

+

Write operation generated when an existing WAL file is read for replication after archiving and restoration.

+

WALInitSync

+

Flushes a newly initialized WAL file to a disk during log reclaiming or writing.

+

WALInitWrite

+

Initializes a newly created WAL file to 0 during log reclaiming or writing.

+

WALRead

+

Reads data from Xlogs during redo operations on two-phase files.

+

WALSyncMethodAssign

+

Flushes all open WAL files to a disk.

+

WALWrite

+

Writes a WAL file.

+
+
+

The following table describes the corresponding wait events when wait_status is acquire lock.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 List of wait events corresponding to transaction locks

wait_event

+

Description

+

relation

+

Adds a lock to a table.

+

extend

+

Adds a lock to a table being scaled out.

+

partition

+

Adds a lock to a partitioned table.

+

partition_seq

+

Adds a lock to a partition of a partitioned table.

+

page

+

Adds a lock to a table page.

+

tuple

+

Adds a lock to a tuple on a page.

+

transactionid

+

Adds a lock to a transaction ID.

+

virtualxid

+

Adds a lock to a virtual transaction ID.

+

object

+

Adds a lock to an object.

+

cstore_freespace

+

Adds a lock to idle column-store space.

+

userlock

+

Adds a lock to a user.

+

advisory

+

Adds an advisory lock.

+
+
+
+
+ +
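For example, a sketch that groups currently blocked threads by wait status and wait event (status values as listed in Table 2):
-- Sketch: blocked threads grouped by wait status and event.
SELECT wait_status, wait_event, count(*)
  FROM pg_thread_wait_status
 WHERE wait_status IN ('acquire lock', 'acquire lwlock', 'wait io')
 GROUP BY wait_status, wait_event
 ORDER BY count(*) DESC;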
+ diff --git a/docs/dws/dev/dws_04_0784.html b/docs/dws/dev/dws_04_0784.html new file mode 100644 index 00000000..e7685882 --- /dev/null +++ b/docs/dws/dev/dws_04_0784.html @@ -0,0 +1,115 @@ + + +

PG_TABLES

+

PG_TABLES displays information about each table in the database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TABLES columns

Name

+

Type

+

Reference

+

Description

+

schemaname

+

name

+

PG_NAMESPACE.nspname

+

Name of the schema that contains the table

+

tablename

+

name

+

PG_CLASS.relname

+

Name of the table

+

tableowner

+

name

+

pg_get_userbyid(PG_CLASS.relowner)

+

Owner of the table

+

tablespace

+

name

+

PG_TABLESPACE.spcname

+

Tablespace that contains the table. The default value is null

+

hasindexes

+

boolean

+

PG_CLASS.relhasindex

+

Whether the table has (or recently had) an index. If it does, its value is true. Otherwise, its value is false.

+

hasrules

+

boolean

+

PG_CLASS.relhasrules

+

Whether the table has rules. If it does, its value is true. Otherwise, its value is false.

+

hastriggers

+

boolean

+

PG_CLASS.RELHASTRIGGERS

+

Whether the table has triggers. If it does, its value is true. Otherwise, its value is false.

+

tablecreator

+

name

+

pg_get_userbyid(PG_OBJECT.creator)

+

Table creator. If the creator has been deleted, no value is returned.

+

created

+

timestamp with time zone

+

PG_OBJECT.ctime

+

Time when the table was created.

+

last_ddl_time

+

timestamp with time zone

+

PG_OBJECT.mtime

+

Time when the table was last modified.

+
+
+
+
+ +
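A minimal sketch listing non-system tables with owner and creation time; the two excluded schema names are the usual system schemas, shown here for illustration, and created may be empty for tables whose creation was not recorded:
-- Sketch: user tables, newest first.
SELECT schemaname, tablename, tableowner, created
  FROM pg_tables
 WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
 ORDER BY created DESC;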
+ diff --git a/docs/dws/dev/dws_04_0785.html b/docs/dws/dev/dws_04_0785.html new file mode 100644 index 00000000..9372336b --- /dev/null +++ b/docs/dws/dev/dws_04_0785.html @@ -0,0 +1,60 @@ + + +

PG_TDE_INFO

+

PG_TDE_INFO displays the encryption information about the current cluster.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 PG_TDE_INFO columns

Name

+

Type

+

Description

+

is_encrypt

+

text

+

Whether the cluster is an encryption cluster

+
  • f: Non-encryption cluster
  • t: Encryption cluster
+

g_tde_algo

+

text

+

Encryption algorithm

+
  • SM4-CTR-128
  • AES-CTR-128
+

remain

+

text

+

Reserved

+
+
+

Examples

Check whether the current cluster is encrypted, and check the encryption algorithm (if any) used by the current cluster.

+
1
+2
+3
+4
+5
SELECT * FROM PG_TDE_INFO;
+ is_encrypt | g_tde_algo  | remain
+------------+-------------+--------
+ f          | AES-CTR-128 | remain
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0786.html b/docs/dws/dev/dws_04_0786.html new file mode 100644 index 00000000..34b16869 --- /dev/null +++ b/docs/dws/dev/dws_04_0786.html @@ -0,0 +1,44 @@ + + +

PG_TIMEZONE_ABBREVS

+

PG_TIMEZONE_ABBREVS displays all time zone abbreviations that can be recognized by the input routines.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 PG_TIMEZONE_ABBREVS columns

Name

+

Type

+

Description

+

abbrev

+

text

+

Time zone abbreviation

+

utc_offset

+

interval

+

Offset from UTC

+

is_dst

+

boolean

+

Whether the abbreviation indicates a daylight saving time (DST) zone. If it does, its value is true. Otherwise, its value is false.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0787.html b/docs/dws/dev/dws_04_0787.html new file mode 100644 index 00000000..17f2546f --- /dev/null +++ b/docs/dws/dev/dws_04_0787.html @@ -0,0 +1,51 @@ + + +

PG_TIMEZONE_NAMES

+

PG_TIMEZONE_NAMES displays all time zone names that can be recognized by SET TIMEZONE, along with their associated abbreviations, UTC offsets, and daylight saving time statuses.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TIMEZONE_NAMES columns

Name

+

Type

+

Description

+

name

+

text

+

Name of the time zone

+

abbrev

+

text

+

Time zone name abbreviation

+

utc_offset

+

interval

+

Offset from UTC

+

is_dst

+

boolean

+

Whether DST is used. If it is, its value is true. Otherwise, its value is false.

+
+
+
+
+ +
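For example, a lookup sketch for one zone ('Asia/Shanghai' is just a placeholder name):
-- Sketch: UTC offset and DST flag for a specific time zone.
SELECT name, abbrev, utc_offset, is_dst
  FROM pg_timezone_names
 WHERE name = 'Asia/Shanghai';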
+ diff --git a/docs/dws/dev/dws_04_0788.html b/docs/dws/dev/dws_04_0788.html new file mode 100644 index 00000000..8f980afa --- /dev/null +++ b/docs/dws/dev/dws_04_0788.html @@ -0,0 +1,45 @@ + + +

PG_TOTAL_MEMORY_DETAIL

+

PG_TOTAL_MEMORY_DETAIL displays the memory usage of a certain node in the database.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 PG_TOTAL_MEMORY_DETAIL columns

Name

+

Type

+

Description

+

nodename

+

text

+

Node name

+

memorytype

+

text

+

It can be set to any of the following values:

+
  • max_process_memory: memory used by a GaussDB(DWS) cluster instance
  • process_used_memory: memory used by a GaussDB(DWS) process
  • max_dynamic_memory: maximum dynamic memory
  • dynamic_used_memory: used dynamic memory
  • dynamic_peak_memory: dynamic peak value of the memory
  • dynamic_used_shrctx: maximum dynamic shared memory context
  • dynamic_peak_shrctx: dynamic peak value of the shared memory context
  • max_shared_memory: maximum shared memory
  • shared_used_memory: used shared memory
  • max_cstore_memory: maximum memory allowed for column store
  • cstore_used_memory: memory used for column store
  • max_sctpcomm_memory: maximum memory allowed for the communication library
  • sctpcomm_used_memory: memory used for the communication library
  • sctpcomm_peak_memory: memory peak of the communication library
  • other_used_memory: other used memory
  • gpu_max_dynamic_memory: maximum GPU memory
  • gpu_dynamic_used_memory: sum of the available GPU memory and temporary GPU memory
  • gpu_dynamic_peak_memory: maximum memory used for GPU
  • pooler_conn_memory: memory used for pooler connections
  • pooler_freeconn_memory: memory used for idle pooler connections
  • storage_compress_memory: memory used for column-store compression and decompression
  • udf_reserved_memory: memory reserved for the UDF Worker process
  • mmap_used_memory: memory used for mmap
+

memorymbytes

+

integer

+

Size of the used memory (MB)

+
+
+
+
+ +
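A sketch that narrows the view to the dynamic-memory rows per node (memorytype values as enumerated above):
-- Sketch: dynamic memory usage per node.
SELECT nodename, memorytype, memorymbytes
  FROM pg_total_memory_detail
 WHERE memorytype IN ('max_dynamic_memory', 'dynamic_used_memory', 'dynamic_peak_memory');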
+ diff --git a/docs/dws/dev/dws_04_0789.html b/docs/dws/dev/dws_04_0789.html new file mode 100644 index 00000000..dd0bc3a0 --- /dev/null +++ b/docs/dws/dev/dws_04_0789.html @@ -0,0 +1,65 @@ + + +

PG_TOTAL_SCHEMA_INFO

+

PG_TOTAL_SCHEMA_INFO displays the storage usage of all schemas in each database. This view is valid only if use_workload_manager is set to on.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Column

+

Type

+

Description

+

schemaid

+

oid

+

Schema OID

+

schemaname

+

text

+

Schema name

+

databaseid

+

oid

+

Database OID

+

databasename

+

name

+

Database name

+

usedspace

+

bigint

+

Size of the permanent table storage space used by the schema, in bytes.

+

permspace

+

bigint

+

Upper limit of the permanent table storage space of the schema, in bytes.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0790.html b/docs/dws/dev/dws_04_0790.html new file mode 100644 index 00000000..241dd106 --- /dev/null +++ b/docs/dws/dev/dws_04_0790.html @@ -0,0 +1,148 @@ + + +

PG_TOTAL_USER_RESOURCE_INFO

+

PG_TOTAL_USER_RESOURCE_INFO displays the resource usage of all users. Only administrators can query this view. This view is valid only if use_workload_manager is set to on.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_TOTAL_USER_RESOURCE_INFO columns

Name

+

Type

+

Description

+

username

+

name

+

Username

+

used_memory

+

integer

+

Used memory (unit: MB)

+

total_memory

+

integer

+

Available memory (unit: MB). 0 indicates that the available memory is not limited and depends on the maximum memory available in the database.

+

used_cpu

+

double precision

+

Number of CPU cores in use. Only the CPU usage of complex jobs in the non-default resource pool is collected, and the value is the CPU usage of the related cgroup.

+

total_cpu

+

integer

+

Total number of CPU cores of the cgroup associated with a user on the node

+

used_space

+

bigint

+

Used permanent table storage space (unit: KB)

+

total_space

+

bigint

+

Available storage space (unit: KB). -1 indicates that the storage space is not limited.

+

used_temp_space

+

bigint

+

Used temporary table storage space (unit: KB)

+

total_temp_space

+

bigint

+

Available temporary table storage space (unit: KB). -1 indicates that the storage space is not limited.

+

used_spill_space

+

bigint

+

Size of the used operator flushing space, in KB

+

total_spill_space

+

bigint

+

Size of the available operator flushing space, in KB. The value -1 indicates that the operator flushing space is not limited.

+

read_kbytes

+

bigint

+

CN: total number of bytes read by a user's complex jobs on all DNs in the last 5 seconds. The unit is KB.

+

DN: total number of bytes read by a user's complex jobs from the instance startup time to the current time. The unit is KB.

+

write_kbytes

+

bigint

+

CN: total number of bytes written by a user's complex jobs on all DNs in the last 5 seconds. The unit is KB.

+

DN: total number of bytes written by a user's complex jobs from the instance startup time to the current time. The unit is KB.

+

read_counts

+

bigint

+

CN: total number of read times of a user's complex jobs on all DNs in the last 5 seconds. Unit: count.

+

DN: total number of read times of a user's complex jobs from the instance startup time to the current time. Unit: count.

+

write_counts

+

bigint

+

CN: total number of write times of a user's complex jobs on all DNs in the last 5 seconds. Unit: count.

+

DN: total number of write times of a user's complex jobs from the instance startup time to the current time. Unit: count.

+

read_speed

+

double precision

+

CN: average read rate of a user's complex jobs on a single DN in the last 5 seconds. (Unit: KB/s)

+

DN: indicates the average read rate of a user's complex jobs on a single DN in the last 5 seconds. (Unit: KB/s)

+

write_speed

+

double precision

+

CN: average write rate of a user's complex jobs on a single DN in the last 5 seconds. (Unit: KB/s)

+

DN: average write rate of a user's complex jobs on a single DN in the last 5 seconds. (Unit: KB/s)

+
+
+
+
+ +
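For example, a sketch ranking users by memory consumption (recall that total_memory = 0 means the available memory is not limited):
-- Sketch: per-user resource usage, heaviest memory consumers first.
SELECT username, used_memory, total_memory, used_cpu, used_space
  FROM pg_total_user_resource_info
 ORDER BY used_memory DESC;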
+ diff --git a/docs/dws/dev/dws_04_0791.html b/docs/dws/dev/dws_04_0791.html new file mode 100644 index 00000000..518b3dde --- /dev/null +++ b/docs/dws/dev/dws_04_0791.html @@ -0,0 +1,135 @@ + + +

PG_USER

+

PG_USER displays information about users who can access the database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_USER columns

Name

+

Type

+

Description

+

usename

+

name

+

User name

+

usesysid

+

oid

+

ID of this user

+

usecreatedb

+

boolean

+

Whether the user has the permission to create databases

+

usesuper

+

boolean

+

Whether the user is the initial system administrator with the highest rights

+

usecatupd

+

boolean

+

Whether the user can directly update system tables. Only the initial system administrator whose usesysid is 10 has this permission. It is not available for other users.

+

userepl

+

boolean

+

Whether the user has the permission to duplicate data streams

+

passwd

+

text

+

Encrypted user password. The value is displayed as ********.

+

valbegin

+

timestamp with time zone

+

Account validity start time; null if no start time

+

valuntil

+

timestamp with time zone

+

Password expiry time; null if no expiration

+

respool

+

name

+

Resource pool to which the user belongs

+

parentid

+

oid

+

Parent user OID

+

spacelimit

+

text

+

Storage space limit for the user's permanent tables

+

tempspacelimit

+

text

+

Storage space limit for the user's temporary tables

+

spillspacelimit

+

text

+

Space limit for operator disk flushing (spilling) by the user

+

useconfig

+

text[]

+

Session defaults for run-time configuration variables

+

nodegroup

+

name

+

Name of the logical cluster associated with the user. If no logical cluster is associated, this column is left blank.

+
+
+
+
+ +
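For example, to check each user's key privileges and resource pool binding, a query along the following lines can be used (the column selection is illustrative):

SELECT usename, usesysid, usecreatedb, usesuper, respool
FROM PG_USER;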
+ diff --git a/docs/dws/dev/dws_04_0792.html b/docs/dws/dev/dws_04_0792.html new file mode 100644 index 00000000..1e6ea51a --- /dev/null +++ b/docs/dws/dev/dws_04_0792.html @@ -0,0 +1,80 @@ + + +

PG_USER_MAPPINGS

+

PG_USER_MAPPINGS displays information about user mappings.

+

This is essentially a publicly readable view of PG_USER_MAPPING that leaves out the options column if the user has no rights to use it.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_USER_MAPPINGS columns

Name

+

Type

+

Reference

+

Description

+

umid

+

oid

+

PG_USER_MAPPING.oid

+

OID of the user mapping

+

srvid

+

oid

+

PG_FOREIGN_SERVER.oid

+

OID of the foreign server that contains this mapping

+

srvname

+

name

+

PG_FOREIGN_SERVER.srvname

+

Name of the foreign server

+

umuser

+

oid

+

PG_AUTHID.oid

+

OID of the local role being mapped, 0 if the user mapping is public

+

usename

+

name

+

-

+

Name of the local user to be mapped

+

umoptions

+

text[]

+

-

+

User mapping-specific options. If the current user is the owner of the foreign server, the value consists of keyword=value strings. Otherwise, the value is null.

+
+
+
+
+ +
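For example, a sample query such as the following lists the user mappings defined for each foreign server (as noted above, umoptions is non-null only for the owner of the foreign server):

SELECT srvname, usename, umoptions
FROM PG_USER_MAPPINGS;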
+ diff --git a/docs/dws/dev/dws_04_0793.html b/docs/dws/dev/dws_04_0793.html new file mode 100644 index 00000000..0d4e9f15 --- /dev/null +++ b/docs/dws/dev/dws_04_0793.html @@ -0,0 +1,61 @@ + + +

PG_VIEWS

+

PG_VIEWS displays basic information about each view in the database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_VIEWS columns

Name

+

Type

+

Reference

+

Description

+

schemaname

+

name

+

PG_NAMESPACE.nspname

+

Name of the schema that contains the view

+

viewname

+

name

+

PG_CLASS.relname

+

View name

+

viewowner

+

name

+

PG_AUTHID.rolname

+

Owner of the view

+

definition

+

text

+

-

+

Definition of the view

+
+
+
+
+ +
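For example, to look up the definition of a single view, a query along the following lines can be used (the schema and view names are illustrative placeholders):

SELECT definition
FROM PG_VIEWS
WHERE schemaname = 'public' AND viewname = 'my_view';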
+ diff --git a/docs/dws/dev/dws_04_0794.html b/docs/dws/dev/dws_04_0794.html new file mode 100644 index 00000000..acc44cab --- /dev/null +++ b/docs/dws/dev/dws_04_0794.html @@ -0,0 +1,102 @@ + + +

PG_WLM_STATISTICS

+

PG_WLM_STATISTICS displays information about workload management after the task is complete or the exception has been handled.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PG_WLM_STATISTICS columns

Name

+

Type

+

Description

+

statement

+

text

+

Statement executed for exception handling

+

block_time

+

bigint

+

Block time before the statement is executed

+

elapsed_time

+

bigint

+

Elapsed time when the statement is executed

+

total_cpu_time

+

bigint

+

Total time used by the CPU on the DN when the statement is executed for exception handling

+

qualification_time

+

bigint

+

Period during which the statement checks the skew ratio

+

cpu_skew_percent

+

integer

+

CPU usage skew on the DN when the statement is executed for exception handling

+

control_group

+

text

+

Cgroup used when the statement is executed for exception handling

+

status

+

text

+

Statement status after it is executed for exception handling

+
  • pending: The statement is waiting to be executed.
  • running: The statement is being executed.
  • finished: The execution is finished normally.
  • abort: The execution is unexpectedly terminated.
+

action

+

text

+

Actions when statements are executed for exception handling

+
  • abort indicates terminating the operation.
  • adjust indicates executing the Cgroup adjustment operations. Currently, you can only perform the demotion operation.
  • finish indicates that the operation is normally finished.
+

queryid

+

bigint

+

Internal query ID used for statement execution

+

threadid

+

bigint

+

ID of the backend thread

+
+
+
+
+ +
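For example, a sample query such as the following lists the statements that exception handling terminated (the action filter value is taken from the list above):

SELECT statement, block_time, elapsed_time, total_cpu_time, control_group
FROM PG_WLM_STATISTICS
WHERE action = 'abort';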
+ diff --git a/docs/dws/dev/dws_04_0795.html b/docs/dws/dev/dws_04_0795.html new file mode 100644 index 00000000..5f156317 --- /dev/null +++ b/docs/dws/dev/dws_04_0795.html @@ -0,0 +1,51 @@ + + +

PGXC_BULKLOAD_PROGRESS

+

PGXC_BULKLOAD_PROGRESS displays the import progress of data load services. Only GDS common file import is supported. This view is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_BULKLOAD_PROGRESS columns

Name

+

Type

+

Description

+

session_id

+

bigint

+

GDS session ID

+

query_id

+

bigint

+

Query ID. It is equivalent to debug_query_id.

+

query

+

text

+

Query statement

+

progress

+

text

+

Progress percentage

+
+
+
+
+ +
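For example, to monitor the progress of GDS imports that are currently running, all columns of the view can simply be queried:

SELECT session_id, query_id, query, progress
FROM PGXC_BULKLOAD_PROGRESS;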
+ diff --git a/docs/dws/dev/dws_04_0796.html b/docs/dws/dev/dws_04_0796.html new file mode 100644 index 00000000..dad0631f --- /dev/null +++ b/docs/dws/dev/dws_04_0796.html @@ -0,0 +1,123 @@ + + +

PGXC_BULKLOAD_STATISTICS

+

PGXC_BULKLOAD_STATISTICS displays real-time statistics about service execution, such as GDS, COPY, and \COPY, on a CN. This view summarizes the real-time execution status of import and export services that are being executed on each node in the current cluster. In this way, you can monitor the real-time progress of import and export services and locate performance problems.

+

Columns in PGXC_BULKLOAD_STATISTICS are the same as those in PG_BULKLOAD_STATISTICS. This is because PGXC_BULKLOAD_STATISTICS is essentially the summary result of querying PG_BULKLOAD_STATISTICS on each node in the cluster.

+

This view is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_BULKLOAD_STATISTICS columns

Name

+

Type

+

Description

+

node_name

+

text

+

Node name

+

db_name

+

text

+

Database name

+

query_id

+

bigint

+

Query ID. It is equivalent to debug_query_id.

+

tid

+

bigint

+

ID of the current thread

+

lwtid

+

integer

+

Lightweight thread ID

+

session_id

+

bigint

+

GDS session ID

+

direction

+

text

+

Service type. The options are gds to file, gds from file, gds to pipe, gds from pipe, copy from, and copy to.

+

query

+

text

+

Query statement

+

address

+

text

+

Location of the foreign table used for data import and export

+

query_start

+

timestamp with time zone

+

Start time of data import or export

+

total_bytes

+

bigint

+

Total size of data to be processed

+

This column is specified only when a GDS common file is being imported and the record in the row comes from a CN. Otherwise, this column is left unspecified.

+

phase

+

text

+

Current phase. The options are INITIALIZING, TRANSFER_DATA, and RELEASE_RESOURCE.

+

done_lines

+

bigint

+

Number of lines that have been transferred

+

done_bytes

+

bigint

+

Number of bytes that have been transferred

+
+
+
+
+ +
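For example, a sample query such as the following tracks the phase and throughput of the import and export services running on each node (the column selection and ordering are illustrative):

SELECT node_name, direction, phase, done_lines, done_bytes
FROM PGXC_BULKLOAD_STATISTICS
ORDER BY query_start;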
+ diff --git a/docs/dws/dev/dws_04_0797.html b/docs/dws/dev/dws_04_0797.html new file mode 100644 index 00000000..39521674 --- /dev/null +++ b/docs/dws/dev/dws_04_0797.html @@ -0,0 +1,86 @@ + + +

PGXC_COMM_CLIENT_INFO

+

PGXC_COMM_CLIENT_INFO stores the client connection information of all nodes. (You can query this view on a DN to view the information about the connection between the CN and DN.)

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_COMM_CLIENT_INFO columns

Name

+

Type

+

Description

+

node_name

+

text

+

Current node name.

+

app

+

text

+

Client application name

+

tid

+

bigint

+

Thread ID of the current thread.

+

lwtid

+

integer

+

Lightweight thread ID of the current thread.

+

query_id

+

bigint

+

Query ID. It is equivalent to debug_query_id.

+

socket

+

integer

+

Socket used by the connection. It is displayed only if the connection is a physical connection.

+

remote_ip

+

text

+

Peer node IP address.

+

remote_port

+

text

+

Peer node port.

+

logic_id

+

integer

+

If the connection is a logical connection, sid is displayed. If -1 is displayed, the current connection is a physical connection.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0798.html b/docs/dws/dev/dws_04_0798.html new file mode 100644 index 00000000..52147f59 --- /dev/null +++ b/docs/dws/dev/dws_04_0798.html @@ -0,0 +1,74 @@ + + +

PGXC_COMM_DELAY

+

PGXC_COMM_DELAY displays the communication library delay status for all the DNs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_COMM_DELAY columns

Name

+

Type

+

Description

+

node_name

+

text

+

Node name

+

remote_name

+

text

+

Name of the peer node

+

remote_host

+

text

+

IP address of the peer

+

stream_num

+

integer

+

Number of logical stream connections used by the current physical connection

+

min_delay

+

integer

+

Minimum delay of the current physical connection within 1 minute. Its unit is microsecond.

+
NOTE:

A negative result is invalid. Wait until the delay status is updated and query again.

+
+

average

+

integer

+

Average delay of the current physical connection within 1 minute. Its unit is microsecond.

+

max_delay

+

integer

+

Maximum delay of the current physical connection within 1 minute. Its unit is microsecond.

+
+
+
+
+ +
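For example, to find the physical connections with the highest latency, a query along the following lines can be used (the ordering is illustrative):

SELECT node_name, remote_name, stream_num, min_delay, average, max_delay
FROM PGXC_COMM_DELAY
ORDER BY max_delay DESC;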
+ diff --git a/docs/dws/dev/dws_04_0799.html b/docs/dws/dev/dws_04_0799.html new file mode 100644 index 00000000..b4724417 --- /dev/null +++ b/docs/dws/dev/dws_04_0799.html @@ -0,0 +1,143 @@ + + +

PGXC_COMM_RECV_STREAM

+

PGXC_COMM_RECV_STREAM displays the receiving stream status of the communication libraries for all the DNs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_COMM_RECV_STREAM columns

Name

+

Type

+

Description

+

node_name

+

text

+

Node name

+

local_tid

+

bigint

+

ID of the thread using this stream

+

remote_name

+

text

+

Name of the peer node

+

remote_tid

+

bigint

+

Peer thread ID

+

idx

+

integer

+

Peer DN ID in the local DN

+

sid

+

integer

+

Stream ID in the physical connection

+

tcp_sock

+

integer

+

TCP socket used in the stream

+

state

+

text

+

Current status of the stream

+
  • UNKNOWN: The logical connection is unknown.
  • READY: The logical connection is ready.
  • RUN: The logical connection receives packets normally.
  • HOLD: The logical connection is waiting to receive packets.
  • CLOSED: The logical connection is closed.
  • TO_CLOSED: The logical connection is to be closed.
+

query_id

+

bigint

+

debug_query_id corresponding to the stream

+

pn_id

+

integer

+

plan_node_id of the query executed by the stream

+

send_smp

+

integer

+

smpid of the sender of the query executed by the stream

+

recv_smp

+

integer

+

smpid of the receiver of the query executed by the stream

+

recv_bytes

+

bigint

+

Total data volume received from the stream. The unit is byte.

+

time

+

bigint

+

Current life cycle service duration of the stream. The unit is ms.

+

speed

+

bigint

+

Average receiving rate of the stream. The unit is byte/s.

+

quota

+

bigint

+

Current communication quota value of the stream. The unit is byte.

+

buff_usize

+

bigint

+

Current size of the data cache of the stream. The unit is byte.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0800.html b/docs/dws/dev/dws_04_0800.html new file mode 100644 index 00000000..1354d6a8 --- /dev/null +++ b/docs/dws/dev/dws_04_0800.html @@ -0,0 +1,143 @@ + + +

PGXC_COMM_SEND_STREAM

+

PGXC_COMM_SEND_STREAM displays the sending stream status of the communication libraries for all the DNs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_COMM_SEND_STREAM columns

Name

+

Type

+

Description

+

node_name

+

text

+

Node name

+

local_tid

+

bigint

+

ID of the thread using this stream

+

remote_name

+

text

+

Name of the peer node

+

remote_tid

+

bigint

+

Peer thread ID

+

idx

+

integer

+

Peer DN ID in the local DN

+

sid

+

integer

+

Stream ID in the physical connection

+

tcp_sock

+

integer

+

TCP socket used in the stream

+

state

+

text

+

Current status of the stream

+
  • UNKNOWN: The logical connection is unknown.
  • READY: The logical connection is ready.
  • RUN: The logical connection sends packets normally.
  • HOLD: The logical connection is waiting to send packets.
  • CLOSED: The logical connection is closed.
  • TO_CLOSED: The logical connection is to be closed.
+

query_id

+

bigint

+

debug_query_id corresponding to the stream

+

pn_id

+

integer

+

plan_node_id of the query executed by the stream

+

send_smp

+

integer

+

smpid of the sender of the query executed by the stream

+

recv_smp

+

integer

+

smpid of the receiver of the query executed by the stream

+

send_bytes

+

bigint

+

Total data volume sent by the stream. The unit is byte.

+

time

+

bigint

+

Current life cycle service duration of the stream. The unit is ms.

+

speed

+

bigint

+

Average sending rate of the stream. The unit is byte/s.

+

quota

+

bigint

+

Current communication quota value of the stream. The unit is byte.

+

wait_quota

+

bigint

+

Extra time generated when the stream waits the quota value. The unit is ms.

+
+
+
+
+ +
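For example, to inspect the sending streams of one query, a sample query such as the following can be used (the query_id value is an illustrative placeholder):

SELECT node_name, remote_name, state, send_bytes, speed, quota
FROM PGXC_COMM_SEND_STREAM
WHERE query_id = 20971544;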
+ diff --git a/docs/dws/dev/dws_04_0801.html b/docs/dws/dev/dws_04_0801.html new file mode 100644 index 00000000..be558190 --- /dev/null +++ b/docs/dws/dev/dws_04_0801.html @@ -0,0 +1,114 @@ + + +

PGXC_COMM_STATUS

+

PGXC_COMM_STATUS displays the communication library status for all the DNs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_COMM_STATUS columns

Name

+

Type

+

Description

+

node_name

+

text

+

Node name

+

rxpck/s

+

integer

+

Packet receiving rate of the communication library on a node. The unit is packet/s.

+

txpck/s

+

integer

+

Packet sending rate of the communication library on a node. The unit is packet/s.

+

rxkB/s

+

bigint

+

Receiving rate of the communication library on a node. The unit is KB/s.

+

txkB/s

+

bigint

+

Sending rate of the communication library on a node. The unit is KB/s.

+

buffer

+

bigint

+

Size of the buffer of the Cmailbox.

+

memKB(libcomm)

+

bigint

+

Communication memory size of the libcomm process, in KB.

+

memKB(libpq)

+

bigint

+

Communication memory size of the libpq process, in KB.

+

%USED(PM)

+

integer

+

Real-time usage of the postmaster thread.

+

%USED (sflow)

+

integer

+

Real-time usage of the gs_sender_flow_controller thread.

+

%USED (rflow)

+

integer

+

Real-time usage of the gs_receiver_flow_controller thread.

+

%USED (rloop)

+

integer

+

Highest real-time usage among multiple gs_receivers_loop threads.

+

stream

+

integer

+

Total number of used logical connections.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0802.html b/docs/dws/dev/dws_04_0802.html new file mode 100644 index 00000000..b2428216 --- /dev/null +++ b/docs/dws/dev/dws_04_0802.html @@ -0,0 +1,185 @@ + + +

PGXC_DEADLOCK

+

PGXC_DEADLOCK displays lock wait information generated due to distributed deadlocks.

+

Currently, PGXC_DEADLOCK collects only lock wait information about locks whose locktype is relation, partition, page, tuple, or transactionid.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_DEADLOCK columns

Name

+

Type

+

Description

+

locktype

+

text

+

Type of the locked object

+

nodename

+

name

+

Name of the node where the locked object resides

+

dbname

+

name

+

Name of the database where the locked object resides. The value is NULL if the locked object is a transaction.

+

nspname

+

name

+

Name of the namespace of the locked object

+

relname

+

name

+

Name of the relation targeted by the lock. The value is NULL if the object is not a relation or part of a relation.

+

partname

+

name

+

Name of the partition targeted by the lock. The value is NULL if the locked object is not a partition.

+

page

+

integer

+

Number of the page targeted by the lock. The value is NULL if the locked object is neither a page nor a tuple.

+

tuple

+

smallint

+

Number of the tuple targeted by the lock. The value is NULL if the locked object is not a tuple.

+

transactionid

+

xid

+

ID of the transaction targeted by the lock. The value is NULL if the locked object is not a transaction.

+

waitusername

+

name

+

Name of the user who waits for the lock

+

waitgxid

+

xid

+

ID of the transaction that waits for the lock

+

waitxactstart

+

timestamp with time zone

+

Start time of the transaction that waits for the lock

+

waitqueryid

+

bigint

+

Latest query ID of the thread that waits for the lock

+

waitquery

+

text

+

Latest query statement of the thread that waits for the lock

+

waitpid

+

bigint

+

ID of the thread that waits for the lock

+

waitmode

+

text

+

Mode of the lock being waited for

+

holdusername

+

name

+

Name of the user who holds the lock

+

holdgxid

+

xid

+

ID of the transaction that holds the lock

+

holdxactstart

+

timestamp with time zone

+

Start time of the transaction that holds the lock

+

holdqueryid

+

bigint

+

Latest query ID of the thread that holds the lock

+

holdquery

+

text

+

Latest query statement of the thread that holds the lock

+

holdpid

+

bigint

+

ID of the thread that holds the lock

+

holdmode

+

text

+

Mode of the held lock

+
+
+
+
+ +
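For example, a sample query such as the following shows, for each lock wait caused by a distributed deadlock, which statement is waiting and which statement holds the lock (the column selection is illustrative):

SELECT locktype, nodename, relname, waitusername, waitquery, holdusername, holdquery
FROM PGXC_DEADLOCK;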
+ diff --git a/docs/dws/dev/dws_04_0803.html b/docs/dws/dev/dws_04_0803.html new file mode 100644 index 00000000..a8dfd2e6 --- /dev/null +++ b/docs/dws/dev/dws_04_0803.html @@ -0,0 +1,89 @@ + + +

PGXC_GET_STAT_ALL_TABLES

+

PGXC_GET_STAT_ALL_TABLES displays information about insertion, update, and deletion operations on tables and the dirty page rate of tables.

+

Before running VACUUM FULL on a system catalog with a high dirty page rate, ensure that no user is performing operations on it.

+

You are advised to run VACUUM FULL on tables (excluding system catalogs) whose dirty page rate exceeds 30%, or to run it as required by service scenarios.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_GET_STAT_ALL_TABLES columns

Name

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

relname

+

name

+

Table name

+

schemaname

+

name

+

Schema name of the table

+

n_tup_ins

+

numeric

+

Number of inserted tuples

+

n_tup_upd

+

numeric

+

Number of updated tuples

+

n_tup_del

+

numeric

+

Number of deleted tuples

+

n_live_tup

+

numeric

+

Number of live tuples

+

n_dead_tup

+

numeric

+

Number of dead tuples

+

page_dirty_rate

+

numeric(5,2)

+

Dirty page rate (%) of a table

+
+
+

GaussDB(DWS) also provides the pgxc_get_stat_dirty_tables(int dirty_percent, int n_tuples) and pgxc_get_stat_dirty_tables(int dirty_percent, int n_tuples, text schema) functions to quickly filter out tables whose dirty page rate is greater than dirty_percent, number of dead tuples is greater than n_tuples, and schema name is schema. For details, see "Functions and Operators > System Administration Functions > Other Functions" in the SQL Syntax.

+
+
+ +
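For example, the following sample query lists user tables whose dirty page rate exceeds the 30% threshold recommended above (the schema exclusion list is illustrative):

SELECT schemaname, relname, n_dead_tup, page_dirty_rate
FROM PGXC_GET_STAT_ALL_TABLES
WHERE page_dirty_rate > 30
  AND schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY page_dirty_rate DESC;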
+ diff --git a/docs/dws/dev/dws_04_0804.html b/docs/dws/dev/dws_04_0804.html new file mode 100644 index 00000000..7babfb68 --- /dev/null +++ b/docs/dws/dev/dws_04_0804.html @@ -0,0 +1,102 @@ + + +

PGXC_GET_STAT_ALL_PARTITIONS

+

PGXC_GET_STAT_ALL_PARTITIONS displays information about insertion, update, and deletion operations on partitions of partitioned tables and the dirty page rate of tables.

+

The statistics of this view depend on the ANALYZE operation. To obtain the most accurate information, perform the ANALYZE operation on the partitioned table first.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_GET_STAT_ALL_PARTITIONS columns

Column

+

Type

+

Description

+

relid

+

oid

+

Table OID

+

partid

+

oid

+

Partition OID

+

schename

+

name

+

Schema name of a table

+

relname

+

name

+

Table name

+

partname

+

name

+

Partition name

+

n_tup_ins

+

numeric

+

Number of inserted tuples

+

n_tup_upd

+

numeric

+

Number of updated tuples

+

n_tup_del

+

numeric

+

Number of deleted tuples

+

n_live_tup

+

numeric

+

Number of live tuples

+

n_dead_tup

+

numeric

+

Number of dead tuples

+

page_dirty_rate

+

numeric(5,2)

+

Dirty page rate (%) of a table

+
+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0805.html b/docs/dws/dev/dws_04_0805.html new file mode 100644 index 00000000..00f336c8 --- /dev/null +++ b/docs/dws/dev/dws_04_0805.html @@ -0,0 +1,86 @@ + + +

PGXC_GET_TABLE_SKEWNESS

+

PGXC_GET_TABLE_SKEWNESS displays the data skew on tables in the current database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_GET_TABLE_SKEWNESS columns

Name

+

Type

+

Description

+

schemaname

+

name

+

Schema name of a table

+

tablename

+

name

+

Name of a table

+

totalsize

+

numeric

+

Total size of a table, in bytes

+

avgsize

+

numeric(1000,0)

+

Average table size (total table size divided by the number of DNs), which is the ideal size of tables distributed on each DN

+

maxratio

+

numeric(4,3)

+

Ratio of the maximum table size on a single DN to the total table size

+

minratio

+

numeric(4,3)

+

Ratio of the minimum table size on a single DN to the total table size

+

skewsize

+

bigint

+

Table skew size (the maximum table size on a single DN minus the minimum table size on a single DN)

+

skewratio

+

numeric(4,3)

+

Table skew rate (skew size divided by total table size)

+

skewstddev

+

numeric(1000,0)

+

Standard deviation of table distribution (For two tables of the same size, a larger deviation indicates a more severe skew.)

+
+
+
+
+ +
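For example, a sample query such as the following surfaces the most severely skewed tables (the 0.05 threshold is illustrative):

SELECT schemaname, tablename, totalsize, skewratio
FROM PGXC_GET_TABLE_SKEWNESS
WHERE skewratio > 0.05
ORDER BY skewratio DESC;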
+ diff --git a/docs/dws/dev/dws_04_0806.html b/docs/dws/dev/dws_04_0806.html new file mode 100644 index 00000000..60cd6e0a --- /dev/null +++ b/docs/dws/dev/dws_04_0806.html @@ -0,0 +1,65 @@ + + +

PGXC_GTM_SNAPSHOT_STATUS

+

PGXC_GTM_SNAPSHOT_STATUS displays transaction information on the current GTM.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_GTM_SNAPSHOT_STATUS columns

Name

+

Type

+

Description

+

xmin

+

xid

+

Minimum ID of the running transactions

+

xmax

+

xid

+

ID of the next transaction after the executed transaction with the maximum ID

+

csn

+

integer

+

Sequence number of the transaction to be committed

+

oldestxmin

+

xid

+

Minimum ID of the executed transactions

+

xcnt

+

integer

+

Number of the running transactions

+

running_xids

+

text

+

IDs of the running transactions

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0807.html b/docs/dws/dev/dws_04_0807.html new file mode 100644 index 00000000..a00049f8 --- /dev/null +++ b/docs/dws/dev/dws_04_0807.html @@ -0,0 +1,11 @@ + + +

PGXC_INSTANCE_TIME

+

PGXC_INSTANCE_TIME displays the running time of processes on each node in the cluster and the time consumed in each execution phase. Except the node_name column, the other columns are the same as those in the PV_INSTANCE_TIME view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0808.html b/docs/dws/dev/dws_04_0808.html new file mode 100644 index 00000000..4c796103 --- /dev/null +++ b/docs/dws/dev/dws_04_0808.html @@ -0,0 +1,12 @@ + + +

PGXC_INSTR_UNIQUE_SQL

+

PGXC_INSTR_UNIQUE_SQL displays the complete Unique SQL statistics of all CN nodes in the cluster.

+

Only the system administrator can access this view. For details about the field, see GS_INSTR_UNIQUE_SQL.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0809.html b/docs/dws/dev/dws_04_0809.html new file mode 100644 index 00000000..0ac639bd --- /dev/null +++ b/docs/dws/dev/dws_04_0809.html @@ -0,0 +1,144 @@ + + +

PGXC_LOCK_CONFLICTS

+

PGXC_LOCK_CONFLICTS displays information about conflicting locks in the cluster.

+

When a lock is waiting for another lock or another lock is waiting for this one, a lock conflict occurs.

+

Currently, PGXC_LOCK_CONFLICTS collects only information about locks whose locktype is relation, partition, page, tuple, or transactionid.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_LOCK_CONFLICTS columns

Name

+

Type

+

Description

+

locktype

+

text

+

Type of the locked object

+

nodename

+

name

+

Name of the node where the locked object resides

+

dbname

+

name

+

Name of the database where the locked object resides. The value is NULL if the locked object is a transaction.

+

nspname

+

name

+

Name of the namespace of the locked object

+

relname

+

name

+

Name of the relation targeted by the lock. The value is NULL if the object is not a relation or part of a relation.

+

partname

+

name

+

Name of the partition targeted by the lock. The value is NULL if the locked object is not a partition.

+

page

+

integer

+

Number of the page targeted by the lock. The value is NULL if the locked object is neither a page nor a tuple.

+

tuple

+

smallint

+

Number of the tuple targeted by the lock. The value is NULL if the locked object is not a tuple.

+

transactionid

+

xid

+

ID of the transaction targeted by the lock. The value is NULL if the locked object is not a transaction.

+

username

+

name

+

Name of the user who applies for the lock

+

gxid

+

xid

+

ID of the transaction that applies for the lock

+

xactstart

+

timestamp with time zone

+

Start time of the transaction that applies for the lock

+

queryid

+

bigint

+

Latest query ID of the thread that applies for the lock

+

query

+

text

+

Latest query statement of the thread that applies for the lock

+

pid

+

bigint

+

ID of the thread that applies for the lock

+

mode

+

text

+

Lock mode

+

granted

+

boolean

+
  • TRUE if the lock has been held
  • FALSE if the lock is still waiting for another lock
+
+
+
+
+ +
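For example, to see which statements are blocked and which statements hold the locks they are waiting for, a query along the following lines can be used (the column selection and ordering are illustrative):

SELECT locktype, nodename, relname, username, query, granted
FROM PGXC_LOCK_CONFLICTS
ORDER BY nodename, granted DESC;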
+ diff --git a/docs/dws/dev/dws_04_0810.html b/docs/dws/dev/dws_04_0810.html new file mode 100644 index 00000000..1d7715b3 --- /dev/null +++ b/docs/dws/dev/dws_04_0810.html @@ -0,0 +1,72 @@ + + +

PGXC_NODE_ENV

+

PGXC_NODE_ENV displays the environment variable information about all nodes in a cluster.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_NODE_ENV columns

Name

+

Type

+

Description

+

node_name

+

text

+

Names of all nodes in the cluster

+

host

+

text

+

Host names of all nodes in the cluster

+

process

+

integer

+

Process IDs of all nodes in the cluster

+

port

+

integer

+

Port numbers of all nodes in the cluster

+

installpath

+

text

+

Installation directory of all nodes in the cluster

+

datapath

+

text

+

Data directory of all nodes in the cluster

+

log_directory

+

text

+

Log directory of all nodes in the cluster

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0811.html b/docs/dws/dev/dws_04_0811.html new file mode 100644 index 00000000..ef9b8131 --- /dev/null +++ b/docs/dws/dev/dws_04_0811.html @@ -0,0 +1,11 @@ + + +

PGXC_NODE_STAT_RESET_TIME

+

PGXC_NODE_STAT_RESET_TIME displays the time when statistics of each node in the cluster are reset. All columns except node_name are the same as those in the GS_NODE_STAT_RESET_TIME view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0812.html b/docs/dws/dev/dws_04_0812.html new file mode 100644 index 00000000..87321423 --- /dev/null +++ b/docs/dws/dev/dws_04_0812.html @@ -0,0 +1,11 @@ + + +

PGXC_OS_RUN_INFO

+

PGXC_OS_RUN_INFO displays the OS running status of each node in the cluster. All columns except node_name are the same as those in the PV_OS_RUN_INFO view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0813.html b/docs/dws/dev/dws_04_0813.html new file mode 100644 index 00000000..3efbc9a1 --- /dev/null +++ b/docs/dws/dev/dws_04_0813.html @@ -0,0 +1,58 @@ + + +

PGXC_OS_THREADS

+

PGXC_OS_THREADS displays the thread status information of all normal nodes in the current cluster.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_OS_THREADS columns

Name

+

Type

+

Description

+

node_name

+

text

+

All normal node names in the cluster

+

pid

+

bigint

+

IDs of running threads among all normal node processes in the current cluster

+

lwpid

+

integer

+

Lightweight thread ID corresponding to the PID

+

thread_name

+

text

+

Thread name corresponding to the PID

+

creation_time

+

timestamp with time zone

+

Thread creation time corresponding to the PID

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0814.html b/docs/dws/dev/dws_04_0814.html new file mode 100644 index 00000000..dca3cfe7 --- /dev/null +++ b/docs/dws/dev/dws_04_0814.html @@ -0,0 +1,30 @@ + + +

PGXC_PREPARED_XACTS

+

PGXC_PREPARED_XACTS displays the two-phase transactions in the prepared phase.

+ +
+ + + + + + + + + +
Table 1 PGXC_PREPARED_XACTS columns

Name

+

Type

+

Description

+

pgxc_prepared_xact

+

text

+

Two-phase transactions in prepared phase

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0815.html b/docs/dws/dev/dws_04_0815.html new file mode 100644 index 00000000..7049ae56 --- /dev/null +++ b/docs/dws/dev/dws_04_0815.html @@ -0,0 +1,11 @@ + + +

PGXC_REDO_STAT

+

PGXC_REDO_STAT displays statistics on redoing Xlogs of each node in the cluster. All columns except node_name are the same as those in the PV_REDO_STAT view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0816.html b/docs/dws/dev/dws_04_0816.html new file mode 100644 index 00000000..e34ea452 --- /dev/null +++ b/docs/dws/dev/dws_04_0816.html @@ -0,0 +1,11 @@ + + +

PGXC_REL_IOSTAT

+

PGXC_REL_IOSTAT displays statistics on disk read and write of each node in the cluster. All columns except node_name are the same as those in the GS_REL_IOSTAT view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0817.html b/docs/dws/dev/dws_04_0817.html new file mode 100644 index 00000000..1c3f153f --- /dev/null +++ b/docs/dws/dev/dws_04_0817.html @@ -0,0 +1,11 @@ + + +

PGXC_REPLICATION_SLOTS

+

PGXC_REPLICATION_SLOTS displays the replication information of DNs in the cluster. All columns except node_name are the same as those in the PG_REPLICATION_SLOTS view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0818.html b/docs/dws/dev/dws_04_0818.html new file mode 100644 index 00000000..158da9fe --- /dev/null +++ b/docs/dws/dev/dws_04_0818.html @@ -0,0 +1,93 @@ + + +

PGXC_RUNNING_XACTS

+

PGXC_RUNNING_XACTS displays information about running transactions on each node in the cluster. The content is the same as that displayed in PG_RUNNING_XACTS.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_RUNNING_XACTS columns

Name

+

Type

+

Description

+

handle

+

integer

+

Handle corresponding to the transaction in GTM

+

gxid

+

xid

+

Transaction ID

+

state

+

tinyint

+

Transaction status (3: prepared or 0: starting)

+

node

+

text

+

Node name

+

xmin

+

xid

+

Minimum transaction ID xmin on the node

+

vacuum

+

boolean

+

Whether the current transaction is lazy vacuum

+

timeline

+

bigint

+

Number of database restarts

+

prepare_xid

+

xid

+

Transaction ID in prepared state. If the status is not prepared, the value is 0.

+

pid

+

bigint

+

Thread ID corresponding to the transaction

+

next_xid

+

xid

+

Transaction ID sent from a CN to a DN

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0819.html b/docs/dws/dev/dws_04_0819.html new file mode 100644 index 00000000..8557c663 --- /dev/null +++ b/docs/dws/dev/dws_04_0819.html @@ -0,0 +1,11 @@ + + +

PGXC_SETTINGS

+

PGXC_SETTINGS displays the database running status of each node in the cluster. All columns except node_name are the same as those in the PG_SETTINGS view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0820.html b/docs/dws/dev/dws_04_0820.html new file mode 100644 index 00000000..4a38007c --- /dev/null +++ b/docs/dws/dev/dws_04_0820.html @@ -0,0 +1,229 @@ + + +

PGXC_STAT_ACTIVITY

+

PGXC_STAT_ACTIVITY displays information about the query performed by the current user on all the CNs in the current cluster.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_STAT_ACTIVITY columns

Name

+

Type

+

Description

+

coorname

+

text

+

Name of the CN in the current cluster

+

datid

+

oid

+

OID of the database that the user session connects to in the backend

+

datname

+

name

+

Name of the database that the user session connects to in the backend

+

pid

+

bigint

+

ID of the backend thread

+

usesysid

+

oid

+

OID of the user logging in to the backend

+

usename

+

name

+

Name of the user logging in to the backend

+

application_name

+

text

+

Name of the application connected to the backend

+

client_addr

+

inet

+

IP address of the client connected to the backend. If this column is null, it indicates either that the client is connected via a Unix socket on the server machine or that this is an internal process such as autovacuum.

+

client_hostname

+

text

+

Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column will only be non-null for IP connections, and only when log_hostname is enabled.

+

client_port

+

integer

+

TCP port number that the client uses for communication with this backend, or -1 if a Unix socket is used

+

backend_start

+

timestamp with time zone

+

Startup time of the backend process, that is, the time when the client connects to the server

+

xact_start

+

timestamp with time zone

+

Time when the current transaction was started, or NULL if no transaction is active. If the current query is the first of its transaction, this column is equal to the query_start column.

+

query_start

+

timestamp with time zone

+

Time when the currently active query was started, or time when the last query was started if state is not active

+

state_change

+

timestamp with time zone

+

Time for the last status change

+

waiting

+

boolean

+

If the backend is currently waiting for a lock, the value is true.

+

enqueue

+

text

+
Queuing status of a statement. Its value can be:
  • waiting in global queue: The statement is in the global queue.
  • waiting in respool queue: The statement is in the resource pool queue.
  • waiting in ccn queue: The job is in the CCN queue.
  • Empty or no waiting queue: The statement is running.
+
+

state

+

text

+

Overall state of the backend. Its value can be:

+
  • active: The backend is executing a query.
  • idle: The backend is waiting for a new client command.
  • idle in transaction: The backend is in a transaction, but there is no statement being executed in the transaction.
  • idle in transaction (aborted): The backend is in a transaction, but there are statements failed in the transaction.
  • fastpath function call: The backend is executing a fast-path function.
  • disabled: This state is reported if track_activities is disabled in this backend.
+
NOTE:

Only system administrators can view the session status of their accounts. The state information of other accounts is empty. For example, after user judy is connected to the database, the state information of user joe and the initial user dbadmin in pgxc_stat_activity is empty.

+
SELECT datname, usename, usesysid, state,pid FROM pgxc_stat_activity;
+
 datname  | usename | usesysid | state  |       pid
+----------+---------+----------+--------+-----------------
+ gaussdb | dbadmin     |       10 |        | 139968752121616
+ gaussdb | dbadmin     |       10 |        | 139968903116560
+ db_tpcds | judy    |    16398 | active | 139968391403280
+ gaussdb | dbadmin     |       10 |        | 139968643069712
+ gaussdb | dbadmin     |       10 |        | 139968680818448
+ gaussdb | joe     |    16390 |        | 139968563377936
+(6 rows)
+
+

resource_pool

+

name

+

Resource pool used by the user

+

query_id

+

bigint

+

ID of a query

+

query

+

text

+

Text of this backend's most recent query. If state is active, this column shows the running query. In all other states, it shows the last query that was executed.

+

connection_info

+

text

+

A string in JSON format recording the driver type, driver version, driver deployment path, and process owner of the connected database (for details, see connection_info)

+
+
+

Examples

Run the following command to view blocked query statements.

+
1
SELECT datname,usename,state,query FROM PGXC_STAT_ACTIVITY WHERE waiting = true;
+
+ +
+

Check the working status of the snapshot thread.

+
1
SELECT application_name,backend_start,state_change,state,query FROM PGXC_STAT_ACTIVITY WHERE application_name='WDRSnapshot';
+
+ +
+

View the running query statements.

+
1
+2
+3
+4
+5
+6
+7
+8
+9
SELECT datname,usename,state,pid FROM PGXC_STAT_ACTIVITY;
+ datname  | usename | state  |       pid
+----------+---------+--------+-----------------
+ gaussdb | Ruby    | active | 140298793514752
+ gaussdb | Ruby    | active | 140298718004992
+ gaussdb | Ruby    | idle   | 140298650908416
+ gaussdb | Ruby    | idle   | 140298625742592
+ gaussdb | dbadmin | active | 140298575406848
+(5 rows)
+
+ +
+

View the number of session connections that have been used by the postgres database. In the query result, the count value is the number of such connections.

+
1
+2
+3
+4
+5
SELECT COUNT(*) FROM PGXC_STAT_ACTIVITY WHERE DATNAME='postgres';
+ count 
+-------
+     1
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0821.html b/docs/dws/dev/dws_04_0821.html new file mode 100644 index 00000000..1be8bd12 --- /dev/null +++ b/docs/dws/dev/dws_04_0821.html @@ -0,0 +1,79 @@ + + +

PGXC_STAT_BAD_BLOCK

+

PGXC_STAT_BAD_BLOCK displays statistics about page or CU verification failures after all nodes in a cluster are started.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_STAT_BAD_BLOCK columns

Name

+

Type

+

Description

+

nodename

+

text

+

Node name

+

databaseid

+

integer

+

Database OID

+

tablespaceid

+

integer

+

Tablespace OID

+

relfilenode

+

integer

+

File object ID

+

forknum

+

integer

+

File type

+

error_count

+

integer

+

Number of verification failures

+

first_time

+

timestamp with time zone

+

Time of the first occurrence

+

last_time

+

timestamp with time zone

+

Time of the latest occurrence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0822.html b/docs/dws/dev/dws_04_0822.html new file mode 100644 index 00000000..009f9a25 --- /dev/null +++ b/docs/dws/dev/dws_04_0822.html @@ -0,0 +1,11 @@ + + +

PGXC_STAT_BGWRITER

+

PGXC_STAT_BGWRITER displays statistics on the background writer of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_BGWRITER view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0823.html b/docs/dws/dev/dws_04_0823.html new file mode 100644 index 00000000..fcad579a --- /dev/null +++ b/docs/dws/dev/dws_04_0823.html @@ -0,0 +1,11 @@ + + +

PGXC_STAT_DATABASE

+

PGXC_STAT_DATABASE displays the database status and statistics of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_DATABASE view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0824.html b/docs/dws/dev/dws_04_0824.html new file mode 100644 index 00000000..09329d62 --- /dev/null +++ b/docs/dws/dev/dws_04_0824.html @@ -0,0 +1,11 @@ + + +

PGXC_STAT_REPLICATION

+

PGXC_STAT_REPLICATION displays the log synchronization status of each node in the cluster. All columns except node_name are the same as those in the PG_STAT_REPLICATION view. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0825.html b/docs/dws/dev/dws_04_0825.html new file mode 100644 index 00000000..0e5764d0 --- /dev/null +++ b/docs/dws/dev/dws_04_0825.html @@ -0,0 +1,14 @@ + + +

PGXC_SQL_COUNT

+

PGXC_SQL_COUNT displays the node-level and user-level statistics for the SQL statements of SELECT, INSERT, UPDATE, DELETE, and MERGE INTO and DDL, DML, and DCL statements of each CN in a cluster in real time, identifies query types with heavy load, and measures the capability of a cluster or a node to perform a specific type of query. You can calculate QPS based on the quantities and response time of the preceding types of SQL statements at certain time points. For example, USER1 SELECT is counted as X1 at T1 and as X2 at T2. The SELECT QPS of the user can be calculated as follows: (X2 – X1)/(T2 – T1). In this way, the system can draw cluster-user-level QPS curve graphs and determine cluster throughput, monitoring changes in the service load of each user. If there are drastic changes, the system can locate the specific statement type (such as SELECT, INSERT, UPDATE, DELETE, and MERGE INTO). You can also observe QPS curves to determine the time points when problems occur and then locate the problems using other tools. The curves provide a basis for optimizing cluster performance and locating problems.

+

Columns in the PGXC_SQL_COUNT view are the same as those in the GS_SQL_COUNT view. For details, see Table 1.

+

If a MERGE INTO statement can be pushed down and a DN receives it, the statement will be counted on the DN and the value of the mergeinto_count column will increment by 1. If the pushdown is not allowed, the DN will receive an UPDATE or INSERT statement. In this case, the update_count or insert_count column will increment by 1.

+
+
+
+ +
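As a sketch of the QPS calculation described above, the counters can be sampled twice and the difference divided by the sampling interval. The column names below follow the GS_SQL_COUNT view referenced above and should be verified against its Table 1; the user filter is an illustrative placeholder:

SELECT node_name, user_name, select_count
FROM PGXC_SQL_COUNT
WHERE user_name = 'user1';
-- Run the same query again after T seconds; SELECT QPS = (X2 - X1) / T.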
+ diff --git a/docs/dws/dev/dws_04_0826.html b/docs/dws/dev/dws_04_0826.html new file mode 100644 index 00000000..a8770619 --- /dev/null +++ b/docs/dws/dev/dws_04_0826.html @@ -0,0 +1,152 @@ + + +

PGXC_THREAD_WAIT_STATUS

+

PGXC_THREAD_WAIT_STATUS displays the call hierarchy between the threads of SQL statements on all nodes in a cluster, as well as the block waiting status of each thread, so that you can easily locate the causes of process response failures and similar phenomena.

+

The PGXC_THREAD_WAIT_STATUS view has the same definition as the PG_THREAD_WAIT_STATUS view, because it is essentially the summary of querying the PG_THREAD_WAIT_STATUS view on each node in the cluster.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_THREAD_WAIT_STATUS columns

Name

+

Type

+

Description

+

node_name

+

text

+

Current node name

+

db_name

+

text

+

Database name

+

thread_name

+

text

+

Thread name

+

query_id

+

bigint

+

Query ID. It is equivalent to debug_query_id.

+

tid

+

bigint

+

Thread ID of the current thread

+

lwtid

+

integer

+

Lightweight thread ID of the current thread

+

ptid

+

integer

+

Parent thread of the streaming thread

+

tlevel

+

integer

+

Level of the streaming thread

+

smpid

+

integer

+

Concurrent thread ID

+

wait_status

+

text

+

Waiting status of the current thread. For details about the waiting status, see Table 2.

+

wait_event

+

text

+

If wait_status is acquire lock, acquire lwlock, or wait io, this column describes the lock, lightweight lock, and I/O information, respectively. If wait_status is not any of the three values, this column is empty.

+
+
+

Example:

+

Assume you run a statement on coordinator1, and no response is returned after a long period of time. In this case, establish another connection to coordinator1 to check the thread status on it.

+
1
+2
+3
+4
+5
 select * from pg_thread_wait_status where query_id > 0;
+  node_name   | db_name  | thread_name  | query_id |       tid       | lwtid | ptid  | tlevel | smpid |     wait_status   |   wait_event   
+--------------+----------+--------------+----------+-----------------+-------+-------+--------+-------+----------------------
+ coordinator1 | gaussdb | gsql         | 20971544 | 140274089064208 | 22579 |       |      0 |     0 | wait node: datanode4 |
+(1 rows)
+
+ +
+

Furthermore, you can view the statement working status on each node in the entire cluster. In the following example, no DNs have threads blocked, and there is a huge amount of data to be read, causing slow execution.

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
select * from pgxc_thread_wait_status where query_id=20971544;
+  node_name   | db_name  | thread_name  | query_id |       tid       | lwtid | ptid  | tlevel | smpid |     wait_status   |  wait_event   
+--------------+----------+--------------+----------+-----------------+-------+-------+--------+-------+----------------------
+ datanode1    | gaussdb | coordinator1 | 20971544 | 139902867994384 | 22735 |       |      0 |     0 | wait node: datanode3 |
+ datanode1    | gaussdb | coordinator1 | 20971544 | 139902838634256 | 22970 | 22735 |      5 |     0 | synchronize quit     |
+ datanode1    | gaussdb | coordinator1 | 20971544 | 139902607947536 | 22972 | 22735 |      5 |     1 | synchronize quit     |
+ datanode2    | gaussdb | coordinator1 | 20971544 | 140632156796688 | 22736 |       |      0 |     0 | wait node: datanode3 |
+ datanode2    | gaussdb | coordinator1 | 20971544 | 140632030967568 | 22974 | 22736 |      5 |     0 | synchronize quit     |
+ datanode2    | gaussdb | coordinator1 | 20971544 | 140632081299216 | 22975 | 22736 |      5 |     1 | synchronize quit     |
+ datanode3    | gaussdb | coordinator1 | 20971544 | 140323627988752 | 22737 |       |      0 |     0 | wait node: datanode3 |
+ datanode3    | gaussdb | coordinator1 | 20971544 | 140323523131152 | 22976 | 22737 |      5 |     0 | net flush data       |
+ datanode3    | gaussdb | coordinator1 | 20971544 | 140323548296976 | 22978 | 22737 |      5 |     1 | net flush data
+ datanode4    | gaussdb | coordinator1 | 20971544 | 140103024375568 | 22738 |       |      0 |     0 | wait node: datanode3
+ datanode4    | gaussdb | coordinator1 | 20971544 | 140102919517968 | 22979 | 22738 |      5 |     0 | synchronize quit     |
+ datanode4    | gaussdb | coordinator1 | 20971544 | 140102969849616 | 22980 | 22738 |      5 |     1 | synchronize quit     |
+ coordinator1 | gaussdb | gsql         | 20971544 | 140274089064208 | 22579 |       |      0 |     0 | wait node: datanode4  |
+(13 rows)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0827.html b/docs/dws/dev/dws_04_0827.html new file mode 100644 index 00000000..1a8d3cd1 --- /dev/null +++ b/docs/dws/dev/dws_04_0827.html @@ -0,0 +1,45 @@ + + +

PGXC_TOTAL_MEMORY_DETAIL

+

PGXC_TOTAL_MEMORY_DETAIL displays the memory usage in the cluster.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 PGXC_TOTAL_MEMORY_DETAIL columns

Name

+

Type

+

Description

+

nodename

+

text

+

Node name

+

memorytype

+

text

+

Memory name, which can be set to any of the following values:

+
  • max_process_memory: memory used by a GaussDB(DWS) cluster instance
  • process_used_memory: memory used by a GaussDB(DWS) process
  • max_dynamic_memory: maximum dynamic memory
  • dynamic_used_memory: used dynamic memory
  • dynamic_peak_memory: dynamic peak value of the memory
  • dynamic_used_shrctx: maximum dynamic shared memory context
  • dynamic_peak_shrctx: dynamic peak value of the shared memory context
  • max_shared_memory: maximum shared memory
  • shared_used_memory: used shared memory
  • max_cstore_memory: maximum memory allowed for column store
  • cstore_used_memory: memory used for column store
  • max_sctpcomm_memory: maximum memory allowed for the communication library
  • sctpcomm_used_memory: memory used for the communication library
  • sctpcomm_peak_memory: memory peak of the communication library
  • other_used_memory: other used memory
  • gpu_max_dynamic_memory: maximum GPU memory
  • gpu_dynamic_used_memory: sum of the available GPU memory and temporary GPU memory
  • gpu_dynamic_peak_memory: maximum memory used for GPU
  • pooler_conn_memory: memory used for pooler connections
  • pooler_freeconn_memory: memory used for idle pooler connections
  • storage_compress_memory: memory used for column-store compression and decompression
  • udf_reserved_memory: memory reserved for the UDF Worker process
  • mmap_used_memory: memory used for mmap
+

memorymbytes

+

integer

+

Size of the used memory (MB)

+
+
+
+
+ +
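For example, to compare used dynamic memory against its maximum on each node, a sample query such as the following can be used (the memorytype filter uses values from the list above):

SELECT nodename, memorytype, memorymbytes
FROM PGXC_TOTAL_MEMORY_DETAIL
WHERE memorytype IN ('max_dynamic_memory', 'dynamic_used_memory')
ORDER BY nodename;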
+ diff --git a/docs/dws/dev/dws_04_0828.html b/docs/dws/dev/dws_04_0828.html new file mode 100644 index 00000000..477f59c5 --- /dev/null +++ b/docs/dws/dev/dws_04_0828.html @@ -0,0 +1,79 @@ + + +

PGXC_TOTAL_SCHEMA_INFO

+

PGXC_TOTAL_SCHEMA_INFO displays the schema space information of all instances in the cluster, providing visibility into the schema space usage of each instance. This view can be queried only on CNs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_TOTAL_SCHEMA_INFO columns

Name

+

Type

+

Description

+

schemaname

+

text

+

Schema name

+

schemaid

+

oid

+

Schema OID

+

databasename

+

text

+

Database name

+

databaseid

+

oid

+

Database OID

+

nodename

+

text

+

Instance name

+

nodegroup

+

text

+

Name of the node group

+

usedspace

+

bigint

+

Size of used space

+

permspace

+

bigint

+

Upper limit of the space

+
+
+
+
+ +
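For example, a query along the following lines flags schemas whose used space is close to the configured upper limit (the 90% threshold is illustrative, and the permspace > 0 filter assumes that only schemas with a configured limit report a positive value):

SELECT schemaname, databasename, nodename, usedspace, permspace
FROM PGXC_TOTAL_SCHEMA_INFO
WHERE permspace > 0 AND usedspace > 0.9 * permspace;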
+ diff --git a/docs/dws/dev/dws_04_0829.html b/docs/dws/dev/dws_04_0829.html new file mode 100644 index 00000000..3dfff8e7 --- /dev/null +++ b/docs/dws/dev/dws_04_0829.html @@ -0,0 +1,72 @@ + + +

PGXC_TOTAL_SCHEMA_INFO_ANALYZE

+

PGXC_TOTAL_SCHEMA_INFO_ANALYZE displays the overall schema space information of the cluster, including the total cluster space, average space of instances, skew ratio, maximum space of a single instance, minimum space of a single instance, and names of the instances with the maximum space and minimum space. It provides visibility into the schema space usage of the entire cluster. This view can be queried only on CNs.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_TOTAL_SCHEMA_INFO_ANALYZE columns

Name

+

Type

+

Description

+

schemaname

+

text

+

Schema name

+

databasename

+

text

+

Database name

+

nodegroup

+

text

+

Name of the node group

+

total_value

+

bigint

+

Total cluster space in the current schema

+

avg_value

+

bigint

+

Average space of instances in the current schema

+

skew_percent

+

integer

+

Skew ratio

+

extend_info

+

text

+

Extended information, including the maximum space of a single instance, the minimum space of a single instance, and the names of the instances with the maximum space and minimum space

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0830.html b/docs/dws/dev/dws_04_0830.html new file mode 100644 index 00000000..9898c1f6 --- /dev/null +++ b/docs/dws/dev/dws_04_0830.html @@ -0,0 +1,79 @@ + + +

PGXC_USER_TRANSACTION

+

PGXC_USER_TRANSACTION provides transaction information about users on all CNs. It is accessible only to users with system administrator rights. This view is valid only when the real-time resource monitoring function is enabled, that is, when enable_resource_track is on.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_USER_TRANSACTION columns

Name

+

Type

+

Description

+

node_name

+

name

+

Node name

+

usename

+

name

+

Username

+

commit_counter

+

bigint

+

Number of commits

+

rollback_counter

+

bigint

+

Number of rollbacks

+

resp_min

+

bigint

+

Minimum response time

+

resp_max

+

bigint

+

Maximum response time

+

resp_avg

+

bigint

+

Average response time

+

resp_total

+

bigint

+

Total response time

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0831.html b/docs/dws/dev/dws_04_0831.html new file mode 100644 index 00000000..626f2580 --- /dev/null +++ b/docs/dws/dev/dws_04_0831.html @@ -0,0 +1,100 @@ + + +

PGXC_VARIABLE_INFO

+

PGXC_VARIABLE_INFO displays information about transaction IDs and OIDs of all nodes in a cluster.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_VARIABLE_INFO columns

Name

+

Type

+

Description

+

node_name

+

text

+

Node name

+

nextOid

+

oid

+

OID generated next time for a node

+

nextXid

+

xid

+

Transaction ID generated next time for a node

+

oldestXid

+

xid

+

Oldest transaction ID for a node

+

xidVacLimit

+

xid

+

Critical point that triggers forcible autovacuum

+

oldestXidDB

+

oid

+

OID of the database that has the minimum datafrozenxid on a node

+

lastExtendCSNLogpage

+

integer

+

Number of the last extended csnlog page

+

startExtendCSNLogpage

+

integer

+

Number of the page from which the csnlog extending starts

+

nextCommitSeqNo

+

integer

+

CSN generated next time for a node

+

latestCompletedXid

+

xid

+

Latest transaction ID on a node after the transaction commission or rollback

+

startupMaxXid

+

xid

+

Last transaction ID before a node is powered off

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0832.html b/docs/dws/dev/dws_04_0832.html new file mode 100644 index 00000000..83b01113 --- /dev/null +++ b/docs/dws/dev/dws_04_0832.html @@ -0,0 +1,11 @@ + + +

PGXC_WAIT_EVENTS

+

PGXC_WAIT_EVENTS displays statistics on the waiting status and events of each node in the cluster. The content is the same as that displayed in GS_WAIT_EVENTS. This view is accessible only to users with system administrator rights.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0836.html b/docs/dws/dev/dws_04_0836.html new file mode 100644 index 00000000..5d2b3280 --- /dev/null +++ b/docs/dws/dev/dws_04_0836.html @@ -0,0 +1,12 @@ + + +

PGXC_WLM_OPERATOR_HISTORY

+

PGXC_WLM_OPERATOR_HISTORY displays the operator information of completed jobs executed on all CNs. This view is used by Database Manager to query data from a database. Data in the database is cleared every 3 minutes.

+

This view is accessible only to users with system administrator rights. For details about columns in the view, see Table 1.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0837.html b/docs/dws/dev/dws_04_0837.html new file mode 100644 index 00000000..8c02d99d --- /dev/null +++ b/docs/dws/dev/dws_04_0837.html @@ -0,0 +1,12 @@ + + +

PGXC_WLM_OPERATOR_INFO

+

PGXC_WLM_OPERATOR_INFO displays the operator information of completed jobs executed on CNs. The data in this view is obtained from GS_WLM_OPERATOR_INFO.

+

This view is accessible only to users with system administrator rights. For details about columns in the view, see Table 1.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0838.html b/docs/dws/dev/dws_04_0838.html new file mode 100644 index 00000000..b8c1d761 --- /dev/null +++ b/docs/dws/dev/dws_04_0838.html @@ -0,0 +1,12 @@ + + +

PGXC_WLM_OPERATOR_STATISTICS

+

PGXC_WLM_OPERATOR_STATISTICS displays the operator information of jobs being executed on CNs.

+

This view is accessible only to users with system administrator rights. For details about columns in the view, see GS_WLM_OPERATOR_STATISTICS columns.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0839.html b/docs/dws/dev/dws_04_0839.html new file mode 100644 index 00000000..8667736c --- /dev/null +++ b/docs/dws/dev/dws_04_0839.html @@ -0,0 +1,12 @@ + + +

PGXC_WLM_SESSION_INFO

+

PGXC_WLM_SESSION_INFO displays load management information for completed jobs executed on all CNs. The data in this view is obtained from GS_WLM_SESSION_INFO.

+

This view is accessible only to users with system administrator rights. For details about columns in the view, see Table 1.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0840.html b/docs/dws/dev/dws_04_0840.html new file mode 100644 index 00000000..9f3e0b8a --- /dev/null +++ b/docs/dws/dev/dws_04_0840.html @@ -0,0 +1,12 @@ + + +

PGXC_WLM_SESSION_HISTORY

+

PGXC_WLM_SESSION_HISTORY displays load management information for completed jobs executed on all CNs. This view is used by Data Manager to query data from a database. Data in the database is cleared every 3 minutes. For details, see GS_WLM_SESSION_HISTORY.

+

This view is accessible only to users with system administrator rights. For details about columns in the view, see Table 1.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0841.html b/docs/dws/dev/dws_04_0841.html new file mode 100644 index 00000000..40ea2ccf --- /dev/null +++ b/docs/dws/dev/dws_04_0841.html @@ -0,0 +1,12 @@ + + +

PGXC_WLM_SESSION_STATISTICS

+

PGXC_WLM_SESSION_STATISTICS displays load management information about jobs that are being executed on CNs.

+

This view is accessible only to users with system administrator rights. For details about columns in the view, see Table 1.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0842.html b/docs/dws/dev/dws_04_0842.html new file mode 100644 index 00000000..56c04cfd --- /dev/null +++ b/docs/dws/dev/dws_04_0842.html @@ -0,0 +1,129 @@ + + +

PGXC_WLM_WORKLOAD_RECORDS

+

PGXC_WLM_WORKLOAD_RECORDS displays the status of jobs executed by the current user on CNs. It is accessible only to users with system administrator rights. This view is available only when enable_dynamic_workload is set to on.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_WLM_WORKLOAD_RECORDS columns

Name

+

Type

+

Description

+

node_name

+

text

+

Name of the CN where the job is executed

+

thread_id

+

bigint

+

ID of the backend thread

+

processid

+

integer

+

Lightweight process ID (lwpid) of the thread

+

timestamp

+

bigint

+

Time when a statement starts to be executed

+

username

+

name

+

Name of the user logging in to the backend

+

memory

+

integer

+

Memory required by a statement

+

active_points

+

integer

+

Number of resources consumed by a statement in a resource pool

+

max_points

+

integer

+

Maximum number of resources in a resource pool

+

priority

+

integer

+

Priority of a job

+

resource_pool

+

text

+

Resource pool to which a job belongs

+

status

+

text

+

Job execution status. Its value can be:

+

pending

+

running

+

finished

+

aborted

+

unknown

+

control_group

+

text

+

Cgroups used by a job

+

enqueue

+

text

+

Queue that a job is in. Its value can be:

+

GLOBAL: global queue

+

RESPOOL: resource pool queue

+

ACTIVE: not in a queue

+

query

+

text

+

Statement that is being executed

+
+
+
+
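Examples
As a usage sketch based on the columns above, statements currently queued for resources can be inspected as follows:
-- Show jobs that are waiting rather than running.
SELECT node_name, username, resource_pool, enqueue, query
FROM PGXC_WLM_WORKLOAD_RECORDS
WHERE status = 'pending';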
+ +
+ diff --git a/docs/dws/dev/dws_04_0843.html b/docs/dws/dev/dws_04_0843.html new file mode 100644 index 00000000..81aea248 --- /dev/null +++ b/docs/dws/dev/dws_04_0843.html @@ -0,0 +1,86 @@ + + +

PGXC_WORKLOAD_SQL_COUNT

+

PGXC_WORKLOAD_SQL_COUNT displays statistics on the number of SQL statements executed in workload Cgroups on all CNs in a cluster, including the number of SELECT, UPDATE, INSERT, and DELETE statements and the number of DDL, DML, and DCL statements. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_WORKLOAD_SQL_COUNT columns

Name

+

Type

+

Description

+

node_name

+

name

+

Node name

+

workload

+

name

+

Workload Cgroup name

+

select_count

+

bigint

+

Number of SELECT statements

+

update_count

+

bigint

+

Number of UPDATE statements

+

insert_count

+

bigint

+

Number of INSERT statements

+

delete_count

+

bigint

+

Number of DELETE statements

+

ddl_count

+

bigint

+

Number of DDL statements

+

dml_count

+

bigint

+

Number of DML statements

+

dcl_count

+

bigint

+

Number of DCL statements

+
+
+
+
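Examples
As a usage sketch based on the columns above, the statement mix per workload Cgroup can be aggregated across all CNs as follows:
-- Summarize statement counts per workload Cgroup.
SELECT workload,
       SUM(select_count) AS selects,
       SUM(dml_count)    AS dml,
       SUM(ddl_count)    AS ddl
FROM PGXC_WORKLOAD_SQL_COUNT
GROUP BY workload;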
+ +
+ diff --git a/docs/dws/dev/dws_04_0844.html b/docs/dws/dev/dws_04_0844.html new file mode 100644 index 00000000..bcb42fd2 --- /dev/null +++ b/docs/dws/dev/dws_04_0844.html @@ -0,0 +1,149 @@ + + +

PGXC_WORKLOAD_SQL_ELAPSE_TIME

+

PGXC_WORKLOAD_SQL_ELAPSE_TIME displays statistics on the response time of SQL statements in workload Cgroups on all CNs in a cluster, including the maximum, minimum, average, and total response time of SELECT, UPDATE, INSERT, and DELETE statements. The unit is microsecond. It is accessible only to users with system administrator rights.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_WORKLOAD_SQL_ELAPSE_TIME columns

Name

+

Type

+

Description

+

node_name

+

name

+

Node name

+

workload

+

name

+

Workload Cgroup name

+

total_select_elapse

+

bigint

+

Total response time of SELECT statements

+

max_select_elapse

+

bigint

+

Maximum response time of SELECT statements

+

min_select_elapse

+

bigint

+

Minimum response time of SELECT statements

+

avg_select_elapse

+

bigint

+

Average response time of SELECT statements

+

total_update_elapse

+

bigint

+

Total response time of UPDATE statements

+

max_update_elapse

+

bigint

+

Maximum response time of UPDATE statements

+

min_update_elapse

+

bigint

+

Minimum response time of UPDATE statements

+

avg_update_elapse

+

bigint

+

Average response time of UPDATE statements

+

total_insert_elapse

+

bigint

+

Total response time of INSERT statements

+

max_insert_elapse

+

bigint

+

Maximum response time of INSERT statements

+

min_insert_elapse

+

bigint

+

Minimum response time of INSERT statements

+

avg_insert_elapse

+

bigint

+

Average response time of INSERT statements

+

total_delete_elapse

+

bigint

+

Total response time of DELETE statements

+

max_delete_elapse

+

bigint

+

Maximum response time of DELETE statements

+

min_delete_elapse

+

bigint

+

Minimum response time of DELETE statements

+

avg_delete_elapse

+

bigint

+

Average response time of DELETE statements

+
+
+
+
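Examples
As a usage sketch based on the columns above (times are in microseconds), the workload Cgroups with the slowest SELECT statements can be located as follows:
-- Rank Cgroups by worst-case SELECT response time.
SELECT node_name, workload, avg_select_elapse, max_select_elapse
FROM PGXC_WORKLOAD_SQL_ELAPSE_TIME
ORDER BY max_select_elapse DESC;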
+ +
+ diff --git a/docs/dws/dev/dws_04_0845.html b/docs/dws/dev/dws_04_0845.html new file mode 100644 index 00000000..2eaa5639 --- /dev/null +++ b/docs/dws/dev/dws_04_0845.html @@ -0,0 +1,79 @@ + + +

PGXC_WORKLOAD_TRANSACTION

+

PGXC_WORKLOAD_TRANSACTION provides transaction information about workload Cgroups on all CNs. It is accessible only to users with system administrator rights. This view is valid only when the real-time resource monitoring function is enabled, that is, when enable_resource_track is on.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PGXC_WORKLOAD_TRANSACTION columns

Name

+

Type

+

Description

+

node_name

+

name

+

Node name

+

workload

+

name

+

Workload Cgroup name

+

commit_counter

+

bigint

+

Number of commits

+

rollback_counter

+

bigint

+

Number of rollbacks

+

resp_min

+

bigint

+

Minimum response time (unit: μs)

+

resp_max

+

bigint

+

Maximum response time (unit: μs)

+

resp_avg

+

bigint

+

Average response time (unit: μs)

+

resp_total

+

bigint

+

Total response time (unit: μs)

+
+
+
+
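Examples
As a usage sketch based on the columns above, commits and rollbacks per workload Cgroup can be totaled across CNs as follows:
-- Compare commit and rollback volumes per Cgroup.
SELECT workload,
       SUM(commit_counter)   AS commits,
       SUM(rollback_counter) AS rollbacks
FROM PGXC_WORKLOAD_TRANSACTION
GROUP BY workload;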
+ +
+ diff --git a/docs/dws/dev/dws_04_0846.html b/docs/dws/dev/dws_04_0846.html new file mode 100644 index 00000000..7919df7b --- /dev/null +++ b/docs/dws/dev/dws_04_0846.html @@ -0,0 +1,88 @@ + + +

PLAN_TABLE

+

PLAN_TABLE displays the plan information collected by EXPLAIN PLAN. Plan information has a session-level life cycle: after the session exits, its data is deleted. Data is isolated between sessions and between users.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PLAN_TABLE columns

Name

+

Type

+

Description

+

statement_id

+

varchar2(30)

+

Query tag specified by a user

+

plan_id

+

bigint

+

ID of a plan to be queried

+

id

+

int

+

ID of each operator in a generated plan

+

operation

+

varchar2(30)

+

Operation description of an operator in a plan

+

options

+

varchar2(255)

+

Operation parameters

+

object_name

+

name

+

Name of an operated object. It is defined by users, not the object alias used in the query.

+

object_type

+

varchar2(30)

+

Object type

+

object_owner

+

name

+

User-defined schema to which an object belongs

+

projection

+

varchar2(4000)

+

Returned column information

+
+
+
  • A valid object_type value consists of a relkind type defined in PG_CLASS (TABLE for an ordinary table, INDEX, SEQUENCE, VIEW, FOREIGN TABLE, COMPOSITE TYPE, or TOASTVALUE for a TOAST table) and the rtekind type used in the plan (SUBQUERY, JOIN, FUNCTION, VALUES, CTE, or REMOTE_QUERY).
  • For RangeTableEntry (RTE), object_owner is the object description used in the plan. Non-user-defined objects do not have object_owner.
  • Information in the statement_id, object_name, object_owner, and projection columns is stored in letter cases specified by users and information in other columns is stored in uppercase.
  • PLAN_TABLE supports only SELECT and DELETE and does not support other DML operations.
+
+
+
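Examples
A typical usage sketch first collects a plan with EXPLAIN PLAN and then reads it back (t1 is a hypothetical table; replace it with your own):
-- Collect a plan under a user-specified statement_id.
EXPLAIN PLAN SET STATEMENT_ID = 'plan_demo' FOR SELECT * FROM t1;
-- Read the collected operators back in plan order.
SELECT statement_id, id, operation, object_name
FROM PLAN_TABLE
WHERE statement_id = 'plan_demo'
ORDER BY plan_id, id;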
+ +
+ diff --git a/docs/dws/dev/dws_04_0847.html b/docs/dws/dev/dws_04_0847.html new file mode 100644 index 00000000..e90c7bf7 --- /dev/null +++ b/docs/dws/dev/dws_04_0847.html @@ -0,0 +1,102 @@ + + +

PLAN_TABLE_DATA

+

PLAN_TABLE_DATA displays the plan information collected by EXPLAIN PLAN. Different from the PLAN_TABLE view, the system catalog PLAN_TABLE_DATA stores the plan information collected by all sessions and users.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PLAN_TABLE_DATA columns

Name

+

Type

+

Description

+

session_id

+

text

+

Session that inserts the data. Its value consists of a service thread start timestamp and a service thread ID. Values are constrained by NOT NULL.

+

user_id

+

oid

+

User who inserts the data. Values are constrained by NOT NULL.

+

statement_id

+

varchar2(30)

+

Query tag specified by a user

+

plan_id

+

bigint

+

ID of a plan to be queried

+

id

+

int

+

Node ID in a plan

+

operation

+

varchar2(30)

+

Operation description

+

options

+

varchar2(255)

+

Operation parameters

+

object_name

+

name

+

Name of an operated object. It is defined by users.

+

object_type

+

varchar2(30)

+

Object type

+

object_owner

+

name

+

User-defined schema to which an object belongs

+

projection

+

varchar2(4000)

+

Returned column information

+
+
+
  • PLAN_TABLE_DATA records data of all users and sessions on the current node. Only administrators can access all the data. Common users can view only their own data in the PLAN_TABLE view.
  • Data of inactive (exited) sessions is cleaned from PLAN_TABLE_DATA by gs_clean after being stored in this system catalog for a certain period of time (5 minutes by default). You can also manually run gs_clean -C to delete inactive session data from the table.
  • Data is automatically inserted into PLAN_TABLE_DATA after EXPLAIN PLAN is executed. Therefore, do not manually insert data into or update data in PLAN_TABLE_DATA. Otherwise, data in PLAN_TABLE_DATA may be disordered. To delete data from PLAN_TABLE_DATA, you are advised to use the PLAN_TABLE view.
  • Information in the statement_id, object_name, object_owner, and projection columns is stored in letter cases specified by users and information in other columns is stored in uppercase.
+
+
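Examples
As an administrator, plan data across all sessions can be browsed directly, for example (a read-only sketch; as noted above, query rather than modify this catalog):
-- Browse collected plans grouped by session.
SELECT session_id, user_id, statement_id, operation, object_name
FROM PLAN_TABLE_DATA
ORDER BY session_id, plan_id, id;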
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0848.html b/docs/dws/dev/dws_04_0848.html new file mode 100644 index 00000000..bc6c296d --- /dev/null +++ b/docs/dws/dev/dws_04_0848.html @@ -0,0 +1,114 @@ + + +

PV_FILE_STAT

+

PV_FILE_STAT collects statistics about data file I/O and displays the I/O performance of data files, helping detect performance problems such as abnormal I/O operations.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_FILE_STAT columns

Name

+

Type

+

Description

+

filenum

+

oid

+

File ID

+

dbid

+

oid

+

Database ID

+

spcid

+

oid

+

Tablespace ID

+

phyrds

+

bigint

+

Number of physical file reads

+

phywrts

+

bigint

+

Number of physical file writes

+

phyblkrd

+

bigint

+

Number of physical file block reads

+

phyblkwrt

+

bigint

+

Number of physical file block writes

+

readtim

+

bigint

+

Total duration of reading files. The unit is microsecond.

+

writetim

+

bigint

+

Total duration of writing files. The unit is microsecond.

+

avgiotim

+

bigint

+

Average duration of reading and writing files. The unit is microsecond.

+

lstiotim

+

bigint

+

Duration of the last file reading. The unit is microsecond.

+

miniotim

+

bigint

+

Minimum duration of reading and writing files. The unit is microsecond.

+

maxiowtm

+

bigint

+

Maximum duration of reading and writing files. The unit is microsecond.

+
+
+
+
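Examples
As a usage sketch based on the columns above (durations are in microseconds), the files with the heaviest I/O load on the current node can be identified as follows:
-- Top 10 files by combined read and write time.
SELECT filenum, dbid, phyrds, phywrts, readtim, writetim
FROM PV_FILE_STAT
ORDER BY (readtim + writetim) DESC
LIMIT 10;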
+ +
+ diff --git a/docs/dws/dev/dws_04_0849.html b/docs/dws/dev/dws_04_0849.html new file mode 100644 index 00000000..db6f7731 --- /dev/null +++ b/docs/dws/dev/dws_04_0849.html @@ -0,0 +1,46 @@ + + +

PV_INSTANCE_TIME

+

PV_INSTANCE_TIME collects statistics on the running time of processes and the time consumed in each execution phase, in microseconds.

+

PV_INSTANCE_TIME records time consumption information of the current node. The time consumption information is classified into the following types:

+ + +
+ + + + + + + + + + + + + + + + + +
Table 1 PV_INSTANCE_TIME columns

Name

+

Type

+

Description

+

stat_id

+

integer

+

Type ID

+

stat_name

+

text

+

Running time type name

+

value

+

bigint

+

Running time value

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0850.html b/docs/dws/dev/dws_04_0850.html new file mode 100644 index 00000000..40412760 --- /dev/null +++ b/docs/dws/dev/dws_04_0850.html @@ -0,0 +1,58 @@ + + +

PV_OS_RUN_INFO

+

PV_OS_RUN_INFO displays the running status of the current operating system.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_OS_RUN_INFO columns

Name

+

Type

+

Description

+

id

+

integer

+

ID

+

name

+

text

+

Name of the OS running status

+

value

+

numeric

+

Value of the OS running status

+

comments

+

text

+

Remarks of the OS running status

+

cumulative

+

boolean

+

Whether the value of the OS running status is cumulative

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0851.html b/docs/dws/dev/dws_04_0851.html new file mode 100644 index 00000000..4a9db76b --- /dev/null +++ b/docs/dws/dev/dws_04_0851.html @@ -0,0 +1,51 @@ + + +

PV_SESSION_MEMORY

+

PV_SESSION_MEMORY displays statistics about memory usage at the session level in the unit of MB, including all the memory allocated to Postgres and Stream threads on DNs for jobs currently executed by users.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_SESSION_MEMORY columns

Name

+

Type

+

Description

+

sessid

+

text

+

Thread start time and ID

+

init_mem

+

integer

+

Memory allocated to the currently executed task before the task enters the executor, in MB

+

used_mem

+

integer

+

Memory allocated to the currently executed task, in MB

+

peak_mem

+

integer

+

Peak memory allocated to the currently executed task, in MB

+
+
+
+
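Examples
As a usage sketch based on the columns above, the sessions with the highest peak memory usage on the current DN can be listed as follows:
-- Top 10 sessions by peak memory, in MB.
SELECT sessid, init_mem, used_mem, peak_mem
FROM PV_SESSION_MEMORY
ORDER BY peak_mem DESC
LIMIT 10;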
+ +
+ diff --git a/docs/dws/dev/dws_04_0852.html b/docs/dws/dev/dws_04_0852.html new file mode 100644 index 00000000..e96fc1e7 --- /dev/null +++ b/docs/dws/dev/dws_04_0852.html @@ -0,0 +1,134 @@ + + +

PV_SESSION_MEMORY_DETAIL

+

PV_SESSION_MEMORY_DETAIL displays statistics about thread memory usage by memory context.

+

The memory context TempSmallContextGroup aggregates, for the current thread, all memory contexts whose totalsize is less than 8192 bytes. In this row, the totalsize and freesize columns are the totals across those small memory contexts, and the usedsize column records how many memory contexts were aggregated.

+

You can run the SELECT * FROM pv_session_memctx_detail(threadid,''); statement to record information about all memory contexts of a thread into the threadid_timestamp.log file in the /tmp/dumpmem directory. threadid can be obtained from the sessid column in the following table.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_SESSION_MEMORY_DETAIL columns

Name

+

Type

+

Description

+

sessid

+

text

+

Thread start time+thread ID (string: timestamp.threadid)

+

sesstype

+

text

+

Thread name

+

contextname

+

text

+

Name of the memory context

+

level

+

smallint

+

Hierarchy of the memory context

+

parent

+

text

+

Name of the parent memory context

+

totalsize

+

bigint

+

Total size of the memory context, in bytes

+

freesize

+

bigint

+

Total size of released memory in the memory context, in bytes

+

usedsize

+

bigint

+

Size of used memory in the memory context, in bytes. For TempSmallContextGroup, this parameter specifies the number of collected memory contexts.

+
+
+

Examples

Query the usage of all MemoryContexts on the current node.

+

Locate the thread in which the MemoryContext is created and used based on sessid. Check whether the memory usage meets the expectation based on totalsize, freesize, and usedsize to see whether memory leakage may occur.

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
SELECT * FROM PV_SESSION_MEMORY_DETAIL order by totalsize desc;
+           sessid           |        sesstype         |                 contextname                 | level |            parent            | totalsize | freesize | usedsize
+----------------------------+-------------------------+---------------------------------------------+-------+------------------------------+-----------+----------+----------
+ 0.139975915622720          | postmaster              | gs_signal                                   |     1 | TopMemoryContext             |  17209904 |  8081136 |  9128768
+ 1667462258.139973631031040 | postgres                | SRF multi-call context                      |     5 | FunctionScan_139973631031040 |   1725504 |     3168 |  1722336
+ 1667461280.139973666686720 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   284456 |  1188088
+ 1667450443.139973877479168 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   356088 |  1116456
+ 1667462258.139973631031040 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   128216 |  1344328
+ 1667461250.139973915236096 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   226352 |  1246192
+ 1667450439.139974010144512 | WLMarbiter              | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   386736 |  1085808
+ 1667450439.139974151726848 | WDRSnapshot             | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   159720 |  1312824
+ 1667450439.139974026925824 | WLMmonitor              | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   297976 |  1174568
+ 1667451036.139973746386688 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   208064 |  1264480
+ 1667461250.139973950891776 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   270016 |  1202528
+ 1667450439.139974076212992 | WLMCalSpaceInfo         | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   393952 |  1078592
+ 1667450439.139974092994304 | WLMCollectWorker        | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |    94848 |  1377696
+ 1667461254.139973971343104 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   338544 |  1134000
+ 1667461280.139973822945024 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   284456 |  1188088
+ 1667450439.139974202070784 | JobScheduler            | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   216728 |  1255816
+ 1667450454.139973860697856 | postgres                | CacheMemoryContext                          |     1 | TopMemoryContext             |   1472544 |   388384 |  1084160
+ 0.139975915622720          | postmaster              | Postmaster                                  |     1 | TopMemoryContext             |   1004288 |    88792 |   915496
+ 1667450439.139974218852096 | AutoVacLauncher         | CacheMemoryContext                          |     1 | TopMemoryContext             |    948256 |   183488 |   764768
+ 1667461250.139973915236096 | postgres                | TempSmallContextGroup                       |     0 |                              |    584448 |   148032 |      119
+ 1667462258.139973631031040 | postgres                | TempSmallContextGroup                       |     0 |                              |    579712 |   162128 |      123
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0853.html b/docs/dws/dev/dws_04_0853.html new file mode 100644 index 00000000..6cf30d30 --- /dev/null +++ b/docs/dws/dev/dws_04_0853.html @@ -0,0 +1,58 @@ + + +

PV_SESSION_STAT

+

PV_SESSION_STAT displays session state statistics based on session threads or the AutoVacuum thread.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_SESSION_STAT columns

Name

+

Type

+

Description

+

sessid

+

text

+

Thread ID and start time

+

statid

+

integer

+

Statistics ID

+

statname

+

text

+

Name of the statistics session

+

statunit

+

text

+

Unit of the statistics session

+

value

+

bigint

+

Value of the statistics session

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0854.html b/docs/dws/dev/dws_04_0854.html new file mode 100644 index 00000000..6a16674b --- /dev/null +++ b/docs/dws/dev/dws_04_0854.html @@ -0,0 +1,51 @@ + + +

PV_SESSION_TIME

+

PV_SESSION_TIME displays statistics about the running time of session threads and time consumed in each execution phase, in microseconds.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_SESSION_TIME columns

Name

+

Type

+

Description

+

sessid

+

text

+

Thread ID and start time

+

stat_id

+

integer

+

Statistics ID

+

stat_name

+

text

+

Running time type name

+

value

+

bigint

+

Running time value

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0855.html b/docs/dws/dev/dws_04_0855.html new file mode 100644 index 00000000..9065afdc --- /dev/null +++ b/docs/dws/dev/dws_04_0855.html @@ -0,0 +1,45 @@ + + +

PV_TOTAL_MEMORY_DETAIL

+

PV_TOTAL_MEMORY_DETAIL displays statistics about memory usage of the current database node in the unit of MB.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 PV_TOTAL_MEMORY_DETAIL columns

Name

+

Type

+

Description

+

nodename

+

text

+

Node name

+

memorytype

+

text

+

Memory type. Its value can be:

+
  • max_process_memory: memory used by a GaussDB(DWS) cluster instance
  • process_used_memory: memory used by a GaussDB(DWS) process
  • max_dynamic_memory: maximum dynamic memory
  • dynamic_used_memory: used dynamic memory
  • dynamic_peak_memory: dynamic peak value of the memory
  • dynamic_used_shrctx: maximum dynamic shared memory context
  • dynamic_peak_shrctx: dynamic peak value of the shared memory context
  • max_shared_memory: maximum shared memory
  • shared_used_memory: used shared memory
  • max_cstore_memory: maximum memory allowed for column store
  • cstore_used_memory: memory used for column store
  • max_sctpcomm_memory: maximum memory allowed for the communication library
  • sctpcomm_used_memory: memory used for the communication library
  • sctpcomm_peak_memory: memory peak of the communication library
  • other_used_memory: other used memory
  • gpu_max_dynamic_memory: maximum GPU memory
  • gpu_dynamic_used_memory: sum of the available GPU memory and temporary GPU memory
  • gpu_dynamic_peak_memory: maximum memory used for GPU
  • pooler_conn_memory: memory used for pooler connections
  • pooler_freeconn_memory: memory used for idle pooler connections
  • storage_compress_memory: memory used for column-store compression and decompression
  • udf_reserved_memory: memory reserved for the UDF Worker process
  • mmap_used_memory: memory used for mmap
+

memorymbytes

+

integer

+

Size of the allocated memory of this type, in MB

+
+
+
+
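Examples
As a usage sketch based on the memory types listed above, dynamic memory usage on the current node can be checked against its limit as follows:
-- Compare used and peak dynamic memory against the maximum.
SELECT nodename, memorytype, memorymbytes
FROM PV_TOTAL_MEMORY_DETAIL
WHERE memorytype IN ('max_dynamic_memory', 'dynamic_used_memory', 'dynamic_peak_memory');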
+ +
+ diff --git a/docs/dws/dev/dws_04_0856.html b/docs/dws/dev/dws_04_0856.html new file mode 100644 index 00000000..13098894 --- /dev/null +++ b/docs/dws/dev/dws_04_0856.html @@ -0,0 +1,72 @@ + + +

PV_REDO_STAT

+

PV_REDO_STAT displays statistics on redoing Xlogs on the current node.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 PV_REDO_STAT columns

Name

+

Type

+

Description

+

phywrts

+

bigint

+

Number of physical writes

+

phyblkwrt

+

bigint

+

Number of physical write blocks

+

writetim

+

bigint

+

Time consumed by physical writes

+

avgiotim

+

bigint

+

Average time for each write

+

lstiotim

+

bigint

+

Last write time

+

miniotim

+

bigint

+

Minimum write time

+

maxiowtm

+

bigint

+

Maximum write time

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0857.html b/docs/dws/dev/dws_04_0857.html new file mode 100644 index 00000000..229c3c7b --- /dev/null +++ b/docs/dws/dev/dws_04_0857.html @@ -0,0 +1,107 @@ + + +

REDACTION_COLUMNS

+

REDACTION_COLUMNS displays information about all redaction columns in the current database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 REDACTION_COLUMNS columns

Name

+

Type

+

Description

+

object_owner

+

name

+

Owner of the object to be redacted.

+

object_name

+

name

+

Redacted object name

+

column_name

+

name

+

Redacted column name

+

function_type

+

integer

+

Redaction type

+

function_parameters

+

text

+

Parameter used when the redaction type is partial (reserved)

+

regexp_pattern

+

text

+

Pattern string when the redaction type is regexp (reserved)

+

regexp_replace_string

+

text

+

Replacement string when the redaction type is regexp (reserved)

+

regexp_position

+

integer

+

Start and end replacement positions when the redaction type is regexp (reserved)

+

regexp_occurrence

+

integer

+

Replacement times when the redaction type is regexp (reserved)

+

regexp_match_parameter

+

text

+

Regular control parameter used when the redaction type is regexp (reserved)

+

function_info

+

text

+

Redaction function information

+

column_description

+

text

+

Description of the redacted column

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0858.html b/docs/dws/dev/dws_04_0858.html new file mode 100644 index 00000000..04c0651c --- /dev/null +++ b/docs/dws/dev/dws_04_0858.html @@ -0,0 +1,65 @@ + + +

REDACTION_POLICIES

+

REDACTION_POLICIES displays information about all redaction objects in the current database.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 REDACTION_POLICIES columns

Name

+

Type

+

Description

+

object_owner

+

name

+

Owner of the object to be redacted.

+

object_name

+

name

+

Redacted object name

+

policy_name

+

name

+

Name of the redact policy

+

expression

+

text

+

Policy effective expression (for users)

+

enable

+

boolean

+

Policy status (enabled or disabled)

+

policy_description

+

text

+

Description of a policy

+
+
+
+
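Examples
As a usage sketch, redaction policies can be joined with their redacted columns through the object owner and object name (this assumes one policy per redacted object):
-- List each policy together with the columns it redacts.
SELECT p.object_owner, p.object_name, p.policy_name, p.enable,
       c.column_name, c.function_info
FROM REDACTION_POLICIES p
JOIN REDACTION_COLUMNS c
  ON c.object_owner = p.object_owner
 AND c.object_name  = p.object_name;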
+ +
+ diff --git a/docs/dws/dev/dws_04_0859.html b/docs/dws/dev/dws_04_0859.html new file mode 100644 index 00000000..d876f720 --- /dev/null +++ b/docs/dws/dev/dws_04_0859.html @@ -0,0 +1,51 @@ + + +

USER_COL_COMMENTS

+

USER_COL_COMMENTS displays the column comments of the tables accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_COL_COMMENTS columns

Name

+

Type

+

Description

+

column_name

+

character varying(64)

+

Column name

+

table_name

+

character varying(64)

+

Table name

+

owner

+

character varying(64)

+

Table owner

+

comments

+

text

+

Comments

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0860.html b/docs/dws/dev/dws_04_0860.html new file mode 100644 index 00000000..52b552c6 --- /dev/null +++ b/docs/dws/dev/dws_04_0860.html @@ -0,0 +1,59 @@ + + +

USER_CONSTRAINTS

+

USER_CONSTRAINTS displays the table constraint information accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_CONSTRAINTS columns

Name

+

Type

+

Description

+

constraint_name

+

character varying(64)

+

Constraint name

+

constraint_type

+

text

+

Constraint type

+
  • C: Check constraint
  • F: Foreign key constraint
  • P: Primary key constraint
  • U: Unique constraint
+

table_name

+

character varying(64)

+

Name of constraint-related table

+

index_owner

+

character varying(64)

+

Owner of constraint-related index (only for the unique constraint and primary key constraint)

+

index_name

+

character varying(64)

+

Name of constraint-related index (only for the unique constraint and primary key constraint)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0861.html b/docs/dws/dev/dws_04_0861.html new file mode 100644 index 00000000..373e661c --- /dev/null +++ b/docs/dws/dev/dws_04_0861.html @@ -0,0 +1,51 @@ + + +

USER_CONS_COLUMNS

+

USER_CONS_COLUMNS displays information about constraint columns of the tables accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_CONS_COLUMNS columns

Name

+

Type

+

Description

+

table_name

+

character varying(64)

+

Name of constraint-related table

+

column_name

+

character varying(64)

+

Name of constraint-related column

+

constraint_name

+

character varying(64)

+

Constraint name

+

position

+

smallint

+

Position of the column in the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0862.html b/docs/dws/dev/dws_04_0862.html new file mode 100644 index 00000000..8da2d0d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0862.html @@ -0,0 +1,65 @@ + + +

USER_INDEXES

+

USER_INDEXES displays index information in the current schema.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_INDEXES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_name

+

character varying(64)

+

Table name for the index

+

uniqueness

+

text

+

Whether the index is a unique index

+

generated

+

character varying(1)

+

Whether the index name is generated by the system

+

partitioned

+

character(3)

+

Whether the index has the property of a partitioned table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0863.html b/docs/dws/dev/dws_04_0863.html new file mode 100644 index 00000000..d395ee09 --- /dev/null +++ b/docs/dws/dev/dws_04_0863.html @@ -0,0 +1,65 @@ + + +

USER_IND_COLUMNS

+

USER_IND_COLUMNS displays column information about all indexes accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_IND_COLUMNS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_name

+

name

+

Column name

+

column_position

+

smallint

+

Position of column in the index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0864.html b/docs/dws/dev/dws_04_0864.html new file mode 100644 index 00000000..c23ce27d --- /dev/null +++ b/docs/dws/dev/dws_04_0864.html @@ -0,0 +1,65 @@ + + +

USER_IND_EXPRESSIONS

+

USER_IND_EXPRESSIONS displays information about the function-based expression index accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_IND_EXPRESSIONS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Index owner

+

index_name

+

character varying(64)

+

Index name

+

table_owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_expression

+

text

+

Function-based index expression of a specified column

+

column_position

+

smallint

+

Position of column in the index

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0865.html b/docs/dws/dev/dws_04_0865.html new file mode 100644 index 00000000..a5718d15 --- /dev/null +++ b/docs/dws/dev/dws_04_0865.html @@ -0,0 +1,72 @@ + + +

USER_IND_PARTITIONS

+

USER_IND_PARTITIONS displays information about index partitions accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_IND_PARTITIONS columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Name of the owner of the partitioned table index to which the index partition belongs

+

schema

+

character varying(64)

+

Schema of the partitioned table index to which the index partition belongs

+

index_name

+

character varying(64)

+

Name of the partitioned table index to which the index partition belongs

+

partition_name

+

character varying(64)

+

Name of the index partition

+

index_partition_usable

+

boolean

+

Whether the index partition is available

+

high_value

+

text

+

Upper limit of the partition corresponding to the index partition

+

def_tablespace_name

+

name

+

Name of the tablespace of the index partition

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0866.html b/docs/dws/dev/dws_04_0866.html new file mode 100644 index 00000000..be659d7e --- /dev/null +++ b/docs/dws/dev/dws_04_0866.html @@ -0,0 +1,145 @@ + + +

USER_JOBS

+

USER_JOBS displays all jobs owned by the user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_JOBS columns

Name

+

Type

+

Description

+

job

+

int4

+

Job ID

+

log_user

+

name not null

+

User name of the job creator

+

priv_user

+

name not null

+

User name of the job executor

+

dbname

+

name not null

+

Database in which the job is created

+

start_date

+

timestamp without time zone

+

Job start time

+

start_suc

+

text

+

Start time of the successful job execution

+

last_date

+

timestamp without time zone

+

Start time of the last job execution

+

last_suc

+

text

+

Start time of the last successful job execution

+

this_date

+

timestamp without time zone

+

Start time of the ongoing job execution

+

this_suc

+

text

+

Same as this_date

+

next_date

+

timestamp without time zone

+

Schedule time of the next job execution

+

next_suc

+

text

+

Same as next_date

+

broken

+

text

+

Task status

+

Y: The system does not attempt to execute the task.

+

N: The system attempts to execute the task.

+

status

+

char

+

Status of the current job. The value range is 'r', 's', 'f', 'd'. The default value is 's'. The indications are as follows:

+
  • r: running
  • s: finished
  • f: failed
  • d: aborted
+

interval

+

text

+

Time expression used to calculate the next execution time. If this parameter is set to null, the job will be executed once only.

+

failures

+

smallint

+

Number of times the job has started and failed. If a job fails 16 consecutive times, no further attempts will be made on it.

+

what

+

text

+

Body of the PL/SQL block or anonymous block that the job executes

+
+
+
+
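Examples
As a usage sketch based on the columns above, jobs that are broken or have accumulated failures can be found as follows:
-- List jobs the scheduler has stopped retrying or that have failed at least once.
SELECT job, dbname, next_date, failures, what
FROM USER_JOBS
WHERE broken = 'Y' OR failures > 0;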
+ +
+ diff --git a/docs/dws/dev/dws_04_0867.html b/docs/dws/dev/dws_04_0867.html new file mode 100644 index 00000000..d9d78471 --- /dev/null +++ b/docs/dws/dev/dws_04_0867.html @@ -0,0 +1,67 @@ + + +

USER_OBJECTS

+

USER_OBJECTS displays all database objects accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_OBJECTS columns

Name

+

Type

+

Description

+

object_name

+

name

+

Object name

+

object_id

+

oid

+

OID of the object

+

object_type

+

name

+

Type of the object (TABLE, INDEX, SEQUENCE, or VIEW)

+

namespace

+

oid

+

Namespace that the object belongs to

+

created

+

timestamp with time zone

+

Object creation time

+

last_ddl_time

+

timestamp with time zone

+

The last time when an object was modified.

+
+
+

For details about the value ranges of created and last_ddl_time, see PG_OBJECT.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0868.html b/docs/dws/dev/dws_04_0868.html new file mode 100644 index 00000000..b3613872 --- /dev/null +++ b/docs/dws/dev/dws_04_0868.html @@ -0,0 +1,79 @@ + + +

USER_PART_INDEXES

+

USER_PART_INDEXES displays information about partitioned table indexes accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_PART_INDEXES columns

Name

+

Type

+

Description

+

index_owner

+

character varying(64)

+

Name of the owner of the partitioned table index

+

schema

+

character varying(64)

+

Schema of the partitioned table index

+

index_name

+

character varying(64)

+

Name of the partitioned table index

+

table_name

+

character varying(64)

+

Name of the partitioned table to which the partitioned table index belongs

+

partitioning_type

+

text

+

Partition policy of the partitioned table

+

partition_count

+

bigint

+

Number of index partitions of the partitioned table index

+

def_tablespace_name

+

name

+

Name of the tablespace of the partitioned table index

+

partitioning_key_count

+

integer

+

Number of partition keys of the partitioned table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0869.html b/docs/dws/dev/dws_04_0869.html new file mode 100644 index 00000000..c21b9d63 --- /dev/null +++ b/docs/dws/dev/dws_04_0869.html @@ -0,0 +1,72 @@ + + +

USER_PART_TABLES

+

USER_PART_TABLES displays information about partitioned tables accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_PART_TABLES columns

Name

+

Type

+

Description

+

table_owner

+

character varying(64)

+

Name of the owner of the partitioned table

+

schema

+

character varying(64)

+

Schema of the partitioned table

+

table_name

+

character varying(64)

+

Name of the partitioned table

+

partitioning_type

+

text

+

Partition policy of the partitioned table

+

partition_count

+

bigint

+

Number of partitions of the partitioned table

+

def_tablespace_name

+

name

+

Name of the tablespace of the partitioned table

+

partitioning_key_count

+

integer

+

Number of partition keys of the partitioned table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0870.html b/docs/dws/dev/dws_04_0870.html new file mode 100644 index 00000000..55ec0d19 --- /dev/null +++ b/docs/dws/dev/dws_04_0870.html @@ -0,0 +1,44 @@ + + +

USER_PROCEDURES

+

USER_PROCEDURES displays information about all stored procedures and functions in the current schema.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 USER_PROCEDURES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the stored procedure or the function

+

object_name

+

character varying(64)

+

Name of the stored procedure or the function

+

argument_number

+

smallint

+

Number of the input parameters in the stored procedure

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0871.html b/docs/dws/dev/dws_04_0871.html new file mode 100644 index 00000000..308f9cfd --- /dev/null +++ b/docs/dws/dev/dws_04_0871.html @@ -0,0 +1,37 @@ + + +

USER_SEQUENCES

+

USER_SEQUENCES displays sequence information in the current schema.

+ +
+ + + + + + + + + + + + + +
Table 1 USER_SEQUENCES columns

Name

+

Type

+

Description

+

sequence_owner

+

character varying(64)

+

Owner of the sequence

+

sequence_name

+

character varying(64)

+

Name of the sequence

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0872.html b/docs/dws/dev/dws_04_0872.html new file mode 100644 index 00000000..3c684c1f --- /dev/null +++ b/docs/dws/dev/dws_04_0872.html @@ -0,0 +1,44 @@ + + +

USER_SOURCE

+

USER_SOURCE displays information about stored procedures and functions in the current schema, including their definitions.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 USER_SOURCE columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the stored procedure or the function

+

name

+

character varying(64)

+

Name of the stored procedure or the function

+

text

+

text

+

Definition of the stored procedure or the function

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0873.html b/docs/dws/dev/dws_04_0873.html new file mode 100644 index 00000000..c22de99d --- /dev/null +++ b/docs/dws/dev/dws_04_0873.html @@ -0,0 +1,58 @@ + + +

USER_SYNONYMS

+

USER_SYNONYMS displays synonyms accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_SYNONYMS columns

Name

+

Type

+

Description

+

schema_name

+

text

+

Name of the schema to which the synonym belongs.

+

synonym_name

+

text

+

Synonym name.

+

table_owner

+

text

+

Owner of the associated object.

+

table_schema_name

+

text

+

Schema name of the associated object.

+

table_name

+

text

+

Name of the associated object.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0874.html b/docs/dws/dev/dws_04_0874.html new file mode 100644 index 00000000..1d138288 --- /dev/null +++ b/docs/dws/dev/dws_04_0874.html @@ -0,0 +1,107 @@ + + +

USER_TAB_COLUMNS

+

USER_TAB_COLUMNS displays information about table columns accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_TAB_COLUMNS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

column_name

+

character varying(64)

+

Column name

+

data_type

+

character varying(128)

+

Data type of the column

+

column_id

+

integer

+

Sequence number of the column when the table is created

+

data_length

+

integer

+

Length of the column in the unit of bytes

+

comments

+

text

+

Comments

+

avg_col_len

+

numeric

+

Average length of a column in the unit of bytes

+

nullable

+

bpchar

+

Whether the column can be empty. For the primary key constraint and non-null constraint, the value is n.

+

data_precision

+

integer

+

Precision of the data type. This parameter is valid for the numeric data type and NULL for other types.

+

data_scale

+

integer

+

Number of decimal places. This parameter is valid for the numeric data type and 0 for other types.

+

char_length

+

numeric

+

Column length in the unit of bytes which is valid only for the varchar, nvarchar2, bpchar, and char types.

+
+
+
+
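Examples
As a usage sketch based on the columns above, the column definitions of a single table can be listed as follows (my_table is a hypothetical table name; use the letter case in which the name is actually stored):
-- Describe one table's columns in creation order.
SELECT column_name, data_type, data_length, nullable
FROM USER_TAB_COLUMNS
WHERE table_name = 'my_table'
ORDER BY column_id;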
+ +
+ diff --git a/docs/dws/dev/dws_04_0875.html b/docs/dws/dev/dws_04_0875.html new file mode 100644 index 00000000..3c458567 --- /dev/null +++ b/docs/dws/dev/dws_04_0875.html @@ -0,0 +1,44 @@ + + +

USER_TAB_COMMENTS

+

USER_TAB_COMMENTS displays comments about all tables and views accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 USER_TAB_COMMENTS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the table or the view

+

table_name

+

character varying(64)

+

Name of the table or the view

+

comments

+

text

+

Comments

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0876.html b/docs/dws/dev/dws_04_0876.html new file mode 100644 index 00000000..3a398772 --- /dev/null +++ b/docs/dws/dev/dws_04_0876.html @@ -0,0 +1,65 @@ + + +

USER_TAB_PARTITIONS

+

USER_TAB_PARTITIONS displays all table partitions accessible to the current user. Each partition of a partitioned table accessible to the current user has one record in USER_TAB_PARTITIONS.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_TAB_PARTITIONS columns

Name

+

Type

+

Description

+

table_owner

+

character varying(64)

+

Name of the owner of the partitioned table

+

schema

+

character varying(64)

+

Schema of the partitioned table

+

table_name

+

character varying(64)

+

Name of the partitioned table

+

partition_name

+

character varying(64)

+

Name of the table partition

+

high_value

+

text

+

Upper boundary of the table partition

+

tablespace_name

+

name

+

Name of the tablespace of the table partition

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0877.html b/docs/dws/dev/dws_04_0877.html new file mode 100644 index 00000000..259f90ae --- /dev/null +++ b/docs/dws/dev/dws_04_0877.html @@ -0,0 +1,74 @@ + + +

USER_TABLES

+

USER_TABLES displays table information in the current schema.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 USER_TABLES columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Table owner

+

table_name

+

character varying(64)

+

Table name

+

tablespace_name

+

character varying(64)

+

Name of the tablespace where the table is located

+

status

+

character varying(8)

+

Whether the current record is valid

+

temporary

+

character(1)

+

Whether the table is a temporary table

+
  • Y: temporary table
  • N: not a temporary table
+

dropped

+

character varying

+

Whether the current record is deleted

+
  • YES: deleted
  • NO: not deleted
+

num_rows

+

numeric

+

Estimated number of rows in the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0878.html b/docs/dws/dev/dws_04_0878.html new file mode 100644 index 00000000..a5af0aab --- /dev/null +++ b/docs/dws/dev/dws_04_0878.html @@ -0,0 +1,44 @@ + + +

USER_TRIGGERS

+

USER_TRIGGERS displays the information about triggers accessible to the current user.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 USER_TRIGGERS columns

Name

+

Type

+

Description

+

trigger_name

+

character varying(64)

+

Trigger name

+

table_name

+

character varying(64)

+

Name of the table on which the trigger is defined

+

table_owner

+

character varying(64)

+

Name of the role that owns the table

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0879.html b/docs/dws/dev/dws_04_0879.html new file mode 100644 index 00000000..53f262da --- /dev/null +++ b/docs/dws/dev/dws_04_0879.html @@ -0,0 +1,37 @@ + + +

USER_VIEWS

+

USER_VIEWS displays information about all views in the current schema.

+ +
+ + + + + + + + + + + + + +
Table 1 USER_VIEWS columns

Name

+

Type

+

Description

+

owner

+

character varying(64)

+

Owner of the view

+

view_name

+

character varying(64)

+

View name

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0880.html b/docs/dws/dev/dws_04_0880.html new file mode 100644 index 00000000..1922164e --- /dev/null +++ b/docs/dws/dev/dws_04_0880.html @@ -0,0 +1,51 @@ + + +

V$SESSION

+

V$SESSION displays information about the current session.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 V$SESSION columns

Name

+

Type

+

Description

+

sid

+

bigint

+

OID of the background process of the current activity

+

serial#

+

integer

+

Sequence number of the active background process, which is 0 in GaussDB(DWS).

+

user#

+

oid

+

OID of the user that has logged in to the background process

+

username

+

name

+

Name of the user that has logged in to the background process

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0881.html b/docs/dws/dev/dws_04_0881.html new file mode 100644 index 00000000..180b3158 --- /dev/null +++ b/docs/dws/dev/dws_04_0881.html @@ -0,0 +1,51 @@ + + +

V$SESSION_LONGOPS

+

V$SESSION_LONGOPS displays the progress of ongoing operations.

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 V$SESSION_LONGOPS columns

Name

+

Type

+

Description

+

sid

+

bigint

+

OID of the running background process

+

serial#

+

integer

+

Sequence number of the running background process, which is 0 in GaussDB(DWS).

+

sofar

+

integer

+

Completed workload, which is empty in GaussDB(DWS).

+

totalwork

+

integer

+

Total workload, which is empty in GaussDB(DWS).

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0883.html b/docs/dws/dev/dws_04_0883.html new file mode 100644 index 00000000..bcbf51d6 --- /dev/null +++ b/docs/dws/dev/dws_04_0883.html @@ -0,0 +1,61 @@ + + +

GUC Parameters

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0884.html b/docs/dws/dev/dws_04_0884.html new file mode 100644 index 00000000..689555e4 --- /dev/null +++ b/docs/dws/dev/dws_04_0884.html @@ -0,0 +1,37 @@ + + +

Viewing GUC Parameters

+

GaussDB(DWS) GUC parameters can control database system behaviors. You can check and adjust the GUC parameters based on your business scenario and data volume.

+ + +
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0885.html b/docs/dws/dev/dws_04_0885.html new file mode 100644 index 00000000..bbaeb268 --- /dev/null +++ b/docs/dws/dev/dws_04_0885.html @@ -0,0 +1,85 @@ + + +

Configuring GUC Parameters

+

To ensure the optimal performance of GaussDB(DWS), you can adjust the GUC parameters in the database.

+

Parameter Types and Values

+
+

Setting GUC Parameters

You can configure GUC parameters in the following ways:

+ +
+

Procedure

The following example shows how to set explain_perf_mode.

+
  1. View the value of explain_perf_mode.

    1
    +2
    +3
    +4
    +5
    SHOW explain_perf_mode;
    + explain_perf_mode 
    +-------------------
    + normal
    +(1 row)
    +
    + +
    +

  2. Set explain_perf_mode.

    Perform one of the following operations:

    +
    • Set a database-level parameter.
      1
      ALTER DATABASE gaussdb SET explain_perf_mode TO pretty;
      +
      + +
      +

      If the following information is displayed, the setting has been modified.

      +
      ALTER DATABASE
      +

      The setting takes effect in the next session.

      +
    • Set a user-level parameter.
      1
      ALTER USER dbadmin SET explain_perf_mode TO pretty;
      +
      + +
      +

      If the following information is displayed, the setting has been modified.

      +
      ALTER USER
      +

      The setting takes effect in the next session.

      +
    • Set a session-level parameter.
      1
      SET explain_perf_mode TO pretty;
      +
      + +
      +

      If the following information is displayed, the setting has been modified.

      +
      SET
      +
    +

  3. Check whether the parameter is correctly set.

    1
    +2
    +3
    +4
    +5
    SHOW explain_perf_mode;
    + explain_perf_mode
    +--------------
    + pretty
    +(1 row)
    +
    + +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0886.html b/docs/dws/dev/dws_04_0886.html new file mode 100644 index 00000000..f4b470b4 --- /dev/null +++ b/docs/dws/dev/dws_04_0886.html @@ -0,0 +1,14 @@ + + +

GUC Parameter Usage

+

The database provides many operating parameters, and their configuration affects the behavior of the database system. Before modifying a parameter, learn its impact on the database; otherwise, unexpected results may occur.

+

Precautions

+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0888.html b/docs/dws/dev/dws_04_0888.html new file mode 100644 index 00000000..9e7a6fb7 --- /dev/null +++ b/docs/dws/dev/dws_04_0888.html @@ -0,0 +1,19 @@ + + +

Connection and Authentication

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0889.html b/docs/dws/dev/dws_04_0889.html new file mode 100644 index 00000000..d0568bf1 --- /dev/null +++ b/docs/dws/dev/dws_04_0889.html @@ -0,0 +1,43 @@ + + +

Connection Settings

+

This section describes parameters related to the connection mode between the client and server.

+

max_connections

Parameter description: Specifies the maximum number of allowed parallel connections to the database. This parameter influences the concurrent processing capability of the cluster.

+

Type: POSTMASTER

+

Value range: an integer. For CNs, the value ranges from 1 to 16384. For DNs, the value ranges from 1 to 262143. Because there are internal connections in the cluster, the maximum value is rarely reached. If invalid value for parameter "max_connections" is displayed in the log, you need to decrease the max_connections value for DNs.

+

Default value: 800 for CNs and 5000 for DNs. If the default value is greater than the maximum value supported by kernel (determined when the gs_initdb command is executed), an error message will be displayed.

+

Setting suggestions:

+

Retain the default value of this parameter on the CN. Set this parameter on the DN to the following calculation result: Number of CNs x Value of this parameter on the CN.

+

If the parameter is set to a large value, GaussDB(DWS) requires more SystemV shared memories or semaphores, which may exceed the maximum default configuration of the OS. In this case, modify the value as needed.

+

The value of max_connections is related to max_prepared_transactions. Before setting max_connections, ensure that the value of max_prepared_transactions is greater than or equal to that of max_connections. In this way, each session has a prepared transaction in the waiting state.

+
+
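As a quick verification sketch, both values can be checked on the connected CN before adjusting them:
-- Confirm that max_prepared_transactions >= max_connections.
SELECT name, setting
FROM pg_settings
WHERE name IN ('max_connections', 'max_prepared_transactions');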
+

sysadmin_reserved_connections

Parameter description: Specifies the minimum number of connections reserved for administrators.

+

Type: POSTMASTER

+

Value range: an integer ranging from 0 to 262143

+

Default value: 3

+
+

application_name

Parameter description: Specifies the name of the client program connecting to the database.

+

Type: USERSET

+

Value range: a string

+

Default value: gsql

+
+

connection_info

Parameter description: Specifies the database connection information, including the driver type, driver version, driver deployment path, and process owner. (This is an O&M parameter. Do not configure it by yourself.)

+

Type: USERSET

+

Value range: a string

+

Default value: an empty string

+
  • An empty string indicates that the driver connected to the database does not support automatic setting of the connection_info parameter or the parameter is not set by users in applications.
  • The following is an example of the concatenated value of connection_info:
    1
    {"driver_name":"ODBC","driver_version": "(GaussDB 8.1.1 build af002019) compiled at 2020-01-10 05:43:20 commit 6995 last mr 11566 debug","driver_path":"/usr/local/lib/psqlodbcw.so","os_user":"dbadmin"}
    +
    + +
    +

    driver_name and driver_version are displayed by default. Whether driver_path and os_user are displayed is determined by users.

    +
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0890.html b/docs/dws/dev/dws_04_0890.html new file mode 100644 index 00000000..80ac839d --- /dev/null +++ b/docs/dws/dev/dws_04_0890.html @@ -0,0 +1,149 @@ + + +

Security and Authentication (postgresql.conf)

+

This section describes parameters about how to securely authenticate the client and server.

+

authentication_timeout

Parameter description: Specifies the longest duration to wait before the client authentication times out. If a client is not authenticated by the server within the timeout period, the server automatically breaks the connection from the client so that the faulty client does not occupy connection resources.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 600. The minimum unit is second (s).

+

Default value: 1 min

+
+

auth_iteration_count

Parameter description: Specifies the number of interactions during the generation of encryption information for authentication.

+

Type: SIGHUP

+

Value range: an integer ranging from 2048 to 134217728

+

Default value: 50000

+

If this parameter is set to a large value, performance deteriorates in operations involving password encryption, such as authentication and user creation. Set this parameter to an appropriate value based on the hardware conditions.

+
+
+

session_timeout

Parameter description: Specifies the maximum duration a session can remain idle after connecting to the server. When this duration elapses with no operations, the connection is automatically closed.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 86400. The minimum unit is second (s). 0 means to disable the timeout.

+

Default value: 10 min

+
  • The gsql client of GaussDB(DWS) has an automatic reconnection mechanism. If the initialized local connection of a user to the server times out, gsql disconnects from and reconnects to the server.
  • Connections from the pooler connection pool to other CNs and DNs are not controlled by the session_timeout parameter.
+
+
+
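As a sketch, a session that expects long idle periods could relax or disable the timeout for itself (the values below are illustrative):

SET session_timeout = 3600;  -- allow up to one hour of inactivity
SET session_timeout = 0;     -- or disable the idle timeout for this session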

ssl

Parameter description: Specifies whether the SSL connection is enabled.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

GaussDB(DWS) supports the SSL connection when the client connects to CNs. It is recommended that the SSL connection be enabled only on CNs.

+
+

Default value: on

+
+

ssl_ciphers

Parameter description: Specifies the list of encryption algorithms supported by SSL.

+

Type: POSTMASTER

+

Value range: a string. Separate multiple encryption algorithms with semicolons (;).

+

Default value: ALL

+
  • The default value of ssl_ciphers is ALL, indicating that all the following encryption algorithms are supported. Users are advised to retain the default value, unless there are other special requirements on the encryption algorithm.
    • TLS1_3_RFC_AES_128_GCM_SHA256
    • TLS1_3_RFC_AES_256_GCM_SHA384
    • TLS1_3_RFC_CHACHA20_POLY1305_SHA256
    • TLS1_3_RFC_AES_128_CCM_SHA256
    • TLS1_3_RFC_AES_128_CCM_8_SHA256
    +
  • Currently, SSL connection authentication supports only TLS 1.3 cipher suites, which provide better performance and security, and remains compatible with clients that perform SSL connection authentication over TLS 1.2.
+
+
+

ssl_renegotiation_limit

Parameter description: Specifies the traffic volume that can pass over the SSL-encrypted channel before the session key is renegotiated. The renegotiation traffic limit reduces the probability that attackers crack the key by analyzing a huge amount of captured data, but it causes a significant performance loss. The traffic is the sum of sent and received traffic.

+

Type: USERSET

+

You are advised to retain the default value, that is, disable the renegotiation mechanism. You are not advised to use the gs_guc tool or other methods to set the ssl_renegotiation_limit parameter in the postgresql.conf file. The setting does not take effect.

+
+

Value range: an integer ranging from 0 to INT_MAX. The unit is KB. 0 indicates that the renegotiation mechanism is disabled.

+

Default value: 0

+
+

password_policy

Parameter description: Specifies whether to check the password complexity when you run the CREATE ROLE/USER or ALTER ROLE/USER command to create or modify a GaussDB(DWS) account.

+

Type: SIGHUP

+

For security purposes, do not disable the password complexity policy.

+
+

Value range: an integer, 0 or 1

+ +

Default value: 1

+
+

password_reuse_time

Parameter description: Specifies the number of days within which an old password cannot be reused when you run the ALTER USER or ALTER ROLE command to change a user password.

+

Type: SIGHUP

+

When you change the password, the system checks the values of password_reuse_time and password_reuse_max.

+ +
+

Value range: a floating-point number ranging from 0 to 3650. The unit is day.

+ +

Default value: 60

+
+

password_reuse_max

Parameter description: Specifies the number of most recent passwords that cannot be reused when you run the ALTER USER or ALTER ROLE command to change a user password.

+

Type: SIGHUP

+

When you change the password, the system checks the values of password_reuse_time and password_reuse_max.

+ +
+

Value range: an integer ranging from 0 to 1000

+ +

Default value: 0

+
+

password_lock_time

Parameter description: Specifies the duration before an account is automatically unlocked.

+

Type: SIGHUP

+

The locking and unlocking functions take effect only when the values of password_lock_time and failed_login_attempts are positive numbers.

+
+

Value range: a floating-point number ranging from 0 to 365. The unit is day.

+ +

Default value: 1

+
+

failed_login_attempts

Parameter description: Specifies the maximum number of incorrect password attempts before an account is locked. The account is automatically unlocked after the time specified in password_lock_time. Incorrect attempts include failed password entries during login and password verification failures when the ALTER USER command is run.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 1000

+ +

Default value: 10

+
  • The locking and unlocking functions take effect only when the values of failed_login_attempts and password_lock_time are positive numbers.
  • failed_login_attempts works with the SSL connection mode of the client to identify the number of incorrect password attempts. If PGSSLMODE is set to allow or prefer, two connection requests are generated for a password connection request. One request attempts an SSL connection, and the other request attempts a non-SSL connection. In this case, the number of incorrect password attempts perceived by the user is the value of failed_login_attempts divided by 2.
+
+
+

password_encryption_type

Parameter description: Specifies the encryption type of user passwords.

+

Type: SIGHUP

+

Value range: an integer, 0, 1, or 2

+ +
  • MD5 is not recommended because it is not a secure encryption algorithm.
  • If the cluster is upgraded from 8.0.0 or an earlier version to the current version, the default value of this parameter is the same as that of the cluster of the earlier version. For example, the default value of password_encryption_type in 8.0.0 is 1. After the cluster is upgraded from 8.0.0 to 8.1.1, the default value of password_encryption_type remains 1.
+
+

Default value: 2

+
+

password_min_length

Parameter description: Specifies the minimum account password length.

+

Type: SIGHUP

+

Value range: an integer ranging from 6 to 999 (characters).

+

Default value: 8

+
+

password_max_length

Parameter description: Specifies the maximum account password length.

+

Type: SIGHUP

+

Value range: an integer ranging from 6 to 999 (characters).

+

Default value: 32

+
+

password_min_uppercase

Parameter description: Specifies the minimum number of uppercase letters that an account password must contain.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 999.

+ +

Default value: 0

+
+

password_min_lowercase

Parameter description: Specifies the minimum number of lowercase letters that an account password must contain.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 999.

+ +

Default value: 0

+
+

password_min_digital

Parameter description: Specifies the minimum number of digits that an account password must contain.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 999.

+ +

Default value: 0

+
+

password_min_special

Parameter description: Specifies the minimum number of special characters that an account password must contain.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 999.

+ +

Default value: 0

+
+
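To see how the complexity parameters combine, consider the following hedged sketch (the user name and passwords are hypothetical; actual acceptance depends on your cluster's current settings):

SHOW password_policy;
SHOW password_min_length;
-- With password_policy = 1 and password_min_length = 8, a short password is
-- rejected, while a sufficiently long and varied one is accepted.
CREATE USER report_reader PASSWORD 'Short1';        -- expected to fail the check
CREATE USER report_reader PASSWORD 'Example_2024';  -- expected to succeed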

password_effect_time

Parameter description: Specifies the validity period of an account password.

+

Type: SIGHUP

+

Value range: a floating-point number ranging from 0 to 999. The unit is day.

+ +

Default value: 90

+
+

password_notify_time

Parameter description: Specifies how many days in advance users are notified before the account password expires.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 999. The unit is day.

+ +

Default value: 7

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0891.html b/docs/dws/dev/dws_04_0891.html new file mode 100644 index 00000000..ae9a6e9f --- /dev/null +++ b/docs/dws/dev/dws_04_0891.html @@ -0,0 +1,126 @@ + + +

Communication Library Parameters

+

This section describes parameter settings and value ranges for communication libraries.

+

comm_tcp_mode

Parameter description: Specifies whether the communication library uses the TCP or SCTP protocol to set up a data channel. The modification of this parameter takes effect after the cluster is restarted.

+

Type: POSTMASTER

+

Value range: Boolean. If this parameter is set to on for CNs, the CNs connect to DNs using TCP. If this parameter is set to on for DNs, the DNs communicate with each other using TCP.

+

Default value: on

+
+

comm_sctp_port

Parameter description: Specifies the TCP or SCTP listening port used by the TCP proxy communication library or SCTP communication library, respectively.

+

Type: POSTMASTER

+

This port number is automatically allocated during cluster deployment. Do not change the parameter setting. If the port number is incorrectly set, the database communication fails.

+
+

Value range: an integer ranging from 0 to 65535

+

Default value: port + Number of primary DNs on the local host x 2 + Sequence number of the local DN on the local host

+
+

comm_control_port

Parameter description: Specifies the TCP listening port used for the control channel of the TCP proxy communication library or the SCTP communication library.

+

Type: POSTMASTER

+

Value range: an integer ranging from 0 to 65535

+

Default value: port + Number of primary DNs on the local host x 2 + Sequence number of the local DN on the local host + 1

+

This port number is automatically allocated during cluster deployment. Do not change the parameter setting. If the port number is incorrectly set, the database communication fails.

+
+
+

comm_max_datanode

Parameter description: Specifies the maximum number of DNs supported by the TCP proxy communication library or SCTP communication library.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 8192

+

Default value: actual number of DNs

+
+

comm_max_stream

Parameter description: Specifies the maximum number of concurrent data streams supported by the TCP proxy communication library or SCTP communication library. The value of this parameter must be greater than: Number of concurrent data streams x Number of operators in each stream x Square of SMP.

+

Type: POSTMASTER

+

Value range: an integer ranging from 1 to 60000

+

Default value: calculated by the following formula: min(query_dop_limit x query_dop_limit x 2 x 20, max_process_memory (in bytes) x 0.005/(Maximum number of CNs + Number of current DNs)/260). If the result is less than 1024, 1024 is used. query_dop_limit = Number of CPU cores of a single server/Number of DNs of a single server.

+
  • You are not advised to set this parameter to a large value, because memory usage grows accordingly (256 bytes x comm_max_stream x comm_max_datanode). Many concurrent data streams, complex queries, and a high SMP value all increase stream consumption and can exhaust memory.
  • If the value of comm_max_datanode is small, the process memory is sufficient. In this case, you can increase the value of comm_max_stream.
+
+
+
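As an illustrative calculation only (the hardware figures are assumptions, not defaults): on a server with 96 CPU cores and 12 primary DNs, query_dop_limit = 96/12 = 8, so the first term of the formula is 8 x 8 x 2 x 20 = 2560, which is above the 1024 floor. You can check the effective value as follows:

-- Assumed hardware: 96 CPU cores, 12 primary DNs per server (hypothetical).
SHOW comm_max_stream;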

comm_max_receiver

Parameter description: Specifies the maximum number of receiving threads for the TCP proxy communication library or SCTP communication library.

+

Type: POSTMASTER

+

Value range: an integer ranging from 1 to 50

+

Default value: 4

+
+

comm_quota_size

Parameter description: Specifies the maximum size of packets that can be consecutively sent by the TCP proxy communication library or SCTP communication library. When you use a 1GE NIC, a small value ranging from 20 KB to 40 KB is recommended.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 102400. The default unit is KB. The value 0 indicates that the quota mechanism is not used.

+

Default value: 1 MB

+
+

comm_usable_memory

Parameter description: Specifies the maximum memory available for buffering on the TCP proxy communication library or SCTP communication library on a single DN.

+

Type: POSTMASTER

+

Value range: an integer ranging from 102400 to INT_MAX/2. The default unit is KB. During installation, the value must be at least 1 GB.

+

Default value: max_process_memory/8

+

Set this parameter based on the environment memory and the deployment method. If the value is too large, out-of-memory (OOM) errors may occur. If it is too small, the performance of the TCP proxy communication library or SCTP communication library may deteriorate.

+
+
+

comm_memory_pool_percent

Parameter description: Specifies the percentage of the memory pool resources that can be used by the TCP proxy communication library or the SCTP communication library in a DN. This parameter is used to adaptively reserve memory used by the communication libraries.

+

Type: POSTMASTER

+

Value range: an integer ranging from 0 to 100

+

Default value: 0

+

If the memory used by the communication library is small, set this parameter to a small value. Otherwise, set it to a large value.

+
+
+

comm_client_bind

Parameter description: Specifies whether to bind the client of the communication library to a specified IP address when the client initiates a connection.

+

Type: USERSET

+

Value range: Boolean

+ +

If multiple IP addresses of a node in a cluster are on the same communication network segment, set this parameter to on. In this case, the client is bound to the IP address specified by listen_addresses. The concurrency performance of a cluster depends on the number of random ports because a port can be used only by one client at a time.

+
+

Default value: off

+
+

comm_no_delay

Parameter description: Specifies whether to use the NO_DELAY attribute of the communication library connection. Restart the cluster for the setting to take effect.

+

Type: USERSET

+

Value range: Boolean

+

Default value: off

+

If packet loss occurs because a large number of packets are received per second, set this parameter to off to reduce the total number of packets.

+
+
+

comm_debug_mode

Parameter description: Specifies the debug mode of the TCP proxy communication library or SCTP communication library, that is, whether to print logs about the communication layer. The setting is effective at the session layer.

+

When the switch is set to on, the number of printed logs is huge, adding extra overhead and reducing database performance. Therefore, set the switch to on only in the debug mode.

+
+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

comm_ackchk_time

Parameter description: Specifies the duration after which the communication library server automatically triggers ACK when no data package is received.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 20000. The unit is millisecond (ms). 0 indicates that automatic ACK triggering is disabled.

+

Default value: 2000

+
+

comm_timer_mode

Parameter description: Specifies the timer mode of the TCP proxy communication library or SCTP communication library, that is, whether to print timer logs in each phase of the communication layer. The setting is effective at the session layer.

+

When the switch is set to on, the number of printed logs is huge, adding extra overhead and reducing database performance. Therefore, set the switch to on only in the debug mode.

+
+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

comm_stat_mode

Parameter description: Specifies the statistics mode of the TCP proxy communication library or SCTP communication library, that is, whether to print statistics about the communication layer. The setting is effective at the session layer.

+

When the switch is set to on, the number of printed logs is huge, adding extra overhead and reducing database performance. Therefore, set the switch to on only in the debug mode.

+
+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_stateless_pooler_reuse

Parameter description: Specifies whether to enable the pooler reuse mode. The setting takes effect after the cluster is restarted.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: off

+
+

comm_cn_dn_logic_conn

Parameter description: Specifies a switch for logical connections between CNs and DNs. The parameter setting takes effect only after the cluster is restarted.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0892.html b/docs/dws/dev/dws_04_0892.html new file mode 100644 index 00000000..028d199b --- /dev/null +++ b/docs/dws/dev/dws_04_0892.html @@ -0,0 +1,23 @@ + + +

Resource Consumption

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0893.html b/docs/dws/dev/dws_04_0893.html new file mode 100644 index 00000000..4e752f9c --- /dev/null +++ b/docs/dws/dev/dws_04_0893.html @@ -0,0 +1,139 @@ + + +

Memory

+

This section describes memory parameters.

+

Parameters described in this section take effect only after the database service restarts.

+
+

enable_memory_limit

Parameter description: Specifies whether to enable the logical memory management module.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+

If the result of max_process_memory - shared_buffer - cstore_buffers is less than 2 GB, GaussDB(DWS) forcibly sets enable_memory_limit to off.

+
+
+

max_process_memory

Parameter description: Specifies the maximum physical memory of a database node.

+

Type: POSTMASTER

+

Value range: an integer ranging from 2 x 1024 x 1024 to INT_MAX/2. The unit is KB.

+

Default value: automatically calculated for non-secondary DNs using the formula (Physical memory size) x 0.6/(1 + Number of primary DNs). If the result is less than 2 GB, 2 GB is used by default. The default size for a secondary DN is 12 GB.

+

Setting suggestions:

+

On DNs, the value of this parameter is determined based on the physical system memory and the number of DNs deployed on a single node. Parameter value = (Physical memory – vm.min_free_kbytes) x 0.7/(n + Number of primary DNs). This parameter aims to ensure system reliability, preventing node OOM caused by increasing memory usage. vm.min_free_kbytes indicates OS memory reserved for kernels to receive and send data. Its value is at least 5% of the total memory. That is, max_process_memory = Physical memory x 0.665/(n + Number of primary DNs). If the cluster scale (number of nodes in the cluster) is smaller than 256, n=1; if the cluster scale is larger than 256 and smaller than 512, n=2; if the cluster scale is larger than 512, n=3.

+

Set this parameter on CNs to the same value as that on DNs.

+

RAM is the maximum memory allocated to the cluster.

+
+
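A worked example under assumed conditions (256 GB of physical memory, 8 primary DNs per node, and a cluster of fewer than 256 nodes, so n = 1):

-- 256 GB x 0.665 / (1 + 8) = about 18.9 GB per DN (assumed hardware)
SHOW max_process_memory;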

shared_buffers

Parameter description: Specifies the size of shared memory used by GaussDB(DWS). If this parameter is set to a large value, GaussDB(DWS) may require more System V shared memory than the default setting.

+

Type: POSTMASTER

+

Value range: an integer ranging from 128 to INT_MAX. The unit is 8 KB.

+

Changing the value of BLCKSZ changes the minimum value of shared_buffers.

+

Default value: 512 MB for CNs and 1 GB for DNs. If the maximum value allowed by the OS is smaller than 32 MB, this parameter will be automatically changed to the maximum value allowed by the OS during database initialization.

+

Setting suggestions:

+

Set this parameter for DNs to a value greater than that for CNs, because GaussDB(DWS) pushes most of its queries down to DNs.

+

It is recommended that shared_buffers be set to a value less than 40% of the memory. Set it to a large value for row-store tables and a small value for column-store tables. For column-store tables: shared_buffers = (Memory of a single server/Number of DNs on the single server) x 0.4 x 0.25

+

If you want to increase the value of shared_buffers, you also need to increase the value of checkpoint_segments, because a longer period of time is required to write a large amount of new or changed data.

+
+
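Applying the column-store formula above to assumed hardware (256 GB of memory and 8 DNs on a single server): shared_buffers = (256/8) x 0.4 x 0.25 = 3.2 GB. A sketch for verifying the current value:

-- (256 GB / 8 DNs) x 0.4 x 0.25 = 3.2 GB (assumed hardware, not a default)
SHOW shared_buffers;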

bulk_write_ring_size

Parameter description: Specifies the size of the ring buffer used for data parallel import.

+

Type: USERSET

+

Value range: an integer ranging from 16384 to INT_MAX. The unit is KB.

+

Default value: 2 GB

+

Setting suggestions: Increase the value of this parameter on DNs if a large amount of data is to be imported.

+
+

temp_buffers

Parameter description: Specifies the maximum size of local temporary buffers used by each database session.

+

Type: USERSET

+

Value range: an integer ranging from 800 to INT_MAX/2. The unit is KB.

+

Default value: 8 MB

+
  • This parameter can be modified only before the first use of temporary tables within each session. Subsequent attempts to change the value of this parameter will not take effect on that session.
  • Based on the value of temp_buffers, a session allocates temporary buffers as required. The cost of setting a large value in sessions that do not require many temporary buffers is only a buffer descriptor. If a buffer is used, 8192 bytes will be consumed for it.
+
+
+

max_prepared_transactions

Parameter description: Specifies the maximum number of transactions that can stay in the prepared state simultaneously. If this parameter is set to a large value, GaussDB(DWS) may require more System V shared memory than the default setting.

+

When GaussDB(DWS) is deployed as an HA system, set this parameter on the standby server to the same value or a value greater than that on the primary server. Otherwise, queries will fail on the standby server.

+

Type: POSTMASTER

+

Value range: an integer ranging from 0 to 536870911. 0 indicates that the prepared transaction feature is disabled.

+

Default value: 800

+

Set this parameter to a value greater than or equal to that of max_connections to avoid failures in preparation.

+
+
+

work_mem

Parameter description: Specifies the memory used for internal sort operations and hash tables before data is written into temporary disk files. Sort operations are used for ORDER BY, DISTINCT, and merge joins. Hash tables are required for Hash joins as well as Hash-based aggregations and IN subqueries.

+

For a complex query, several sort or Hash operations may be running in parallel; each operation will be allowed to use as much memory as this value specifies. If the memory is insufficient, data is written into temporary files. In addition, several running sessions could be performing such operations concurrently. Therefore, the total memory used may be many times the value of work_mem.

+

Type: USERSET

+

Value range: an integer ranging from 64 to INT_MAX. The unit is KB.

+

Default value: 64 MB

+

Setting suggestions:

+

If the physical memory specified by work_mem is insufficient, additional operator calculation data will be written into temporary tables based on query characteristics and the degree of parallelism. This reduces performance by five to ten times, and prolongs the query response time from seconds to minutes.

+ +
+
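Because work_mem is a USERSET parameter, a session running a known heavy sort can raise it temporarily instead of changing it cluster-wide. A sketch (the size is illustrative):

SET work_mem = '512MB';  -- give this session more sort and hash memory
-- Run the memory-intensive statement here, then restore the default.
RESET work_mem;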

query_mem

Parameter description: Specifies the memory used by query. If the value of query_mem is greater than 0, the optimizer adjusts the estimated query memory to this value when generating an execution plan.

+

Type: USERSET

+

Value range: 0, or an integer greater than 32 MB. The default unit is KB. If the value is negative or less than 32 MB, the default value 0 is used, and the optimizer does not adjust the estimated query memory.

+

Default value: 0

+
+

query_max_mem

Parameter description: Specifies the maximum memory that can be used by query. If the value of query_max_mem is greater than 0, an error is reported when the query memory usage exceeds the value.

+

Type: USERSET

+

Value range: 0 or an integer greater than 32 MB. The default unit is KB. If the value is set to a negative value or less than 32 MB, the default value 0 is used. In this case, the query memory will not be limited based on the value.

+

Default value: 0

+
+

maintenance_work_mem

Parameter description: Specifies the maximum size of memory to be used for maintenance operations, such as VACUUM, CREATE INDEX, and ALTER TABLE ADD FOREIGN KEY. This parameter may affect the execution efficiency of VACUUM, VACUUM FULL, CLUSTER, and CREATE INDEX.

+

Type: USERSET

+

Value range: an integer ranging from 1024 to INT_MAX. The unit is KB.

+

Default value: 128 MB

+

Setting suggestions:

+ +
+

psort_work_mem

Parameter description: Specifies the memory used for internal sort operations on column-store tables before data is written into temporary disk files. This parameter can be used for inserting tables with a partial cluster key or index, creating a table index, and deleting or updating a table.

+

Type: USERSET

+

Multiple running sessions may perform partial sorting on a table at the same time. Therefore, the total memory usage may be several times of the psort_work_mem value.

+
+

Value range: an integer ranging from 64 to INT_MAX. The unit is KB.

+

Default value: 512 MB

+
+

max_loaded_cudesc

Parameter description: Specifies the number of loaded CuDescs per column when a column-store table is scanned. Increasing the value will improve the query performance and increase the memory usage, particularly when there are many columns in the column tables.

+

Type: USERSET

+

Value range: an integer ranging from 100 to INT_MAX/2

+

Default value: 1024

+

When the value of max_loaded_cudesc is set to a large value, the memory may be insufficient.

+
+
+

max_stack_depth

Parameter description: Specifies the maximum safe depth of GaussDB(DWS) execution stack. The safety margin is required because the stack depth is not checked in every routine in the server, but only in key potentially-recursive routines, such as expression evaluation.

+

Type: SUSET

+

Configuration principles: Set this parameter to the actual stack size limit enforced by the kernel (as set by ulimit -s), minus a safety margin of about 1 MB. If the value exceeds the actual kernel limit, a deeply recursive function may crash an individual server process.

+ +

Value range: an integer ranging from 100 to INT_MAX. The unit is KB.

+

Default value: 2 MB

+

2 MB is a small value and will not incur system breakdown in general, but may lead to execution failures of complex functions.

+
+
+

cstore_buffers

Parameter description: Specifies the size of the shared buffer used by ORC, Parquet, or CarbonData data of column-store tables and OBS or HDFS column-store foreign tables.

+

Type: POSTMASTER

+

Value range: an integer ranging from 16384 to INT_MAX. The unit is KB.

+

Default value: 32 MB

+

Setting suggestions:

+

Column-store tables use the shared buffer specified by cstore_buffers instead of that specified by shared_buffers. When column-store tables are mainly used, reduce the value of shared_buffers and increase that of cstore_buffers.

+

Use cstore_buffers to specify the cache of ORC, Parquet, or CarbonData metadata and data for OBS or HDFS foreign tables. The metadata cache size should be 1/4 of cstore_buffers and not exceed 2 GB. The remaining cache is shared by column-store data and foreign table column-store data.

+
+

enable_orc_cache

Parameter description: Specifies whether to reserve 1/4 of cstore_buffers for storing ORC metadata when the cstore buffer is initialized.

+

Type: POSTMASTER

+

Value range: Boolean

+

Default value: on

+ +
+

schedule_splits_threshold

Parameter description: Specifies the maximum number of files that can be stored in memory when you schedule an HDFS foreign table. If the number is exceeded, all files in the list will be spilled to disk for scheduling.

+

Type: USERSET

+

Value range: an integer ranging from 1 to INT_MAX

+

Default value: 60000

+
+

bulk_read_ring_size

Parameter description: Specifies the size of the ring buffer used for data parallel export.

+

Type: USERSET

+

Value range: an integer ranging from 256 to INT_MAX. The unit is KB.

+

Default value: 16 MB

+
+

check_cu_size_threshold

Parameter description: If the amount of data inserted to a CU is greater than the value of this parameter when data is inserted to a column-store table, the system starts row-level size verification to prevent the generation of a CU whose size is greater than 1 GB (non-compressed size).

+

Type: USERSET

+

Value range: an integer ranging from 0 to 1024. The unit is MB.

+

Default value: 1024 MB

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0894.html b/docs/dws/dev/dws_04_0894.html new file mode 100644 index 00000000..9d9ed4b3 --- /dev/null +++ b/docs/dws/dev/dws_04_0894.html @@ -0,0 +1,42 @@ + + +

Statement Disk Space Control

+

This section describes parameters related to statement disk space control, which are used to limit the disk space usage of statements.

+

sql_use_spacelimit

Parameter description: Specifies the space size for files to be spilled to disks when a single SQL statement is executed on a single DN. The managed space includes the space occupied by ordinary tables, temporary tables, and intermediate result sets to be flushed to disks. System administrators are also restricted by this parameter.

+

Type: USERSET

+

Value range: an integer ranging from -1 to INT_MAX. The unit is KB. -1 indicates no limit.

+

Default value: -1

+

Setting suggestion: You are advised to set sql_use_spacelimit to 10% of the total disk space where DNs reside. If two DNs exist on a single disk, set sql_use_spacelimit to 5% of the total disk space.

+

For example, if sql_use_spacelimit is set to 100 in the statement and the amount of data spilled to disks on a single DN exceeds 100 KB, GaussDB(DWS) stops the query and reports that the threshold has been exceeded.

+
insert into user1.t1 select * from user2.t1;
ERROR:  The space used on DN (104 kB) has exceeded the sql use space limit (100 kB).
+ +
+

Handling suggestion:

+
  • Optimize the statement to reduce the data spilled to disks.
  • If the disk space is sufficient, increase the value of this parameter.
+
+
+

temp_file_limit

Parameter description: Specifies the total space for files spilled to disks in a single thread. For example, temporary files used by sorting and hash tables or cursors are controlled by this parameter.

+

This is a session-level setting.

+

Type: SUSET

+

Value range: an integer ranging from -1 to INT_MAX. The unit is KB. -1 indicates no limit.

+

Default value: -1

+

This parameter does not apply to disk space occupied by temporary tablespaces used for executing SQL queries.

+
+
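A sketch of capping spill usage for the current session (the 10 GB figure is illustrative, and changing this SUSET parameter requires administrator privileges):

SET temp_file_limit = 10485760;  -- 10 GB, expressed in KB
-- Statements whose temporary files exceed the cap are now cancelled.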
+

bi_page_reuse_factor

Parameter description: Specifies the percentage of idle space of old pages that can be reused when page replication is used for data synchronization between primary and standby DNs in the scenario where data is inserted into row-store tables in batches.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 100. The value is a percentage. Value 0 indicates that the old pages are not reused and new pages are requested.

+

Default value: 70

+
  • In an upgrade, the default value of this parameter is the same as that in the cluster before the upgrade. In a newly installed 8.1.1.500 cluster, the default value of this parameter is 0.
  • You are not advised to set this parameter to a value less than 50 (except 0). If the idle space of the reused page is small, too much old page data will be transmitted between the primary and standby DNs. As a result, the batch insertion performance deteriorates.
  • You are not advised to set this parameter to a value greater than 90. If this parameter is set to a value greater than 90, idle pages will be frequently queried, but old pages cannot be reused.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0895.html b/docs/dws/dev/dws_04_0895.html new file mode 100644 index 00000000..0ada38a9 --- /dev/null +++ b/docs/dws/dev/dws_04_0895.html @@ -0,0 +1,17 @@ + + +

Kernel Resources

+

This section describes kernel resource parameters. Whether these parameters take effect depends on OS settings.

+

max_files_per_process

Parameter description: Specifies the maximum number of simultaneously open files allowed by each server process. If the kernel is enforcing a proper limit, setting this parameter is not required.

+

However, on some platforms (especially most BSD systems) the kernel allows independent processes to open far more files than the system can actually support. If the message "Too many open files" is displayed, reduce this setting. Generally, the number of file descriptors must be greater than or equal to the maximum number of concurrent tasks multiplied by the number of primary DNs on the current physical machine (max_files_per_process x 3).

+

Type: POSTMASTER

+

Value range: an integer ranging from 25 to INT_MAX

+

Default value: 1000

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0896.html b/docs/dws/dev/dws_04_0896.html new file mode 100644 index 00000000..bb7a4b49 --- /dev/null +++ b/docs/dws/dev/dws_04_0896.html @@ -0,0 +1,44 @@ + + +

Cost-based Vacuum Delay

+

This feature allows administrators to reduce the I/O impact of the VACUUM and ANALYZE statements on concurrent database activities. It is often more important to prevent maintenance statements, such as VACUUM and ANALYZE, from affecting other database operations than to run them quickly. Cost-based vacuum delay provides a way for administrators to achieve this purpose.

+

Certain operations hold critical locks and should be completed as quickly as possible. In GaussDB(DWS), cost-based vacuum delays do not take effect during such operations. To avoid uselessly long delays in such cases, the actual delay is the larger of the following two values:

+
  • vacuum_cost_delay x accumulated_balance/vacuum_cost_limit
  • vacuum_cost_delay x 4
+
+

Context

During the execution of the ANALYZE | ANALYSE and VACUUM statements, the system maintains an internal counter that keeps track of the estimated cost of the various I/O operations that are performed. When the accumulated cost reaches a limit (specified by vacuum_cost_limit), the process performing the operation will sleep for a short period of time (specified by vacuum_cost_delay). Then, the counter resets and the operation continues.

+

By default, this feature is disabled. To enable this feature, set vacuum_cost_delay to a value other than 0.

+
+
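For instance, a maintenance session could enable the delay only for its own VACUUM run. A minimal sketch (the table name and delay value are hypothetical):

SET vacuum_cost_delay = 10;  -- sleep 10 ms whenever the cost limit is reached
VACUUM my_big_table;         -- hypothetical table
RESET vacuum_cost_delay;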

vacuum_cost_delay

Parameter description: Specifies the length of time that the process will sleep when vacuum_cost_limit has been exceeded.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 100. The unit is millisecond (ms). A positive number enables cost-based vacuum delay and 0 disables cost-based vacuum delay.

+

Default value: 0

+
  • On many systems, the effective resolution of sleep length is 10 ms. Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10.
  • Set this parameter to a small value, such as 10 ms or 20 ms. To adjust the resource consumption of vacuum, it is usually better to change the other vacuum cost parameters.
+
+
+

vacuum_cost_page_hit

Parameter description: Specifies the estimated cost for vacuuming a buffer found in the shared buffer. It represents the cost to lock the buffer pool, look up the shared Hash table, and scan the page.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 10000. The value is an abstract cost estimate, not a duration.

+

Default value: 1

+
+

vacuum_cost_page_miss

Parameter description: Specifies the estimated cost for vacuuming a buffer read from the disk. It represents the cost to lock the buffer pool, look up the shared Hash table, read the desired block from the disk, and scan the block.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 10000. The value is an abstract cost estimate, not a duration.

+

Default value: 10

+
+

vacuum_cost_page_dirty

Parameter description: Specifies the estimated cost charged when vacuum modifies a block that was previously clean. It represents the I/Os required to flush the dirty block out to disk again.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 10000. The value is an abstract cost estimate, not a duration.

+

Default value: 20

+
+

vacuum_cost_limit

Parameter description: Specifies the cost limit. The cleanup process will sleep if this limit is exceeded.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 10000. The value is an accumulated cost, not a duration.

+

Default value: 200

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0898.html b/docs/dws/dev/dws_04_0898.html new file mode 100644 index 00000000..b7abd9f7 --- /dev/null +++ b/docs/dws/dev/dws_04_0898.html @@ -0,0 +1,57 @@ + + +

Asynchronous I/O Operations

+

enable_adio_debug

Parameter description: Specifies whether O&M personnel are allowed to generate some ADIO logs to locate ADIO issues. This parameter is used only by developers. Common users are advised not to use it.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_fast_allocate

Parameter description: Specifies whether fast disk space allocation is enabled. This function is supported only on the XFS file system.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

prefetch_quantity

Parameter description: Specifies the amount of data prefetched for row storage when ADIO is used.

+

Type: USERSET

+

Value range: an integer ranging from 1024 to 1048576. The unit is 8 KB.

+

Default value: 32 MB

+
+

backwrite_quantity

Parameter description: Specifies the amount of data written back for row storage when ADIO is used.

+

Type: USERSET

+

Value range: an integer ranging from 1024 to 1048576. The unit is 8 KB.

+

Default value: 8 MB

+
+

cstore_prefetch_quantity

Parameter description: Specifies the amount of data prefetched for column storage when ADIO is used.

+

Type: USERSET

+

Value range: an integer ranging from 1024 to 1048576. The unit is KB.

+

Default value: 32 MB

+
+

cstore_backwrite_quantity

Parameter description: Specifies the amount of data written back for column storage when ADIO is used.

+

Type: USERSET

+

Value range: an integer ranging from 1024 to 1048576. The unit is KB.

+

Default value: 8 MB

+
+

cstore_backwrite_max_threshold

Parameter description: Specifies the maximum amount of column-store write-back data that can be buffered in the database when ADIO is used.

+

Type: USERSET

+

Value range: an integer ranging from 4096 to INT_MAX/2. The unit is KB.

+

Default value: 2 GB

+
+

fast_extend_file_size

Parameter description: Specifies the size of disk space pre-extended for row storage when ADIO is used.

+

Type: SUSET

+

Value range: an integer ranging from 1024 to 1048576. The unit is KB.

+

Default value: 8 MB

+
+

effective_io_concurrency

Parameter description: Specifies the number of requests that can be simultaneously processed by the disk subsystem. For the RAID array, the parameter value must be the number of disk drive spindles in the array.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 1000

+

Default value: 1

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0899.html b/docs/dws/dev/dws_04_0899.html new file mode 100644 index 00000000..bfe9b5a4 --- /dev/null +++ b/docs/dws/dev/dws_04_0899.html @@ -0,0 +1,40 @@ + + +

Parallel Data Import

+

GaussDB(DWS) provides a parallel data import function that enables a large amount of data to be imported in a fast and efficient manner. This section describes parameters for importing data in parallel in GaussDB(DWS).

+

raise_errors_if_no_files

Parameter description: Specifies whether to distinguish between the cases "the imported file contains no records" and "the imported file does not exist". If this parameter is set to true and the imported file does not exist, GaussDB(DWS) reports the error message "file does not exist".

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

partition_mem_batch

Parameter description: To optimize the inserting of column-store partitioned tables in batches, data is cached during the inserting process and then written to the disk in batches. You can use partition_mem_batch to specify the number of buffers. If the value is too large, much memory will be consumed. If it is too small, the performance of inserting column-store partitioned tables in batches will deteriorate.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 65535

+

Default value: 256

+
+

partition_max_cache_size

Parameter description: To optimize the inserting of column-store partitioned tables in batches, data is cached during the inserting process and then written to the disk in batches. You can use partition_max_cache_size to specify the size of the data buffer. If the value is too large, much memory will be consumed. If it is too small, the performance of inserting column-store partitioned tables in batches will deteriorate.

+

Type: USERSET

+

Value range: an integer ranging from 4096 to INT_MAX/2. The minimum unit is KB.

+

Default value: 2 GB

+
+
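A sketch of shrinking both buffers for a memory-constrained import session (the values are illustrative):

SET partition_mem_batch = 128;           -- fewer buffers
SET partition_max_cache_size = 1048576;  -- 1 GB cache, expressed in KB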

gds_debug_mod

Parameter description: Specifies whether to enable the debug function of Gauss Data Service (GDS). This parameter is used to better locate and analyze GDS faults. After the debug function is enabled, types of packets received or sent by GDS, peer end of GDS during command interaction, and other interaction information about GDS are written into the logs of corresponding nodes. In this way, state switching on the GaussDB state machine and the current state are recorded. If this function is enabled, additional log I/O resources will be consumed, affecting log performance and validity. You are advised to enable this function only when locating GDS faults.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_delta_store

Parameter description: This parameter is deprecated. You can set it to on for forward compatibility, but the setting does not take effect.

+

For details about how to enable the delta table function of column-store tables, see the table-level parameter enable_delta in "CREATE TABLE" in the SQL Syntax.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0900.html b/docs/dws/dev/dws_04_0900.html new file mode 100644 index 00000000..9969ead7 --- /dev/null +++ b/docs/dws/dev/dws_04_0900.html @@ -0,0 +1,19 @@ + + +

Write Ahead Logs

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0901.html b/docs/dws/dev/dws_04_0901.html new file mode 100644 index 00000000..05e0eb89 --- /dev/null +++ b/docs/dws/dev/dws_04_0901.html @@ -0,0 +1,67 @@ + + +

Settings

+

wal_level

Parameter description: Specifies the level of the information that is written to WALs.

+

Type: POSTMASTER

+

Value range: enumerated values

+ +

Default value: hot_standby

+
  • To enable WAL archiving and data streaming replication between primary and standby servers, set this parameter to archive or hot_standby.
  • If this parameter is set to archive, hot_standby must be set to off. Otherwise, the database startup fails.
+
+
+

synchronous_commit

Parameter description: Specifies the synchronization mode of the current transaction.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: on

+
+

wal_buffers

Parameter description: Specifies the number of XLOG_BLCKSZs used for storing WAL data. The size of each XLOG_BLCKSZ is 8 KB.

+

Type: POSTMASTER

+

Value range: -1 to 2^18 (262144). The unit is 8 KB.

+ +

Default value: 16 MB

+

Setting suggestions: The content of WAL buffers is written to disks at each transaction commit, and setting this parameter to a large value does not significantly improve system performance. Setting this parameter to hundreds of megabytes can improve the disk writing performance on the server, to which a large number of transactions are committed. Based on experiences, the default value meets user requirements in most cases.

+
+

commit_delay

Parameter description: Specifies how long committed data is held in the WAL buffer before being flushed to disk.

+

Type: USERSET

+

Value range: an integer, ranging from 0 to 100000 (unit: μs). 0 indicates no delay.

+

Default value: 0

+
  • When this parameter is set to a value other than 0, a committed transaction is held in the WAL buffer instead of being written to the WAL immediately. The WAL writer process then flushes the buffer to disk periodically.
  • If the system load is high, other transactions may become ready to commit within the delay and can be flushed together. If no other transactions are waiting to commit, the delay is wasted time.
+
+
+

commit_siblings

Parameter description: Specifies a limit on the number of ongoing transactions. If the number of ongoing transactions is greater than the limit, a new transaction waits for the period specified by commit_delay before it is committed. If the number of ongoing transactions is less than the limit, the new transaction is immediately written into a WAL.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 1000

+

Default value: 5

+
+
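A hedged sketch of group-commit tuning for a high-concurrency workload (the values are illustrative, not recommendations):

SET commit_delay = 1000;   -- wait up to 1000 us for companion transactions
SET commit_siblings = 10;  -- ...but only if at least 10 other transactions are active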

enable_xlog_group_insert

Parameter description: Specifies whether to enable the group insertion mode for WALs. Only the Kunpeng architecture supports this parameter.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

wal_compression

Parameter description: Specifies whether to compress FPI pages.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
  • Only zlib compression algorithm is supported.
  • For clusters that are upgraded to the current version from an earlier version, this parameter is set to off by default. You can run the gs_guc command to enable the FPI compression function if needed.
  • If the current version is a newly installed version, this parameter is set to on by default.
  • If this parameter is manually enabled for a cluster upgraded from an earlier version, the cluster cannot be rolled back.
+
+
+

wal_compression_level

Parameter description: Specifies the compression level of zlib compression algorithm when the wal_compression parameter is enabled.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 9.

+ +

Default value: 9

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0902.html b/docs/dws/dev/dws_04_0902.html new file mode 100644 index 00000000..7cf95d1f --- /dev/null +++ b/docs/dws/dev/dws_04_0902.html @@ -0,0 +1,40 @@ + + +

Checkpoints

+

checkpoint_segments

Parameter description: Specifies the minimum number of WAL segment files in the period specified by checkpoint_timeout. The size of each log file is 16 MB.

+

Type: SIGHUP

+

Value range: an integer. The minimum value is 1.

+

Default value: 64

+

Increasing the value of this parameter speeds up the export of big data. Set this parameter based on checkpoint_timeout and shared_buffers. This parameter affects the number of WAL log segment files that can be reused. Generally, the maximum number of reused files in the pg_xlog folder is twice the number of checkpoint segments. The reused files are not deleted and are renamed to the WAL log segment files which will be later used.

+
+
+

checkpoint_timeout

Parameter description: Specifies the maximum time between automatic WAL checkpoints.

+

Type: SIGHUP

+

Value range: an integer ranging from 30 to 3600 (s)

+

Default value: 15min

+

If the value of checkpoint_segments is increased, you need to increase the value of this parameter as well. Increasing both further requires an increase of shared_buffers. Consider all these parameters together when setting them.

+
+
+

checkpoint_completion_target

Parameter description: Specifies the target of checkpoint completion, as a fraction of total time between checkpoints.

+

Type: SIGHUP

+

Value range: 0.0 to 1.0. The default value 0.5 indicates that each checkpoint must be completed within 50% of the checkpoint interval.

+

Default value: 0.5

+
+

checkpoint_warning

Parameter description: Specifies a time in seconds. If checkpoints caused by the filling of checkpoint segment files occur closer together than this time, a message suggesting an increase of checkpoint_segments is written to the server log.

+

Type: SIGHUP

+

Value range: an integer (unit: s). 0 indicates that warning is disabled.

+

Default value: 5min

+

Recommended value: 5min

+
+

checkpoint_wait_timeout

Parameter description: Specifies the longest time that the checkpoint waits for the checkpointer thread to start.

+

Type: SIGHUP

+

Value range: an integer ranging from 2 to 3600 (s)

+

Default value: 1min

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0903.html b/docs/dws/dev/dws_04_0903.html new file mode 100644 index 00000000..9b692c9e --- /dev/null +++ b/docs/dws/dev/dws_04_0903.html @@ -0,0 +1,45 @@ + + +

Archiving

+

archive_mode

Parameter description: When archive_mode is enabled, completed WAL segments are sent to archive storage by setting archive_command.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+

When wal_level is set to minimal, archive_mode cannot be used.

+
+
+

archive_command

Parameter description: Specifies the command used to archive WALs set by the administrator. You are advised to set the archive log path to an absolute path.

+

Type: SIGHUP

+

Value range: a string

+

Default value: (disabled)

+
  • Any %p in the string is replaced with the absolute path of the file to archive, and any %f is replaced with only the file name. (The relative path is relative to the data directory.) Use %% to embed an actual % character in the command.
  • This command returns zero only if it succeeds. Example:
    archive_command = 'cp --remove-destination %p /mnt/server/archivedir/%f'
    archive_command = 'copy %p /mnt/server/archivedir/%f'  # Windows
    +
    + +
    +
  • --remove-destination indicates that files will be overwritten during the archiving.
  • When archive_mode is set to on or not specified, a backup folder will be created in the pg_xlog directory and WALs will be compressed and copied to the pg_xlog/backup directory.
+
+
+

max_xlog_backup_size

Parameter description: Specifies the size of WAL logs backed up in the pg_xlog/backup directory.

+

Type: SIGHUP

+

Value range: an integer between 1048576 and 104857600. The unit is KB.

+

Default value: 2097152

+
  • The max_xlog_backup_size parameter setting takes effect only when archive_mode is enabled and archive_command is set to NULL.
  • The system checks the size of backup WALs in the pg_xlog/backup directory every minute. If the size exceeds the value specified by max_xlog_backup_size, the system deletes the earliest backup WALs until the size is less than the max_xlog_backup_size value × 0.9.
+
+
+

archive_timeout

Parameter description: Specifies the archiving period.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX. The unit is second. 0 indicates that archiving timeout is disabled.

+

Default value: 0

+
  • The server is forced to switch to a new WAL segment file with the period specified by this parameter.
  • Archived files that are closed early due to a forced switch are still of the same length as completely full files. Therefore, a very short archive_timeout will bloat the archive storage. You are advised to set archive_timeout to 60s.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0904.html b/docs/dws/dev/dws_04_0904.html new file mode 100644 index 00000000..4b0f73c1 --- /dev/null +++ b/docs/dws/dev/dws_04_0904.html @@ -0,0 +1,17 @@ + + +

HA Replication

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0905.html b/docs/dws/dev/dws_04_0905.html new file mode 100644 index 00000000..29128f4e --- /dev/null +++ b/docs/dws/dev/dws_04_0905.html @@ -0,0 +1,41 @@ + + +

Sending Server

+

wal_keep_segments

Parameter description: Specifies the minimum number of transaction log (Xlog) segment files retained in the pg_xlog directory, from which the standby server obtains log files from the primary server for streaming replication.

+

Type: SIGHUP

+

Value range: an integer ranging from 2 to INT_MAX

+

Default value: 65

+

Setting suggestions:

+ +
+

wal_sender_timeout

Parameter description: Specifies the maximum duration that the sending server waits for the WAL reception in the receiver.

+

Type: SIGHUP

+
  • If the primary server contains a huge volume of data, increase the value of this parameter for database rebuilding.
  • This parameter cannot be set to a value larger than the value of wal_receiver_timeout or the timeout parameter for database rebuilding.
+
+

Value range: an integer ranging from 0 to INT_MAX. The unit is millisecond (ms).

+

Default value: 15s

+
+

max_replication_slots

Parameter description: Specifies the number of log replication slots on the primary server.

+

Type: POSTMASTER

+

Value range: an integer ranging from 0 to 262143

+

Default value: 8

+

A physical replication slot provides an automatic method to ensure that an Xlog is not removed from a primary DN before all the standby and secondary DNs receive it. Physical replication slots are used to support HA clusters. The number of physical replication slots required by a cluster is as follows: ratio of standby and secondary DNs to the primary DN in a ring of DNs. For example, if an HA cluster has 1 primary DN, 1 standby DN, and 1 secondary DN, the number of required physical replication slots will be 2.

+
Plan the number of logical replication slots as follows:
  • A logical replication slot can carry changes of only one database for decoding. If multiple databases are involved, create multiple logical replication slots.
  • If logical replication is needed by multiple target databases, create multiple logical replication slots in the source database. Each logical replication slot corresponds to one logical replication link.
+
+
+

max_build_io_limit

Parameter description: Specifies the data volume that can be read from the disk per second when the primary server provides a build session to the standby server.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 1048576. The unit is KB. The value 0 indicates that the I/O flow is not restricted when the primary server provides a build session to the standby server.

+

Default value: 0

+

Setting suggestions: Set this parameter based on the disk bandwidth and job model. If there is no flow restriction or job interference, for disks with good performance such as SSDs, a full build consumes a relatively small proportion of bandwidth and has little impact on service performance. In this case, you do not need to set the threshold. If the service performance of a common 10,000 rpm SAS disk deteriorates significantly during a build, you are advised to set the parameter to 20 MB.

+

This setting directly affects the build speed and completion time. Therefore, you are advised to set this parameter to a value larger than 10 MB. During off-peak hours, you are advised to remove the flow restriction to restore to the normal build speed.

+
  • This parameter is used during peak hours or when the disk I/O pressure of the primary server is high. It limits the build flow rate on the standby server to reduce the impact on primary server services. After the service peak hours, you can remove the restriction or reset the flow rate threshold.
  • You are advised to set a proper threshold based on service scenarios and disk performance.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0906.html b/docs/dws/dev/dws_04_0906.html new file mode 100644 index 00000000..9b0326d8 --- /dev/null +++ b/docs/dws/dev/dws_04_0906.html @@ -0,0 +1,39 @@ + + +

Primary Server

+

vacuum_defer_cleanup_age

Parameter description: Specifies the number of transactions by which VACUUM will defer the cleanup of invalid row-store table records, so that VACUUM and VACUUM FULL do not clean up deleted tuples immediately.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 1000000. 0 means no delay.

+

Default value: 0

+
+

data_replicate_buffer_size

Parameter description: Specifies the size of memory used by queues when the sender sends data pages to the receiver. The value of this parameter affects the buffer size copied for the replication between the primary and standby servers.

+

Type: POSTMASTER

+

Value range: an integer ranging from 4 to 1023. The unit is MB.

+

Default value: 128 MB

+
+

enable_data_replicate

Parameter description: Specifies the data synchronization mode between the primary and standby servers when data is imported to row-store tables in a database.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_incremental_catchup

Parameter description: Specifies the data catchup mode between the primary and standby nodes.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

wait_dummy_time

Parameter description: Specifies the maximum duration for the primary, standby, and secondary nodes to wait for the secondary node to start in sequence, and the maximum duration for the secondary node to send the scanning list when incremental data catchup is enabled.

+

Type: SIGHUP

+

Value range: Integer, from 1 to INT_MAX, in seconds.

+

Default value: 300s

+

The unit can only be seconds.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0908.html b/docs/dws/dev/dws_04_0908.html new file mode 100644 index 00000000..9df4390e --- /dev/null +++ b/docs/dws/dev/dws_04_0908.html @@ -0,0 +1,21 @@ + + +

Query Planning

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0909.html b/docs/dws/dev/dws_04_0909.html new file mode 100644 index 00000000..82b80a72 --- /dev/null +++ b/docs/dws/dev/dws_04_0909.html @@ -0,0 +1,198 @@ + + +

Optimizer Method Configuration

+

These configuration parameters provide a crude method of influencing the query plans chosen by the query optimizer. If the default plan chosen by the optimizer for a particular query is not optimal, a temporary solution is to use one of these configuration parameters to force the optimizer to choose a different plan. Better ways include adjusting the optimizer cost constants, manually running ANALYZE, increasing the value of the default_statistics_target configuration parameter, and increasing the statistics collected for specific columns using ALTER TABLE ... SET STATISTICS.

+
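For example, to check whether a poor plan stems from a particular plan type, you can disable that type for the current session and compare the plans. A minimal sketch, assuming a hypothetical table t1 with an index on column c1:

EXPLAIN SELECT * FROM t1 WHERE c1 = 100;  -- note the plan chosen by default
SET enable_seqscan = off;                 -- session-level change only
EXPLAIN SELECT * FROM t1 WHERE c1 = 100;  -- compare: an index scan should now be preferred
RESET enable_seqscan;                     -- restore the default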

enable_bitmapscan

Parameter description: Controls whether the query optimizer uses the bitmap-scan plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_hashagg

Parameter description: Controls whether the query optimizer uses the Hash aggregation plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_hashjoin

Parameter description: Controls whether the query optimizer uses the Hash-join plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_indexscan

Parameter description: Controls whether the query optimizer uses the index-scan plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_indexonlyscan

Parameter description: Controls whether the query optimizer uses the index-only-scan plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_material

Parameter description: Controls whether the query optimizer uses materialization. It is impossible to suppress materialization entirely, but setting this parameter to off prevents the optimizer from inserting materialized nodes.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_mergejoin

Parameter description: Controls whether the query optimizer uses the merge-join plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_nestloop

Parameter description: Controls whether the query optimizer uses the nested-loop join plan type to fully scan internal tables. It is impossible to suppress nested-loop joins entirely, but setting this parameter to off allows the optimizer to choose other methods if available.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_index_nestloop

Parameter description: Controls whether the query optimizer uses the nested-loop join plan type to scan the parameterized indexes of internal tables.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: The default value for a newly installed cluster is on. If the cluster is upgraded from R8C10, the forward compatibility is retained. If the version is upgraded from R7C10 or an earlier version, the default value is off.

+
+

enable_seqscan

Parameter description: Controls whether the query optimizer uses the sequential scan plan type. It is impossible to suppress sequential scans entirely, but setting this variable to off allows the optimizer to preferentially choose other methods if available.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_sort

Parameter description: Controls whether the query optimizer uses the sort method. It is impossible to suppress explicit sorts entirely, but setting this variable to off allows the optimizer to preferentially choose other methods if available.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_tidscan

Parameter description: Controls whether the query optimizer uses the Tuple ID (TID) scan plan type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_kill_query

Parameter description: In CASCADE mode, when a user is deleted, all the objects belonging to the user are deleted. This parameter specifies whether queries that lock the objects belonging to the user can be forcibly unlocked (terminated) so that the deletion can proceed.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

enforce_oracle_behavior

Parameter description: Controls the rule matching modes of regular expressions.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_stream_concurrent_update

Parameter description: Controls the use of stream in concurrent updates. This parameter is restricted by the enable_stream_operator parameter.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_stream_operator

Parameter description: Controls whether the query optimizer uses streams.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_stream_recursive

Parameter description: Specifies whether to push WITH RECURSIVE join queries to DNs for processing.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

max_recursive_times

Parameter description: Specifies the maximum number of WITH RECURSIVE iterations.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX

+

Default value: 200

+
+
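As an illustration, the following session-level sketch raises the iteration limit before running a recursive query; the counting query is hypothetical:

SET max_recursive_times = 500;        -- allow up to 500 WITH RECURSIVE iterations
WITH RECURSIVE r(n) AS (
    SELECT 1
    UNION ALL
    SELECT n + 1 FROM r WHERE n < 300 -- terminates after 300 iterations, under the limit
)
SELECT count(*) FROM r;
RESET max_recursive_times;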

enable_vector_engine

Parameter description: Controls whether the query optimizer uses the vectorized executor.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_broadcast

Parameter description: Controls whether the query optimizer uses the broadcast distribution method when it evaluates the cost of stream.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_change_hjcost

Parameter description: Specifies whether the optimizer excludes internal table running costs when selecting the Hash Join cost path. If it is set to on, tables with a few records and high running costs are more possible to be selected.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_fstream

Parameter description: Controls whether the query optimizer uses streams when it delivers statements. This parameter is only used for external HDFS tables.

+

This parameter has been discarded. To reserve forward compatibility, set this parameter to on, but the setting does not make a difference.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

best_agg_plan

Parameter description: The query optimizer generates three plans for the aggregate operation under the stream:
  1. hashagg+gather(redistribute)+hashagg
  2. redistribute+hashagg(+gather)
  3. hashagg+redistribute+hashagg(+gather).
+
+

This parameter controls which type of hashagg plan the query optimizer generates.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 3.

+ +

Default value: 0

+
+
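For example, to test the second plan shape listed above for an aggregation, you might force it at the session level; t1 and column c2 are hypothetical:

SET best_agg_plan = 2;                            -- force redistribute+hashagg(+gather)
EXPLAIN SELECT c2, count(*) FROM t1 GROUP BY c2;  -- inspect the resulting aggregation plan
RESET best_agg_plan;                              -- 0 lets the optimizer choose by cost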

agg_redistribute_enhancement

Parameter description: When an aggregate operation involves multiple GROUP BY columns and none of them is the distribution column, one of the GROUP BY columns must be selected for redistribution. This parameter controls the policy for selecting the redistribution column.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_valuepartition_pruning

Parameter description: Specifies whether the DFS partitioned table is dynamically or statically optimized.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

expected_computing_nodegroup

Parameter description: Specifies a computing Node Group or the way to choose such a group. The Node Group mechanism is now for internal use only. You do not need to set it.

+

During join or aggregation operations, a Node Group can be selected in four modes. In each mode, the specified candidate computing Node Groups are listed for the optimizer to select an appropriate one for the current operator.

+

Type: USERSET

+

Value range: a string

+ +

Default value: bind

+
+

enable_nodegroup_debug

Parameter description: Specifies whether the optimizer assigns computing workloads to a specific Node Group when multiple Node Groups exist in an environment. The Node Group mechanism is now for internal use only. You do not need to set it.

+

This parameter takes effect only when expected_computing_nodegroup is set to a specific Node Group.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

stream_multiple

Parameter description: Specifies the weight used by the optimizer to calculate the final cost of stream operators.

+

The base stream cost is multiplied by this weight to make the final cost.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 1

+

This parameter is applicable only to Redistribute and Broadcast streams.

+
+
+

qrw_inlist2join_optmode

Parameter description: Specifies whether to enable inlist-to-join (inlist2join) query rewriting.

+

Type: USERSET

+

Value range: a string

+ +

Default value: cost_base

+
+

skew_option

Parameter description: Specifies whether a skew optimization policy is used.

+

Type: USERSET

+

Value range: a string

+ +

Default value: normal

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0910.html b/docs/dws/dev/dws_04_0910.html new file mode 100644 index 00000000..826b3d2c --- /dev/null +++ b/docs/dws/dev/dws_04_0910.html @@ -0,0 +1,51 @@ + + +

Optimizer Cost Constants

+

This section describes the optimizer cost constants. The cost variables described in this section are measured on an arbitrary scale. Only their relative values matter; scaling them all up or down by the same factor results in no change in the optimizer's choices. By default, these cost variables are based on the cost of a sequential page fetch; that is, seq_page_cost is conventionally set to 1.0 and the other cost variables are set relative to it. However, you can use a different scale, such as actual execution time in milliseconds.

+

seq_page_cost

Parameter description: Specifies the optimizer's estimated cost of a disk page fetch that is part of a series of sequential fetches.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 1

+
+

random_page_cost

Parameter description: Specifies the optimizer's estimated cost of an out-of-sequence disk page fetch.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 4

+
  • Although the server allows you to set random_page_cost to a value less than that of seq_page_cost, doing so does not make physical sense. However, setting them equal is reasonable if the database is entirely cached in RAM, because in that case there is no penalty for fetching pages out of sequence. Also, in a heavily-cached database you should lower both values relative to the CPU parameters, since the cost of fetching a page already in RAM is much smaller than it would normally be.
  • This value can be overwritten for tables and indexes in a particular tablespace by setting the tablespace parameter of the same name.
  • Compared with seq_page_cost, reducing this value causes the system to prefer index scans, while raising it makes index scans relatively more expensive. You can increase or decrease both values at the same time to change the disk I/O cost relative to the CPU cost.
+
+
+
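For instance, if the database is largely cached in RAM or resides on SSDs, you might lower random_page_cost toward seq_page_cost as a session-level experiment before changing it cluster-wide; table t1 and column c1 are hypothetical:

SET random_page_cost = 1.1;                            -- near seq_page_cost: random reads are cheap here
EXPLAIN SELECT * FROM t1 WHERE c1 BETWEEN 100 AND 200; -- index scans now look comparatively cheaper
RESET random_page_cost;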

cpu_tuple_cost

Parameter description: Specifies the optimizer's estimated cost of processing each row during a query.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 0.01

+
+

cpu_index_tuple_cost

Parameter description: Specifies the optimizer's estimated cost of processing each index entry during an index scan.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 0.005

+
+

cpu_operator_cost

Parameter description: Specifies the optimizer's estimated cost of processing each operator or function during a query.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 0.0025

+
+

effective_cache_size

Parameter description: Specifies the optimizer's assumption about the effective size of the disk cache that is available to a single query.

+

When setting this parameter you should consider both GaussDB(DWS)'s shared buffer and the kernel's disk cache. Also, take into account the expected number of concurrent queries on different tables, since they will have to share the available space.

+

This parameter has no effect on the size of shared memory allocated by GaussDB(DWS). It is used only for estimation purposes and does not reserve kernel disk cache. The value is in the unit of disk page. Usually the size of each page is 8192 bytes.

+

Type: USERSET

+

Value range: an integer ranging from 1 to INT_MAX. The unit is 8 KB.

+

A value greater than the default may make index scans more likely, and a smaller value may make sequential scans more likely.

+

Default value: 128 MB

+
+
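Because the unit is one 8 KB disk page, a target cache size must be converted to pages before it is set. A sketch, assuming you want the optimizer to assume roughly 4 GB of available cache:

-- 4 GB = 4 x 1024 x 1024 KB / 8 KB per page = 524288 pages
SET effective_cache_size = 524288;
SHOW effective_cache_size;  -- verify the session-level setting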

allocate_mem_cost

Parameter description: Specifies the query optimizer's estimated cost of creating a Hash table for memory space using Hash join. This parameter is used for optimization when the Hash join estimation is inaccurate.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to DBL_MAX

+

Default value: 0

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0911.html b/docs/dws/dev/dws_04_0911.html new file mode 100644 index 00000000..b3af3cfe --- /dev/null +++ b/docs/dws/dev/dws_04_0911.html @@ -0,0 +1,59 @@ + + +

Genetic Query Optimizer

+

This section describes parameters related to the genetic query optimizer. The genetic query optimizer (GEQO) is an algorithm that plans queries by using heuristic searching. This algorithm reduces planning time for complex queries, but the plans it produces are sometimes inferior to those found by the normal exhaustive-search algorithm.

+

geqo

Parameter description: Controls the use of genetic query optimization.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+

Generally, do not set this parameter to off. geqo_threshold provides more subtle control of GEQO.

+
+
+

geqo_threshold

Parameter description: Specifies the threshold for the number of FROM items. Genetic query optimization is used to plan a query when the number of FROM items involved is greater than this value.

+

Type: USERSET

+

Value range: an integer ranging from 2 to INT_MAX

+

Default value: 12

+
  • For simpler queries it is best to use the regular, exhaustive-search planner, but for queries with many tables it is better to use GEQO to manage the queries.
  • A FULL OUTER JOIN construct counts as only one FROM item.
+
+
+
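For example, to have GEQO take over for somewhat smaller queries in the current session (a sketch; choose the threshold from your own planning-time measurements):

SET geqo = on;            -- usually on already
SET geqo_threshold = 10;  -- use GEQO once a query involves 10 or more FROM items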

geqo_effort

Parameter description: Controls the trade-off between planning time and query plan quality in GEQO.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 10

+

Default value: 5

+
  • Larger values increase the time spent in query planning, but also increase the probability that an efficient query plan is chosen.
  • geqo_effort does not have direct effect. This parameter is only used to compute the default values for the other variables that influence GEQO behavior. You can manually set other parameters as required.
+
+
+

geqo_pool_size

Parameter description: Specifies the pool size used by GEQO, that is, the number of individuals in the genetic population.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX

+

The value of this parameter must be at least 2, and useful values are typically from 100 to 1000. If this parameter is set to 0, GaussDB(DWS) selects a proper value based on geqo_effort and the number of tables.

+
+

Default value: 0

+
+

geqo_generations

Parameter description: Specifies the number of iterations of the algorithm used by GEQO.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX

+

The value of this parameter must be at least 1, and useful values are typically from 100 to 1000. If it is set to 0, a suitable value is chosen based on geqo_pool_size.

+
+

Default value: 0

+
+

geqo_selection_bias

Parameter description: Specifies the selection bias used by GEQO. The selection bias is the selective pressure within the population.

+

Type: USERSET

+

Value range: a floating point number ranging from 1.5 to 2.0

+

Default value: 2

+
+

geqo_seed

Parameter description: Specifies the initial value of the random number generator used by GEQO to select random paths through the join order search space.

+

Type: USERSET

+

Value range: a floating point number ranging from 0.0 to 1.0

+

Varying the value changes the set of join paths explored, and may result in a better or worse path being found.

+
+

Default value: 0

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0912.html b/docs/dws/dev/dws_04_0912.html new file mode 100644 index 00000000..2d4d5348 --- /dev/null +++ b/docs/dws/dev/dws_04_0912.html @@ -0,0 +1,203 @@ + + +

Other Optimizer Options

+

default_statistics_target

Parameter description: Specifies the default statistics target for table columns without a column-specific target set via ALTER TABLE SET STATISTICS. A positive value indicates the number of samples used to collect statistics. A negative value means the statistics target is a percentage: the value converts to its corresponding percentage, for example, -5 means 5%. During sampling, default_statistics_target x 300 is used as the size of the random sample. For example, with the default value 100, 100 x 300 pages are read in a random sample.

+

Type: USERSET

+

Value range: an integer ranging from -100 to 10000

+
  • Larger positive values increase the time required to run ANALYZE, but might improve the quality of the optimizer's estimates.
  • Changing settings of this parameter may result in performance deterioration. If query performance deteriorates, you can:
    1. Restore to the default statistics.
    2. Use hints to optimize the query plan.
    +
  • If this parameter is set to a negative value, the number of samples is greater than or equal to 2% of the total data volume, and the number of records in user tables is less than 1.6 million, the time taken by running ANALYZE will be longer than when this parameter uses its default value.
  • If this parameter is set to a negative value, the autoanalyze function does not support percentage sampling. The sampling uses the default value of this parameter.
  • If this parameter is set to a positive value, you must have the ANALYZE permission to execute ANALYZE.
  • If this parameter is set to a negative value, that is, percentage sampling, you need to be granted the ANALYZE and SELECT permissions to execute ANALYZE.
+
+

Default value: 100

+
+
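The per-column override mentioned above uses ALTER TABLE ... SET STATISTICS, while the parameter itself affects subsequent ANALYZE runs. A sketch with a hypothetical table t1 and column c1 (note the permission requirements listed above for negative values):

SET default_statistics_target = -5;                 -- sample 5% of the rows
ANALYZE t1;                                         -- percentage sampling
ALTER TABLE t1 ALTER COLUMN c1 SET STATISTICS 500;  -- column-specific target
ANALYZE t1 (c1);                                    -- recollect statistics for that column
RESET default_statistics_target;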

constraint_exclusion

Parameter description: Controls the query optimizer's use of table constraints to optimize queries.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: partition

+

The default value partition applies constraint exclusion only to partitioned tables. If this parameter is set to on, extra planning is imposed on simple queries, which brings no benefit. If you have no partitioned tables, set this parameter to off.

+
+
+

cursor_tuple_fraction

Parameter description: Specifies the optimizer's estimated fraction of a cursor's rows that are retrieved.

+

Type: USERSET

+

Value range: a floating point number ranging from 0.0 to 1.0

+

Smaller values than the default value bias the optimizer towards using fast start plans for cursors, which will retrieve the first few rows quickly while perhaps taking a long time to fetch all rows. Larger values put more emphasis on the total estimated time. At the maximum setting of 1.0, cursors are planned exactly like regular queries, considering only the total estimated time and how soon the first rows might be delivered.

+
+

Default value: 0.1

+
+
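For example, when a cursor is used to read only a small prefix of a large result set, lowering the fraction biases the optimizer toward fast-start plans; t1 and c1 are hypothetical:

SET cursor_tuple_fraction = 0.01;  -- expect to fetch only about 1% of the rows
BEGIN;
DECLARE c CURSOR FOR SELECT * FROM t1 ORDER BY c1;
FETCH 100 FROM c;                  -- a fast-start plan returns these rows quickly
CLOSE c;
END;
RESET cursor_tuple_fraction;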

from_collapse_limit

Parameter description: Specifies the FROM-list size limit below which the optimizer merges sub-queries into upper queries. Sub-queries are merged only if the resulting FROM list would have no more than this many items.

+

Type: USERSET

+

Value range: an integer ranging from 1 to INT_MAX

+

Smaller values reduce planning time but may lead to inferior execution plans.

+
+

Default value: 8

+
+

join_collapse_limit

Parameter description: Specifies the limit on the number of FROM items below which the optimizer rewrites explicit JOIN constructs (except FULL JOIN) into lists of FROM items. JOINs are rewritten only if the resulting list would have no more than this many items.

+

Type: USERSET

+

Value range: an integer ranging from 1 to INT_MAX

+
  • Setting this parameter to 1 prevents join reordering. As a result, the join order specified in the query will be the actual order in which the relations are joined. The query optimizer does not always choose the optimal join order. Therefore, advanced users can temporarily set this variable to 1, and then specify the join order they desire explicitly.
  • Smaller values reduce planning time but lead to inferior execution plans.
+
+

Default value: 8

+
+
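For example, per the first note above, setting the limit to 1 pins the join order exactly as written; the tables are hypothetical:

SET join_collapse_limit = 1;  -- join in exactly the order written below
EXPLAIN SELECT *
FROM t1
JOIN t2 ON t1.id = t2.id
JOIN t3 ON t2.id = t3.id;
RESET join_collapse_limit;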

plan_mode_seed

Parameter description: This is a commissioning parameter. Currently, it supports only OPTIMIZE_PLAN and RANDOM_PLAN. OPTIMIZE_PLAN indicates the optimal plan, the cost of which is estimated using the dynamic planning algorithm, and its value is 0. RANDOM_PLAN indicates the plan that is randomly generated. If plan_mode_seed is set to -1, you do not need to specify the value of the seed identifier. Instead, the optimizer generates a random integer ranging from 1 to 2147483647, and then generates a random execution plan based on this random number. If plan_mode_seed is set to an integer ranging from 1 to 2147483647, you need to specify the value of the seed identifier, and the optimizer generates a random execution plan based on the seed value.

+

Type: USERSET

+

Value range: an integer ranging from -1 to 2147483647

+

Default value: 0

+
  • If plan_mode_seed is set to RANDOM_PLAN, the optimizer generates different random execution plans, which may not be the optimal. Therefore, to guarantee the query performance, the default value 0 is recommended during upgrade, scale-out, scale-in, and O&M.
  • If this parameter is not set to 0, the specified hint will not be used.
+
+
+

enable_hdfs_predicate_pushdown

Parameter description: Specifies whether predicates can be pushed down to the native data layer.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_random_datanode

Parameter description: Specifies whether random queries on DNs are enabled for replication tables. A complete copy of the table is stored on each DN, so queries can be randomly distributed across DNs to relieve pressure on individual nodes.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

hashagg_table_size

Parameter description: Specifies the hash table size during the execution of the HASH AGG operation.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX/2

+

Default value: 0

+
+

enable_codegen

Parameter description: Specifies whether code optimization can be enabled. Currently, the code optimization uses the LLVM optimization.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

codegen_strategy

Parameter description: Specifies the codegen optimization strategy used when an expression is converted to codegen-based execution.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: partial

+
+

enable_codegen_print

Parameter description: Specifies whether the LLVM IR function can be printed in logs.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

codegen_cost_threshold

Parameter description: The LLVM compilation takes some time to generate executable machine code. Therefore, LLVM compilation is beneficial only when the actual execution cost is greater than the sum of the cost of generating machine code and the optimized execution cost. This parameter specifies a threshold: if the estimated execution cost exceeds the threshold, LLVM optimization is performed.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX

+

Default value: 10000

+
+
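As a session-level sketch, you could raise the threshold so that LLVM compilation is attempted only for clearly expensive plans:

SET enable_codegen = on;             -- codegen must be on for the threshold to matter
SET codegen_cost_threshold = 50000;  -- only plans costed above 50000 trigger LLVM optimization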

enable_constraint_optimization

Parameter description: Specifies whether the informational constraint optimization execution plan can be used for an HDFS foreign table.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_bloom_filter

Parameter description: Specifies whether the BloomFilter optimization is used.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_extrapolation_stats

Parameter description: Specifies whether the extrapolation logic is used for data of DATE type based on historical statistics. The logic can increase the accuracy of estimation for tables whose statistics are not collected in time, but will possibly provide an overlarge estimation due to incorrect extrapolation. Enable the logic only in scenarios where the data of DATE type is periodically inserted.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

autoanalyze

Parameter description: Specifies whether to allow automatic statistics collection for tables that have no statistics when a plan is generated. Neither foreign tables nor temporary tables with the ON COMMIT [DELETE ROWS|DROP] option can trigger autoanalyze; to collect their statistics, manually run ANALYZE. If an exception occurs in the database during the execution of autoanalyze on a table, then after the database recovers, the system may still prompt you to collect statistics on the table when you run the statement again. In this case, manually run ANALYZE on the table to synchronize its statistics.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

query_dop

Parameter description: Specifies the user-defined degree of parallelism.

+

Type: USERSET

+

Value range: an integer ranging from -64 to 64.

+

[1, 64]: Fixed SMP is enabled, and the system will use the specified degree.

+

0: SMP adaptation function is enabled. The system dynamically selects the optimal parallelism degree [1,8] (x86 platforms) or [1,64] (Kunpeng platforms) for each query based on the resource usage and query plans.

+

[-64, -1]: SMP adaptation is enabled, and the system will dynamically select a degree from the limited range.

+
  • For TP services that mainly involve short queries, if services cannot be optimized through lightweight CNs or statement delivery, it will take a long time to generate an SMP plan. You are advised to set query_dop to 1. For AP services with complex statements, you are advised to set query_dop to 0.
  • After enabling concurrent queries, ensure you have sufficient CPU, memory, network, and I/O resources to achieve the optimal performance.
  • To prevent performance deterioration caused by an overly large value of query_dop, the system calculates the maximum number of available CPU cores for a DN and uses the number as the upper limit for this parameter. If the value of query_dop is greater than 4 and also the upper limit, the system resets query_dop to the upper limit.
+
+

Default value: 1

+
+
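For example, for a complex analytical query in an otherwise idle session, you might fix the degree of parallelism and check the resulting plan; t1 and c2 are hypothetical:

SET query_dop = 4;                                -- fixed SMP with 4 parallel threads
EXPLAIN SELECT c2, count(*) FROM t1 GROUP BY c2;  -- the plan should reflect the chosen dop
RESET query_dop;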

query_dop_ratio

Parameter description: Specifies the DOP multiple used to adjust the optimal DOP preset in the system when query_dop is set to 0. That is, DOP = Preset DOP x query_dop_ratio (ranging from 1 to 64). If this parameter is set to 1, the DOP cannot be adjusted.

+

Type: USERSET

+

Value range: a floating point number ranging from 0 to 64

+

Default value: 1

+
+

debug_group_dop

Parameter description: Specifies the unified DOP parallelism degree allocated to the groups that use the Stream operator as the vertex in the generated execution plan when the value of query_dop is 0. This parameter is used to manually specify the DOP for specific groups for performance optimization. Its format is G1,D1,G2,D2,..., where G1 and G2 indicate group IDs that can be obtained from logs, and D1 and D2 indicate the specified DOP values, which can be any positive integers.

+

Type: USERSET

+

Value range: a string

+

Default value: empty

+

This parameter is used only for internal optimization and cannot be set. You are advised to use the default value.

+
+
+

enable_analyze_check

Parameter description: Checks whether statistics were collected about tables whose reltuples and relpages are shown as 0 in pg_class during plan generation.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_sonic_hashagg

Parameter description: Specifies whether to use the Hash Agg operator for column-oriented hash table design when certain constraints are met.

+

Type: USERSET

+

Value range: Boolean

+ +
  • If enable_sonic_hashagg is enabled and certain constraints are met, the Hash Agg operator will be used for column-oriented hash table design, and the memory usage of the operator can be reduced. However, in scenarios where the code generation technology (enabled by enable_codegen) can significantly improve performance, the performance of the operator may deteriorate.
  • If enable_sonic_hashagg is set to on, when certain constraints are met, the hash aggregation operator designed for column-oriented hash tables is used and its name is displayed as Sonic Hash Aggregation in the output of the Explain Analyze/Performance operation. When the constraints are not met, the operator name is displayed as Hash Aggregation.
+
+

Default value: on

+
+

enable_sonic_hashjoin

Parameter description: Specifies whether to use the Hash Join operator for column-oriented hash table design when certain constraints are met.

+

Type: USERSET

+

Value range: Boolean

+ +
  • Currently, the parameter can be used only for Inner Join.
  • If enable_sonic_hashjoin is enabled, the memory usage of the Hash Inner operator can be reduced. However, in scenarios where the code generation technology can significantly improve performance, the performance of the operator may deteriorate.
  • If enable_sonic_hashjoin is set to on, when certain constraints are met, the hash join operator designed for column-oriented hash tables is used and its name is displayed as Sonic Hash Join in the output of the Explain Analyze/Performance operation. When the constraints are not met, the operator name is displayed as Hash Join.
+
+

Default value: on

+
+

enable_sonic_optspill

Parameter description: Specifies whether to optimize the number of Hash Join or Hash Agg files written to disks in the sonic scenario. This parameter takes effect only when enable_sonic_hashjoin or enable_sonic_hashagg is enabled.

+

Type: USERSET

+

Value range: Boolean

+ +

For the Hash Join or Hash Agg operator that meets the sonic condition, if this parameter is set to off, one file is written to disks for each column. If this parameter is set to on and the data types of different columns are similar, only one file (a maximum of five files) will be written to disks.

+
+

Default value: on

+
+

expand_hashtable_ratio

Parameter description: Specifies the expansion ratio used to resize the hash table during the execution of the Hash Agg and Hash Join operators.

+

Type: USERSET

+

Value range: a floating point number of 0 or ranging from 0.5 to 10

+
  • Value 0 indicates that the hash table is adaptively expanded based on the current memory size.
  • The value ranging from 0.5 to 10 indicates the multiple used to expand the hash table. Generally, a larger hash table delivers better performance but occupies more memory space. If the memory space is insufficient, data may be spilled to disks in advance, causing performance deterioration.
+
+

Default value: 0

+
+

plan_cache_mode

Parameter description: Specifies the policy for generating an execution plan in the prepare statement.

+

Type: USERSET

+

Value range: enumerated values

+ +
  • This parameter is valid only for the prepare statement. It is used when the parameterized field in the prepare statement has severe data skew.
  • custom plan is a plan generated after you run a prepare statement where parameters in the execute statement is embedded in the prepare statement. The custom plan generates a plan based on specific parameters in the execute statement. This scheme generates a preferred plan based on specific parameters each time and has good execution performance. The disadvantage is that the plan needs to be regenerated before each execution, resulting in a large amount of repeated optimizer overhead.
  • generic plan is a plan generated for the prepare statement. The plan policy binds parameters to the plan when you run the execute statement and execute the plan. The advantage of this solution is that repeated optimizer overheads can be avoided in each execution. The disadvantage is that the plan may not be optimal when data skew occurs for the bound parameter field. When some bound parameters are used, the plan execution performance is poor.
+
+

Default value: auto

+
+
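A minimal prepare/execute sketch of the flow this parameter governs, with a hypothetical table t1 and column c1; whether each execution runs a custom or a generic plan is decided according to plan_cache_mode:

PREPARE q(int) AS SELECT * FROM t1 WHERE c1 = $1;
EXECUTE q(42);   -- plan_cache_mode decides between a custom and a generic plan here
DEALLOCATE q;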

wlm_query_accelerate

Parameter description: Specifies whether the query needs to be accelerated when short query acceleration is enabled.

+

Type: USERSET

+

Value range: an integer ranging from –1 to 1

+ +

Default value: –1

+
+

show_unshippable_warning

Parameter description: Specifies whether to print the alarm for the statement pushdown failure to the client.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0913.html b/docs/dws/dev/dws_04_0913.html new file mode 100644 index 00000000..e970dbd1 --- /dev/null +++ b/docs/dws/dev/dws_04_0913.html @@ -0,0 +1,19 @@ + + +

Error Reporting and Logging

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0914.html b/docs/dws/dev/dws_04_0914.html new file mode 100644 index 00000000..964bc44c --- /dev/null +++ b/docs/dws/dev/dws_04_0914.html @@ -0,0 +1,35 @@ + + +

Logging Destination

+

log_truncate_on_rotation

Parameter description: Specifies the writing mode of the log files when logging_collector is set to on.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+

Example:

+

Assume that you plan to keep logs for 7 days, with one log file generated per day: files generated on Monday are named server_log.Mon, files generated on Tuesday are named server_log.Tue (and so on), and a file generated on the same weekday in a later week overwrites the earlier one. To implement this plan, set log_filename to server_log.%a, log_truncate_on_rotation to on, and log_rotation_age to 1440 (indicating that each log file is valid for 24 hours).

+
+
+

log_rotation_age

Parameter description: Specifies the interval for creating a log file when logging_collector is set to on. If the difference between the current time and the time when the previous log file was created is greater than the value of log_rotation_age, a new log file will be generated.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 24 days. The unit is min, h, or d. 0 indicates that the time-based creation of new log files is disabled.

+

Default value: 1d

+
+

log_rotation_size

Parameter description: Specifies the maximum size of a server log file when logging_collector is set to on. If the total size of messages in a server log file exceeds this capacity, a new log file will be generated.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX/1024. The unit is KB.

+

0 indicates the capacity-based creation of new log files is disabled.

+

Default value: 20 MB

+
+

event_source

Parameter description: Specifies the identifier of the GaussDB(DWS) error messages in logs when log_destination is set to eventlog.

+

Type: POSTMASTER

+

Value range: a string

+

Default value: PostgreSQL

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0915.html b/docs/dws/dev/dws_04_0915.html new file mode 100644 index 00000000..b810258b --- /dev/null +++ b/docs/dws/dev/dws_04_0915.html @@ -0,0 +1,143 @@ + + +

Logging Time

+

client_min_messages

Parameter description: Specifies which level of messages are sent to the client. Each level covers all the levels following it. The lower the level is, the fewer messages are sent.

+

Type: USERSET

+

When client_min_messages and log_min_messages are set to the same value, the values denote different message levels.

+
+

Value range: enumerated values. Valid values: debug5, debug4, debug3, debug2, debug1, info, log, notice, warning, error. For details, see Table 1.

+

Default value: notice

+
+
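For example, to silence notices in an interactive session and restore the default afterwards:

SET client_min_messages = warning;  -- send only warning and higher levels to the client
-- ... run statements that would otherwise emit notices ...
RESET client_min_messages;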

log_min_messages

Parameter description: Specifies which level of messages will be written into server logs. Each level covers all the levels following it. The lower the level is, the fewer messages will be written into the log.

+

Type: SUSET

+

When client_min_messages and log_min_messages are set to the same value, the values denote different message levels.

+
+

Value range: enumerated values. Valid values: debug5, debug4, debug3, debug2, debug1, info, log, notice, warning, error, fatal, panic. For details, see Table 1.

+

Default value: warning

+
+

log_min_error_statement

Parameter description: Specifies which SQL statements that cause error conditions will be recorded in the server log.

+

Type: SUSET

+

Value range: enumerated values. Valid values: debug5, debug4, debug3, debug2, debug1, info, log, notice, warning, error, fatal, panic. For details, see Table 1.

+
  • The default is error, indicating that statements causing errors, log messages, fatal errors, or panics will be logged.
  • panic: This feature is disabled.
+
+

Default value: error

+
+

log_min_duration_statement

Parameter description: Specifies the threshold for logging statement execution durations. A statement whose execution duration is greater than the specified value will be logged.

+

This parameter helps track query statements that need to be optimized. For clients using the extended query protocol, the durations of the Parse, Bind, and Execute steps are logged independently.

+

Type: SUSET

+

If this parameter and log_statement are used at the same time, statements recorded based on the value of log_statement will not be logged again after their execution duration exceeds the value of this parameter. If you are not using syslog, it is recommended that you log the process ID (PID) or session ID using log_line_prefix so that you can link the current statement message to the last logged duration.

+
+

Value range: an integer ranging from -1 to INT_MAX. The unit is millisecond.

+ +

Default value: 30min

+
+
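As a sketch (the SUSET type means the appropriate privilege is required), you could lower the threshold in a session to capture statements slower than five seconds:

SET log_min_duration_statement = 5000;  -- log statements that run longer than 5000 ms
-- per the value range above, -1 disables duration-based logging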

backtrace_min_messages

Parameter description: Prints the function call stack to the server log file if the level of a generated message is greater than or equal to the level specified by this parameter.

+

Type: SUSET

+

This parameter is used for locating customer on-site problems. Because frequent stack printing increases system overhead and affects stability, set this parameter to a level other than fatal or panic only while you are locating such problems.

+
+

Value range: enumerated values

+

Valid values: debug5, debug4, debug3, debug2, debug1, info, log, notice, warning, error, fatal, panic. For details, see Table 1.

+

Default value: panic

+
+

Table 1 explains the message severity levels used in GaussDB(DWS). If logging output is sent to syslog or eventlog, the severity is translated in GaussDB(DWS) as shown in the table.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Message Severity Levels

Severity

+

Description

+

syslog

+

eventlog

+

debug[1-5]

+

Provides detailed debug information.

+

DEBUG

+

INFORMATION

+

log

+

Reports information of interest to administrators, for example, checkpoint activity.

+

INFO

+

INFORMATION

+

info

+

Provides information implicitly requested by the user, for example, output from VACUUM VERBOSE.

+

INFO

+

INFORMATION

+

notice

+

Provides information that might be helpful to users, for example, notice of truncation of long identifiers and index created as part of the primary key.

+

NOTICE

+

INFORMATION

+

warning

+

Provides warnings of likely problems, for example, COMMIT outside a transaction block.

+

NOTICE

+

WARNING

+

error

+

Reports an error that causes a command to terminate.

+

WARNING

+

ERROR

+

fatal

+

Reports the reason that causes a session to terminate.

+

ERR

+

ERROR

+

panic

+

Reports an error that caused all database sessions to terminate.

+

CRIT

+

ERROR

+
+
+

plog_merge_age

Parameter description: Specifies the output interval of performance log data.

+

Type: SUSET

+

The value of this parameter is in milliseconds. You are advised to set it to a multiple of 1000, that is, to a whole number of seconds. The name extension of the performance log files controlled by this parameter is .prf. These log files are stored in the $GAUSSLOG/gs_profile/<node_name> directory, where node_name is the value of pgxc_node_name in the postgres.conf file. You are advised not to use this parameter externally.

+
+

Value range: an integer ranging from 0 to INT_MAX. The unit is millisecond (ms).

+ +

Default value: 3s

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0916.html b/docs/dws/dev/dws_04_0916.html new file mode 100644 index 00000000..c4e0b3d7 --- /dev/null +++ b/docs/dws/dev/dws_04_0916.html @@ -0,0 +1,178 @@ + + +

Logging Content

+

debug_print_parse

Parameter description: Specifies whether to print parsing tree results.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

debug_print_rewritten

Parameter description: Specifies whether to print query rewriting results.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

debug_print_plan

Parameter description: Specifies whether to print the query execution plan.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
  • Debugging information about debug_print_parse, debug_print_rewritten, and debug_print_plan are printed only when the log level is set to log or higher. When these parameters are set to on, their debugging information will be recorded in server logs and will not be sent to client logs. You can change the log level by setting client_min_messages and log_min_messages.
  • Do not invoke the gs_encrypt_aes128 and gs_decrypt_aes128 functions when debug_print_plan is set to on, preventing the risk of sensitive information disclosure. You are advised to filter parameter information of the gs_encrypt_aes128 and gs_decrypt_aes128 functions in the log files generated when debug_print_plan is set to on, and then provide the information to external maintenance engineers for fault locating. After you finish using the logs, delete them as soon as possible.
+
+
+

debug_pretty_print

Parameter description: Controls the indentation of the logs produced by debug_print_parse, debug_print_rewritten, and debug_print_plan. When this parameter is set to on, the output is more readable but much longer than when it is set to off.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

log_checkpoints

Parameter description: Specifies whether the statistics on the checkpoints and restart points are recorded in the server logs. When this parameter is set to on, statistics on checkpoints and restart points are recorded in the log messages, including the number of buffers to be written and the time spent in writing them.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

log_connections

Parameter description: Specifies whether to record connection request information of the client.

+

Type: BACKEND

+
  • Session connection parameter. Users are not advised to configure this parameter.
  • Some client programs, such as gsql, attempt to connect twice while determining if a password is required. In this case, duplicate connection receive messages do not necessarily indicate a problem.
+
+

Value range: Boolean

+ +

Default value: off

+
+

log_disconnections

Parameter description: Specifies whether to record end connection request information of the client.

+

Type: BACKEND

+

Value range: Boolean

+ +

Default value: off

+

Session connection parameter. Users are not advised to configure this parameter.

+
+
+

log_duration

Parameter description: Specifies whether to record the duration of every completed SQL statement. For clients using the extended query protocol, the durations of the parsing, binding, and executing steps are logged independently.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

log_error_verbosity

Parameter description: Specifies the amount of detail written in the server log for each message that is logged.

+

Type: SUSET

+

Value range: enumerated values

+ +

Default value: default

+
+

log_hostname

Parameter description: By default, connection log messages show only the IP address of the connected host. The host name can also be recorded when this parameter is set to on. Resolving the host name may take some time, so database performance may be affected.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

log_lock_waits

Parameter description: Specifies whether to log a message when a session waits longer than deadlock_timeout to acquire a lock. This is useful in determining whether lock waits are causing poor performance.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

log_statement

Parameter description: Specifies whether to record SQL statements. For clients using extended query protocols, logging occurs when an execute message is received, and values of the Bind parameters are included (with any embedded single quotation marks doubled).

+

Type: SUSET

+

Statements that contain simple syntax errors are not logged even if log_statement is set to all, because the log message is emitted only after basic parsing has been completed to determine the statement type. If the extended query protocol is used, this setting also does not log statements before the execution phase (during parse analysis or planning). Set log_min_error_statement to ERROR or lower to log such statements.

+
+

Value range: enumerated values

+ +

Default value: none

+
+

log_temp_files

Parameter description: Specifies whether to log the deletion of temporary files. Temporary files can be created for sorts, hashes, and temporary query results. A log entry is generated for each temporary file when it is deleted.

+

Type: SUSET

+

Value range: an integer ranging from -1 to INT_MAX. The unit is KB.

+ +

Default value: –1

+
+

log_timezone

Parameter description: Specifies the time zone used for time stamps written in the server log. Different from TimeZone, this parameter takes effect for all sessions in the database.

+

Type: SIGHUP

+

Value range: a string

+

Default value: PRC

+

The value can be changed when gs_initdb is used to set system environments.

+
+
+

logging_module

Parameter description: Specifies whether module logs can be output on the server. This parameter is a session-level parameter, and you are not advised to use the gs_guc tool to set it.

+

Type: USERSET

+

Value range: a string

+

Default value: off. All the module logs on the server can be viewed by running show logging_module.

+

Setting method: First, you can run show logging_module to view which module is controllable. For example, the query output result is as follows:

+
show logging_module;
logging_module
--------------------------------------------------------------------------------------------------------------------
 ALL,on(),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,PLANHINT,PARQUET,CARBONDATA,SNAPSHOT,XACT,HANDLE,CLOG,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,INSTR,COMM_IPC,COMM_PARAM)
(1 row)
+
+ +
+

Controllable modules are identified by uppercase letters, and the special ID ALL is used for setting all module logs. You can control module logs to be exported by setting the log modules to on or off. Enable log output for SSL:

+
set logging_module='on(SSL)';
SET
show logging_module;
logging_module
--------------------------------------------------------------------------------------------------------------------
 ALL,on(SSL),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,GDS,TBLSPC,WLM,OBS,EXECUTOR,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_CARD,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,PLANHINT,PARQUET,CARBONDATA,SNAPSHOT,XACT,HANDLE,CLOG,TQUAL,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,INSTR,COMM_IPC,COMM_PARAM,CSTORE)
(1 row)
+
+ +
+

SSL log output is enabled.

+

The ALL identifier is equivalent to a shortcut operation. That is, logs of all modules can be enabled or disabled.

+
set logging_module='off(ALL)';
SET
show logging_module;
logging_module
--------------------------------------------------------------------------------------------------------------------
 ALL,on(),off(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_CARD,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,PLANHINT,PARQUET,CARBONDATA,SNAPSHOT,XACT,HANDLE,CLOG,TQUAL,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,INSTR,COMM_IPC,COMM_PARAM,CSTORE)
(1 row)

set logging_module='on(ALL)';
SET
show logging_module;
logging_module
--------------------------------------------------------------------------------------------------------------------
 ALL,on(DFS,GUC,HDFS,ORC,SLRU,MEM_CTL,AUTOVAC,CACHE,ADIO,SSL,GDS,TBLSPC,WLM,OBS,EXECUTOR,VEC_EXECUTOR,STREAM,LLVM,OPT,OPT_REWRITE,OPT_JOIN,OPT_AGG,OPT_SUBPLAN,OPT_SETOP,OPT_CARD,OPT_SKEW,UDF,COOP_ANALYZE,WLMCP,ACCELERATE,PLANHINT,PARQUET,CARBONDATA,SNAPSHOT,XACT,HANDLE,CLOG,TQUAL,EC,REMOTE,CN_RETRY,PLSQL,TEXTSEARCH,SEQ,INSTR,COMM_IPC,COMM_PARAM,CSTORE),off()
(1 row)
+
+ +
+

Dependency relationship: The value of this parameter depends on the settings of log_min_messages.

+
+

enable_unshipping_log

Parameter description: Specifies whether to log statements that are not pushed down. The logs help locate performance issues that may be caused by statements not pushed down.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0918.html b/docs/dws/dev/dws_04_0918.html new file mode 100644 index 00000000..14b9e5b0 --- /dev/null +++ b/docs/dws/dev/dws_04_0918.html @@ -0,0 +1,27 @@ + + +

Alarm Detection

+

During cluster running, error scenarios are detected in a timely manner so that users can be informed as soon as possible.

+

enable_alarm

Parameter description: Enables the alarm detection thread to detect the fault scenarios that may occur in the database.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+
+

connection_alarm_rate

Parameter description: Specifies the ratio used to restrict the maximum number of concurrent connections to the database. The maximum number of concurrent connections is max_connections x connection_alarm_rate.

+

Type: SIGHUP

+

Value range: a floating point number ranging from 0.0 to 1.0

+

Default value: 0.9

+
+

alarm_report_interval

Parameter description: Specifies the interval at which an alarm is reported.

+

Type: SIGHUP

+

Value range: a non-negative integer. The unit is second.

+

Default value: 10

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0919.html b/docs/dws/dev/dws_04_0919.html new file mode 100644 index 00000000..0f738337 --- /dev/null +++ b/docs/dws/dev/dws_04_0919.html @@ -0,0 +1,17 @@ + + +

Statistics During the Database Running

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0920.html b/docs/dws/dev/dws_04_0920.html new file mode 100644 index 00000000..5c2161f0 --- /dev/null +++ b/docs/dws/dev/dws_04_0920.html @@ -0,0 +1,110 @@ + + +

Query and Index Statistics Collector

+

The query and index statistics collector collects statistics during database running. The statistics include the number of times tables and indexes are inserted into and updated, the number of disk blocks and tuples, and the time required for the last cleanup and analysis of each table. The statistics can be viewed by querying the system view families pg_stats and pg_statistic. The following parameters configure statistics collection in the server scope.

+

track_activities

Parameter description: Collects statistics about the commands that are being executed in each session.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

track_counts

Parameter description: Collects statistics about data activities.

+

Type: SUSET

+

Value range: Boolean

+ +

The autovacuum process requires database statistics when selecting the database to clean up; therefore, the default value is set to on.

+
+

Default value: on

+
+

track_io_timing

Parameter description: Collects statistics about I/O call timing in the database. The I/O timing statistics can be viewed by querying the pg_stat_database view.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

track_functions

Parameter description: Collects statistics about the number of invocations of functions and the time spent in them.

+

Type: SUSET

+

SQL functions that are simple enough to be inlined into the calling query cannot be tracked, regardless of this setting.

+
+

Value range: enumerated values

+ +

Default value: none

+
+

track_activity_query_size

Parameter description: Specifies the number of bytes used to track the command currently being executed in each active session.

+

Type: POSTMASTER

+

Value range: an integer ranging from 100 to 102400

+

Default value: 1024

+
+

update_process_title

Parameter description: Specifies whether to update the process name each time the server receives a new SQL statement.

+

The process name can be viewed by running the ps command or, on Windows, in Task Manager.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

track_thread_wait_status_interval

Parameter description: Specifies the interval at which thread status information is periodically collected.

+

Type: SUSET

+

Value range: an integer ranging from 0 to 1440. The unit is minute (min).

+

Default value: 30min

+
+

enable_save_datachanged_timestamp

Parameter description: Specifies whether to record the time when INSERT, UPDATE, DELETE, or EXCHANGE/TRUNCATE/DROP PARTITION is performed on table data.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

instr_unique_sql_count

Parameter description: Specifies whether to collect Unique SQL statements and the maximum number of unique SQL statements that can be collected.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX

+ +

Default value: 0

+

If a new value is loaded using reload and the new value is less than the original value, the Unique SQL statistics collected by the corresponding CN will be cleared. Note that the clearing operation is performed by the background thread of the resource management module. If the GUC parameter use_workload_manager is set to off, the clearing operation may fail. In this case, you can use the reset_instr_unique_sql function for clearing.

+
+
+

track_sql_count

Parameter description: Specifies whether to collect statistics on the number of the SELECT, INSERT, UPDATE, DELETE, and MERGE INTO statements that are being executed in each session, the response time of the SELECT, INSERT, UPDATE, and DELETE statements, and the number of DDL, DML, and DCL statements.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
  • The track_sql_count parameter is restricted by the track_activities parameter.
    • If track_activities is set to on and track_sql_count is set to off, a warning message indicating that track_sql_count is disabled will be displayed when any of the following views is queried: gs_sql_count, pgxc_sql_count, gs_workload_sql_count, pgxc_workload_sql_count, global_workload_sql_count, gs_workload_sql_elapse_time, pgxc_workload_sql_elapse_time, or global_workload_sql_elapse_time.
    • If both track_activities and track_sql_count are set to off, two logs indicating that track_activities is disabled and track_sql_count is disabled will be displayed when the views are queried.
    • If track_activities is set to off and track_sql_count is set to on, a log indicating that track_activities is disabled will be displayed when the views are queried.
    +
+
  • If this parameter is disabled, querying these views returns 0 (see the example below).
+
+
+
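For example, the collected counts can be inspected through the views listed above (a sketch; the exact column layout is not specified here):
SHOW track_sql_count;
SELECT * FROM gs_sql_count;  -- view name from this section; count columns read 0 if the parameter is disabled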

enable_track_wait_event

Parameter description: Specifies whether to collect statistics on waiting events, including the number of occurrence times, number of failures, duration, maximum waiting time, minimum waiting time, and average waiting time.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
  • The enable_track_wait_event parameter is restricted by track_activities. If track_activities is disabled, enable_track_wait_event does not take effect even if it is set to on.
  • When track_activities or enable_track_wait_event is disabled, if you query the get_instr_wait_event function, gs_wait_events view, or pgxc_wait_events view, a message is displayed indicating that the GUC parameter is disabled and the query result is 0.
  • If track_activities or enable_track_wait_event is disabled during cluster running, GaussDB(DWS) will not collect statistics on waiting events. However, statistics that have been collected are not affected.
+
+
+

enable_wdr_snapshot

Parameter description: Specifies whether to enable the performance view snapshot function. After this function is enabled, GaussDB(DWS) will periodically create snapshots for some system performance views and save them permanently. In addition, it will accept manual snapshot creation requests.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
  • If the create_wdr_snapshot function is executed to manually create a view when the enable_wdr_snapshot parameter is disabled, a message is displayed indicating that the GUC parameter is not enabled.
  • If the enable_wdr_snapshot parameter is modified during the snapshot creation process, the snapshot that is being created is not affected. The modification takes effect when the snapshot is manually or periodically created next time.
+
+
+
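A sketch of a manual snapshot request using the create_wdr_snapshot function mentioned above (the no-argument signature is an assumption):
SELECT create_wdr_snapshot();  -- assumed signature; reports that the GUC parameter is disabled if enable_wdr_snapshot is off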

wdr_snapshot_interval

Parameter description: Specifies the interval for automatically creating performance view snapshots.

+

Type: SIGHUP

+

Value range: an integer ranging from 10 to 180, in minutes

+

Default value: 60

+
  • The value of this parameter must be set in accordance with the cluster load. You are advised to set this parameter to a value greater than the time required for creating a snapshot.
  • If the value of wdr_snapshot_interval is less than the time required for creating a snapshot, the system will skip this snapshot creation because it finds that the previous snapshot creation is not complete when the time for this automatic snapshot creation arrives.
+
+
+

wdr_snapshot_retention_days

Parameter description: Specifies the maximum number of days for storing performance snapshot data.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 15 days

+

Default value: 8

+
  • If enable_wdr_snapshot is enabled, snapshot data that has been stored for wdr_snapshot_retention_days days will be automatically deleted.
  • The value of this parameter must be set in accordance with the available disk space. A larger value requires more disk space.
  • The modification of this parameter does not take effect immediately. The expired snapshot data will be cleared only when a snapshot is automatically created next time.
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0921.html b/docs/dws/dev/dws_04_0921.html new file mode 100644 index 00000000..4e5aff2d --- /dev/null +++ b/docs/dws/dev/dws_04_0921.html @@ -0,0 +1,21 @@ + + +

Performance Statistics

+

While the database is running, it performs lock accesses, disk I/O operations, and invalid-message processing, any of which can become a performance bottleneck. The performance statistics provided by GaussDB(DWS) facilitate locating such performance faults.

+

Generating Performance Statistics Logs

Parameter description: For each query, the following four parameters (log_parser_stats, log_planner_stats, log_executor_stats, and log_statement_stats) control whether the performance statistics of the corresponding modules are recorded in the server log:

+ +

These parameters only provide auxiliary analysis for administrators, similar to the getrusage() facility of the Linux OS.

+

Type: SUSET

+
  • log_statement_stats records the total statement statistics while other parameters only record statistics about each statement.
  • The log_statement_stats parameter cannot be enabled together with other parameters recording statistics about each statement.
+
+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0922.html b/docs/dws/dev/dws_04_0922.html new file mode 100644 index 00000000..4093f85f --- /dev/null +++ b/docs/dws/dev/dws_04_0922.html @@ -0,0 +1,251 @@ + + +

Workload Management

+

If database resource usage is not controlled, concurrent tasks contend for resources; as a result, the OS may be overloaded and respond slowly to user tasks, or even crash and stop providing services. The GaussDB(DWS) workload management function balances the database workload against available resources to avoid database overloading.

+

use_workload_manager

Parameter description: Specifies whether to enable the resource management function. This parameter must be applied on both CNs and DNs.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

enable_control_group

Parameter description: Specifies whether to enable the Cgroup management function. This parameter must be applied on both CNs and DNs.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+

If a method in Setting GUC Parameters is used to change the parameter value, the new value takes effect only for the threads that are started after the change. In addition, the new value does not take effect for new jobs that are executed by backend threads and reused threads. You can make the new value take effect for these threads by using kill session or restarting the node.

+
+
+

enable_backend_control

Parameter description: Specifies whether to bind database permanent threads to the DefaultBackend Cgroup. This parameter must be applied on both CNs and DNs.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+
+

enable_vacuum_control

Parameter description: Specifies whether to bind the permanent autoVacuumWorker thread to the Vacuum Cgroup. This parameter must be applied on both CNs and DNs.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+
+

enable_perm_space

Parameter description: Specifies whether to enable the perm space function. This parameter must be applied on both CNs and DNs.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+
+

enable_verify_active_statements

Parameter description: Specifies whether to enable the background calibration function in static adaptive load scenarios. This parameter must be used on CNs.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

max_active_statements

Parameter description: Specifies the maximum global concurrency. This parameter applies to one CN.

+

The database administrator should set this parameter based on system resources (for example, CPU, I/O, and memory) so that the system fully supports concurrent tasks while avoiding a crash caused by too many of them.

+

Type: SIGHUP

+

Value range: an integer ranging from -1 to INT_MAX. The values -1 and 0 indicate that the number of concurrent requests is not limited.

+

Default value: 60

+
+

parctl_min_cost

Parameter description: Specifies the minimum execution cost of a statement under the concurrency control of a resource pool.

+

Type: SIGHUP

+

Value range: an integer ranging from –1 to INT_MAX

+ +

Default value: 100000

+
+

cgroup_name

Parameter description: Specifies the name of the Cgroup in use. It can be used to change the priorities of jobs in the queue of a Cgroup.

+

If you set cgroup_name and then session_respool, the Cgroups associated with session_respool take effect. If you reverse the order, Cgroups associated with cgroup_name take effect.

+

If the Workload Cgroup level is specified during the cgroup_name change, the database does not check the Cgroup level. The level ranges from 1 to 10.

+

Type: USERSET

+

You are not advised to set cgroup_name and session_respool at the same time.

+

Value range: a string

+

Default value: DefaultClass:Medium

+

DefaultClass:Medium indicates the Medium Cgroup belonging to the Timeshare Cgroup under the DefaultClass Cgroup.

+
+
+

cpu_collect_timer

Parameter description: Specifies how frequently CPU data is collected during statement execution on DNs.

+

The database administrator should set this parameter based on system resources (for example, CPU, I/O, and memory) so that the system fully supports concurrent tasks while avoiding a crash caused by too many of them.

+

Type: SIGHUP

+

Value range: an integer ranging from -1 to INT_MAX. The unit is second.

+

Default value: 30

+
+

enable_cgroup_switch

Parameter description: Specifies whether the database automatically switches to the TopWD group when executing statements by group type.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

memory_tracking_mode

Parameter description: Specifies the memory information recording mode.

+

Type: USERSET

+

Value range:

+ +

Default value: none

+
+

memory_detail_tracking

Parameter description: Specifies the sequence number of the memory allocation information to be tracked in a given thread, together with the plan node ID (plannodeid) of the query to which the current thread belongs.

+

Type: USERSET

+

Value range: a string

+

Default value: empty

+

It is recommended that you retain the default value for this parameter.

+
+
+

enable_resource_track

Parameter description: Specifies whether the real-time resource monitoring function is enabled. This parameter must be applied on both CNs and DNs.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

enable_resource_record

Parameter description: Specifies whether resource monitoring records are archived. If this parameter is set to on, records in the history views (GS_WLM_SESSION_HISTORY and GS_WLM_OPERATOR_HISTORY) are archived to the corresponding info views (GS_WLM_SESSION_INFO and GS_WLM_OPERATOR_INFO) at an interval of 3 minutes. After being archived, the records are deleted from the history views. This parameter must be applied on both CNs and DNs.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

enable_user_metric_persistent

Parameter description: Specifies whether the user historical resource monitoring dumping function is enabled. If this function is enabled, data in view PG_TOTAL_USER_RESOURCE_INFO is periodically sampled and saved to system catalog GS_WLM_USER_RESOURCE_HISTORY.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

user_metric_retention_time

Parameter description: Specifies the retention time of the user historical resource monitoring data. This parameter is valid only when enable_user_metric_persistent is set to on.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 3650. The unit is day.

+ +

Default value: 7

+
+

enable_instance_metric_persistent

Parameter description: Specifies whether the instance resource monitoring dumping function is enabled. When this function is enabled, the instance monitoring data is saved to the system catalog GS_WLM_INSTANCE_HISTORY.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

instance_metric_retention_time

Parameter description: Specifies the retention time of the instance historical resource monitoring data. This parameter is valid only when enable_instance_metric_persistent is set to on.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 3650. The unit is day.

+ +

Default value: 7

+
+

resource_track_level

Parameter description: Specifies the resource monitoring level of the current session. This parameter is valid only when enable_resource_track is set to on.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: query

+
+

resource_track_cost

Parameter description: Specifies the minimum execution cost for resource monitoring on statements in the current session. This parameter is valid only when enable_resource_track is set to on.

+

Type: USERSET

+

Value range: an integer ranging from -1 to INT_MAX

+ +

Default value: 100000

+
+

resource_track_duration

Parameter description: Specifies the minimum statement execution time that determines whether information about jobs of a statement recorded in the real-time view (see Table 1) will be dumped to a historical view after the statement is executed. Job information will be dumped from the real-time view (with the suffix statistics) to a historical view (with the suffix history) if the statement execution time is no less than this value.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX. The unit is second (s).

+ +

Default value: 1min

+
+

dynamic_memory_quota

Parameter description: Specifies the memory quota in adaptive load scenarios, that is, the proportion of maximum available memory to total system memory.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 100

+

Default value: 80

+
+

disable_memory_protect

Parameter description: Stops memory protection. To query system views when system memory is insufficient, set this parameter to on to stop memory protection. This parameter is used only to diagnose and debug the system when system memory is insufficient. Set it to off in other scenarios.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

query_band

Parameter description: Specifies the job type of the current session.

+

Type: USERSET

+

Value range: a string

+

Default value: empty

+
+

enable_bbox_dump

Parameter description: Specifies whether the black box function is enabled. With this function, core files can be generated even if the core dump mechanism is not configured in the system. This parameter must be applied on both CNs and DNs.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

enable_dynamic_workload

Parameter description: Specifies whether to enable the dynamic workload management function.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+
  • If memory adaptation is enabled, you do not need to use work_mem to optimize the operator memory usage after collecting statistics. The system will generate a plan for each statement based on the current load, estimating the memory used by each operator and by the entire statement. In a concurrency scenario, statements are queued based on the system load and their memory usage.
  • The optimizer cannot accurately estimate the number of rows and will probably underestimate or overestimate memory usage. If the memory usage is underestimated, the allocated memory will be automatically increased during statement running. If the memory usage is overestimated, system resources will not be fully used, and the number of statements waiting in a queue will increase, which probably results in low performance. To improve performance, identify the statements whose estimated memory usage is much greater than the DN peak memory and adjust the value of query_max_mem. For details, see Adjusting Key Parameters During SQL Tuning.
+
+
+

bbox_dump_count

Parameter description: Specifies the maximum number of core files that are generated by GaussDB(DWS) and can be stored in the path specified by bbox_dump_path. If the number of core files exceeds this value, old core files will be deleted. This parameter is valid only if enable_bbox_dump is set to on.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 20

+

Default value: 8

+

When core files are generated during concurrent SQL statement execution, the number of files may be larger than the value of bbox_dump_count.

+
+
+

io_limits

Parameter description: Specifies the upper limit of I/O operations per second (IOPS) that can be triggered.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 1073741823

+

Default value: 0

+
+

io_priority

Parameter description: Specifies the I/O priority for jobs that consume many I/O resources. It takes effect when the I/O usage reaches 90%.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: None

+
+

session_respool

Parameter description: Specifies the resource pool associated with the current session.

+

Type: USERSET

+

If you set cgroup_name and then session_respool, the Cgroups associated with session_respool take effect. If you reverse the order, Cgroups associated with cgroup_name take effect.

+

If the Workload Cgroup level is specified during the cgroup_name change, the database does not check the Cgroup level. The level ranges from 1 to 10.

+

You are not advised to set cgroup_name and session_respool at the same time.

+

Value range: a string. It can be set to a resource pool configured through CREATE RESOURCE POOL.

+

Default value: invalid_pool

+
+

enable_transaction_parctl

Parameter description: Specifies whether to control transaction block statements and stored procedure statements.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

session_statistics_memory

Parameter description: Specifies the memory size of a real-time query view.

+

Type: SIGHUP

+

Value range: an integer ranging from 5 MB to 50% of max_process_memory

+

Default value: 5 MB

+
+

session_history_memory

Parameter description: Specifies the memory size of a historical query view.

+

Type: SIGHUP

+

Value range: an integer ranging from 10 MB to 50% of max_process_memory

+

Default value: 100 MB

+
+

topsql_retention_time

Parameter description: Specifies the retention period of historical TopSQL data in the gs_wlm_session_info and gs_wlm_operator_info tables.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 3650. The unit is day.

+ +

Default value: 0

+

Before setting this GUC parameter to enable the data retention function, delete data from the gs_wlm_session_info and gs_wlm_operator_info tables.

+
+
+

transaction_pending_time

Parameter description: Specifies the maximum queuing time of transaction block statements and stored procedure statements when enable_transaction_parctl is set to on.

+

Type: USERSET

+

Value range: an integer ranging from –1 to INT_MAX. The unit is second (s).

+ +

Default value: 0

+

This parameter is valid only for internal statements of stored procedures and transaction blocks. That is, this parameter takes effect only for the statements whose enqueue value (for details, see PG_SESSION_WLMSTAT) is Transaction or StoredProc.

+
+
+

wlm_sql_allow_list

Parameter description: Specifies whitelisted SQL statements for resource management. Whitelisted SQL statements are not monitored by resource management.

+

Type: SIGHUP

+

Value range: a string

+

Default value: empty

+
  • One or more whitelisted SQL statements can be specified in wlm_sql_allow_list. If multiple SQL statements are to be whitelisted, use semicolons (;) to separate them.
  • The system determines whether SQL statements are monitored based on the prefix match. The SQL statements are case insensitive. For example, if wlm_sql_allow_list is set to 'SELECT', all SELECT statements are not monitored by the resource management module.
  • The system identifies spaces at the beginning of the parameter value. For example, 'SELECT' and ' SELECT' have different representations. ' SELECT' filters only the SELECT statements with spaces at the beginning.
  • The system has some whitelisted SQL statements by default, which cannot be modified. You can query the default whitelisted SQL statements and the SQL statements that have been successfully added to the whitelist by GUC through the system view gs_wlm_sql_allow.
  • New SQL statements cannot be appended to the whitelisted SQL statements specified by wlm_sql_allow_list; the value can only be overwritten. To add an SQL statement, query the original GUC value, append the new statement to the end of it after a semicolon (;), and set the GUC value again, as shown in the sketch below.
+
+
+
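A sketch of the query-then-overwrite procedure described above, using the gs_wlm_sql_allow view named in this section:
SELECT * FROM gs_wlm_sql_allow;  -- view the default and user-set whitelisted statements
-- To whitelist another statement, append it to the current user-set value after a semicolon (;)
-- and set the whole string again, for example: wlm_sql_allow_list = 'SELECT;UPDATE'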
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0923.html b/docs/dws/dev/dws_04_0923.html new file mode 100644 index 00000000..9ebd01a0 --- /dev/null +++ b/docs/dws/dev/dws_04_0923.html @@ -0,0 +1,91 @@ + + +

Automatic Cleanup

+

The automatic cleanup process (autovacuum) automatically runs the VACUUM and ANALYZE commands to reclaim the space occupied by records marked as deleted and to update table statistics.

+

autovacuum

Parameter description: Enables the automatic cleanup process (autovacuum) in the database. Ensure that the track_counts parameter is set to on before enabling the automatic cleanup process.

+

Type: SIGHUP

+
  • Set the autovacuum parameter to on if you want to enable the function to automatically clean up two-phase transactions after the system recovers from faults.
  • If autovacuum is set to on and autovacuum_max_workers is 0, the system does not periodically clean up two-phase transactions; it cleans them up only after recovering from faults.
  • If autovacuum is set to on and the value of autovacuum_max_workers is greater than 0, the system will automatically clean up the two-phase transactions and processes after recovering from faults.
+
+

Even if the autovacuum parameter is set to off, the database automatically starts the cleanup process when transaction ID wraparound is about to occur. When a create database or drop database operation fails, the transaction may have been committed or rolled back on some nodes while remaining in the prepared state on others. In this case, the system cannot restore these nodes automatically, and manual restoration is required. The restoration steps are as follows:

+
  1. Use the gs_clean tool (setting the option parameter to -N) to query the xid of the abnormal two-phase transactions and nodes in the prepared state.
  2. Log in to the nodes where the transactions are in the prepared state. As an administrator, connect to an available database (for example, gaussdb) and run the set xc_maintenance_mode = on statement.
  3. Commit or roll back the two-phase transactions based on the global transaction status.
+
+

Value range: Boolean

+ +

Default value: off

+
+

autovacuum_mode

Parameter description: Specifies whether the autoanalyze or autovacuum function is enabled. This parameter is valid only when autovacuum is set to on.

+

Type: SIGHUP

+

Value range: enumerated values

+ +

Default value: mix

+
+

autoanalyze_timeout

Parameter description: Specifies the timeout period of autoanalyze. If the duration of autoanalyze on a table exceeds the value of autoanalyze_timeout, the autoanalyze is automatically canceled.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 2147483. The unit is second.

+

Default value: 5min

+
+

autovacuum_io_limits

Parameter description: Specifies the upper limit of I/Os triggered by the autovacuum process per second.

+

Type: SIGHUP

+

Value range: an integer ranging from –1 to 1073741823. –1 indicates that the default Cgroup is used.

+

Default value: –1

+
+

log_autovacuum_min_duration

Parameter description: Records each step performed by the automatic cleanup process in the server log when its execution time is greater than or equal to the specified value. This parameter helps track automatic cleanup behavior.

+

Type: SIGHUP

+

For example, if log_autovacuum_min_duration is set to 250 ms, information about all automatic cleanup operations that run for 250 ms or longer is recorded.

+

Value range: an integer ranging from –1 to INT_MAX. The unit is ms.

+ +

Default value: –1

+
+

autovacuum_max_workers

Parameter description: Specifies the maximum number of automatic cleanup threads running at the same time.

+

Type: POSTMASTER

+

Value range: an integer ranging from 0 to 262143. 0 indicates that autovacuum is disabled.

+

Default value: 3

+
+

autovacuum_naptime

Parameter description: Specifies the interval between two automatic cleanup operations.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 2147483. The unit is second.

+

Default value: 10min

+
+

autovacuum_vacuum_threshold

Parameter description: Specifies the threshold for triggering the VACUUM operation. When the number of deleted or updated records in a table exceeds the specified threshold, the VACUUM operation is executed on this table.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX

+

Default value: 50

+
+

autovacuum_analyze_threshold

Parameter description: Specifies the threshold for triggering the ANALYZE operation. When the number of deleted, inserted, or updated records in a table exceeds the specified threshold, the ANALYZE operation is executed on this table.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX

+

Default value: 50

+
+

autovacuum_vacuum_scale_factor

Parameter description: Specifies the fraction of the table size that is added to autovacuum_vacuum_threshold when determining whether to trigger a VACUUM operation.

+

Type: SIGHUP

+

Value range: a floating point number ranging from 0.0 to 100.0

+

Default value: 0.2

+
+
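Following the usual PostgreSQL-style formula, a VACUUM is triggered once the number of deleted or updated tuples exceeds autovacuum_vacuum_threshold + autovacuum_vacuum_scale_factor x reltuples. With the defaults (50 and 0.2), a table of 1,000,000 rows is vacuumed after more than 50 + 0.2 x 1,000,000 = 200,050 tuples have changed.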

autovacuum_analyze_scale_factor

Parameter description: Specifies the fraction of the table size that is added to autovacuum_analyze_threshold when determining whether to trigger an ANALYZE operation.

+

Type: SIGHUP

+

Value range: a floating point number ranging from 0.0 to 100.0

+

Default value: 0.1

+
+

autovacuum_freeze_max_age

Parameter description: Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid column can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table.

+

The VACUUM operation also deletes old files under the pg_clog/ subdirectory. Even if the automatic cleanup process is disabled, the system invokes it when necessary to prevent transaction ID wraparound.

+

Type: POSTMASTER

+

Value range: an integer ranging from 100000 to 576460752303423487

+

Default value: 20000000000

+
+

autovacuum_vacuum_cost_delay

Parameter description: Specifies the value of the cost delay used in the autovacuum operation.

+

Type: SIGHUP

+

Value range: an integer ranging from –1 to 100. The unit is ms. -1 indicates that the normal vacuum cost delay is used.

+

Default value: 20ms

+
+

autovacuum_vacuum_cost_limit

Parameter description: Specifies the value of the cost limit used in the autovacuum operation.

+

Type: SIGHUP

+

Value range: an integer ranging from –1 to 10000. -1 indicates that the normal vacuum cost limit is used.

+

Default value: –1

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0924.html b/docs/dws/dev/dws_04_0924.html new file mode 100644 index 00000000..9964242a --- /dev/null +++ b/docs/dws/dev/dws_04_0924.html @@ -0,0 +1,19 @@ + + +

Default Settings of Client Connection

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0925.html b/docs/dws/dev/dws_04_0925.html new file mode 100644 index 00000000..8fad9c5e --- /dev/null +++ b/docs/dws/dev/dws_04_0925.html @@ -0,0 +1,132 @@ + + +

Statement Behavior

+

This section describes related default parameters involved in the execution of SQL statements.

+

search_path

Parameter description: Specifies the order in which schemas are searched when an object is referenced with no schema specified. The value of this parameter consists of one or more schema names. Different schema names are separated by commas (,).

+

Type: USERSET

+ +

Value range: a string

+
  • When this parameter is set to "$user", public, a database can be shared (where no users have private schemas, and all share use of public), and private per-user schemas and combinations of them are supported. Other effects can be obtained by modifying the default search path setting, either globally or per-user.
  • When this parameter is set to a null string (''), the system automatically converts it into a pair of double quotation marks ("").
  • If the content contains double quotation marks, the system considers them as insecure characters and converts each double quotation mark into a pair of double quotation marks.
+
+

Default value: "$user",public

+

$user indicates the name of the schema with the same name as the current session user. If the schema does not exist, $user will be ignored.

+
+
+
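For example (myschema is a sample schema name):
SET search_path TO myschema, public;  -- resolve unqualified names in myschema first, then public
SHOW search_path;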

current_schema

Parameter description: Specifies the current schema.

+

Type: USERSET

+

Value range: a string

+

Default value: "$user",public

+

$user indicates the name of the schema with the same name as the current session user. If the schema does not exist, $user will be ignored.

+
+
+

default_tablespace

Parameter description: Specifies the default tablespace of the created objects (tables and indexes) when a CREATE command does not explicitly specify a tablespace.

+ +

Type: USERSET

+

Value range: a string. An empty string indicates that the default tablespace is used.

+

Default value: empty

+
+

default_storage_nodegroup

Parameter description: Specifies the Node Group where a table is created by default. This parameter takes effect only for ordinary tables.

+ +

Type: USERSET

+

Value range: a string

+

Default value: installation

+
+

default_colversion

Parameter description: Specifies the default storage format version for newly created column-store tables.

+

Type: SIGHUP

+

Value range: enumerated values

+ +

Default value: 2.0

+
+

temp_tablespaces

Parameter description: Specifies the tablespaces in which temporary objects (temporary tables and their indexes) are created when a CREATE command does not explicitly specify a tablespace. Temporary files for purposes such as sorting large data sets are also created in these tablespaces.

+

The value of this parameter is a list of tablespace names. When the list contains more than one name, GaussDB(DWS) chooses a random tablespace from the list each time a temporary object is created; within a transaction, however, successively created temporary objects are placed in successive tablespaces from the list. If the selected element is an empty string, GaussDB(DWS) automatically uses the default tablespace of the current database instead.

+

Type: USERSET

+

Value range: a string. An empty string indicates that all temporary objects are created only in the default tablespace of the current database. For details, see default_tablespace.

+

Default value: empty

+
+

check_function_bodies

Parameter description: Specifies whether to enable validation of the function body string during the execution of CREATE FUNCTION. Verification is occasionally disabled to avoid problems, such as forward references when you restore function definitions from a dump.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

default_transaction_isolation

Parameter description: Specifies the default isolation level of each transaction.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: READ COMMITTED

+
+

default_transaction_read_only

Parameter description: Specifies whether each new transaction is in read-only state.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

default_transaction_deferrable

Parameter description: Specifies the default delaying state of each new transaction. It currently has no effect on read-only transactions or those running at isolation levels lower than serializable.

+

GaussDB(DWS) does not support the serializable isolation level, so this parameter has no effect.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

session_replication_role

Parameter description: Specifies the behavior of replication-related triggers and rules for the current session.

+

Type: USERSET

+

Setting this parameter will discard all the cached execution plans.

+
+

Value range: enumerated values

+ +

Default value: origin

+
+

statement_timeout

Parameter description: If the execution time of a statement (measured from when the server receives the command) exceeds the duration specified by this parameter, the statement reports an error and exits.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 2147483647. The unit is ms.

+

Default value: 0

+
+
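For example, to cancel any statement in the current session that runs longer than five minutes:
SET statement_timeout = 300000;  -- 5 minutes, in milliseconds; 0 restores the no-timeout default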

vacuum_freeze_min_age

Parameter description: Specifies the minimum cutoff age (in transactions) that VACUUM uses to decide whether to replace transaction IDs with FrozenXID while scanning a table.

+

Type: USERSET

+

Value range: an integer from 0 to 576460752303423487.

+

Although you can set this parameter to a value ranging from 0 to 1000000000 anytime, VACUUM will limit the effective value to half the value of autovacuum_freeze_max_age by default.

+
+

Default value: 5000000000

+
+

vacuum_freeze_table_age

Parameter description: Specifies the age (in transactions) at which VACUUM scans the whole table to freeze tuples. VACUUM performs a whole-table scan if the age of the table's pg_class.relfrozenxid column has reached this value.

+

Type: USERSET

+

Value range: an integer from 0 to 576460752303423487.

+

Although users can set this parameter to a value ranging from 0 to 2000000000 anytime, VACUUM will limit the effective value to 95% of autovacuum_freeze_max_age by default. Therefore, a periodic manual VACUUM has a chance to run before an anti-wraparound autovacuum is launched for the table.

+
+

Default value: 15000000000

+
+

bytea_output

Parameter description: Specifies the output format for values of the bytea type.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: hex

+
+
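For example, assuming PostgreSQL-compatible bytea literals:
SET bytea_output = 'hex';
SELECT 'abc'::bytea;  -- \x616263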

xmlbinary

Parameter description: Specifies how binary values are to be encoded in XML.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: base64

+
+

xmloption

Parameter description: Specifies whether DOCUMENT or CONTENT is implicit when converting between XML and string values.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: content

+
+

max_compile_functions

Parameter description: Specifies the maximum number of function compilation results stored on the server. Storing too many functions and compilation results may occupy a large amount of memory. Setting this parameter to a proper value reduces memory usage and improves system performance.

+

Type: POSTMASTER

+

Value range: an integer ranging from 1 to INT_MAX

+

Default value: 1000

+
+

gin_pending_list_limit

Parameter description: Specifies the maximum size of the GIN pending list which is used when fastupdate is enabled. If the list grows larger than this maximum size, it is cleaned up by moving the entries in it to the main GIN data structure in batches. This setting can be overridden for individual GIN indexes by modifying index storage parameters.

+

Type: USERSET

+

Value range: an integer ranging from 64 to INT_MAX. The unit is KB.

+

Default value: 4 MB

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0926.html b/docs/dws/dev/dws_04_0926.html new file mode 100644 index 00000000..1d119bbd --- /dev/null +++ b/docs/dws/dev/dws_04_0926.html @@ -0,0 +1,96 @@ + + +

Zone and Formatting

+

This section describes parameters related to the time format setting.

+

DateStyle

Parameter description: Specifies the display format for date and time values, as well as the rules for interpreting ambiguous date input values.

+

This variable contains two independent components: the output format specifications (ISO, Postgres, SQL, or German) and the input/output order of year/month/day (DMY, MDY, or YMD). The two components can be set separately or together. The keywords Euro and European are synonyms for DMY; the keywords US, NonEuro, and NonEuropean are synonyms for MDY.

+

Type: USERSET

+

Value range: a string

+

Default value: ISO, MDY

+

gs_initdb will initialize this parameter so that its value is the same as that of lc_time.

+
+

Suggestion: The ISO format is recommended. Postgres, SQL, and German use abbreviations for time zones, such as EST, WST, and CST. These abbreviations can be ambiguous. For example, CST can represent Central Standard Time (USA) UT-6:00, Central Standard Time (Australia) UT+9:30, and others. This may lead to incorrect time zone conversion and cause errors.

+
+
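For example:
SET datestyle = 'ISO, YMD';
SELECT date '2024-01-02';  -- interpreted as year 2024, month 01, day 02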

IntervalStyle

Parameter description: Specifies the display format for interval values.

+

Type: USERSET

+

Value range: enumerated values

+ +

The IntervalStyle parameter also affects the interpretation of ambiguous interval input.

+
+

Default value: postgres

+
+

TimeZone

Parameter description: Specifies the time zone for displaying and interpreting time stamps.

+

Type: USERSET

+

Value range: a string. You can obtain it by querying the pg_timezone_names view.

+

Default value: PRC

+

gs_initdb will set a time zone value that is consistent with the system environment.

+
+
+

timezone_abbreviations

Parameter description: Specifies the time zone abbreviations that will be accepted by the server.

+

Type: USERSET

+

Value range: a string. You can view the supported abbreviations by querying the pg_timezone_abbrevs view.

+

Default value: Default

+

Default indicates a collection of abbreviations that works in most of the world. Other collections, such as Australia and India, can also be defined for a particular installation.

+
+
+

extra_float_digits

Parameter description: Specifies the number of digits displayed for floating-point values, including float4, float8, and geometric data types. The parameter value is added to the standard number of digits (FLT_DIG or DBL_DIG as appropriate).

+

Type: USERSET

+

Value range: an integer ranging from –15 to 3

+
  • This parameter can be set to 3 to include partially-significant digits. It is especially useful for dumping float data that needs to be restored exactly.
  • This parameter can also be set to a negative value to suppress unwanted digits.
+
+

Default value: 0

+
+
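For example (the exact digits shown depend on the platform's floating-point formatting):
SET extra_float_digits = 3;
SELECT 0.1::float8;  -- prints extra digits, such as 0.100000000000000006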

client_encoding

Parameter description: Specifies the client-side encoding type (character set).

+

Set this parameter as needed. Keeping the client encoding and server encoding consistent improves efficiency.

+

Type: USERSET

+

Value range: encoding compatible with PostgreSQL. UTF8 indicates that the database encoding is used.

+
  • You can run the locale -a command to check and set the system-supported zone and the corresponding encoding format.
  • By default, gs_initdb will initialize the setting of this parameter based on the current system environment. You can also run the locale command to check the current configuration environment.
  • To use consistent encoding for communication within a cluster, you are advised to retain the default value of client_encoding. Modification to this parameter in the postgresql.conf file (by using the gs_guc tool, for example) does not take effect.
+
+

Default value: UTF8

+

Recommended value: SQL_ASCII or UTF8

+
+

lc_messages

Parameter description: Specifies the language in which messages are displayed.

+

Valid values depend on the current system. On some systems, this locale category does not exist; setting this variable will still work, but will have no effect. In addition, translated messages for the desired language may not exist, in which case English messages are still displayed.

+

Type: SUSET

+

Value range: a string

+
  • You can run the locale -a command to check and set the system-supported zone and the corresponding encoding format.
  • By default, gs_initdb will initialize the setting of this parameter based on the current system environment. You can also run the locale command to check the current configuration environment.
+
+

Default value: C

+
+

lc_monetary

Parameter description: Specifies the display format of monetary values. It affects the output of functions such as to_char. Valid values depend on the current system.

+

Type: USERSET

+

Value range: a string

+
  • You can run the locale -a command to check and set the system-supported zone and the corresponding encoding format.
  • By default, gs_initdb will initialize the setting of this parameter based on the current system environment. You can also run the locale command to check the current configuration environment.
+
+

Default value: C

+
+

lc_numeric

Parameter description: Specifies the display format of numbers. It affects the output of functions such as to_char. Valid values depend on the current system.

+

Type: USERSET

+

Value range: a string

+
  • You can run the locale -a command to check and set the system-supported zone and the corresponding encoding format.
  • By default, gs_initdb will initialize the setting of this parameter based on the current system environment. You can also run the locale command to check the current configuration environment.
+
+

Default value: C

+
+

lc_time

Parameter description: Specifies the display format of time and zones. It affects the output of functions such as to_char. Valid values depend on the current system.

+

Type: USERSET

+

Value range: a string

+
  • You can run the locale -a command to check and set the system-supported zone and the corresponding encoding format.
  • By default, gs_initdb will initialize the setting of this parameter based on the current system environment. You can also run the locale command to check the current configuration environment.
+
+

Default value: C

+
+

default_text_search_config

Parameter description: Specifies the text search configuration.

+

If the specified text search configuration does not exist, an error will be reported. If the specified text search configuration is deleted, set default_text_search_config again. Otherwise, an error will be reported, indicating incorrect configuration.

+ +

Type: USERSET

+

Value range: a string

+

GaussDB(DWS) supports the following two configurations: pg_catalog.english and pg_catalog.simple.

+
+

Default value: pg_catalog.english

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0927.html b/docs/dws/dev/dws_04_0927.html new file mode 100644 index 00000000..8114ff91 --- /dev/null +++ b/docs/dws/dev/dws_04_0927.html @@ -0,0 +1,28 @@ + + +

Other Default Parameters

+

This section describes the default database loading parameters of the database system.

+

dynamic_library_path

Parameter description: Specifies the path for saving the shared database files that are dynamically loaded for data searching. When a dynamically loaded module needs to be opened and the file name specified in the CREATE FUNCTION or LOAD command does not have a directory component, the system will search this path for the required file.

+
The value of dynamic_library_path must be a list of absolute paths separated by colons (:), or by semi-colons (;) on the Windows OS. The special variable $libdir at the beginning of a path is replaced with the module installation directory provided by GaussDB(DWS). Example:
dynamic_library_path = '/usr/local/lib/postgresql:/opt/testgs/lib:$libdir'
+
+ +
+
+

Type: SUSET

+

Value range: a string

+

If the value of this parameter is set to an empty character string, the automatic path search is turned off.

+
+

Default value: $libdir

+
+

gin_fuzzy_search_limit

Parameter description: Specifies the upper limit of the size of the set returned by GIN indexes.

+

Type: USERSET

+

Value range: an integer ranging from 0 to INT_MAX. The value 0 indicates no limit.

+

Default value: 0

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0928.html b/docs/dws/dev/dws_04_0928.html new file mode 100644 index 00000000..7f909863 --- /dev/null +++ b/docs/dws/dev/dws_04_0928.html @@ -0,0 +1,51 @@ + + +

Lock Management

+

In GaussDB(DWS), a deadlock may occur when concurrently executed transactions compete for resources. This section describes parameters used for managing transaction lock mechanisms.

+

deadlock_timeout

Parameter description: Specifies the time, in milliseconds, to wait on a lock before checking whether there is a deadlock condition. When the wait time on a lock exceeds this value, the system checks whether a deadlock has occurred.

+ +

Type: SUSET

+

Value range: an integer ranging from 1 to 2147483647. The unit is millisecond (ms).

+

Default value: 1s

+
+

lockwait_timeout

Parameter description: Specifies the longest time to wait before a single lock times out. If the time you wait before acquiring a lock exceeds the specified time, an error is reported.

+

Type: SUSET

+

Value range: an integer ranging from 0 to INT_MAX. The unit is millisecond (ms).

+

Default value: 20 min

+
+
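For example, to fail fast instead of waiting the default 20 minutes (this is a SUSET parameter, so administrator privileges are assumed):
SET lockwait_timeout = 60000;  -- report an error if a lock is not acquired within 60 seconds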

update_lockwait_timeout

Parameter description: Specifies the maximum duration that a lock waits for concurrent updates on a row to complete when the concurrent update feature is enabled. If the wait time before acquiring the lock exceeds this value, an error is reported.

+

Type: SUSET

+

Value range: an integer ranging from 0 to INT_MAX. The unit is millisecond (ms).

+

Default value: 2min

+
+

max_locks_per_transaction

Parameter description: Controls the average number of object locks allocated for each transaction.

+ +

Type: POSTMASTER

+

Value range: an integer ranging from 10 to INT_MAX

+

Default value: 256

+
+

max_pred_locks_per_transaction

Parameter description: Controls the average number of predicated locks allocated for each transaction.

+ +

Type: POSTMASTER

+

Value range: an integer ranging from 10 to INT_MAX

+

Default value: 64

+
+

partition_lock_upgrade_timeout

Parameter description: Specifies the time to wait before the attempt of a lock upgrade from ExclusiveLock to AccessExclusiveLock times out on partitions.

+ +

Type: USERSET

+

Value range: an integer ranging from -1 to 3000. The unit is second (s).

+

Default value: 1800

+
+

enable_online_ddl_waitlock

Parameter description: Specifies whether to block DDL operations to wait for the release of cluster locks, such as pg_advisory_lock and pgxc_lock_for_backup. This parameter is mainly used in online OM operations and you are not advised to modify the settings.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0929.html b/docs/dws/dev/dws_04_0929.html new file mode 100644 index 00000000..3c668e60 --- /dev/null +++ b/docs/dws/dev/dws_04_0929.html @@ -0,0 +1,18 @@ + + +

Version and Platform Compatibility

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0930.html b/docs/dws/dev/dws_04_0930.html new file mode 100644 index 00000000..75e4aecc --- /dev/null +++ b/docs/dws/dev/dws_04_0930.html @@ -0,0 +1,75 @@ + + +

Compatibility with Earlier Versions

+

This section describes parameters that control the backward compatibility and external compatibility features of GaussDB(DWS). Backward compatibility allows applications built for earlier database versions to continue working.

+

array_nulls

Parameter description: Determines whether the array input parser recognizes unquoted NULL as a null array element.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

backslash_quote

Parameter description: Determines whether a single quotation mark can be represented by \' in a string text.

+

Type: USERSET

+

When the string text meets the SQL standards, \ has no other meanings. This parameter only affects the handling of non-standard-conforming string texts, including escape string syntax (E'...').

+
+

Value range: enumerated values

+ +

Default value: safe_encoding

+
+

default_with_oids

Parameter description: Determines whether CREATE TABLE and CREATE TABLE AS include an OID field in newly-created tables if neither WITH OIDS nor WITHOUT OIDS is specified. It also determines whether OIDs will be included in tables created by SELECT INTO.

+

It is not recommended that OIDs be used in user tables. Therefore, this parameter is set to off by default. When OIDs are required for a particular table, WITH OIDS needs to be specified during the table creation.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

escape_string_warning

Parameter description: Specifies whether to issue a warning when a backslash (\) is used directly as an escape in an ordinary string literal.

+ +

Type: USERSET

+

Value range: Boolean

+

Default value: on

+
+

lo_compat_privileges

Parameter description: Determines whether to enable backward compatibility for the privilege check of large objects.

+

Type: SUSET

+

Value range: Boolean

+

on indicates that the privilege check is disabled when users read or modify large objects. This setting is compatible with versions earlier than PostgreSQL 9.0.

+

Default value: off

+
+

quote_all_identifiers

Parameter description: When the database generates SQL, this parameter forcibly quotes all identifiers even if they are not keywords. This will affect the output of EXPLAIN as well as the results of functions, such as pg_get_viewdef. For details, see the --quote-all-identifiers parameter of gs_dump.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

sql_inheritance

Parameter description: Determines whether inheritance semantics are used when accessing tables.

+

Type: USERSET

+

Value range: Boolean

+

off indicates that child tables cannot be accessed by various commands. That is, an ONLY keyword is used by default. This setting is compatible with versions earlier than PostgreSQL 7.1.

+

Default value: on

+
+

standard_conforming_strings

Parameter description: Determines whether ordinary string texts ('...') treat backslashes as ordinary texts as specified in the SQL standard.

+ +

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

synchronize_seqscans

Parameter description: Allows concurrent sequential scans of the same table to synchronize with each other, so that they read the same data block at about the same time and share the I/O workload.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_beta_features

Parameter description: Controls whether certain limited features, such as GDS table join, are available. These features are not explicitly prohibited in earlier versions, but are not recommended due to their limitations in certain scenarios.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0931.html b/docs/dws/dev/dws_04_0931.html new file mode 100644 index 00000000..77a85093 --- /dev/null +++ b/docs/dws/dev/dws_04_0931.html @@ -0,0 +1,28 @@ + + +

Platform and Client Compatibility

+

The database system is used on many platforms, and its external compatibility features provide considerable convenience for them.

+

transform_null_equals

Parameter description: Determines whether expressions of the form expr = NULL (or NULL = expr) are treated as expr IS NULL. They return true if expr evaluates to NULL, and false otherwise.

+ +

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+

New users are always confused about the semantics of expressions involving NULL values. Therefore, off is used as the default value.

+
+
+
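A sketch of the effect (t and col are hypothetical names):
SET transform_null_equals = on;
SELECT * FROM t WHERE col = NULL;  -- hypothetical table/column; the condition is now treated as col IS NULL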

td_compatible_truncation

Parameter description: Determines whether to enable features compatible with a Teradata database. You can set this parameter to on when connecting to a database compatible with the Teradata database, so that when you perform the INSERT operation, overlong strings are truncated based on the allowed maximum length before being inserted into char- and varchar-type columns in the target table. This ensures all data is inserted into the target table without errors reported.

+
  • The string truncation function cannot be used if the INSERT statement includes a foreign table.
  • If multi-byte character data (such as Chinese characters) is inserted into a database whose character set uses single-byte encoding (SQL_ASCII, LATIN1) and a character crosses the truncation position, the string is truncated by bytes rather than by characters, which may leave unexpected content at the end of the truncated string. To obtain correct truncation results, you are advised to use an encoding such as UTF8, in which no character data crosses the truncation position.
+
+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+
+
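A sketch of the behavior, assuming a TD-compatible database and a hypothetical table t1 with a char(3) column:
SET td_compatible_truncation = on;
INSERT INTO t1 VALUES ('abcdef');  -- inserted as 'abc' instead of reporting an error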
+ +
+ diff --git a/docs/dws/dev/dws_04_0932.html b/docs/dws/dev/dws_04_0932.html new file mode 100644 index 00000000..9a6858c0 --- /dev/null +++ b/docs/dws/dev/dws_04_0932.html @@ -0,0 +1,49 @@ + + +

Fault Tolerance

+

This section describes parameters that control how the server handles errors occurring in the database system.

+

exit_on_error

Parameter description: Specifies whether to terminate the current session when an error occurs.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

omit_encoding_error

Parameter description: If this parameter is set to on and the client character set of the database is encoded in UTF-8, character encoding conversion errors are recorded in logs, and characters that fail conversion are ignored and replaced with question marks (?).

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

max_query_retry_times

Parameter description: Specifies the maximum number of automatic retry times when an SQL statement error occurs. Currently, a statement can start retrying if the following errors occur: Connection reset by peer, Lock wait timeout, and Connection timed out. If this parameter is set to 0, the retry function is disabled.

+

Type: USERSET

+

Value range: an integer ranging from 0 to 20

+

Default value: 6

+
+

cn_send_buffer_size

Parameter description: Specifies the size of the data buffer used for data transmission on the CN.

+

Type: POSTMASTER

+

Value range: an integer ranging from 8 to 128. The unit is KB.

+

Default value: 8 KB

+
+

max_cn_temp_file_size

Parameter description: Specifies the maximum amount of temporary file space that the CN can use during automatic SQL statement retries. The value 0 indicates that no temporary files can be used.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 10485760. The unit is KB.

+

Default value: 5 GB

+
+

retry_ecode_list

Parameter description: Specifies the list of SQL error types that support automatic retry.

+

Type: USERSET

+

Value range: a string

+

Default value: YY001 YY002 YY003 YY004 YY005 YY006 YY007 YY008 YY009 YY010 YY011 YY012 YY013 YY014 YY015 53200 08006 08000 57P01 XX003 XX009 YY016 CG003 CG004 F0011

+
+

data_sync_retry

Parameter description: Specifies whether to keep the database running when updated data fails to be written to disk by the fsync function. In some OSs, no error is reported even after fsync has failed multiple times, which can result in data loss.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0933.html b/docs/dws/dev/dws_04_0933.html new file mode 100644 index 00000000..f862815f --- /dev/null +++ b/docs/dws/dev/dws_04_0933.html @@ -0,0 +1,59 @@ + + +

Connection Pool Parameters

+

When a connection pool is used to access the database, database connections are established and then stored in memory as objects while the system is running. When you need to access the database, no new connection is established; instead, an existing idle connection is selected from the connection pool. After you finish accessing the database, the connection is not closed but is returned to the connection pool, where it can serve the next access request.

+

min_pool_size

Parameter description: Specifies the minimum number of connections between a CN's connection pool and another CN/DN.

+

Type: POSTMASTER

+

Value range: an integer ranging from 1 to 65535

+

Default value: 1

+
+

max_pool_size

Parameter description: Specifies the maximum number of connections between a CN's connection pool and another CN/DN.

+

Type: POSTMASTER

+

Value range: an integer ranging from 1 to 65535

+

Default value: 800

+
+
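min_pool_size and max_pool_size are POSTMASTER parameters and cannot be changed with SET; their current values can be inspected in a session, for example:

SELECT name, setting FROM pg_settings WHERE name IN ('min_pool_size', 'max_pool_size');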

persistent_datanode_connections

Parameter description: Specifies whether to release the connection for the current session.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

max_coordinators

Parameter description: Specifies the maximum number of CNs in a cluster.

+

Type: POSTMASTER

+

Value range: an integer ranging from 2 to 40

+

Default value: 40

+
+

max_datanodes

Parameter description: Specifies the maximum number of DNs in a cluster.

+

Type: POSTMASTER

+

Value range: an integer ranging from 2 to 65535

+

Default value: 4096

+
+

cache_connection

Parameter description: Specifies whether to reclaim the connections of a connection pool.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

enable_force_reuse_connections

Parameter description: Specifies whether a session forcibly reuses a new connection.

+

Type: BACKEND

+

Value range: Boolean

+ +

Default value: off

+

Session connection parameter. Users are not advised to configure this parameter.

+
+
+

enable_pooler_parallel

Parameter description: Specifies whether a CN's connection pool can be connected in parallel mode.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0934.html b/docs/dws/dev/dws_04_0934.html new file mode 100644 index 00000000..b6a88d03 --- /dev/null +++ b/docs/dws/dev/dws_04_0934.html @@ -0,0 +1,80 @@ + + +

Cluster Transaction Parameters

+

This section describes the settings and value ranges of cluster transaction parameters.

+

transaction_isolation

Parameter description: Specifies the isolation level of the current transaction.

+

Type: USERSET

+

Value range:

+ +

Default value: READ COMMITTED

+
+
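The isolation level can also be set per transaction using standard syntax, for example:

START TRANSACTION ISOLATION LEVEL READ COMMITTED;
SHOW transaction_isolation;
COMMIT;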

transaction_read_only

Parameter description: Specifies whether the current transaction is a read-only transaction.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off for CNs and on for DNs

+
+

xc_maintenance_mode

Parameter description: Specifies whether the system is in maintenance mode.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+

Enable the maintenance mode with caution to avoid cluster data inconsistencies.

+
+
+

allow_concurrent_tuple_update

Parameter description: Specifies whether to allow concurrent update.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

gtm_backup_barrier

Parameter description: Specifies whether to create a restoration point for the GTM starting point.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

gtm_conn_check_interval

Parameter description: Specifies the interval at which a CN checks whether the connection between the local thread and the primary GTM is normal.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX/1000. The unit is second.

+

Default value: 10s

+
+

transaction_deferrable

Parameter description: Specifies whether to delay the execution of a read-only serial transaction so that it does not fail. Assume this parameter is set to on. When the server detects that the tuples read by a read-only transaction are being modified by other transactions, it delays the execution of the read-only transaction until those transactions finish modifying the tuples. Currently, this parameter is not used in GaussDB(DWS). The similar default_transaction_deferrable parameter specifies whether to allow delayed execution of a transaction.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enforce_two_phase_commit

Parameter description: This parameter is reserved for compatibility with earlier versions. This parameter is invalid in the current version.

+
+

enable_show_any_tuples

Parameter description: This parameter is available only in a read-only transaction and is used for analysis. When this parameter is set to on/true, all versions of tuples in the table are displayed.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

gtm_connect_retries

Parameter description: Specifies the number of GTM reconnection attempts.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 2147483647.

+

Default value: 30

+
+

enable_redistribute

Parameter description: Specifies whether unmatched nodes are redistributed.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_gtm_free

Parameter description: Specifies whether the GTM-FREE mode is enabled. In high-concurrency scenarios, the snapshots delivered by the GTM increase in both number and size, and the network between the GTM and the CN becomes a performance bottleneck. The GTM-FREE mode eliminates this bottleneck: the CN communicates with DNs instead of the GTM, sending queries to each DN, which locally generates snapshots and xids. This ensures external write consistency but not external read consistency.

+

You are not advised to set this parameter to on in OLTP or OLAP scenarios where strong read consistency is required. This parameter is invalid for GaussDB(DWS).

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0936.html b/docs/dws/dev/dws_04_0936.html new file mode 100644 index 00000000..3de261d3 --- /dev/null +++ b/docs/dws/dev/dws_04_0936.html @@ -0,0 +1,467 @@ + + +

Developer Operations

+

enable_light_colupdate

Parameter description: Specifies whether to enable the lightweight column-store update.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_fast_query_shipping

Parameter description: Specifies whether the query planner uses the fast query shipping (FQS) distributed framework.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+
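To see the effect of this parameter, you can compare the plans generated with it on and off (the query and table are illustrative):

SET enable_fast_query_shipping = off;
EXPLAIN SELECT count(*) FROM t1;   -- planned through the general distributed framework
SET enable_fast_query_shipping = on;
EXPLAIN SELECT count(*) FROM t1;   -- eligible queries are shipped directly to DNs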

enable_trigger_shipping

Parameter description: Specifies whether the trigger can be pushed to DNs for execution.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_remotejoin

Parameter description: Specifies whether JOIN operation plans can be delivered to DNs for execution.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_remotegroup

Parameter description: Specifies whether the execution plans of GROUP BY and AGGREGATE can be delivered to DNs for execution.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_remotelimit

Parameter description: Specifies whether the execution plan of the LIMIT clause can be pushed down to DNs for execution.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_remotesort

Parameter description: Specifies whether the execution plan of the ORDER BY clause can be delivered to DNs for execution.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_join_pseudoconst

Parameter description: Specifies whether joins on pseudo constants are allowed. A pseudo constant join is one in which the variables on both sides of the join condition are each equal to the same constant.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

cost_model_version

Parameter description: Specifies the model used for cost estimation in the application scenario. This parameter affects the distinct estimation of the expression, HashJoin cost model, estimation of the number of rows, distribution key selection during redistribution, and estimation of the number of aggregate rows.

+

Type: USERSET

+

Value range: 0, 1, or 2

+ +

Default value: 1

+
+

debug_assertions

Parameter description: Specifies whether to enable various assertion checks. This parameter assists in debugging. If you are experiencing strange problems or crashes, set this parameter to on to identify programming defects. To use this parameter, the macro USE_ASSERT_CHECKING must be defined (through the configure option --enable-cassert) during the GaussDB(DWS) compilation.

+

Type: USERSET

+

Value range: Boolean

+ +

This parameter is set to on by default if GaussDB(DWS) is compiled with various assertion checks enabled.

+
+

Default value: off

+
+

distribute_test_param

Parameter description: Specifies whether the embedded test stubs for testing the distribution framework take effect. In most cases, developers embed test stubs in the code during fault injection tests, and each test stub is identified by a unique name. The value of this parameter is a triplet of three comma-separated values: the thread level, the test stub name, and the error level of the injected fault.

+

Type: USERSET

+

Value range: a string indicating the name of any embedded test stub.

+

Default value: -1, default, default

+
+

ignore_checksum_failure

Parameter description: Specifies whether to ignore checksum verification failures (an alarm is still generated) and continue reading data. This parameter is valid only when enable_crc_check is set to on. Continuing to read data may lead to breakdowns, transferred or hidden data damage, failure of data recovery from remote nodes, or other serious problems. You are advised not to modify this setting.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_colstore

Parameter description: Specifies whether to create a table as a column-store table by default when no storage method is specified. The value for each node must be the same. This parameter is used for tests. Users are not allowed to enable it.

+

Type: SUSET

+

Value range: Boolean

+

Default value: off

+
+

enable_force_vector_engine

Parameter description: Specifies whether to forcibly generate vectorized execution plans for a vectorized execution operator whose child node is a non-vectorized operator. When this parameter is set to on, regardless of whether a table is row-store, column-store, or hybrid row-column store, the vectorized executor is forcibly used as long as the plan tree contains no scenarios that do not support vectorization.

+

Type: USERSET

+

Value range: Boolean

+

Default value: off

+
+

enable_csqual_pushdown

Parameter description: Specifies whether to deliver filter criteria for a rough check during query.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

explain_dna_file

Parameter description: Specifies the name of a CSV file exported when explain_perf_mode is set to run.

+

Type: USERSET

+

The value of this parameter must be an absolute path plus a file name with the extension .csv.

+
+

Value range: a string

+

Default value: NULL

+
+

explain_perf_mode

Parameter description: Specifies the display format of the explain command.

+

Type: USERSET

+

Value range: normal, pretty, summary, and run

+ +

Default value: pretty

+
+
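For example, to switch the display format for the current session:

SET explain_perf_mode = normal;   -- classic one-line-per-operator output
EXPLAIN SELECT 1;
SET explain_perf_mode = pretty;   -- formatted plan output (the default)
EXPLAIN SELECT 1;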

join_num_distinct

Parameter description: Controls the default distinct value of the join column or expression in application scenarios.

+

Type: USERSET

+

Value range: a double-precision floating point number greater than or equal to -100. Decimals may be truncated when displayed on clients.

+ +

Default value: -20

+
+

qual_num_distinct

Parameter description: Controls the default distinct value of the filter column or expression in application scenarios.

+

Type: USERSET

+

Value range: a double-precision floating point number greater than or equal to -100. Decimals may be truncated when displayed on clients.

+ +

Default value: 200

+
+

trace_notify

Parameter description: Specifies whether to generate a large amount of debugging output for the LISTEN and NOTIFY commands. client_min_messages or log_min_messages must be DEBUG1 or lower so that such output can be recorded in the logs on the client or server separately.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

trace_recovery_messages

Parameter description: Specifies whether to enable logging of recovery-related debugging output. This parameter allows users to override the normal setting of log_min_messages, but only for specific messages. It is intended for use in debugging the standby server.

+

Type: SIGHUP

+

Value range: enumerated values. Valid values include debug5, debug4, debug3, debug2, debug1, and log. For details about the parameter values, see log_min_messages.

+

Default value: log

+
  • log indicates that recovery-related debugging information will not be logged.
  • Except the default value log, each of the other values indicates that recovery-related debugging information at the specified level will also be logged. Common settings of log_min_messages will unconditionally record information into server logs.
+
+
+

trace_sort

Parameter description: Specifies whether to display information about resource usage during sorting operations in logs. This parameter is available only when the macro TRACE_SORT is defined during the GaussDB(DWS) compilation. However, TRACE_SORT is currently defined by default.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

zero_damaged_pages

Parameter description: Controls how GaussDB(DWS) handles a damaged page header. Normally, a damaged page header causes GaussDB(DWS) to report an error and abort the current transaction.

+

Type: SUSET

+

Value range: Boolean

+ +
  • Setting this parameter to on causes the system to report a warning, pad the damaged page with zeros, and then continue with subsequent processing. This behavior will damage data, that is, all rows on the damaged page. However, it allows you to bypass the error and retrieve rows from any undamaged pages that are present in the table. Therefore, it is useful for restoring data that is damaged due to a hardware or software error. In most cases, you are not advised to set this parameter to on unless you do not want to restore data from the damaged pages of a table.
  • For a column-store table, the system will skip the entire CU and then continue processing. The supported scenarios include the CRC check failure, magic check failure, and incorrect CU length.
+
+

Default value: off

+
+

string_hash_compatible

Parameter description: Specifies whether to use the same method to calculate char-type hash values and varchar- or text-type hash values. Based on the setting of this parameter, you can determine whether a redistribution is required when a distribution column is converted from a char-type data distribution into a varchar- or text-type data distribution.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

The calculation methods differ in the length of the input string used for calculating hash values. (For a char-type hash value, trailing spaces are not counted in the length; for a text- or varchar-type hash value, they are.) The hash value affects the calculation results of queries. To avoid query errors, do not modify this parameter during database running once it is set.

+
+

Default value: off

+
+

replication_test

Parameter description: Specifies whether to enable internal testing on the data replication function.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

cost_param

Parameter description: Controls the use of different estimation methods in specific customer scenarios so that estimated values are closer to actual values. This parameter can control multiple methods simultaneously: each method corresponds to one bit, and a method is selected if its bit is not 0 after an AND (&) operation with the parameter value.

+

If cost_param & 1 is not 0, an improved mechanism is used for calculating the selection rate of non-equi joins, which gives more accurate estimates for self-joins (joins between two identical tables). In V300R002C00 and later, cost_param & 1=0 is not used; that is, the optimized formula is always selected.

+

When cost_param & 2 is not 0, the selection rate is estimated based on multiple filter criteria: the lowest selection rate among all filter criteria, rather than the product of the selection rates under the individual filter criteria, is used as the total selection rate. This method is more accurate when the filtered columns are closely correlated.

+

When cost_param & 4 is not 0, the selected debugging model is not recommended when the stream node is evaluated.

+

When cost_param & 16 is not 0, a model between the fully correlated and fully uncorrelated models is used to calculate the comprehensive selection rate of two or more filter or join conditions. If there are many filter conditions, the strongly correlated model is preferred.

+

Type: USERSET

+

Value range: an integer ranging from 1 to INT_MAX

+

Default value: 16

+
+
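Bits are combined by adding their values. For example, to enable the methods controlled by bit 2 and bit 16 together:

SET cost_param = 18;   -- 18 = 2 + 16
SHOW cost_param;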

convert_string_to_digit

Parameter description: Specifies the implicit conversion priority, which determines whether to preferentially convert strings into numbers.

+

In MySQL-compatible mode, this parameter has no impact.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+

Modify this parameter only when absolutely necessary because the modification will change the rule for converting internal data types and may cause unexpected results.

+
+
+

nls_timestamp_format

Parameter description: Specifies the default timestamp format.

+

Type: USERSET

+

Value range: a string

+

Default value: DD-Mon-YYYY HH:MI:SS.FF AM

+
+
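A session-level sketch, assuming the single-argument form of to_timestamp() uses this format, as in ORA-compatible mode:

SET nls_timestamp_format = 'YYYY-MM-DD HH24:MI:SS';
SELECT to_timestamp('2024-01-02 10:30:00');   -- parsed using nls_timestamp_format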

enable_partitionwise

Parameter description: Specifies whether to select an intelligent algorithm for joining partitioned tables.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_partition_dynamic_pruning

Parameter description: Specifies whether dynamic pruning is enabled during partition table scanning.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

max_user_defined_exception

Parameter description: Specifies the maximum number of exceptions. The default value cannot be changed.

+

Type: USERSET

+

Value range: an integer

+

Default value: 1000

+
+

datanode_strong_sync

Parameter description: This parameter no longer takes effect.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

enable_debug_vacuum

Parameter description: Specifies whether to allow output of some VACUUM-related logs for problem locating. This parameter is used only by developers. Common users are advised not to use it.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

enable_global_stats

Parameter description: Specifies the current statistics mode. This parameter is used to compare global statistics generation plans and the statistics generation plans for a single DN. This parameter is used for tests. Users are not allowed to enable it.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_fast_numeric

Parameter description: Specifies whether to enable optimization for numeric data calculation. Calculation of numeric data is time-consuming. Numeric data is converted into int64- or int128-type data to improve numeric data calculation performance.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_row_fast_numeric

Parameter description: Specifies the format in which numeric data in a row-store table is spilled to disks.

+

Type: USERSET

+

Value range: Boolean

+ +

If this parameter is set to on, you are advised to enable enable_force_vector_engine to improve the query performance of large data sets. However, compared with the original format, there is a high probability that the bigint format occupies more disk space. For example, the TPC-H test set occupies about 7% more space (reference value, may vary depending on the environment).

+
+

Default value: off

+
+

rewrite_rule

Parameter description: Specifies the rewriting rule for enabled optional queries. Some query rewriting rules are optional. Enabling them cannot always improve query efficiency. In a specific customer scenario, you can set the query rewriting rules through the GUC parameter to achieve optimal query efficiency.

+

This parameter can control the combination of query rewriting rules, for example, there are multiple rewriting rules: rule1, rule2, rule3, and rule4. To set the parameters, you can perform the following operations:

+
set rewrite_rule=rule1;          --Enable query rewriting rule rule1.
+set rewrite_rule=rule2,rule3;    --Enable query rewriting rules rule2 and rule3.
+set rewrite_rule=none;           --Disable all optional query rewriting rules.
+

Type: USERSET

+

Value range: a string

+ +

Default value: magicset

+
+

enable_compress_spill

Parameter description: Specifies whether to enable the compression function of writing data to a disk.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

analysis_options

Parameter description: Specifies which diagnostic (location) function options are enabled, such as data verification and performance statistics. For details, see the options in the value range.

+

Type: USERSET

+

Value range: a string

+ +

Default value: off(ALL), which indicates that no location function is enabled.

+
+

resource_track_log

Parameter description: Specifies the log level of self-diagnosis. Currently, this parameter takes effect only in multi-column statistics.

+

Type: USERSET

+

Value range: a string

+ +

Currently, the two parameter values differ only when there is an alarm about multi-column statistics not collected. If the parameter is set to summary, such an alarm will not be displayed. If it is set to detail, such an alarm will be displayed.

+

Default value: summary

+
+

hll_default_log2m

Parameter description: Specifies the number of buckets for HLL data. The number of buckets affects the precision of distinct values calculated by HLL: the more buckets there are, the smaller the deviation is. The deviation range is [-1.04/2^(log2m/2), +1.04/2^(log2m/2)].

+

Type: USERSET

+

Value range: an integer ranging from 10 to 16

+

Default value: 11

+
+
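A sketch of how log2m influences an HLL-based distinct count (hll_hash_integer, hll_add_agg, and hll_cardinality are the standard HLL functions):

SET hll_default_log2m = 12;   -- more buckets: smaller deviation, more memory
SELECT hll_cardinality(hll_add_agg(hll_hash_integer(i)))
FROM generate_series(1, 100000) AS t(i);   -- approximates a distinct count of 100000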

hll_default_regwidth

Parameter description: Specifies the number of bits in each bucket for HLL data. A larger value indicates more memory occupied by HLL. hll_default_regwidth and hll_default_log2m determine the maximum number of distinct values that can be calculated by HLL. For details, see Table 1.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 5

+

Default value: 5

+ +
Table 1 Maximum number of calculated distinct values determined by hll_default_log2m and hll_default_regwidth

log2m | regwidth = 1 | regwidth = 2 | regwidth = 3 | regwidth = 4 | regwidth = 5
------|--------------|--------------|--------------|--------------|-------------
10    | 7.4e+02      | 3.0e+03      | 4.7e+04      | 1.2e+07      | 7.9e+11
11    | 1.5e+03      | 5.9e+03      | 9.5e+04      | 2.4e+07      | 1.6e+12
12    | 3.0e+03      | 1.2e+04      | 1.9e+05      | 4.8e+07      | 3.2e+12
13    | 5.9e+03      | 2.4e+04      | 3.8e+05      | 9.7e+07      | 6.3e+12
14    | 1.2e+04      | 4.7e+04      | 7.6e+05      | 1.9e+08      | 1.3e+13
15    | 2.4e+04      | 9.5e+04      | 1.5e+06      | 3.9e+08      | 2.5e+13

hll_default_expthresh

Parameter description: Specifies the default threshold for switching from the explicit mode to the sparse mode.

+

Type: USERSET

+

Value range: an integer ranging from –1 to 7. –1 indicates the auto mode; 0 indicates that the explicit mode is skipped; a value from 1 to 7 indicates that the mode is switched when the number of distinct values reaches 2^hll_default_expthresh.

+

Default value: –1

+
+

hll_default_sparseon

Parameter description: Specifies whether to enable the sparse mode by default.

+

Type: USERSET

+

Value range: 0 and 1. 0 indicates that the sparse mode is disabled by default; 1 indicates that the sparse mode is enabled by default.

+

Default value: 1

+
+

hll_max_sparse

Parameter description: Specifies the size of max_sparse.

+

Type: USERSET

+

Value range: an integer ranging from –1 to INT_MAX

+

Default value: –1

+
+

enable_compress_hll

Parameter description: Specifies whether to enable memory optimization for HLL.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

udf_memory_limit

Parameter description: Controls the maximum physical memory that can be used when each CN or DN executes UDFs.

+

Type: POSTMASTER

+

Value range: an integer. The value range is from 200 x 1024 to the value of max_process_memory and the unit is KB.

+

Default value: 200 MB

+
+

FencedUDFMemoryLimit

Parameter description: Controls the virtual memory used by each fenced UDF worker process.

+

Type: USERSET

+

Suggestion: You are not advised to set this parameter. You can set udf_memory_limit instead.

+

Value range: an integer. The unit can be KB, MB, or GB. 0 indicates that the memory is not limited.

+

Default value: 0

+
+

UDFWorkerMemHardLimit

Parameter description: Specifies the maximum value of FencedUDFMemoryLimit.

+

Type: POSTMASTER

+

Suggestion: You are not advised to set this parameter. You can set udf_memory_limit instead.

+

Value range: an integer. The unit can be KB, MB, or GB.

+

Default value: 1 GB

+
+

pljava_vmoptions

Parameter description: Specifies the startup parameters for JVMs used by the PL/Java function.

+

Type: SUSET

+

Value range: a string, supporting:

+ +

If pljava_vmoptions is set to a value beyond the value range, an error will be reported when PL/Java functions are used.

+
+

Default value: empty

+
+
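For example, to cap the JVM heap used by PL/Java (run with administrator rights, as this is a SUSET parameter; -Xmx is a standard JVM option):

SET pljava_vmoptions = '-Xmx256m';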

javaudf_disable_feature

Parameter description: Specifies the granularity of Java UDF actions.

+

Type: SIGHUP

+

Value range: a string

+ +

Default value: extdir,hadoop,reflection,loadlibrary,net,socket,security,classloader,access_declared_members

+
+

enable_pbe_optimization

Parameter description: Specifies whether the optimizer optimizes the query plan for statements executed in Parse Bind Execute (PBE) mode.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

enable_light_proxy

Parameter description: Specifies whether the optimizer optimizes the execution of simple queries on CNs.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: on

+
+

checkpoint_flush_after

Parameter description: Specifies the number of consecutive disk pages that the checkpointer writer thread writes before asynchronous flush. In GaussDB(DWS), the size of a disk page is 8 KB.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 256. 0 indicates that the asynchronous flush function is disabled. For example, if the value is 32, the checkpointer thread continuously writes 32 disk pages (that is, 32 x 8 = 256 KB) before asynchronous flush.

+

Default value: 32

+
+

enable_parallel_ddl

Parameter description: Controls whether multiple CNs can concurrently perform DDL operations on the same database object.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

show_acce_estimate_detail

Parameter description: When the GaussDB(DWS) cluster is accelerated (acceleration_with_compute_pool is set to on), specifies whether the EXPLAIN statement displays the evaluation information about execution plan pushdown to computing Node Groups. The evaluation information is generally used by O&M personnel during maintenance, and it may affect the output display of the EXPLAIN statement. Therefore, this parameter is disabled by default. The evaluation information is displayed only if the verbose option of the EXPLAIN statement is enabled.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

support_batch_bind

Parameter description: Specifies whether to batch bind and execute PBE statements through interfaces such as JDBC, ODBC, and Libpq.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

enable_immediate_interrupt

Parameter description: Specifies whether the execution of the current statement or session can be immediately interrupted in the signal processing function.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+

Exercise caution when setting this parameter to on. If the execution of the current statement or session can be immediately interrupted in the signal processing function, the execution of some key processes may be interrupted, causing the failure to release the global lock in the system. It is recommended that this parameter be set to on only during system debugging or fault prevention.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0937.html b/docs/dws/dev/dws_04_0937.html new file mode 100644 index 00000000..e62e793c --- /dev/null +++ b/docs/dws/dev/dws_04_0937.html @@ -0,0 +1,17 @@ + + +

Auditing

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0938.html b/docs/dws/dev/dws_04_0938.html new file mode 100644 index 00000000..f9184087 --- /dev/null +++ b/docs/dws/dev/dws_04_0938.html @@ -0,0 +1,58 @@ + + +

Audit Switch

+

audit_enabled

Parameter description: Specifies whether to enable or disable the audit process. After the audit process is enabled, the auditing information written by the background process can be read from the pipe and written into audit files.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

audit_data_format

Parameter description: Specifies the format of the audit log files. Currently, only the binary format is supported.

+

Type: POSTMASTER

+

Value range: a string

+

Default value: binary

+
+

audit_rotation_interval

Parameter description: Specifies the interval of creating an audit log file. If the difference between the current time and the time when the previous audit log file is created is greater than the value of audit_rotation_interval, a new audit log file will be generated.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to INT_MAX/60. The unit is min.

+

Default value: 1d

+

Adjust this parameter only when required. Otherwise, audit_resource_policy may fail to take effect. To control the storage space and time of audit logs, set the audit_resource_policy, audit_space_limit, and audit_file_remain_time parameters.

+
+
+

audit_rotation_size

Parameter description: Specifies the maximum capacity of an audit log file. If the total size of messages in an audit log file exceeds the value of audit_rotation_size, the server generates a new audit log file.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 1024. The unit is MB.

+

Default value: 10 MB

+

Adjust this parameter only when required. Otherwise, audit_resource_policy may fail to take effect. To control the storage space and time of audit logs, set the audit_resource_policy, audit_space_limit, and audit_file_remain_time parameters.

+
+
+

audit_resource_policy

Parameter description: Specifies the policy for determining whether audit logs are preferentially stored by space or time.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: on

+
+

audit_file_remain_time

Parameter description: Specifies the minimum duration required for recording audit logs. This parameter is valid only when audit_resource_policy is set to off.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 730. The unit is day. 0 indicates that the storage duration is not limited.

+

Default value: 90

+
+

audit_space_limit

Parameter description: Specifies the total disk space occupied by audit files.

+

Type: SIGHUP

+

Value range: an integer ranging from 1024 KB to 1024 GB. The unit is KB.

+

Default value: 1 GB

+
+

audit_file_remain_threshold

Parameter description: Specifies the maximum number of audit files in the audit directory.

+

Type: SIGHUP

+

Value range: an integer ranging from 1 to 1048576

+

Default value: 1048576

+

Ensure that the value of this parameter is 1048576. If the value is changed, the audit_resource_policy parameter may not take effect. To control the storage space and time of audit logs, use the audit_resource_policy, audit_space_limit, and audit_file_remain_time parameters.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0940.html b/docs/dws/dev/dws_04_0940.html new file mode 100644 index 00000000..de5090e5 --- /dev/null +++ b/docs/dws/dev/dws_04_0940.html @@ -0,0 +1,226 @@ + + +

Operation Audit

+

audit_operation_exec

Parameter description: Specifies whether to audit successful operations in GaussDB(DWS). Set this parameter as required.

+

Type: SIGHUP

+

Value range: a string

+ +

Default value: login, logout, database_process, user_lock, grant_revoke, set, transaction, and cursor

+
  • You are advised to reserve transaction. Otherwise, statements in a transaction will not be audited.
  • You are advised to reserve cursor. Otherwise, the SELECT statements in a cursor will not be audited. To audit the SELECT statement within transactions and cursors, retain both transaction and cursor audit items.
  • The Data Studio client automatically encapsulates SELECT statements using CURSOR.
+
+
+

audit_operation_error

Parameter description: Specifies whether to audit failed operations in GaussDB(DWS). Set this parameter as required.

+

Type: SIGHUP

+

Value range: a string

+ +

Default value: login

+
+

audit_inner_tool

Parameter description: Specifies whether to audit the operations of the internal maintenance tool in GaussDB(DWS).

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

audit_system_object

Parameter description: Specifies whether to audit the CREATE, DROP, and ALTER operations on the GaussDB(DWS) database object. The GaussDB(DWS) database objects include databases, users, schemas, and tables. The operations on the database object can be audited by changing the value of this parameter.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 4194303

+ +

Value description:

+

The value of this parameter is interpreted as 22 binary bits, which represent 22 types of GaussDB(DWS) database objects. If a bit is set to 0, the CREATE, DROP, and ALTER operations on the corresponding database objects are not audited; if it is set to 1, they are audited. For details about the audit content represented by these 22 bits, see Table 1.

+

Default value: 12303

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Meaning of each value for the audit_system_object parameter

Binary Bit

+

Meaning

+

Value Description

+

Bit 0

+

Whether to audit the CREATE, DROP, and ALTER operations on databases.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 1

+

Whether to audit the CREATE, DROP, and ALTER operations on schemas.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 2

+

Whether to audit the CREATE, DROP, and ALTER operations on users.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 3

+

Whether to audit the CREATE, DROP, ALTER, and TRUNCATE operations on tables.

+
  • 0 indicates that the CREATE, DROP, ALTER, and TRUNCATE operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, ALTER, and TRUNCATE operations on these objects are audited.
+

Bit 4

+

Whether to audit the CREATE, DROP, and ALTER operations on indexes.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 5

+

Whether to audit the CREATE, DROP, and ALTER operations on views.

+
  • 0 indicates that the CREATE and DROP operations on these objects are not audited.
  • 1 indicates that the CREATE and DROP operations on these objects are audited.
+

Bit 6

+

Whether to audit the CREATE, DROP, and ALTER operations on triggers.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 7

+

Whether to audit the CREATE, DROP, and ALTER operations on procedures/functions.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 8

+

Whether to audit the CREATE, DROP, and ALTER operations on tablespaces.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 9

+

Whether to audit the CREATE, DROP, and ALTER operations on resource pools.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 10

+

Whether to audit the CREATE, DROP, and ALTER operations on workloads.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 11

+

Whether to audit the CREATE, DROP, and ALTER operations on SERVER FOR HADOOP objects.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 12

+

Whether to audit the CREATE, DROP, and ALTER operations on data sources.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
+
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 13

+

Whether to audit the CREATE, DROP, and ALTER operations on Node Groups.

+
  • 0 indicates that the CREATE and DROP operations on these objects are not audited.
+
  • 1 indicates that the CREATE and DROP operations on these objects are audited.
+

Bit 14

+

Whether to audit the CREATE, DROP, and ALTER operations on ROW LEVEL SECURITY objects.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on these objects are audited.
+

Bit 15

+

Whether to audit the CREATE, DROP, and ALTER operations on types.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on types are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on types are audited.
+

Bit 16

+

Whether to audit the CREATE, DROP, and ALTER operations on text search objects (configurations and dictionaries)

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on text search objects are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on text search objects are audited.
+

Bit 17

+

Whether to audit the CREATE, DROP, and ALTER operations on directories.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on directories are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on directories are audited.
+

Bit 18

+

Whether to audit the CREATE, DROP, and ALTER operations on workloads.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on workloads are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on workloads are audited.
+

Bit 19

+

Whether to audit the CREATE, DROP, and ALTER operations on redaction policies.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on redaction policies are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on redaction policies are audited.
+

Bit 20

+

Whether to audit the CREATE, DROP, and ALTER operations on sequences.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on sequences are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on sequences are audited.
+

Bit 21

+

Whether to audit the CREATE, DROP, and ALTER operations on nodes.

+
  • 0 indicates that the CREATE, DROP, and ALTER operations on nodes are not audited.
  • 1 indicates that the CREATE, DROP, and ALTER operations on nodes are audited.
+
+
+
+
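The default value 12303 is 11000000001111 in binary, so bits 0 to 3 (databases, schemas, users, and tables) and bits 12 and 13 (data sources and Node Groups) are audited. Individual bits can be checked in SQL, for example:

SELECT (12303 & (1 << 3)) <> 0 AS tables_audited,    -- bit 3: audited
       (12303 & (1 << 4)) <> 0 AS indexes_audited;   -- bit 4: not audited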

enableSeparationOfDuty

Parameter description: Specifies whether the separation of permissions is enabled.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: off

+
+

enable_grant_option

Parameter description: Specifies whether the with grant option function can be used in security mode.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

enable_copy_server_files

Parameter description: Specifies whether to enable the permission to copy server files.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: true

+

COPY FROM/TO file requires system administrator permissions. However, if the separation of permissions is enabled, system administrator permissions differ from initial user permissions. In this case, you can use enable_copy_server_files to control the COPY permission of system administrators and prevent escalation of their permissions.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0941.html b/docs/dws/dev/dws_04_0941.html new file mode 100644 index 00000000..01874db8 --- /dev/null +++ b/docs/dws/dev/dws_04_0941.html @@ -0,0 +1,25 @@ + + +

Transaction Monitoring

+

By setting a transaction timeout warning, you can monitor automatically rolled-back transactions and locate the problems in their statements. You can also monitor statements whose execution takes a long time.

+

transaction_sync_naptime

Parameter description: For data consistency, when the status of a local transaction differs from that in the snapshot of the GTM, other transactions will be blocked. You need to wait for a few minutes until the transaction status of the local host is consistent with that of the GTM. The gs_clean tool is automatically triggered for cleansing when the waiting duration on the CN exceeds the value of transaction_sync_naptime. The tool shortens the blocking time after it completes the cleansing.

+

Type: USERSET

+

Value range: an integer. The minimum value is 0. The unit is second.

+

Default value: 5s

+

If this parameter is set to 0, gs_clean is not automatically invoked for cleansing when the blocking duration is reached. Instead, the gs_clean tool is invoked at the interval specified by gs_clean_timeout, whose default value is 5 minutes.

+
+
+

transaction_sync_timeout

Parameter description: For data consistency, when the status of a local transaction differs from that in the snapshot of the GTM, other transactions will be blocked. You need to wait for a few minutes until the transaction status of the local host is consistent with that of the GTM. An exception is reported when the waiting duration on the CN exceeds the value of transaction_sync_timeout, and the transaction is then rolled back to avoid system blocking caused by prolonged process response failures (for example, a sync lock).

+

Type: USERSET

+

Value range: an integer. The minimum value is 0. The unit is second.

+

Default value: 10min

+
+
  • If the value is 0, no error is reported when the blocking times out or the transaction is rolled back.
  • The value of this parameter must be greater than gs_clean_timeout. Otherwise, unnecessary transaction rollback will probably occur due to a block timeout caused by residual transactions that have not been deleted by gs_clean on a DN.
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0945.html b/docs/dws/dev/dws_04_0945.html new file mode 100644 index 00000000..c756d4aa --- /dev/null +++ b/docs/dws/dev/dws_04_0945.html @@ -0,0 +1,641 @@ + + +

Miscellaneous Parameters

+

enable_cluster_resize

Parameter description: If an SQL statement involves tables belonging to different Node Groups, you can enable this parameter to push down the statement's execution plan and improve performance.

+

Type: SUSET

+

Value range: Boolean

+ +

Default value: off

+

This parameter is used for internal O&M. Do not set it to on unless absolutely necessary.

+
+
+

cstore_insert_mode

Parameter description: Specifies the storage location of data to be imported to an HDFS table. This parameter is needed for operations that involve data import, such as INSERT, UPDATE, COPY, and VACUUM FULL.

+

Type: USERSET

+

Value range: enumerated values

+ +

Default value: auto

+

You can set other values as the default in the configuration file.

+
+
+

remote_read_mode

Parameter description: When enable_crc_check is set to on and the data read by the primary DN fails the verification, remote_read_mode is used to specify whether to enable remote read and whether to use secure authentication for connection upon the data verification failure. The setting takes effect only after the cluster is restarted.

+

Type: POSTMASTER

+

Value range: off, non_authentication, authentication

+ +

Default value: non_authentication

+
+

enable_upgrade_merge_lock_mode

Parameter description: If this parameter is set to on, the delta merge operation internally increases the lock level, avoiding errors when UPDATE and DELETE operations are performed concurrently.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

job_queue_processes

Parameter description: Specifies the number of jobs that can be concurrently executed. This parameter is a postmaster parameter. You can set it using gs_guc, and you need to restart gaussdb to make the setting take effect.

+

Type: POSTMASTER

+

Value range: 0 to 1000

+

Functions:

+ +

After the scheduled task function is enabled, the job_scheduler thread polls the pg_jobs system catalog at a scheduled interval. By default, the scheduled task check is performed every second.

+

Too many concurrent tasks consume excessive system resources, so you need to limit the number of concurrently processed tasks. If the current number of concurrent tasks reaches job_queue_processes and some of them expire, those tasks are postponed to the next polling period. Therefore, you are advised to set the polling interval (the interval parameter of the submit interface) based on the execution duration of each task, to prevent tasks in the next polling period from being improperly processed because of overlong task execution times.

+

Note: If the number of parallel jobs is large and the value is too small, these jobs will wait in queues. However, a large parameter value leads to large resource consumption. You are advised to set this parameter to 100 and change it based on the system resource condition.

+

Default value: 10

+
+

ngram_gram_size

Parameter description: Specifies the length of the ngram parser segmentation.

+

Type: USERSET

+

Value range: an integer ranging from 1 to 4

+

Default value: 2

+
+

ngram_grapsymbol_ignore

Parameter description: Specifies whether the ngram parser ignores graphical characters.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

ngram_punctuation_ignore

Parameter description: Specifies whether the ngram parser ignores punctuations.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

zhparser_dict_in_memory

Parameter description: Specifies whether Zhparser adds a dictionary to memory.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: on

+
+

zhparser_multi_duality

Parameter description: Specifies whether Zhparser aggregates segments in long words with duality.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

zhparser_multi_short

Parameter description: Specifies whether Zhparser performs compound segmentation of long words into short words.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

zhparser_multi_zall

Parameter description: Specifies whether Zhparser displays all single words individually.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

zhparser_multi_zmain

Parameter description: Specifies whether Zhparser displays important single words separately.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

zhparser_punctuation_ignore

Parameter description: Specifies whether the Zhparser segmentation result ignores special characters including punctuations (\r and \n will not be ignored).

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: on

+
+

zhparser_seg_with_duality

Parameter description: Specifies whether Zhparser aggregates segments in long words with duality.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

acceleration_with_compute_pool

Parameter description: Specifies whether to use the computing resource pool for acceleration when OBS is queried.

+

Type: USERSET

+

Value range: Boolean

+ +

Default value: off

+
+

behavior_compat_options

Parameter description: Specifies database compatibility behavior. Multiple items are separated by commas (,).

+

Type: USERSET

+

Value range: a string

+

Default value: In upgrade scenarios, the default value of this parameter is the same as that in the cluster before the upgrade. When a new cluster is installed, the default value of this parameter is check_function_conflicts to prevent serious problems caused by incorrect function attributes defined by users.

+
  • Currently, only Table 1 is supported.
  • Multiple items are separated by commas (,), for example, set behavior_compat_options='end_month_calculate,display_leading_zero';
  • strict_concat_functions and strict_text_concat_td are mutually exclusive.
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Compatibility configuration items

Configuration Item

+

Behavior

+

Applicable Compatibility Mode

+

display_leading_zero

+

Specifies how floating point numbers are displayed.

+
  • If this item is not specified, for a decimal number between -1 and 1, the 0 before the decimal point is not displayed. For example, 0.25 is displayed as .25.
  • If this item is specified, for a decimal number between -1 and 1, the 0 before the decimal point is displayed. For example, 0.25 is displayed as 0.25.
+

ORA

+

TD

+

end_month_calculate

+

Specifies the calculation logic of the add_months function.

+

Assume that the two parameters of the add_months function are param1 and param2, and that the sum of param1 and param2 is result.

+
  • If this item is not specified, and the Day of param1 indicates the last day of a month shorter than result, the Day in the calculation result will equal that in param1. For example:
+
select add_months('2018-02-28',3) from dual;
+add_months
+---------------------
+2018-05-28 00:00:00
+(1 row)
+
+ +
+
  • If this item is specified, and the Day of param1 indicates the last day of a month shorter than result, the Day in the calculation result will equal that in result. For example:
+
select add_months('2018-02-28',3) from dual;
+add_months
+---------------------
+2018-05-31 00:00:00
+(1 row)
+
+ +
+

ORA

+

TD

+

compat_analyze_sample

+

Specifies the sampling behavior of the ANALYZE operation.

+

If this item is specified, the sample collected by the ANALYZE operation will be limited to around 30,000 records, controlling CN memory consumption and maintaining the stability of ANALYZE.

+

ORA

+

TD

+

MySQL

+

bind_schema_tablespace

+

Binds a schema with the tablespace with the same name.

+

If a tablespace name is the same as sche_name, default_tablespace will also be set to sche_name if search_path is set to sche_name.

+

ORA

+

TD

+

MySQL

+

bind_procedure_searchpath

+

Specifies the search path of the database object for which no schema name is specified.

+

If no schema name is specified for a stored procedure, the search is performed in the schema to which the stored procedure belongs.

+

If the stored procedure is not found, the following operations are performed:

+
  • If this item is not specified, the system reports an error and exits.
  • If this item is specified, the search continues based on the settings of search_path. If the issue persists, the system reports an error and exits.
+

ORA

+

TD

+

MySQL

+

correct_to_number

+

Controls the compatibility of the to_number() result.

+

If this item is specified, the result of the to_number() function is the same as that of PG11. Otherwise, the result is the same as that of Oracle.

+

ORA

+

unbind_divide_bound

+

Controls the range check on the result of integer division.

+
  • If this item is not specified, the division result is checked. If the result is out of the range, an error is reported. In the following example, an out-of-range error is reported because the value of INT_MIN/(-1) is greater than the value of INT_MAX.
+
SELECT (-2147483648)::int / (-1)::int;
+ERROR:  integer out of range
+
  • If this item is specified, the range of the division result does not need to be checked. In the following example, INT_MIN/(-1) can be used to obtain the output result INT_MAX+1.
+
SELECT (-2147483648)::int / (-1)::int;
+  ?column?
+------------
+ 2147483648
+(1 row)
+

ORA

+

TD

+

merge_update_multi

+

Performs an update if multiple rows are matched for MERGE INTO.

+

If this item is specified, no error is reported if multiple rows are matched. Otherwise, an error is reported (same as Oracle).

+

ORA

+

TD

+

return_null_string

+

Specifies how to display the empty result (empty string '') of the lpad(), rpad(), repeat(), regexp_split_to_table(), and split_part() functions.

+
  • If this item is not specified, the empty string is displayed as NULL.
+
select length(lpad('123',0,'*')) from dual;
+length
+--------
+
+(1 row)
+
+ +
+
  • If this item is specified, the empty string is displayed as single quotation marks ('').
+
select length(lpad('123',0,'*')) from dual;
+length
+--------
+0
+(1 row)
+
+ +
+

ORA

+

compat_concat_variadic

+

Specifies the compatibility of variadic results of the concat() and concat_ws() functions.

+

If this item is specified and a concat function has a parameter of the variadic type, different result formats in Oracle and Teradata are retained. If this item is not specified and a concat function has a parameter of the variadic type, the result format of Oracle is retained for both Oracle and Teradata.

+

ORA

+

TD

+

convert_string_digit_to_numeric

+

Specifies the type casting priority for binary BOOL operations on the CHAR type and INT type.

+
  • If this item is not specified, the type casting priority is the same as that of PG9.6.
  • After this item is configured, all binary BOOL operations of the CHAR type and INT type are forcibly converted to the NUMERIC type for computation.

    After this configuration item is set, the CHAR types that are affected include BPCHAR, VARCHAR, NVARCHAR2, and TEXT, and the INT types that are affected include INT1, INT2, INT4, and INT8.

    +
+
CAUTION:

This configuration item is valid only for binary BOOL operations, for example, INT2>TEXT and INT4=BPCHAR. Non-BOOL operations are not affected. This configuration item does not support conversion of UNKNOWN operations such as INT>'1.1'. After this configuration item is enabled, all BOOL operations between the CHAR and INT types are preferentially converted to the NUMERIC type for computation, which affects the computation performance of the database. When a JOIN column is a combination of affected types, the execution plan is also affected.

+
+
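A small sketch of the kind of expression this item affects, a binary BOOL comparison between an INT type and a CHAR-family type:

SELECT 2::INT4 > '1.5'::TEXT;
-- With the item configured, both operands are first cast to NUMERIC, so the
-- comparison evaluates 2 > 1.5; without it, the PG9.6 casting rules apply.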

ORA

+

TD

+

MySQL

+

check_function_conflicts

+

Controls the check of the custom plpgsql/SQL function attributes.

+
  • If this parameter is not specified, the IMMUTABLE/STABLE/VOLATILE attributes of a custom function are not checked.
  • If this parameter is specified, the IMMUTABLE attribute of a custom function is checked. If the function references a table or calls a STABLE/VOLATILE function, an error is reported during execution, because such references conflict with the IMMUTABLE attribute and the function's behavior is no longer immutable.
+

For example, when this parameter is specified, an error is reported in the following scenarios:

+
CREATE OR replace FUNCTION sql_immutable (INTEGER)
+RETURNS INTEGER AS 'SELECT a+$1 from shipping_schema.t4 where a=1;'
+LANGUAGE SQL IMMUTABLE
+RETURNS NULL
+ON NULL INPUT;
+select sql_immutable(1);
+ERROR:  IMMUTABLE function cannot contain SQL statements with relation or Non-IMMUTABLE function.
+CONTEXT:  SQL function "sql_immutable" during startup
+referenced column: sql_immutable
+

ORA

+

TD

+

MySQL

+

varray_verification

+

Specifies whether to verify the array length and the length of the array element type, for compatibility with GaussDB(DWS) versions earlier than 8.1.0.

+

If this parameter is specified, the array length and array type length are not verified.

+
Scenario 1
+CREATE OR REPLACE PROCEDURE varray_verification 
+AS 
+    TYPE org_varray_type IS varray(5) OF VARCHAR2(2); 
+    v_org_varray org_varray_type; 
+BEGIN 
+    v_org_varray(1) := '111'; --If the value exceeds the limit of VARCHAR2(2), the setting will be consistent with that in the historical version and no verification is performed after configuring this option.
+END; 
+/ 
+Scenario 2
+ CREATE OR REPLACE PROCEDURE varray_verification_i3_1 
+AS 
+    TYPE org_varray_type IS varray(2) OF NUMBER(2); 
+    v_org_varray org_varray_type; 
+BEGIN 
+    v_org_varray(3) := 1; --If the value exceeds the limit of varray(2) specified for array length, the setting will be consistent with that in the historical version and no verification is performed after configuring this option.
+END; 
+/ 
+

ORA

+

TD

+

strict_concat_functions

+

Controls whether the textanycat() and anytextcat() functions return null when any parameter is null. This parameter and strict_text_concat_td are mutually exclusive.

+

In MySQL-compatible mode, this parameter has no impact.

+
  • If this configuration item is not specified, the returned values of the textanycat() and anytextcat() functions are the same as those in the Oracle database.
  • When this configuration item is specified, if there are null parameters in the textanycat() and anytextcat() functions, the returned value is also null. Different result formats in Oracle and Teradata are retained.
+

If this configuration item is not specified, the returned values of the textanycat() and anytextcat() functions are the same as those in the Oracle database.

+
SELECT textanycat('gauss', cast(NULL as BOOLEAN));
+ textanycat
+------------
+ gauss
+(1 row)
+
+SELECT 'gauss' || cast(NULL as BOOLEAN); -- In this case, the || operator is converted to the textanycat function.
+ ?column?
+----------
+ gauss
+(1 row)
+

When this configuration item is specified, the results differ from those in Oracle and Teradata:

+
SELECT textanycat('gauss', cast(NULL as BOOLEAN));
+ textanycat
+------------
+
+(1 row)
+
+SELECT 'gauss' || cast(NULL as BOOLEAN); -- In this case, the || operator is converted to the textanycat function.
+ ?column?
+----------
+
+(1 row)
+

ORA

+

TD

+

strict_text_concat_td

+

In Teradata-compatible mode, controls whether the textcat(), textanycat(), and anytextcat() functions return null when any parameter is null. This parameter and strict_concat_functions are mutually exclusive.

+
  • If this parameter is not specified, the return values of the textcat(), textanycat(), and anytextcat() functions in Teradata-compatible mode are the same as those in GaussDB(DWS).
  • When this parameter is specified, if the textcat(), textanycat(), and anytextcat() functions contain any null parameter values, the return value is null in the Teradata-compatible mode.
+

If this parameter is not specified, the returned values of the textcat(), textanycat(), and anytextcat() functions are the same as those in the GaussDB(DWS).

+
td_data_compatible_db=# SELECT textcat('abc', NULL);
+textcat
+---------
+abc
+(1 row)
+
td_data_compatible_db=# SELECT 'abc' || NULL; -- In this case, the operator || is converted to the textcat() function.
+?column?
+----------
+abc
+(1 row)
+

When this parameter is specified, NULL is returned if any parameter of the textcat(), textanycat(), or anytextcat() function is null.

+
td_data_compatible_db=# SELECT textcat('abc', NULL);
+textcat
+---------
+
+(1 row)
+
td_data_compatible_db=# SELECT 'abc' || NULL;
+?column?
+----------
+
+(1 row)
+

TD

+

compat_display_ref_table

+

Sets the column display format in the view.

+
  • If this parameter is not specified, columns are displayed with a table prefix by default, in the tab.col format.
  • If this parameter is specified, columns are displayed as in the original view definition; a table prefix appears only when the original definition contains one.
+
SET behavior_compat_options='compat_display_ref_table';
+CREATE OR REPLACE VIEW viewtest2 AS SELECT a.c1, c2, a.c3, 0 AS c4 FROM viewtest_tbl a;
+SELECT pg_get_viewdef('viewtest2');
+pg_get_viewdef
+-----------------------------------------------------
+SELECT a.c1, c2, a.c3, 0 AS c4 FROM viewtest_tbl a;
+(1 row)
+

ORA

+

TD

+

para_support_set_func

+

Specifies whether the input parameters of the COALESCE(), NVL(), GREATEST(), and LEAST() functions in a column-store table can contain multiple result set expressions.

+
  • If this item is not specified and the input parameter contains multiple result set expressions, an error is reported, indicating that the function is not supported.
+
SELECT COALESCE(regexp_split_to_table(c3,'#'), regexp_split_to_table(c3,'#')) FROM regexp_ext2_tb1 ORDER BY 1 LIMIT 5;
+ERROR:  set-valued function called in context that cannot accept a set
+
+ +
+
  • When this configuration item is specified, the function input parameter can contain multiple result set expressions.
+
SELECT COALESCE(regexp_split_to_table(c3,'#'), regexp_split_to_table(c3,'#')) FROM regexp_ext2_tb1 ORDER BY 1 LIMIT 5;
+ coalesce
+----------
+ a
+ a
+ a
+ a
+ a
+(5 rows)
+
+ +
+

ORA

+

TD

+

disable_select_truncate_parallel

+

Controls the lock level of DDL operations, such as TRUNCATE, on a partitioned table.

+
  • If this item is specified, the concurrent execution of TRUNCATE and DML operations (such as SELECT) on different partitions is forbidden, and the fast query shipping (FQS) of the SELECT operation on the partitioned table is allowed. You can set this parameter in the OLTP database, where there are many simple queries on partitioned tables, and there is no requirement for concurrent TRUNCATE and DML operations on different partitions.
  • If this item is not specified, SELECT and TRUNCATE operations can be concurrently performed on different partitions in a partitioned table, and the FQS of the partitioned table is disabled to avoid possible inconsistency.
+
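A hedged example of enabling the item for the current session:

SET behavior_compat_options = 'disable_select_truncate_parallel';
-- TRUNCATE and SELECT on different partitions of the same table are now serialized,
-- while SELECT statements on the partitioned table can be fast-query-shipped (FQS).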

ORA

+

TD

+

MySQL

+

bpchar_text_without_rtrim

+

In Teradata-compatible mode, controls whether trailing spaces are retained when a bpchar value is converted to text. If the actual length is less than the length specified by bpchar, the value is padded with spaces to be compatible with the Teradata style of the bpchar character string.

+

Currently, ignoring spaces at the end of a string for comparison is not supported. If the concatenated string contains spaces at the end, the comparison is space-sensitive.

+

The following is an example:

+
td_compatibility_basic_db=# select length('a'::char(10)::text);
+length
+--------
+10
+(1 row)
+
+td_compatibility_basic_db=# select length('a'||'a'::char(10));
+length
+--------
+11
+(1 row)
+

TD

+

convert_empty_str_to_null_td

+

In Teradata-compatible mode, makes the to_date, to_timestamp, and to_number conversion functions return null when they encounter empty strings, and controls the format of the value returned by to_char when its input parameter is of the date type.

+

Example:

+

If this parameter is not specified:

+
td_compatibility_db=# select to_number('');
+ to_number 
+-----------
+         0
+(1 row)
+
+td_compatibility_db=# select to_date('');
+ERROR:  the format is not correct
+DETAIL:  invalid date length "0", must between 8 and 10.
+CONTEXT:  referenced column: to_date
+
+td_compatibility_db=# select to_timestamp('');
+      to_timestamp
+------------------------
+ 0001-01-01 00:00:00 BC
+(1 row)
+
+td_compatibility_db=# select to_char(date '2020-11-16');
+        to_char         
+------------------------
+ 2020-11-16 00:00:00+08
+(1 row)
+

If this parameter is specified, and parameters of to_number, to_date, and to_timestamp functions contain empty strings:

+
td_compatibility_db=# select to_number('');
+ to_number 
+-----------
+
+(1 row)
+
+td_compatibility_db=# select to_date('');
+ to_date 
+---------
+
+(1 row)
+
+td_compatibility_db=# select to_timestamp('');
+ to_timestamp
+--------------
+
+(1 row)
+
+td_compatibility_db=# select to_char(date '2020-11-16');
+  to_char   
+------------
+ 2020/11/16
+(1 row)
+

TD

+

disable_case_specific

+

Determines whether to ignore case sensitivity during character type match. This parameter is valid only in Teradata-compatible mode.

+
  • If this item is not specified, characters are case sensitive during character type match.
  • If this item is specified, characters are case insensitive during character type match.
+

After being specified, this item will affect five character types (CHAR, TEXT, BPCHAR, VARCHAR, and NVARCHAR), the comparison operators (<, >, =, >=, <=, !=, <>, like, not like, in, and not in), and the case when and decode expressions.

+
CAUTION:

After this item is enabled, the UPPER function is added before the character type, which affects the estimation logic. Therefore, an enhanced estimation model is required. (Suggested settings: cost_param=16, cost_model_version = 1, join_num_distinct=-20, and qual_num_distinct=200)

+
+
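A minimal sketch in a Teradata-compatible database; the expected output assumes the item is enabled:

SET behavior_compat_options = 'disable_case_specific';
SELECT 'ABC'::TEXT = 'abc'::TEXT;  -- expected to return t, as the match ignores case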

TD

+

enable_interval_to_text

+

Controls the implicit conversion from the interval type to the text type.

+
  • When this option is enabled, the implicit conversion from the interval type to the text type is supported.
    SELECT TO_DATE('20200923', 'yyyymmdd') - TO_DATE('20200920', 'yyyymmdd') = '3'::text;
    +?column?
    +----------
    +f
    +(1 row)
    +
  • When this option is disabled, the implicit conversion from the interval type to the text type is not supported.
    SELECT TO_DATE('20200923', 'yyyymmdd') - TO_DATE('20200920', 'yyyymmdd') = '3'::text;
    +?column?
    +----------
    +t
    +(1 row)
    +
+

ORA

+

TD

+

MySQL

+

light_object_mtime

+

Specifies whether the mtime column in the pg_object system catalog records object operations.

+
  • If this parameter is configured, the GRANT, REVOKE, and TRUNCATE operations are not recorded by mtime, that is, the mtime column is not updated.
  • If this parameter is not configured (by default), the ALTER, COMMENT, GRANT, REVOKE, and TRUNCATE operations are recorded by mtime, that is, the mtime column is updated.
+
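For illustration, one hedged way to observe the effect, assuming pg_object can be filtered by an object_oid column and my_table is a hypothetical table:

SELECT mtime FROM pg_object WHERE object_oid = 'my_table'::regclass;
GRANT SELECT ON my_table TO PUBLIC;
SELECT mtime FROM pg_object WHERE object_oid = 'my_table'::regclass;
-- With light_object_mtime configured, the two mtime values are expected to match.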

ORA

+

TD

+

MySQL

+
+
+
+

table_skewness_warning_threshold

Parameter description: Specifies the threshold for triggering a table skew alarm.

+

Type: SUSET

+

Value range: a floating point number ranging from 0 to 1

+

Default value: 1

+
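Because the type is SUSET, a user with sufficient privileges can adjust the value in a session; a hedged example:

SHOW table_skewness_warning_threshold;
SET table_skewness_warning_threshold = 0.8;  -- report skew above 80%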
+

table_skewness_warning_rows

Parameter description: Specifies the minimum number of rows for triggering a table skew alarm.

+

Type: SUSET

+

Value range: an integer ranging from 0 to INT_MAX

+

Default value: 100000

+
+

max_cache_partition_num

Parameter description: Specifies the maximum number of partitions whose data is cached in memory when a column-store partitioned table is redistributed after scale-out. If the number of cached partitions exceeds this limit, the earliest cached partition is directly written to the column-store file.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to 32767.

+ +

Default value: 0

+

This parameter is used for redistribution during scale-out. A proper value can reduce the memory consumption during redistribution of a partitioned column-store table. However, tables with unbalanced data distribution in some partitions may generate a large number of small CUs after the redistribution. If there are a large number of small CUs, execute the VACUUM FULL statement to merge them.

+
+
+

enable_prevent_job_task_startup

Parameter description: Specifies whether to prevent the thread startup of scheduled jobs. This is an internal parameter. You are not advised to change the value of this parameter.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+

Set this parameter only on CNs.

+
+
+

auto_process_residualfile

Parameter description: Specifies whether to enable the residual file recording function.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

enable_view_update

Parameter description: Specifies whether to enable the view update function.

+

Type: POSTMASTER

+

Value range: Boolean

+ +

Default value: off

+
+

view_independent

Parameter description: Specifies whether to decouple views from tables, functions, and synonyms. If this function is enabled, a view can be automatically re-associated and re-created after its base table is restored.

+

Type: SIGHUP

+

Value range: Boolean

+ +

Default value: off

+
+

bulkload_report_threshold

Parameter description: Sets the threshold for reporting import and export statistics.

+

Type: SIGHUP

+

Value range: an integer ranging from 0 to INT_MAX

+

Default value: 50

+
+

assign_abort_xid

Parameter description: Determines the transaction to be aborted based on the specified XID in a query.

+

Type: USERSET

+

Value range: a character string with the specified XID

+

This parameter is used only for quick restoration if a user deletes data by mistake (DELETE operation). Do not use this parameter in other scenarios. Otherwise, visible transaction errors may occur.

+
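Given the warning above, the following is only a sketch; the XID is hypothetical and must come from your own diagnosis of the mistaken DELETE:

SET assign_abort_xid = '150000';
-- The transaction with XID 150000 is treated as aborted, so rows it deleted
-- become visible again and can be saved elsewhere.
RESET assign_abort_xid;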
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0946.html b/docs/dws/dev/dws_04_0946.html new file mode 100644 index 00000000..bce4f38e --- /dev/null +++ b/docs/dws/dev/dws_04_0946.html @@ -0,0 +1,793 @@ + + +

Glossary

+
+

Term

+

Description

+

A – E

+

ACID

+

Atomicity, Consistency, Isolation, and Durability (ACID). These are a set of properties of database transactions in a DBMS.

+

cluster ring

+

A cluster ring consists of several physical servers. The primary-standby-secondary relationships among its DNs do not involve external DNs. That is, none of the primary, standby, or secondary counterparts of DNs belonging to the ring are deployed in other rings. A ring is the smallest unit used for scaling.

+

Bgwriter

+

A background write thread created when the database starts. The thread pushes dirty pages in the database to a permanent device (such as a disk).

+

bit

+

The smallest unit of information handled by a computer. One bit is expressed as a 1 or a 0 in a binary numeral, or as a true or a false logical condition. A bit is physically represented by an element such as high or low voltage at one point in a circuit, or a small spot on a disk that is magnetized in one way or the other. A single bit conveys little information a human would consider meaningful. A group of eight bits, however, makes up a byte, which can be used to represent many types of information, such as a letter of the alphabet, a decimal digit, or other character.

+

Bloom filter

+

Bloom filter is a space-efficient binary vectorized data structure, conceived by Burton Howard Bloom in 1970, that is used to test whether an element is a member of a set. False positive matches are possible, but false negatives are not. In other words, a query returns either "possibly in set" or "definitely not in set". A Bloom filter trades some accuracy for savings in time and space.

+

CCN

+

The Central Coordinator (CCN) is a node responsible for determining, queuing, and scheduling complex operations in each CN to enable the dynamic load management of GaussDB(DWS).

+

CIDR

+

Classless Inter-Domain Routing (CIDR). CIDR abandons the traditional class-based (class A: 8; class B: 16; and class C: 24) address allocation mode and allows the use of address prefixes of any length, effectively improving the utilization of address space. A CIDR address is in the format of IP address/Number of bits in a network ID. For example, in 192.168.23.35/21, 21 indicates that the first 21 bits are the network prefix and others are the host ID.

+

Cgroups

+

A control group (Cgroup), also called a priority group (PG) in GaussDB(DWS). The Cgroup is a kernel feature of SUSE Linux and Red Hat that can limit, account for, and isolate the resource usage of a collection of processes.

+

CLI

+

Command-line interface (CLI). Users use the CLI to interact with applications. Its input and output are based on texts. Commands are entered through keyboards or similar devices and are compiled and executed by applications. The results are displayed in text or graphic forms on the terminal interface.

+

CM

+

Cluster Manager (CM) manages and monitors the running status of functional units and physical resources in the distributed system, ensuring stable running of the entire system.

+

CMS

+

The Cluster Management Service (CMS) component manages the cluster status.

+

CN

+

The Coordinator (CN) stores database metadata, splits query tasks and supports their execution, and aggregates the query results returned from DNs.

+

CU

+

Compression Unit (CU) is the smallest storage unit in a column-storage table.

+

core file

+

A file that is created when memory overwriting, assertion failures, or access to invalid memory occurs in a process, causing it to fail. This file is then used for further analysis.

+

A core file contains a memory dump, in an all-binary and port-specific format. The name of a core file consists of the word "core" and the OS process ID.

+

The core file is available regardless of the type of platform.

+

core dump

+

When a program stops abnormally, the core dump, memory dump, or system dump records the state of the working memory of the program at that point in time. In practice, other key pieces of program state are usually dumped at the same time, including the processor registers, which may include the program counter and stack pointer, memory management information, and other processor and OS flags and information. A core dump is often used to assist diagnosis and computer program debugging.

+

DBA

+

A database administrator (DBA) instructs or executes database maintenance operations.

+

DBLINK

+

An object defining the path from one database to another. A remote database object can be queried with DBLINK.

+

DBMS

+

Database Management System (DBMS) is a piece of system management software that allows users to access information in a database. It is a collection of programs that allows you to access, manage, and query data in a database. A DBMS can be classified as an in-memory DBMS or a disk-based DBMS based on where the data resides.

+

DCL

+

Data control language (DCL)

+

DDL

+

Data definition language (DDL)

+

DML

+

Data manipulation language (DML)

+

DN

+

A Datanode (DN) stores table data and performs query operations.

+

ETCD

+

The Editable Text Configuration Daemon (ETCD) is a distributed key-value storage system used for configuration sharing and service discovery (registration and search).

+

ETL

+

Extract-Transform-Load (ETL) refers to the process of extracting data from the source, transforming it, and loading it into the target database.

+

Extension Connector

+

Extension Connector is provided by GaussDB(DWS) to process data across clusters. It can send SQL statements to Spark, and can return execution results to your database.

+

Backup

+

A backup, or the process of backing up, refers to the copying and archiving of computer data in case of data loss.

+

backup and restoration

+

A collection of concepts, procedures, and strategies to protect against data loss caused by media failures or misoperations.

+

standby server

+

A node in the GaussDB(DWS) HA solution. It functions as a backup of the primary server. If the primary server is behaving abnormally, the standby server is promoted to primary, ensuring data service continuity.

+

crash

+

A crash (or system crash) is an event in which a computer or a program (such as a software application or an OS) ceases to function properly. Often the program will exit after encountering this type of error. Sometimes the offending program may appear to freeze or hang until a crash reporting service documents details of the crash. If the program is a critical part of the OS kernel, the entire computer may crash (possibly resulting in a fatal system error).

+

encoding

+

Encoding is representing data and information using code so that it can be processed and analyzed by a computer. Characters, digits, and other objects can be converted into digital code, or information and data can be converted into the required electrical pulse signals based on predefined rules.

+

encoding technology

+

A technology that presents data using a specific set of characters, which can be identified by computer hardware and software.

+

table

+

A set of columns and rows. Each column is referred to as a field. The value in each field represents a data type. For example, if a table contains people's names, cities, and states, it has three columns: Name, City, and State. In every row in the table, the Name column contains a name, the City column contains a city, and the State column contains a state.

+

tablespace

+

A tablespace is a logical storage structure that contains tables, indexes, large objects, and long data. A tablespace provides an abstract layer between physical data and logical data, and provides storage space for all database objects. When you create a table, you can specify which tablespace it belongs to.

+

concurrency control

+

A DBMS service that ensures data integrity when multiple transactions are concurrently executed in a multi-user environment. In a multi-threaded environment, GaussDB(DWS) concurrency control ensures that database operations are safe and all database transactions remain consistent at any given time.

+

query

+

A request sent to the database, such as updating, modifying, querying, or deleting information.

+

query operator

+

An iterator or a query tree node, which is a basic unit for the execution of a query. Execution of a query can be split into one or more query operators. Common query operators include scan, join, and aggregation.

+

query fragment

+

Each query task can be split into one or more query fragments. Each query fragment consists of one or more query operators and can independently run on a node. Query fragments exchange data through data flow operators.

+

durability

+

One of the ACID features of database transactions. Durability indicates that transactions that have been committed will permanently survive and not be rolled back.

+

stored procedure

+

A group of SQL statements compiled into a single execution plan and stored in a large database system. Users can specify a name and parameters (if any) for a stored procedure to execute the procedure.

+

OS

+

An operating system (OS) is loaded to a computer by a bootstrap program and manages all other programs and applications on the computer or similar device.

+

secondary server

+

To ensure high cluster availability, the primary server synchronizes logs to the secondary server if data synchronization between the primary and standby servers fails. If the primary server suddenly breaks down, the standby server is promoted to primary and synchronizes logs from the secondary server for the duration of the breakdown.

+

BLOB

+

Binary large object (BLOB) is a collection of binary data stored in a database, such as videos, audio, and images.

+

dynamic load balancing

+

In GaussDB(DWS), dynamic load balancing automatically adjusts the number of concurrent jobs based on the usage of CPU, I/O, and memory to avoid service errors and to prevent the system from becoming unresponsive due to overload.

+

segment

+

A segment in a database is a part containing one or more regions. A region is the smallest allocation unit of a database and consists of data blocks. One or more segments make up a tablespace.

+

F – J

+

failover

+

Automatic switchover from a faulty node to its standby node. Reversely, automatic switchback from the standby node to the primary node is called failback.

+

FDW

+

A foreign data wrapper (FDW) is a SQL interface provided by Postgres. It is used to access data stored in remote data sources so that DBAs can integrate data from unrelated data sources and store it in a common schema in the database.

+

freeze

+

An operation automatically performed by the AutoVacuum Worker process when transaction IDs are about to be exhausted. GaussDB(DWS) records transaction IDs in row headers. When a transaction reads a row, the transaction ID in the row header is compared with the current transaction ID to determine whether the row is visible. Transaction IDs are unsigned integers. When they are exhausted, transaction IDs wrap around, which would cause previously visible rows to become invisible. To prevent this problem, the freeze operation marks a transaction ID as a special ID; rows marked with this special transaction ID are visible to all transactions.

+

GDB

+

As a GNU debugger, GDB allows you to see what is going on 'inside' another program while it executes, or what another program was doing the moment it crashed. GDB can do four main kinds of things to help you catch bugs in the act:

+
  • Starts your program, specifying anything that might affect its behavior.
+
  • Stops a program in a specific condition.
+
  • Checks what happens when a program stops.
+
  • Changes things in your program, so you can experiment with correcting the effects of one bug and go on to learn about another.
+

GDS

+

General Data Service (GDS). To import data to GaussDB(DWS), you need to deploy the tool on the server where the source data is stored so that DNs can use this tool to obtain data.

+

GIN index

+

Generalized inverted index (GIN) is used for handling cases where the items to be indexed are composite values, and the queries to be handled by the index need to search for element values that appear within the composite items.

+

GNU

+

The GNU Project was publicly announced on September 27, 1983 by Richard Stallman, aiming at building an OS composed wholly of free software. GNU is a recursive acronym for "GNU's Not Unix!". Stallman announced that GNU should be pronounced as Guh-NOO. Technically, GNU is similar to Unix in design, a widely used commercial OS. However, GNU is free software and contains no Unix code.

+

gsql

+

GaussDB(DWS) interaction terminal. It enables you to interactively type in queries, issue them to GaussDB(DWS), and view the query results. Queries can also be entered from files. gsql supports many meta commands and shell-like commands, allowing you to conveniently compile scripts and automate tasks.

+

GTM

+

Global Transaction Manager (GTM) manages the status of transactions.

+

GUC

+

Grand unified configuration (GUC) includes parameters for running databases, the values of which determine database system behavior.

+

HA

+

High availability (HA) is a solution in which two modules operate in primary/standby mode to achieve high availability. This solution helps to minimize the duration of service interruptions caused by routine maintenance (planned) or sudden system breakdowns (unplanned), improving the system and application usability.

+

HBA

+

Host-based authentication (HBA) allows hosts to authenticate on behalf of all or some of the system users. It can apply to all users on a system or a subset using the Match directive. This type of authentication can be useful for managing computing clusters and other fairly homogenous pools of machines. In all, three files on the server and one on the client must be modified to prepare for host-based authentication.

+

HDFS

+

Hadoop Distributed File System (HDFS) is a subproject of Apache Hadoop. HDFS is highly fault tolerant and is designed to run on low-end hardware. The HDFS provides high-throughput access to large data sets and is ideal for applications having large data sets.

+

server

+

A combination of hardware and software designed for providing clients with services. This word alone refers to the computer running the server OS, or the software or dedicated hardware providing services.

+

advanced package

+

Logical and functional stored procedures and functions provided by GaussDB(DWS).

+

isolation

+

One of the ACID features of database transactions. Isolation means that the operations inside a transaction and data used are isolated from other concurrent transactions. The concurrent transactions do not affect each other.

+

relational database

+

A database created using a relational model. It processes data using methods of set algebra.

+

archive thread

+

A thread started when the archive function is enabled on a database. The thread archives database logs to a specified path.

+

failover

+

The automatic substitution of a functionally equivalent system component for a failed one. The system component can be a processor, server, network, or database.

+

environment variable

+

An environment variable defines the part of the environment in which a process runs. For example, it can define the part of the environment as the main directory, command search path, terminal that is in use, or the current time zone.

+

checkpoint

+

A mechanism that stores data in the database memory to disks at a certain time. GaussDB(DWS) periodically stores the data of committed and uncommitted transactions to disks. The data and redo logs can be used for database restoration if a database restarts or breaks down.

+

encryption

+

A function hiding information content during data transmission to prevent the unauthorized use of the information.

+

node

+

Cluster nodes (or nodes) are the physical and virtual servers that make up the GaussDB(DWS) cluster environment.

+

error correction

+

A technique that automatically detects and corrects errors in software and data streams to improve system stability and reliability.

+

process

+

An instance of a computer program that is being executed. A process may be made up of multiple threads of execution. Other processes cannot use a thread occupied by the process.

+

PITR

+

Point-In-Time Recovery (PITR) is a backup and restoration feature of GaussDB(DWS). Data can be restored to a specified point in time if backup data and WAL logs are normal.

+

record

+

In a relational database, a record corresponds to data in each row of a table.

+

cluster

+

A cluster is an independent system consisting of servers and other resources, ensuring high availability. In certain conditions, clusters can implement load balancing and concurrent processing of transactions.

+

K – O

+

LLVM

+

Low Level Virtual Machine (LLVM) is a compiler framework written in C++ that is designed to optimize the compile-time, link-time, run-time, and idle-time of programs written in arbitrary programming languages. It is open to developers and compatible with existing scripts.

+

GaussDB(DWS) LLVM dynamic compilation can be used to generate customized machine code for each query to replace original common functions. Query performance is improved by reducing redundant judgment conditions and virtual function invocation, and by improving data locality during actual queries.

+

LVS

+

Linux Virtual Server (LVS), a virtual server cluster system, is used for balancing the load of a cluster.

+

MPP

+

Massive Parallel Processing (MPP) refers to cluster architecture that consists of multiple machines. The architecture is also called a cluster system.

+

MVCC

+

Multi-Version Concurrency Control (MVCC) is a protocol that allows a tuple to have multiple versions, on which different query operations can be performed. A basic advantage is that read and write operations do not conflict.

+

NameNode

+

The NameNode is the centerpiece of a Hadoop file system, managing the namespace of the file system and client access to files.

+

OLAP

+

Online analytical processing (OLAP) is the most important application in the data warehouse system. It is dedicated to complex analytical operations, helps decision makers and executives to make decisions, and rapidly and flexibly processes complex queries involving a great amount of data based on analysts' requirements. In addition, OLAP provides decision makers with query results that are easy to understand, allowing them to learn the operating status of the enterprise. These decision makers can then produce informed and accurate solutions based on the query results.

+

OM

+

Operations Management (OM) provides management interfaces and tools for routine maintenance and configuration management of the cluster.

+

ORC

+

Optimized Row Columnar (ORC) is a widely used file format for structured data in a Hadoop system. It was introduced from the Hadoop HIVE project.

+

client

+

A computer or program that accesses or requests services from another computer or program.

+

free space management

+

A mechanism for managing free space in a table. This mechanism enables the database system to record free space in each table and establish an easy-to-search data structure, accelerating operations (such as INSERT) performed on the free space.

+

cross-cluster

+

In GaussDB(DWS), users can access data in other DBMS through foreign tables or using an Extension Connector. Such access is cross-cluster.

+

junk tuple

+

A tuple that is deleted using the DELETE and UPDATE statements. When deleting a tuple, GaussDB(DWS) only marks the tuples that are to be cleared. The Vacuum thread will then periodically clear these junk tuples.

+

column

+

An equivalent concept of "field". A database table consists of one or more columns. Together they describe all attributes of a record in the table.

+

logical node

+

Multiple logical nodes can be installed on the same node. A logical node is a database instance.

+

schema

+

Collection of database objects, including logical structures, such as tables, views, sequences, stored procedures, synonyms, indexes, clusters, and database links.

+

schema file

+

A SQL file that determines the database structure.

+

P – T

+

Page

+

Minimum memory unit for row storage in the GaussDB(DWS) relational object structure. The default size of a page is 8 KB.

+

PostgreSQL

+

An open-source DBMS developed by volunteers all over the world. PostgreSQL is not controlled by any companies or individuals. Its source code can be used for free.

+

Postgres-XC

+

Postgres-XC is an open-source PostgreSQL cluster that provides a write-scalable, synchronous, multi-master PostgreSQL cluster solution.

+

Postmaster

+

A thread started when the database service is started. It listens to connection requests from other nodes in the cluster or from clients.

+

After receiving and accepting a connection request from the standby server, the primary server creates a WAL Sender thread to interact with the standby server.

+

RHEL

+

Red Hat Enterprise Linux (RHEL)

+

redo log

+

A log that contains information required for performing an operation again in a database. If a database is faulty, redo logs can be used to restore the database to its original state.

+

SCTP

+

The Stream Control Transmission Protocol (SCTP) is a transport-layer protocol defined by the Internet Engineering Task Force (IETF) in 2000. The protocol provides reliable datagram transport over unreliable transmission services by transferring SCN narrowband signaling over IP networks.

+

savepoint

+

A savepoint marks the end of a sub-transaction (also known as a nested transaction) in a relational DBMS. The process of a long transaction can be divided into several parts. After a part is successfully executed, a savepoint will be created. If later execution fails, the transaction will be rolled back to the savepoint instead of being totally rolled back. This is helpful for recovering database applications from complicated errors. If an error occurs in a multi-statement transaction, the application can possibly recover by rolling back to the savepoint without terminating the entire transaction.

+

session

+

A task created by a database for a connection when an application attempts to connect to the database. Sessions are managed by the session manager. They execute initial tasks to perform all user operations.

+

shared-nothing architecture

+

A distributed computing architecture, in which none of the nodes share CPUs or storage resources. This architecture has good scalability.

+

SLES

+

SUSE Linux Enterprise Server (SLES) is an enterprise Linux OS provided by SUSE.

+

SMP

+

Symmetric multiprocessing (SMP) lets multiple CPUs run on a computer and share the same memory and bus. To ensure an SMP system achieves high performance, an OS must support multi-tasking and multi-thread processing. In databases, SMP means to concurrently execute queries using the multi-thread technology, efficiently using all CPU resources and improving query performance.

+

SQL

+

Structured Query Language (SQL) is a standard database query language. It consists of DDL, DML, and DCL.

+

SSL

+

Secure Socket Layer (SSL) is a network security protocol introduced by Netscape. SSL is a security protocol based on the TCP and IP communications protocols and uses the public key technology. SSL supports a wide range of networks and provides three basic security services, all of which use the public key technology. SSL ensures the security of service communication through the network by establishing a secure connection between the client and server and then sending data through this connection.

+

convergence ratio

+

Downlink to uplink bandwidth ratio of a switch. A high convergence ratio indicates a highly converged traffic environment and severe packet loss.

+

TCP

+

Transmission Control Protocol (TCP) sends and receives data through the IP protocol. It splits data into packets for sending, and checks and reassembles received packets to restore the original information. TCP is a connection-oriented, reliable protocol that ensures information correctness in transmission.

+

trace

+

A way of logging to record information about the way a program is executed. This information is typically used by programmers for debugging purposes. System administrators and technical support can diagnose common problems by using software monitoring tools and based on this information.

+

full backup

+

Backup of the entire database cluster.

+

full synchronization

+

A data synchronization mechanism specified in the GaussDB(DWS) HA solution. Used to synchronize all data from the primary server to a standby server.

+

Log File

+

A file to which a computer system writes a record of its activities.

+

transaction

+

A logical unit of work performed within a DBMS against a database. A transaction consists of a limited database operation sequence, and must have ACID features.

+

data

+

A representation of facts or directives for manual or automatic communication, explanation, or processing. Data includes constants, variables, arrays, and strings.

+

data redistribution

+

A process whereby a data table is redistributed among nodes after users change the data distribution mode.

+

data distribution

+

A mode in which table data is split and stored on each database instance in a distributed system. Table data can be distributed in hash, replication, or random mode. In hash mode, a hash value is calculated based on the value of a specified column in a tuple, and then the target storage location of the tuple is determined based on the mapping between nodes and hash values. In replication mode, tuples are replicated to all nodes. In random mode, data is randomly distributed to the nodes.

+

data partitioning

+

A division of a logical database or its constituent elements into multiple parts (partitions) whose data does not overlap based on specified ranges. Data is mapped to storage locations based on the value ranges of specific columns in a tuple.

+

database

+

A collection of data that is stored together and can be accessed, managed, and updated. Data in a database can be classified into types such as numerals, full text, digits, and images.

+

DB instance

+

A database instance consists of a process in GaussDB(DWS) and files controlled by the process. GaussDB(DWS) installs multiple database instances on one physical node. GTM, CM, CN, and DN installed on cluster nodes are all database instances. A database instance is also called a logical node.

+

database HA

+

GaussDB(DWS) provides a highly reliable HA solution. Every logical node in GaussDB(DWS) is identified as a primary or standby node. Only one GaussDB(DWS) node is identified as primary at a time. When the HA system is deployed for the first time, the primary server synchronizes all data to each standby server (full synchronization). The HA system then synchronizes only data that is new or has been modified to each standby server (incremental synchronization). When the HA system is running, the primary server can receive data read and write requests and the standby servers only synchronize logs.

+

database file

+

A binary file that stores user data and the data inside the database system.

+

data flow operator

+

An operator that exchanges data among query fragments. By their input/output relationships, data flows can be categorized into Gather flows, Broadcast flows, and Redistribution flows. Gather combines multiple query fragments of data into one. Broadcast forwards the data of one query fragment to multiple query fragments. Redistribution reorganizes the data of multiple query fragments and then redistributes the reorganized data to multiple query fragments.

+

data dictionary

+

A reserved table within a database which is used to store information about the database itself. The information includes database design information, stored procedure information, user rights, user statistics, database process information, database increase statistics, and database performance statistics.

+

deadlock

+

Unresolved contention for the use of resources.

+

index

+

An ordered data structure in the database management system. An index accelerates querying and the updating of data in database tables.

+

statistics

+

Information that is automatically collected by databases, including table-level information (number of tuples and number of pages) and column-level information (column value range distribution histogram). Statistics in databases are used to estimate the cost of execution plans to find the plan with the lowest cost.

+

stop word

+

In computing, stop words are words which are filtered out before or after processing of natural language data (text), saving storage space and improving search efficiency.

+

U – Z

+

vacuum

+

A thread that is periodically started up by a database to clear junk tuples. Multiple Vacuum threads can be started concurrently by setting a parameter.

+

verbose

+

The VERBOSE option specifies the information to be displayed.

+

WAL

+

Write-ahead logging (WAL) is a standard method for logging a transaction. Corresponding logs must be written into a permanent device before a data file (carrier for a table and index) is modified.

+

WAL Receiver

+

A thread created by the standby server during database duplication. The thread is used to receive data and commands from the primary server and to tell the primary server that the data and commands have been acknowledged. Only one WAL receiver thread can run on one standby server.

+

WAL Sender

+

A thread created on the primary server when the primary server has received a connection request from a standby server during database replication. This thread is used to send data and commands to standby servers and to receive responses from the standby servers. Multiple WAL Sender threads may run on one primary server. Each WAL Sender thread corresponds to a connection request initiated by a standby server.

+

WAL Writer

+

A thread, created when a database is started, for writing redo logs. This thread writes logs in the memory to a permanent device, such as a disk.

+

WLM

+

The WorkLoad Manager (WLM) is a module for controlling and allocating system resources in GaussDB(DWS).

+

Xlog

+

A transaction log. A logical node can have only one Xlog file.

+

xDR

+

X detailed record (xDR). It refers to detailed records on the user and signaling planes and can be categorized into charging data records (CDRs), user flow data records (UFDRs), transaction detail records (TDRs), and data records (SDRs).

+

network backup

+

Network backup provides a comprehensive and flexible data protection solution to Microsoft Windows, UNIX, and Linux platforms. Network backup can back up, archive, and restore files, folders, directories, volumes, and partitions on a computer.

+

physical node

+

A physical machine or device.

+

system catalog

+

A table storing meta information about the database. The meta information includes user tables, indexes, columns, functions, and the data types in a database.

+

pushdown

+

GaussDB(DWS) is a distributed database, where CN can send a query plan to multiple DNs for parallel execution. This CN behavior is called pushdown. It achieves better query performance than extracting data to CN for query.

+

compression

+

Data compression, source coding, or bit-rate reduction involves encoding information that uses fewer bits than the original representation. Compression can be either lossy or lossless. Lossless compression reduces bits by identifying and eliminating statistical redundancy. No information is lost in lossless compression. Lossy compression reduces bits by identifying and removing unnecessary or unimportant information. The process of reducing the size of a data file is commonly referred as data compression, although its formal name is source coding (coding done at the source of the data, before it is stored or transmitted).

+

consistency

+

One of the ACID features of database transactions. Consistency is a database status. In such a status, data in the database must comply with integrity constraints.

+

metadata

+

Data that provides information about other data. Metadata describes the source, size, format, or other characteristics of data. In database columns, metadata explains the content of a data warehouse.

+

atomicity

+

One of the ACID features of database transactions. Atomicity means that a transaction is composed of an indivisible unit of work. Either all operations performed in a transaction are committed, or none of them are. If an error occurs during transaction execution, the transaction is rolled back to its state before execution.

+

online scale-out

+

Online scale-out means that data can be saved to the database and query services are not interrupted during redistribution in GaussDB(DWS).

+

dirty page

+

A page that has been modified and is not written to a permanent device.

+

incremental backup

+

Incremental backup stores all files changed since the last valid backup.

+

incremental synchronization

+

A data synchronization mechanism in the GaussDB(DWS) HA solution. Only data modified since the last synchronization is synchronized to the standby server.

+

Host

+

A node that receives data read and write operations in the GaussDB(DWS) HA system and works with all standby servers. At any time, only one node in the HA system is identified as the primary server.

+

thesaurus

+

Standardized words or phrases that express document themes and are used for indexing and retrieval.

+

dump file

+

A specific type of the trace file. A dump is typically a one-time output of diagnostic data in response to an event, whereas a trace tends to be continuous output of diagnostic data.

+

resource pool

+

A resource pool is used for allocating resources in GaussDB(DWS). By binding a user to a resource pool, you can limit the priority of the jobs executed by the user and the resources available to those jobs.

+

tenant

+

A database service user who runs services using allocated computing (CPU, memory, and I/O) and storage resources. Service level agreements (SLAs) are met through resource management and isolation.

+

minimum restoration point

+

A method used by GaussDB(DWS) to ensure data consistency. During startup, GaussDB(DWS) checks consistency between the latest WAL logs and the minimum restoration point. If the record location of the minimum restoration point is greater than that of the latest WAL logs, the database fails to start.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0948.html b/docs/dws/dev/dws_04_0948.html new file mode 100644 index 00000000..7b55a092 --- /dev/null +++ b/docs/dws/dev/dws_04_0948.html @@ -0,0 +1,58 @@ + + +

GS_VIEW_DEPENDENCY_PATH

+

GS_VIEW_DEPENDENCY_PATH allows you to query the direct dependencies of all views visible to the current user. If the base table on which the view depends exists and the dependency between views at different levels is normal, you can use this view to query the dependency between views at different levels starting from the base table.

+ +
Table 1 GS_VIEW_DEPENDENCY_PATH columns

Column

+

Type

+

Description

+

objschema

+

name

+

View space name

+

objname

+

name

+

View name

+

refobjschema

+

name

+

Name of the space where the dependent object resides

+

refobjname

+

name

+

Name of a dependent object

+

path

+

text

+

Dependency path

+
+
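A hedged usage example; the view name my_view is hypothetical:

SELECT objschema, objname, refobjschema, refobjname, path
FROM gs_view_dependency_path
WHERE objname = 'my_view';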
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0949.html b/docs/dws/dev/dws_04_0949.html new file mode 100644 index 00000000..f3ee4adb --- /dev/null +++ b/docs/dws/dev/dws_04_0949.html @@ -0,0 +1,83 @@ + + +

Importing Data from One GaussDB(DWS) Cluster to Another

+

Function

You can create foreign tables to perform associated queries and import data between clusters.

+
+

Scenarios

+
+

Precautions

+
+

Procedure

  1. Create a server.

    CREATE SERVER server_remote FOREIGN DATA WRAPPER GC_FDW OPTIONS 
    +   (address '10.180.157.231:8000,10.180.157.130:8000' ,
    +  dbname 'gaussdb', 
    +  username 'xyz', 
    +  password 'xxxxxx'
    +);
    +
    • server_remote is the server name used for the foreign table.
    • address indicates the IP addresses and port numbers of CNs in the remote cluster. If LVS is configured, you are advised to enter only one LVS address. Otherwise, you are advised to set multiple CNs as server addresses.
    • dbname is the database name of the remote cluster.
    • username is the username used for connecting to the remote cluster. This user cannot be a system administrator.
    • password is the password used for logging in to the remote cluster.
    +
    +
    +

  2. Create a foreign table.

     CREATE FOREIGN TABLE region
    +(
    +    R_REGIONKEY INT4,
    +    R_NAME TEXT,
    +    R_COMMENT TEXT
    +)
    +SERVER
    +    server_remote
    +OPTIONS
    +(
    +    schema_name 'test',
    +    table_name 'region',
    +    encoding 'gbk'
    +);
    +
    • Foreign table columns cannot contain any constraints.
    • The column names and types of the foreign table must be the same as those of its corresponding remote table.
    • schema_name specifies the schema of the foreign table corresponding to the remote cluster. If this parameter is not specified, the default schema is used.
    • table_name specifies the name of the foreign table corresponding to the remote cluster. If this parameter is not specified, the default foreign table name is used.
    • encoding specifies the encoding format of the remote cluster. If this parameter is not specified, the default encoding format is used.
    +
    +
    +

  3. View the foreign table.

    \d+ region
    +
    +                              Foreign table "public.region"
    +   Column    |  Type   | Modifiers | FDW Options | Storage  | Stats target | Description
    +-------------+---------+-----------+-------------+----------+--------------+-------------
    + r_regionkey | integer |           |             | plain    |              |
    + r_name      | text    |           |             | extended |              |
    + r_comment   | text    |           |             | extended |              |
    +Server: server_remote
    +FDW Options: (schema_name 'test', table_name 'region', encoding 'gbk')
    +FDW permition: read only
    +Has OIDs: no
    +Distribute By: ROUND ROBIN
    +Location Nodes: ALL DATANODES
    +

  4. Check the created server.

    \des+ server_remote
    +                                                                                                                               List of foreign servers
    +     Name      |  Owner  | Foreign-data wrapper | Access privileges | Type | Version |
    +                  FDW Options                                                                                    | Description
    +---------------+---------+----------------------+-------------------+------+---------+-----------------------------------------------------------------
    +-----------------------------------------------------------------------------------------------------------------+-------------
    + server_remote | dbadmin | gc_fdw               |                   |      |         | (address '10.180.157.231:8000,10.180.157.130:8000', dbname 'gaussdb'
    +, username 'xyz', password 'xxxxxx') |
    +(1 row)
    +

  5. Use the foreign table to import data or perform associated queries.

    • Import data.
       CREATE TABLE local_region
      +(
      +    R_REGIONKEY INT4,
      +    R_NAME TEXT,
      +    R_COMMENT TEXT
      +);
      +INSERT INTO local_region SELECT * FROM region;
      +
      • If a connection failure is reported, check the server information and ensure that the specified clusters are connected.
      • If an error is reported, indicating that the table does not exist, check whether the option information of the foreign table is correct.
      • If a column mismatch error is reported, check whether the column information of the foreign table is consistent with that of the corresponding table in the remote cluster.
      • If a version inconsistency error is reported, upgrade the cluster and try again.
      • If garbled characters are displayed, check the encoding format of the source data, re-create the foreign table, and specify the correct encoding format.
      +
      +
      +
    • Perform an associated query.
      SELECT * FROM region, local_region WHERE local_region.R_NAME = region.R_NAME;
      +
      • A foreign table can be used as a local table to perform complex jobs.
      • If statistics have been collected on the remote cluster, run ANALYZE on the foreign table to obtain a better execution plan (see the example after this list).
      • If the local cluster has fewer DNs than the remote cluster, enable SMP in the local cluster for better performance.
      +
      +
      +
    +
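      For example, to refresh statistics on the foreign table before running the join (using the region foreign table from this section):

      ANALYZE region;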

  6. Delete the foreign table.

    DROP FOREIGN TABLE region; 
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0970.html b/docs/dws/dev/dws_04_0970.html new file mode 100644 index 00000000..7f5e9f93 --- /dev/null +++ b/docs/dws/dev/dws_04_0970.html @@ -0,0 +1,67 @@ + + +

Common Performance Parameter Optimization Design

+

+

To improve the cluster performance, you can use multiple methods to optimize the database, including hardware configuration, software driver upgrade, and internal parameter adjustment of the database. This section describes some common parameters and recommended configurations.

+
  1. query_dop: user-defined query degree of parallelism (DOP)

    The SMP architecture trades resources for time: once a plan is executed in parallel, it consumes more resources, including CPU, memory, I/O, and network bandwidth. As the DOP grows, so does the resource consumption.

    +
    • When resources become a bottleneck, SMP cannot improve performance and may even degrade it. In the case of a resource bottleneck, you are advised to disable SMP.
    • If resources are sufficient, a higher DOP yields a greater performance improvement.
    +

    The SMP DOP can be configured at the session level. You are advised to enable SMP just before running a query that meets the requirements and to disable it once the query completes; otherwise, SMP may affect services during peak hours.

    +

    The default value of query_dop is 1. You can set query_dop to 10 to enable SMP in a session, as shown in the sketch below.

    +
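    A minimal session-level sketch (the query in the middle is illustrative):

    SET query_dop = 10;   -- enable SMP for this session
    -- run the resource-intensive query here
    SET query_dop = 1;    -- restore the default once the query finishes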
  2. enable_dynamic_workload: dynamic load management

    Dynamic load management refers to the automatic queue control of complex queries based on user loads in a database. This fine-tunes system parameters without manual adjustment.

    +

    This parameter is enabled by default. Notes:

    +
    • A CN in the cluster is used as the Central Coordinator (CCN) for collecting and scheduling job execution. To query this CN, run gs_om -t status --detail. Its status will be displayed in Central Coordinator State. If there is no CCN, jobs will not be controlled by dynamic load management.
    • Simple query jobs (which are estimated to require less than 32 MB memory) and non-DML statements (statements other than INSERT, UPDATE, DELETE, and SELECT) have no adaptive load restrictions. Control the upper memory limits for them on a single CN using max_active_statements.
    • The default value of work_mem is 64 MB. In adaptive load scenarios, the value cannot be increased. If you increase it, memory cannot be controlled for certain statements, such as statements that have not been analyzed.
    • Reduce concurrency in the following scenarios, because high concurrency may lead to uncontrollable memory usage.
      • A single tuple occupies excessive memory, for example, a base table contains a column more than 1 MB wide.
      • A query is fully pushed down.
      • A statement occupies a large amount of memory on the CN, for example, a statement that cannot be pushed down or a cursor withholding statement.
      • An execution plan creates a hash table based on the hash join operator, and the table has many duplicate values and occupies a large amount of memory.
      • UDFs are used and occupy a large amount of memory.
      +
    +

    When configuring this parameter, you can set query_dop to 0 (adaptive). In this case, the system dynamically selects the optimal DOP between 1 and 8 for each query based on resource usage and plan characteristics. The enable_dynamic_workload parameter also supports dynamic memory allocation.

    +
  3. max_active_statements

    Specifies the maximum number of concurrent jobs. This parameter applies to all the jobs on one CN.

    +

    Set the value of this parameter based on system resources (CPU, I/O, and memory) so that they are fully utilized and the system does not crash because of excessive concurrent jobs.

    +
    • If this parameter is set to -1 or 0, the number of global concurrent jobs is not limited.
    • In the point query scenario, you are advised to set this parameter to 100.
    • In an analytical query scenario, set this parameter to the number of CPU cores divided by the number of DNs. Generally, its value ranges from 5 to 8 (see the worked example after this list).
    +
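    For example, on servers with 48 CPU cores and 8 DNs each, 48/8 = 6, which falls within the recommended range of 5 to 8 (an illustrative calculation, not a measured recommendation).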
  4. session_timeout

    By default, if a client stays idle after connecting to a database, it is automatically disconnected once the duration specified by this parameter elapses.

    +

    You are advised to set this parameter to 0, indicating that the timeout setting is disabled to prevent disconnection due to timeout.

    +
  5. The five parameters that affect the database memory are as follows:
    max_process_memory, shared_buffers, cstore_buffers, work_mem, and maintenance_work_mem
    • max_process_memory

      max_process_memory is a logical memory management parameter. It is used to control the maximum available memory on a single CN or DN.

      +

      Formula: max_process_memory = Physical memory × 0.665 / (1 + Number of primary DNs)

      +
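      For example, assuming a server with 512 GB of physical memory and 4 primary DNs: max_process_memory = 512 GB × 0.665 / (1 + 4) ≈ 68 GB per instance.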
    • shared_buffers

      Specifies the size of the shared memory used by GaussDB(DWS). If the value of this parameter is increased, GaussDB(DWS) requires more System V shared memory than the default system setting.

      +

      You are advised to set shared_buffers to less than 40% of the memory. This buffer is mainly used for scanning row-store tables. Formula: shared_buffers = (Memory of a single server / Number of DNs on a single server) × 0.4 × 0.25

      +
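      For example, assuming 256 GB of memory and 4 DNs on a server: shared_buffers = (256 GB / 4) × 0.4 × 0.25 = 6.4 GB per DN.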
    +
    • cstore_buffers

      Specifies the size of the shared buffer used by column-store tables and by the column-store formats (ORC, Parquet, and CarbonData) of OBS and HDFS foreign tables.

      +

      For details about the calculation formula, see the formula in shared_buffers.

      +
    +
    • work_mem

      Specifies the size of the memory used by internal sort operations and hash tables before data is written to temporary disk files.

      +

      Sort operations are required for ORDER BY, DISTINCT, and merge joins. Hash tables are used in hash joins, hash-based aggregation, and hash-based processing of IN subqueries.

      +

      In a complex query, several sort or hash operations may run in parallel. Each operation will be allowed to use as much memory as this parameter specifies. If the memory is insufficient, data will be written into temporary files. In addition, several running sessions may be performing such operations concurrently. Therefore, the total memory used may be many times the value of work_mem.

      +

      The formulas are as follows:

      +

      For non-concurrent complex serial queries, each query requires five to ten associated operations. Configure work_mem using the following formula: work_mem = 50% of the memory/10.

      +

      For non-concurrent simple serial queries, each query requires two to five associated operations. Configure work_mem using the following formula: work_mem = 50% of the memory/5.

      +

      For concurrent queries, configure work_mem using the following formula: work_mem = work_mem for serial queries/Number of concurrent SQL statements.

      +
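      For example, assuming 50% of the memory is 256 GB: a non-concurrent complex serial query gets work_mem = 256 GB / 10 ≈ 25.6 GB, and with 10 concurrent SQL statements this drops to about 2.6 GB per statement (illustrative figures only).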
    • maintenance_work_mem

      maintenance_work_mem specifies the maximum amount of memory used for maintenance operations such as VACUUM, CREATE INDEX, and ALTER TABLE ADD FOREIGN KEY.

      +

      Setting suggestions:

      +

      Setting this parameter to a value greater than that of work_mem makes maintenance operations such as VACUUM and the restoration of database dumps more efficient. In a database session, only one maintenance operation can run at a time, and maintenance is usually performed when there are not many sessions.

      +

      When the autovacuum process is running, up to autovacuum_max_workers times this memory may be allocated. Keep this in mind and set maintenance_work_mem to a value greater than or equal to that of work_mem.

      +
    +
    +
  6. bulk_write_ring_size

    Specifies the size of a ring buffer used for parallel data import.

    +

    This parameter affects the database import performance. You are advised to increase the value of this parameter on DNs when a large amount of data is to be imported.

    +
  7. Two connection parameters:
    max_connections and max_prepared_transactions
    • max_connections

      Specifies the maximum number of concurrent connections to the database. This parameter affects the concurrent processing capability of the cluster.

      +

      Setting suggestions:

      +

      Retain the default value of this parameter on CNs. On DNs, set it to a value calculated using this formula: Number of CNs × Value of this parameter on a CN.

      +

      If the value of this parameter is increased, GaussDB(DWS) may require more System V shared memory or semaphore, which may exceed the default maximum value of the OS. In this case, modify the value as needed.

      +
    +
    +
    • max_prepared_transactions

      Specifies the maximum number of transactions that can stay in the prepared state simultaneously. If the value of this parameter is increased, GaussDB(DWS) requires more System V shared memory than the default system setting.

      +
    +

    The value of max_connections is related to max_prepared_transactions. Before configuring max_connections, ensure that the value of max_prepared_transactions is greater than or equal to that of max_connections, so that every session can have a prepared transaction pending.

    +
    +
  8. checkpoint_completion_target

    Specifies the target for which the checkpoint is completed.

    +

    Each checkpoint must be completed within 50% of the checkpoint interval.

    +

    The default value is 0.5. To improve the performance, you can change the value to 0.9.

    +
  9. data_replicate_buffer_size

    Specifies the memory used by queues when the sender sends data pages to the receiver. The value of this parameter affects the buffer size used for the replication from the primary server to the standby server.

    +

    The default value is 128 MB. If the server memory is 256 GB, you can increase the value to 512 MB.

    +
  10. wal_receiver_buffer_size

    Specifies the memory buffer size for the standby and secondary servers to store the received XLOG files.

    +

    The default value is 64 MB. If the server memory is 256 GB, you can increase the value to 128 MB.

    +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0971.html b/docs/dws/dev/dws_04_0971.html new file mode 100644 index 00000000..76d2c5b3 --- /dev/null +++ b/docs/dws/dev/dws_04_0971.html @@ -0,0 +1,106 @@ + + +

PL/Java Usage

+
+ +
Table 1 PL/Java mapping for default data types

GaussDB(DWS) | Java
BOOLEAN | boolean
"char" | byte
bytea | byte[]
SMALLINT | short
INTEGER | int
BIGINT | long
FLOAT4 | float
FLOAT8 | double
CHAR | java.lang.String
VARCHAR | java.lang.String
TEXT | java.lang.String
name | java.lang.String
DATE | java.sql.Timestamp
TIME | java.sql.Time (stored value treated as local time)
TIMETZ | java.sql.Time
TIMESTAMP | java.sql.Timestamp
TIMESTAMPTZ | java.sql.Timestamp
+
+
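A minimal usage sketch of this mapping, assuming a deployed Java class example.StringUtil with a static method upper(String); both names are hypothetical:

CREATE FUNCTION java_upper(VARCHAR)
    RETURNS VARCHAR
    AS 'example.StringUtil.upper'
    LANGUAGE java;
SELECT java_upper('text');  -- the VARCHAR argument is passed to Java as java.lang.String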
+ +
+ diff --git a/docs/dws/dev/dws_04_0972.html b/docs/dws/dev/dws_04_0972.html new file mode 100644 index 00000000..6fa1bc22 --- /dev/null +++ b/docs/dws/dev/dws_04_0972.html @@ -0,0 +1,33 @@ + + +

PL/pgSQL Usage

+

General Principles

  1. Development shall strictly comply with design documents.
  2. Program modules shall be highly cohesive and loosely coupled.
  3. Proper, comprehensive troubleshooting measures shall be developed.
  4. Code shall be reasonable and clear.
  5. Program names shall comply with a unified naming rule.
  6. Fully consider the program efficiency, including the program execution efficiency and database query and storage efficiency. Use efficient and effective processing methods.
  7. Program comments shall be detailed, correct, and standard.
  8. The commit or rollback operation shall be performed at the end of a stored procedure, unless otherwise required by applications.
  9. Programs shall support 24/7 processing. In the case of an interruption, the applications shall provide secure, easy-to-use resuming features.
  10. Application output shall be standard and simple. The output shall show the progress, error description, and execution results for application maintenance personnel, and provide clear and intuitive reports and documents for business personnel.
+
+

Programming Principles

  1. Use bound variables in SQL statements in PL/pgSQL.
  2. RETURNING is recommended for SQL statements in PL/pgSQL.
  3. Principles for using stored procedures:
    1. Do not use more than 50 output parameters of the Varchar or Varchar2 type in a stored procedure.
    2. Do not use the LONG type for input or output parameters.
    3. Use the CLOB type for output strings that exceed 10 MB.
    +
  4. Variable declaration principles:
    1. Use %TYPE to declare a variable that has the same meaning as that of a column or variable in an application table.
    2. Use %ROWTYPE to declare a record that has the same meaning as that of a row in an application table (see the sketch after this list).
    3. Each line of a variable declaration shall contain only one statement.
    4. Do not declare variables of the LONG type.
    +
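    A minimal declaration sketch, assuming an application table customers with a name column (all names are hypothetical):

    DECLARE
        v_name customers.name%TYPE;   -- same type as the column
        v_row  customers%ROWTYPE;     -- same structure as a table row
    BEGIN
        SELECT name INTO v_name FROM customers LIMIT 1;
    END;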
  5. Principles for using cursors:
    1. Explicit cursors shall be closed after being used.
    2. A cursor variable shall be closed after being used. If the cursor variable needs to transfer data to an invoked application, the cursor shall be closed in the application. If the cursor variable is used only in a stored procedure, the cursor shall be closed explicitly.
    3. Before using DBMS_SQL.CLOSE_CURSOR to close a cursor, use DBMS_SQL.IS_OPEN to check whether the cursor is open.
    +
  6. Principles for collections:
    1. You are advised to use the FOR ALL statement instead of the FOR loop statement to reference elements in a collection.
    +
  7. Principles for using dynamic statements:
    1. Dynamic SQL shall not be used in the transaction programs of online systems.
    2. Dynamic SQL statements can be used to implement DDL statements and system control commands in PL/pgSQL.
    3. Variable binding is recommended.
    +
  8. Principles for assembling SQL statements:
    1. You are advised to use bound variables to assemble SQL statements.
    2. If the conditions for assembling SQL statements contain external input sources, the characters in the input conditions shall be checked to prevent attacks.
    3. In a PL/pgSQL script, the length of a single line of code cannot exceed 2499 characters.
    +
  9. Principles for using triggers:
    1. Triggers can be used to implement availability design in scenarios where differential data logs are irrelevant to service processing.
    2. Do not use triggers to implement service processing functions.
    +
+
+

Exception Handling Principles

Any error that occurs in a PL/pgSQL function aborts the execution of the function and related transactions. You can use a BEGIN block with an EXCEPTION clause to catch and fix errors.

+
  1. In a PL/pgSQL block, if an SQL statement cannot return a definite result, you are advised to handle possible exceptions in EXCEPTION, as illustrated after this list. Otherwise, unhandled errors may propagate to the outer block and cause program logic errors.
  2. You can directly use the exceptions that have been defined in the system. DWS does not support custom exceptions.
  3. A block containing an EXCEPTION clause is more expensive to enter and exit than a block without one. Therefore, do not use EXCEPTION without need.
+
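A minimal sketch of catching an error inside a block (table and variable names are hypothetical):

BEGIN
    SELECT amount INTO STRICT v_amount FROM orders WHERE id = 1;
EXCEPTION
    WHEN NO_DATA_FOUND THEN
        v_amount := 0;   -- handle the error here instead of letting it abort the caller
END;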
+

Writing Standard

  1. Variable naming rules:
    1. The input parameter format of a procedure or function is IN_Parameter_name. The parameter name shall be in uppercase.
    2. The output parameter format of a procedure or function is OUT_Parameter_name. The parameter name shall be in uppercase.
    3. The input and output parameter format of a procedure or function is IO_Parameter_name. The parameter name shall be in uppercase.
    4. Variables used in procedures and functions shall be composed of v_Variable_name. The variable name shall be in lower case.
    5. In query concatenation, the concatenation variable name of the WHERE statement shall be v_where, and the concatenation variable name of the SELECT statement shall be v_select.
    6. The record type (TYPE) name shall consist of T and a variable name. The name shall be in uppercase.
    7. A cursor name shall consist of CUR and a variable name. The name shall be in uppercase.
    8. The name of a reference cursor (REF CURSOR) shall consist of REF and a variable name. The name shall be in uppercase.
    +
  2. Rules for defining variable types:
    1. Use %TYPE to declare the type of a variable that has the same meaning as that of a column in an application table.
    2. Use %ROWTYPE to declare the type of a record that has the same meaning as that of a row in an application table.
    +
  3. Rules for writing comments:
    1. Comments shall be meaningful and shall not just repeat the code content.
    2. Comments shall be concise and easy to understand.
    3. Comments shall be provided at the beginning of each stored procedure or function. The comments shall contain a brief function description, author, compilation date, program version number, and program change history. The format of the comments at the beginning of stored procedures shall be the same.
    4. Comments shall be provided next to the input and output parameters to describe the meaning of variables.
    5. Comments shall be provided at the beginning of each block or large branch to briefly describe the function of the block. If an algorithm is used, comments shall be provided to describe the purpose and result of the algorithm.
    +
  4. Variable declaration format:

    Each line shall contain only one statement. To assign initial values, write them in the same line.

    +
  5. Letter case:

    Use uppercase letters except for variable names.

    +
  6. Indentation:

    In the statements used for creating a stored procedure, the keywords CREATE, AS/IS, BEGIN, and END at the same level shall have the same indent.

    +
  7. Statement rules:
    1. For statements that define variables, each line shall contain only one statement.
    2. The keywords IF, ELSE IF, ELSE, and END at the same level shall have the same indent.
    3. The keywords CASE and END shall have the same indent. The keywords WHEN and ELSE shall be indented.
    4. The keywords LOOP and END LOOP at the same level shall have the same indent. Nested statements or statements at lower levels shall have more indent.
    +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_04_0985.html b/docs/dws/dev/dws_04_0985.html new file mode 100644 index 00000000..fde9e476 --- /dev/null +++ b/docs/dws/dev/dws_04_0985.html @@ -0,0 +1,26 @@ + + +

Data Migration

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0986.html b/docs/dws/dev/dws_04_0986.html new file mode 100644 index 00000000..ab7d8e1b --- /dev/null +++ b/docs/dws/dev/dws_04_0986.html @@ -0,0 +1,21 @@ + + + +

Full Database Migration

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0987.html b/docs/dws/dev/dws_04_0987.html new file mode 100644 index 00000000..7549d644 --- /dev/null +++ b/docs/dws/dev/dws_04_0987.html @@ -0,0 +1,18 @@ + + +

Metadata Migration

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0988.html b/docs/dws/dev/dws_04_0988.html new file mode 100644 index 00000000..14db9ab5 --- /dev/null +++ b/docs/dws/dev/dws_04_0988.html @@ -0,0 +1,18 @@ + + +

Other Operations

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0994.html b/docs/dws/dev/dws_04_0994.html new file mode 100644 index 00000000..4336e87c --- /dev/null +++ b/docs/dws/dev/dws_04_0994.html @@ -0,0 +1,23 @@ + + + +

Sensitive Data Management

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_04_0995.html b/docs/dws/dev/dws_04_0995.html new file mode 100644 index 00000000..d8d4cb3a --- /dev/null +++ b/docs/dws/dev/dws_04_0995.html @@ -0,0 +1,100 @@ + + +

Using Functions for Encryption and Decryption

+

GaussDB(DWS) supports encryption and decryption of strings using the following functions:

+ +
+
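For example, string encryption and decryption typically look like this (a sketch assuming the gs_encrypt_aes128 and gs_decrypt_aes128 functions; check the function list of your cluster version):

SELECT gs_encrypt_aes128('sensitive text', 'key_string');
-- feed the returned ciphertext to gs_decrypt_aes128 with the same key to recover the plaintext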
+ +
+ diff --git a/docs/dws/dev/dws_04_0998.html b/docs/dws/dev/dws_04_0998.html new file mode 100644 index 00000000..6d0bab4d --- /dev/null +++ b/docs/dws/dev/dws_04_0998.html @@ -0,0 +1,65 @@ + + +

MPP_TABLES

+

MPP_TABLES displays information about tables in PGXC_CLASS.

+ +
Table 1 MPP_TABLES columns

Name | Type | Description
schemaname | name | Name of the schema that contains the table
tablename | name | Name of the table
tableowner | name | Owner of the table
tablespace | name | Tablespace where the table is located
pgroup | name | Name of the node cluster
nodeoids | oidvector_extend | List of distributed table node OIDs
+
+
+
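For example, to look up the distribution information of a single table (the table name is illustrative):

SELECT * FROM mpp_tables WHERE tablename = 'region';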
+ +
+ diff --git a/docs/dws/dev/dws_04_1000.html b/docs/dws/dev/dws_04_1000.html new file mode 100644 index 00000000..fd49ac2f --- /dev/null +++ b/docs/dws/dev/dws_04_1000.html @@ -0,0 +1,43 @@ + + + +

Developer Guide

+ +

+
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_2000.html b/docs/dws/dev/dws_04_2000.html new file mode 100644 index 00000000..bd4f2be1 --- /dev/null +++ b/docs/dws/dev/dws_04_2000.html @@ -0,0 +1,47 @@ + + + +

SQL Syntax Reference

+ +

+
+ +
+ +
+ diff --git a/docs/dws/dev/dws_04_3333.html b/docs/dws/dev/dws_04_3333.html new file mode 100644 index 00000000..1f778f43 --- /dev/null +++ b/docs/dws/dev/dws_04_3333.html @@ -0,0 +1,27 @@ + + +

Change History

+
+
Released On | Description
2022-11-17 | This is the first official release, which adapts to DWS 8.1.1.202. Feature changes:
• GUC parameters: added enable_light_colupdate, bi_page_reuse_factor, expand_hashtable_ratio, query_dop_ratio, enable_row_fast_numeric, enable_view_update, and enable_grant_option.
• Functions and operators: added pgxc_wlm_get_schema_space(cstring), pgxc_wlm_analyze_schema_space(cstring), median(expression), gs_password_expiration, pgxc_get_lock_conflicts(), percentile_disc(const) within group(order by expression), and percentile_cont(const) within group(order by expression).
• System views: added PGXC_TOTAL_SCHEMA_INFO, PGXC_TOTAL_SCHEMA_INFO_ANALYZE, GS_WLM_SQL_ALLOW, PGXC_BULKLOAD_PROGRESS, PGXC_BULKLOAD_STATISTICS, and PG_BULKLOAD_STATISTICS.
• Keywords: added EXPIRATION, IFNULL, and TIMESTAMPDIFF.
• CREATE REDACTION POLICY: added custom data redaction.
• Syntax compatibility differences among Oracle, Teradata, and MySQL: added MySQL syntax compatibility differences.
+
+
+
+ diff --git a/docs/dws/dev/dws_06_0001.html b/docs/dws/dev/dws_06_0001.html new file mode 100644 index 00000000..91ff157b --- /dev/null +++ b/docs/dws/dev/dws_06_0001.html @@ -0,0 +1,20 @@ + + +

GaussDB(DWS) SQL

+

What Is SQL?

SQL is a standard computer language used to control access to databases and to manage data in them.

+

SQL provides different statements to enable you to:

+ +

SQL consists of commands and functions that are used to manage databases and database objects. SQL can also enforce rules for data types, expressions, and text. Therefore, section "SQL Reference" describes data types, expressions, functions, and operators in addition to SQL syntax.

+
+

Development of SQL Standards

Released SQL standards are as follows:

+ +
+

Supported SQL Standards

GaussDB(DWS) is compatible with Postgres-XC features and supports the major features of SQL2, SQL3, and SQL4 by default.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0002.html b/docs/dws/dev/dws_06_0002.html new file mode 100644 index 00000000..0b810d97 --- /dev/null +++ b/docs/dws/dev/dws_06_0002.html @@ -0,0 +1,22 @@ + + +

Differences Between GaussDB(DWS) and PostgreSQL

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0003.html b/docs/dws/dev/dws_06_0003.html new file mode 100644 index 00000000..6ec3e451 --- /dev/null +++ b/docs/dws/dev/dws_06_0003.html @@ -0,0 +1,17 @@ + + +

GaussDB(DWS) gsql, PostgreSQL psql, and libpq

+

GaussDB(DWS) gsql and PostgreSQL psql

GaussDB(DWS) gsql differs from PostgreSQL psql in that the former has made the following changes to enhance security:

+ +

gsql provides the following additional functions based on psql:

+ +
+

libpq

PostgreSQL libpq was heavily modified during the development of certain GaussDB(DWS) functions, such as the gsql client connection tool, but the modified libpq interfaces have not been verified for application development. Because of the underlying risks, you are advised not to build applications on this set of APIs; use the ODBC or JDBC APIs instead.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0004.html b/docs/dws/dev/dws_06_0004.html new file mode 100644 index 00000000..675a9893 --- /dev/null +++ b/docs/dws/dev/dws_06_0004.html @@ -0,0 +1,13 @@ + + +

Data Type Differences

+

For details about the data types supported by GaussDB(DWS), see Data Types.

+

The following PostgreSQL data type is not supported:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0005.html b/docs/dws/dev/dws_06_0005.html new file mode 100644 index 00000000..28c0978d --- /dev/null +++ b/docs/dws/dev/dws_06_0005.html @@ -0,0 +1,15 @@ + + +

Function Differences

+

For details about the functions supported by GaussDB(DWS), see Functions and Operators.

+

The following PostgreSQL functions are not supported:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0006.html b/docs/dws/dev/dws_06_0006.html new file mode 100644 index 00000000..c333675a --- /dev/null +++ b/docs/dws/dev/dws_06_0006.html @@ -0,0 +1,14 @@ + + +

PostgreSQL Features Unsupported by GaussDB(DWS)

+
+
The following features are disabled in GaussDB(DWS) for separation of rights: +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0007.html b/docs/dws/dev/dws_06_0007.html new file mode 100644 index 00000000..51122ac0 --- /dev/null +++ b/docs/dws/dev/dws_06_0007.html @@ -0,0 +1,6505 @@ + + +

Keyword

+

SQL contains reserved and non-reserved keywords. The standards require that reserved keywords not be used as other identifiers unless they are quoted (see the example below). Non-reserved keywords have special meanings only in specific contexts and can be used as identifiers in other contexts.
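For example, a reserved keyword can serve as a column name only when it is double-quoted (an illustrative sketch):

CREATE TABLE demo_tbl ("select" INT);   -- fails without the double quotation marks
SELECT "select" FROM demo_tbl;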

Table 1 SQL keywords

Keyword | GaussDB(DWS) | SQL:1999 | SQL-92
ABORT | Non-reserved | - | -
ABS | - | Non-reserved | -
ABSOLUTE | Non-reserved | Reserved | Reserved
ACCESS | Non-reserved | - | -
ACCOUNT | Non-reserved | - | -
ACTION | Non-reserved | Reserved | Reserved
ADA | - | Non-reserved | Non-reserved
ADD | Non-reserved | Reserved | Reserved
ADMIN | Non-reserved | Reserved | -
AFTER | Non-reserved | Reserved | -
AGGREGATE | Non-reserved | Reserved | -
ALIAS | - | Reserved | -
ALL | Reserved | Reserved | Reserved
ALLOCATE | - | Reserved | Reserved
ALSO | Non-reserved | - | -
ALTER | Non-reserved | Reserved | Reserved
ALWAYS | Non-reserved | - | -
ANALYSE | Reserved | - | -
ANALYZE | Reserved | - | -
AND | Reserved | Reserved | Reserved
ANY | Reserved | Reserved | Reserved
APP | Non-reserved | - | -
ARE | - | Reserved | Reserved
ARRAY | Reserved | Reserved | -
AS | Reserved | Reserved | Reserved
ASC | Reserved | Reserved | Reserved
ASENSITIVE | - | Non-reserved | -
ASSERTION | Non-reserved | Reserved | Reserved
ASSIGNMENT | Non-reserved | Non-reserved | -
ASYMMETRIC | Reserved | Non-reserved | -
AT | Non-reserved | Reserved | Reserved
ATOMIC | - | Non-reserved | -
ATTRIBUTE | Non-reserved | - | -
AUTHID | Reserved | - | -
AUTHINFO | Non-reserved | - | -
AUTHORIZATION | Reserved (functions and types allowed) | Reserved | Reserved
AUTOEXTEND | Non-reserved | - | -
AUTOMAPPED | Non-reserved | - | -
AVG | - | Non-reserved | Reserved
BACKWARD | Non-reserved | - | -
BARRIER | Non-reserved | - | -
BEFORE | Non-reserved | Reserved | -
BEGIN | Non-reserved | Reserved | Reserved
BETWEEN | Non-reserved (excluding functions and types) | Non-reserved | Reserved
BIGINT | Non-reserved (excluding functions and types) | - | -
BINARY | Reserved (functions and types allowed) | Reserved | -
BINARY_DOUBLE | Non-reserved (excluding functions and types) | - | -
BINARY_INTEGER | Non-reserved (excluding functions and types) | - | -
BIT | Non-reserved (excluding functions and types) | Reserved | Reserved
BITVAR | - | Non-reserved | -
BIT_LENGTH | - | Non-reserved | Reserved
BLOB | Non-reserved | Reserved | -
BOOLEAN | Non-reserved (excluding functions and types) | Reserved | -
BOTH | Reserved | Reserved | Reserved
BUCKETS | Reserved | - | -
BREADTH | - | Reserved | -
BY | Non-reserved | Reserved | Reserved
C | - | Non-reserved | Non-reserved
CACHE | Non-reserved | - | -
CALL | Non-reserved | Reserved | -
CALLED | Non-reserved | Non-reserved | -
CARDINALITY | - | Non-reserved | -
CASCADE | Non-reserved | Reserved | Reserved
CASCADED | Non-reserved | Reserved | Reserved
CASE | Reserved | Reserved | Reserved
CAST | Reserved | Reserved | Reserved
CATALOG | Non-reserved | Reserved | Reserved
CATALOG_NAME | - | Non-reserved | Non-reserved
CHAIN | Non-reserved | Non-reserved | -
CHAR | Non-reserved (excluding functions and types) | Reserved | Reserved
CHARACTER | Non-reserved (excluding functions and types) | Reserved | Reserved
CHARACTERISTICS | Non-reserved | - | -
CHARACTER_LENGTH | - | Non-reserved | Reserved
CHARACTER_SET_CATALOG | - | Non-reserved | Non-reserved
CHARACTER_SET_NAME | - | Non-reserved | Non-reserved
CHARACTER_SET_SCHEMA | - | Non-reserved | Non-reserved
CHAR_LENGTH | - | Non-reserved | Reserved
CHECK | Reserved | Reserved | Reserved
CHECKED | - | Non-reserved | -
CHECKPOINT | Non-reserved | - | -
CLASS | Non-reserved | Reserved | -
CLEAN | Non-reserved | - | -
CLASS_ORIGIN | - | Non-reserved | Non-reserved
CLOB | Non-reserved | Reserved | -
CLOSE | Non-reserved | Reserved | Reserved
CLUSTER | Non-reserved | - | -
COALESCE | Non-reserved (excluding functions and types) | Non-reserved | Reserved
COBOL | - | Non-reserved | Non-reserved
COLLATE | Reserved | Reserved | Reserved
COLLATION | Reserved (functions and types allowed) | Reserved | Reserved
COLLATION_CATALOG | - | Non-reserved | Non-reserved
COLLATION_NAME | - | Non-reserved | Non-reserved
COLLATION_SCHEMA | - | Non-reserved | Non-reserved
COLUMN | Reserved | Reserved | Reserved
COLUMNS | Non-reserved | - | -
COLUMN_NAME | - | Non-reserved | Non-reserved
COMMAND_FUNCTION | - | Non-reserved | Non-reserved
COMMAND_FUNCTION_CODE | - | Non-reserved | -
COMMENT | Non-reserved | - | -
COMMENTS | Non-reserved | - | -
COMMIT | Non-reserved | Reserved | Reserved
COMMITTED | Non-reserved | Non-reserved | Non-reserved
COMPATIBLE_ILLEGAL_CHARS | Non-reserved | - | -
COMPLETE | Non-reserved | - | -
COMPRESS | Non-reserved | - | -
COMPLETION | - | Reserved | -
CONCURRENTLY | Reserved (functions and types allowed) | - | -
CONDITION | - | - | -
CONDITION_NUMBER | - | Non-reserved | Non-reserved
CONFIGURATION | Non-reserved | - | -
CONNECT | - | Reserved | Reserved
CONNECTION | Non-reserved | Reserved | Reserved
CONNECTION_NAME | - | Non-reserved | Non-reserved
CONSTRAINT | Reserved | Reserved | Reserved
CONSTRAINTS | Non-reserved | Reserved | Reserved
CONSTRAINT_CATALOG | - | Non-reserved | Non-reserved
CONSTRAINT_NAME | - | Non-reserved | Non-reserved
CONSTRAINT_SCHEMA | - | Non-reserved | Non-reserved
CONSTRUCTOR | - | Reserved | -
CONTAINS | - | Non-reserved | -
CONTENT | Non-reserved | - | -
CONTINUE | Non-reserved | Reserved | Reserved
CONVERSION | Non-reserved | - | -
CONVERT | - | Non-reserved | Reserved
COORDINATOR | Non-reserved | - | -
COPY | Non-reserved | - | -
CORRESPONDING | - | Reserved | Reserved
COST | Non-reserved | - | -
COUNT | - | Non-reserved | Reserved
CREATE | Reserved | Reserved | Reserved
CROSS | Reserved (functions and types allowed) | Reserved | Reserved
CSV | Non-reserved | - | -
CUBE | - | Reserved | -
CURRENT | Non-reserved | Reserved | Reserved
CURRENT_CATALOG | Reserved | - | -
CURRENT_DATE | Reserved | Reserved | Reserved
CURRENT_PATH | - | Reserved | -
CURRENT_ROLE | Reserved | Reserved | -
CURRENT_SCHEMA | Reserved (functions and types allowed) | - | -
CURRENT_TIME | Reserved | Reserved | Reserved
CURRENT_TIMESTAMP | Reserved | Reserved | Reserved
CURRENT_USER | Reserved | Reserved | Reserved
CURSOR | Non-reserved | Reserved | Reserved
CURSOR_NAME | - | Non-reserved | Non-reserved
CYCLE | Non-reserved | Reserved | -
DATA | Non-reserved | Reserved | Non-reserved
DATE_FORMAT | Non-reserved | - | -
DATABASE | Non-reserved | - | -
DATAFILE | Non-reserved | - | -
DATE | Non-reserved (excluding functions and types) | Reserved | Reserved
DATETIME_INTERVAL_CODE | - | Non-reserved | Non-reserved
DATETIME_INTERVAL_PRECISION | - | Non-reserved | Non-reserved
DAY | Non-reserved | Reserved | Reserved
DBCOMPATIBILITY | Non-reserved | - | -
DEALLOCATE | Non-reserved | Reserved | Reserved
DEC | Non-reserved (excluding functions and types) | Reserved | Reserved
DECIMAL | Non-reserved (excluding functions and types) | Reserved | Reserved
DECLARE | Non-reserved | Reserved | Reserved
DECODE | Non-reserved (excluding functions and types) | - | -
DEFAULT | Reserved | Reserved | Reserved
DEFAULTS | Non-reserved | - | -
DEFERRABLE | Reserved | Reserved | Reserved
DEFERRED | Non-reserved | Reserved | Reserved
DEFINED | - | Non-reserved | -
DEFINER | Non-reserved | Non-reserved | -
DELETE | Non-reserved | Reserved | Reserved
DELIMITER | Non-reserved | - | -
DELIMITERS | Non-reserved | - | -
DELTA | Non-reserved | - | -
DEPTH | - | Reserved | -
DEREF | - | Reserved | -
DESC | Reserved | Reserved | Reserved
DESCRIBE | - | Reserved | Reserved
DESCRIPTOR | - | Reserved | Reserved
DESTROY | - | Reserved | -
DESTRUCTOR | - | Reserved | -
DETERMINISTIC | Non-reserved | Reserved | -
DIAGNOSTICS | - | Reserved | Reserved
DICTIONARY | Non-reserved | Reserved | -
DIRECT | Non-reserved | - | -
DIRECTORY | Non-reserved | - | -
DISABLE | Non-reserved | - | -
DISCARD | Non-reserved | - | -
DISCONNECT | - | Reserved | Reserved
DISPATCH | - | Non-reserved | -
DISTINCT | Reserved | Reserved | Reserved
DISTRIBUTE | Non-reserved | - | -
DISTRIBUTION | Non-reserved | - | -
DO | Reserved | - | -
DOCUMENT | Non-reserved | - | -
DOMAIN | Non-reserved | Reserved | Reserved
DOUBLE | Non-reserved | Reserved | Reserved
DROP | Non-reserved | Reserved | Reserved
DYNAMIC | - | Reserved | -
DYNAMIC_FUNCTION | - | Non-reserved | Non-reserved
DYNAMIC_FUNCTION_CODE | - | Non-reserved | -
EACH | Non-reserved | Reserved | -
ELASTIC | Non-reserved | - | -
ELSE | Reserved | Reserved | Reserved
ENABLE | Non-reserved | - | -
ENCODING | Non-reserved | - | -
ENCRYPTED | Non-reserved | - | -
END | Reserved | Reserved | Reserved
END-EXEC | - | Reserved | Reserved
ENFORCED | Non-reserved | - | -
ENUM | Non-reserved | - | -
EOL | Non-reserved | - | -
EQUALS | - | Reserved | -
ERRORS | Non-reserved | - | -
ESCAPE | Non-reserved | Reserved | Reserved
ESCAPING | Non-reserved | - | -
EVERY | Non-reserved | Reserved | -
EXCEPT | Reserved | Reserved | Reserved
EXCEPTION | - | Reserved | Reserved
EXCHANGE | Non-reserved | - | -
EXCLUDE | Non-reserved | - | -
EXCLUDING | Non-reserved | - | -
EXCLUSIVE | Non-reserved | - | -
EXEC | - | Reserved | Reserved
EXECUTE | Non-reserved | Reserved | Reserved
EXISTING | - | Non-reserved | -
EXISTS | Non-reserved (excluding functions and types) | Non-reserved | Reserved
EXPIRATION | Non-reserved | - | -
EXPLAIN | Non-reserved | - | -
EXTENSION | Non-reserved | - | -
EXTERNAL | Non-reserved | Reserved | Reserved
EXTRACT | Non-reserved (excluding functions and types) | Non-reserved | Reserved
FALSE | Reserved | Reserved | Reserved
FAMILY | Non-reserved | - | -
FAST | Non-reserved | - | -
FENCED | Non-reserved | - | -
FETCH | Reserved | Reserved | Reserved
FILEHEADER | Non-reserved | - | -
FILL_MISSING_FIELDS | Non-reserved | - | -
FINAL | - | Non-reserved | -
FIRST | Non-reserved | Reserved | Reserved
FIXED | Non-reserved | Reserved | Reserved
FLOAT | Non-reserved (excluding functions and types) | Reserved | Reserved
FOLLOWING | Non-reserved | - | -
FOR | Reserved | Reserved | Reserved
FORCE | Non-reserved | - | -
FOREIGN | Reserved | Reserved | Reserved
FORMATTER | Non-reserved | - | -
FORTRAN | - | Non-reserved | Non-reserved
FORWARD | Non-reserved | - | -
FOUND | - | Reserved | Reserved
FREE | - | Reserved | -
FREEZE | Reserved (functions and types allowed) | - | -
FROM | Reserved | Reserved | Reserved
FULL | Reserved (functions and types allowed) | Reserved | Reserved
FUNCTION | Non-reserved | Reserved | -
FUNCTIONS | Non-reserved | - | -
G | - | Non-reserved | -
GENERAL | - | Reserved | -
GENERATED | - | Non-reserved | -
GET | - | Reserved | Reserved
GLOBAL | Non-reserved | Reserved | Reserved
GO | - | Reserved | Reserved
GOTO | - | Reserved | Reserved
GRANT | Reserved | Reserved | Reserved
GRANTED | Non-reserved | Non-reserved | -
GREATEST | Non-reserved (excluding functions and types) | - | -
GROUP | Reserved | Reserved | Reserved
GROUPING | - | Reserved | -
HANDLER | Non-reserved | - | -
HAVING | Reserved | Reserved | Reserved
HEADER | Non-reserved | - | -
HIERARCHY | - | Non-reserved | -
HOLD | Non-reserved | Non-reserved | -
HOST | - | Reserved | -
HOUR | Non-reserved | Reserved | Reserved
IDENTIFIED | Non-reserved | - | -
IDENTITY | Non-reserved | Reserved | Reserved
IF | Non-reserved (excluding functions and types) | - | -
IFNULL | Non-reserved (excluding functions and types) | - | -
IGNORE | - | Reserved | -
IGNORE_EXTRA_DATA | Non-reserved | - | -
ILIKE | Reserved (functions and types allowed) | - | -
IMMEDIATE | Non-reserved | Reserved | Reserved
IMMUTABLE | Non-reserved | - | -
IMPLEMENTATION | - | Non-reserved | -
IMPLICIT | Non-reserved | - | -
IN | Reserved | Reserved | Reserved
INCLUDING | Non-reserved | - | -
INCREMENT | Non-reserved | - | -
INDEX | Non-reserved | - | -
INDEXES | Non-reserved | - | -
INDICATOR | - | Reserved | Reserved
INFIX | - | Non-reserved | -
INHERIT | Non-reserved | - | -
INHERITS | Non-reserved | - | -
INITIAL | Non-reserved | - | -
INITIALIZE | - | Reserved | -
INITIALLY | Reserved | Reserved | Reserved
INITRANS | Non-reserved | - | -
INLINE | Non-reserved | - | -
INNER | Reserved (functions and types allowed) | Reserved | Reserved
INOUT | Non-reserved (excluding functions and types) | Reserved | -
INPUT | Non-reserved | Reserved | Reserved
INSENSITIVE | Non-reserved | Non-reserved | Reserved
INSERT | Non-reserved | Reserved | Reserved
INSTANCE | - | Non-reserved | -
INSTANTIABLE | - | Non-reserved | -
INSTEAD | Non-reserved | - | -
INT | Non-reserved (excluding functions and types) | Reserved | Reserved
INTEGER | Non-reserved (excluding functions and types) | Reserved | Reserved
INTERNAL | Reserved | - | -
INTERSECT | Reserved | Reserved | Reserved
INTERVAL | Non-reserved (excluding functions and types) | Reserved | Reserved
INTO | Reserved | Reserved | Reserved
INVOKER | Non-reserved | Non-reserved | -
IS | Reserved | Reserved | Reserved
ISNULL | Non-reserved (excluding functions and types) | - | -
ISOLATION | Non-reserved | Reserved | Reserved
ITERATE | - | Reserved | -
JOIN | Reserved (functions and types allowed) | Reserved | Reserved
K | - | Non-reserved | -
KEY | Non-reserved | Reserved | Reserved
KEY_MEMBER | - | Non-reserved | -
KEY_TYPE | - | Non-reserved | -
LABEL | Non-reserved | - | -
LANGUAGE | Non-reserved | Reserved | Reserved
LARGE | Non-reserved | Reserved | -
LAST | Non-reserved | Reserved | Reserved
LATERAL | - | Reserved | -
LC_COLLATE | Non-reserved | - | -
LC_CTYPE | Non-reserved | - | -
LEADING | Reserved | Reserved | Reserved
LEAKPROOF | Non-reserved | - | -
LEAST | Non-reserved (excluding functions and types) | - | -
LEFT | Reserved (functions and types allowed) | Reserved | Reserved
LENGTH | - | Non-reserved | Non-reserved
LESS | Reserved | Reserved | -
LEVEL | Non-reserved | Reserved | Reserved
LIKE | Reserved (functions and types allowed) | Reserved | Reserved
LIMIT | Reserved | Reserved | -
LISTEN | Non-reserved | - | -
LOAD | Non-reserved | - | -
LOCAL | Non-reserved | Reserved | Reserved
LOCALTIME | Reserved | Reserved | -
LOCALTIMESTAMP | Reserved | Reserved | -
LOCATION | Non-reserved | - | -
LOCATOR | - | Reserved | -
LOCK | Non-reserved | - | -
LOG | Non-reserved | - | -
LOGGING | Non-reserved | - | -
LOGIN | Non-reserved | - | -
LOOP | Non-reserved | - | -
LOWER | - | Non-reserved | Reserved
M | - | Non-reserved | -
MAP | - | Reserved | -
MAPPING | Non-reserved | - | -
MATCH | Non-reserved | Reserved | Reserved
MATCHED | Non-reserved | - | -
MAX | - | Non-reserved | Reserved
MAXEXTENTS | Non-reserved | - | -
MAXSIZE | Non-reserved | - | -
MAXTRANS | Non-reserved | - | -
MAXVALUE | Reserved | - | -
MERGE | Non-reserved | - | -
MESSAGE_LENGTH | - | Non-reserved | Non-reserved
MESSAGE_OCTET_LENGTH | - | Non-reserved | Non-reserved
MESSAGE_TEXT | - | Non-reserved | Non-reserved
METHOD | - | Non-reserved | -
MIN | - | Non-reserved | Reserved
MINEXTENTS | Non-reserved | - | -
MINUS | Reserved | - | -
MINUTE | Non-reserved | Reserved | Reserved
MINVALUE | Non-reserved | - | -
MOD | - | Non-reserved | -
MODE | Non-reserved | - | -
MODIFIES | - | Reserved | -
MODIFY | Reserved | Reserved | -
MODULE | - | Reserved | Reserved
MONTH | Non-reserved | Reserved | Reserved
MORE | - | Non-reserved | Non-reserved
MOVE | Non-reserved | - | -
MOVEMENT | Non-reserved | - | -
MUMPS | - | Non-reserved | Non-reserved
NAME | Non-reserved | Non-reserved | Non-reserved
NAMES | Non-reserved | Reserved | Reserved
NATIONAL | Non-reserved (excluding functions and types) | Reserved | Reserved
NATURAL | Reserved (functions and types allowed) | Reserved | Reserved
NCHAR | Non-reserved (excluding functions and types) | Reserved | Reserved
NCLOB | - | Reserved | -
NEW | - | Reserved | -
NEXT | Non-reserved | Reserved | Reserved
NLSSORT | Reserved | - | -
NO | Non-reserved | Reserved | Reserved
NOCOMPRESS | Non-reserved | - | -
NOCYCLE | Non-reserved | - | -
NODE | Non-reserved | - | -
NOLOGGING | Non-reserved | - | -
NOLOGIN | Non-reserved | - | -
NOMAXVALUE | Non-reserved | - | -
NOMINVALUE | Non-reserved | - | -
NONE | Non-reserved (excluding functions and types) | Reserved | -
NOT | Reserved | Reserved | Reserved
NOTHING | Non-reserved | - | -
NOTIFY | Non-reserved | - | -
NOTNULL | Reserved (functions and types allowed) | - | -
NOWAIT | Non-reserved | - | -
NULL | Reserved | Reserved | Reserved
NULLABLE | - | Non-reserved | Non-reserved
NULLIF | Non-reserved (excluding functions and types) | Non-reserved | Reserved
NULLS | Non-reserved | - | -
NUMBER | Non-reserved (excluding functions and types) | Non-reserved | Non-reserved
NUMERIC | Non-reserved (excluding functions and types) | Reserved | Reserved
NUMSTR | Non-reserved | - | -
NVARCHAR2 | Non-reserved (excluding functions and types) | - | -
NVL | Non-reserved (excluding functions and types) | - | -
OBJECT | Non-reserved | Reserved | -
OCTET_LENGTH | - | Non-reserved | Reserved
OF | Non-reserved | Reserved | Reserved
OFF | Non-reserved | Reserved | -
OFFSET | Reserved | - | -
OIDS | Non-reserved | - | -
OLD | - | Reserved | -
ON | Reserved | Reserved | Reserved
ONLY | Reserved | Reserved | Reserved
OPEN | - | Reserved | Reserved
OPERATION | - | Reserved | -
OPERATOR | Non-reserved | - | -
OPTIMIZATION | Non-reserved | - | -
OPTION | Non-reserved | Reserved | Reserved
OPTIONS | Non-reserved | Non-reserved | -
OR | Reserved | Reserved | Reserved
ORDER | Reserved | Reserved | Reserved
ORDINALITY | - | Reserved | -
OUT | Non-reserved (excluding functions and types) | Reserved | -
OUTER | Reserved (functions and types allowed) | Reserved | Reserved
OUTPUT | - | Reserved | Reserved
OVER | Non-reserved | - | -
OVERLAPS | Reserved (functions and types allowed) | Non-reserved | Reserved
OVERLAY | Non-reserved (excluding functions and types) | Non-reserved | -
OVERRIDING | - | Non-reserved | -
OWNED | Non-reserved | - | -
OWNER | Non-reserved | - | -
PACKAGE | Non-reserved | - | -
PAD | - | Reserved | Reserved
PARAMETER | - | Reserved | -
PARAMETERS | - | Reserved | -
PARAMETER_MODE | - | Non-reserved | -
PARAMETER_NAME | - | Non-reserved | -
PARAMETER_ORDINAL_POSITION | - | Non-reserved | -
PARAMETER_SPECIFIC_CATALOG | - | Non-reserved | -
PARAMETER_SPECIFIC_NAME | - | Non-reserved | -
PARAMETER_SPECIFIC_SCHEMA | - | Non-reserved | -
PARSER | Non-reserved | - | -
PARTIAL | Non-reserved | Reserved | Reserved
PARTITION | Non-reserved | - | -
PARTITIONS | Non-reserved | - | -
PASCAL | - | Non-reserved | Non-reserved
PASSING | Non-reserved | - | -
PASSWORD | Non-reserved | - | -
PATH | - | Reserved | -
PCTFREE | Non-reserved | - | -
PER | Non-reserved | - | -
PERM | Non-reserved | - | -
PERCENT | Non-reserved | - | -
PERFORMANCE | Reserved | - | -
PLACING | Reserved | - | -
PLAN | Reserved | - | -
PLANS | Non-reserved | - | -
PLI | - | Non-reserved | Non-reserved
POLICY | Non-reserved | - | -
POOL | Non-reserved | - | -
POSITION | Non-reserved (excluding functions and types) | Non-reserved | Reserved
POSTFIX | - | Reserved | -
PRECEDING | Non-reserved | - | -
PRECISION | Non-reserved (excluding functions and types) | Reserved | Reserved
PREFERRED | Non-reserved | - | -
PREFIX | Non-reserved | Reserved | -
PREORDER | - | Reserved | -
PREPARE | Non-reserved | Reserved | Reserved
PREPARED | Non-reserved | - | -
PRESERVE | Non-reserved | Reserved | Reserved
PRIMARY | Reserved | Reserved | Reserved
PRIOR | Non-reserved | Reserved | Reserved
PRIVATE | Non-reserved | - | -
PRIVILEGE | Non-reserved | - | -
PRIVILEGES | Non-reserved | Reserved | Reserved
PROCEDURAL | Non-reserved | - | -
PROCEDURE | Reserved | Reserved | Reserved
PROFILE | Non-reserved | - | -
PUBLIC | - | Reserved | Reserved
QUERY | Non-reserved | - | -
QUOTE | Non-reserved | - | -
RANGE | Non-reserved | - | -
RAW | Non-reserved | - | -
READ | Non-reserved | Reserved | Reserved
READS | - | Reserved | -
REAL | Non-reserved (excluding functions and types) | Reserved | Reserved
REASSIGN | Non-reserved | - | -
REBUILD | Non-reserved | - | -
RECHECK | Non-reserved | - | -
RECURSIVE | Non-reserved | Reserved | -
REF | Non-reserved | Reserved | -
REFRESH | Non-reserved | - | -
REFERENCES | Reserved | Reserved | Reserved
REFERENCING | - | Reserved | -
REINDEX | Non-reserved | - | -
REJECT | Reserved | - | -
RELATIVE | Non-reserved | Reserved | Reserved
RELEASE | Non-reserved | - | -
RELOPTIONS | Non-reserved | - | -
REMOTE | Non-reserved | - | -
RENAME | Non-reserved | - | -
REPEATABLE | Non-reserved | Non-reserved | Non-reserved
REPLACE | Non-reserved | - | -
REPLICA | Non-reserved | - | -
RESET | Non-reserved | - | -
RESIZE | Non-reserved | - | -
RESOURCE | Non-reserved | - | -
RESTART | Non-reserved | - | -
RESTRICT | Non-reserved | Reserved | Reserved
RESULT | - | Reserved | -
RETURN | Non-reserved | Reserved | -
RETURNED_LENGTH | - | Non-reserved | Non-reserved
RETURNED_OCTET_LENGTH | | |

+

-

+

Non-reserved

+

Non-reserved

+

RETURNED_SQLSTATE

+

-

+

Non-reserved

+

Non-reserved

+

RETURNING

+

Reserved

+

-

+

-

+

RETURNS

+

Non-reserved

+

Reserved

+

-

+

REUSE

+

Non-reserved

+

-

+

-

+

REVOKE

+

Non-reserved

+

Reserved

+

Reserved

+

RIGHT

+

Reserved (functions and types allowed)

+

Reserved

+

Reserved

+

ROLE

+

Non-reserved

+

Reserved

+

-

+

ROLLBACK

+

Non-reserved

+

Reserved

+

Reserved

+

ROLLUP

+

-

+

Reserved

+

-

+

ROUTINE

+

-

+

Reserved

+

-

+

ROUTINE_CATALOG

+

-

+

Non-reserved

+

-

+

ROUTINE_NAME

+

-

+

Non-reserved

+

-

+

ROUTINE_SCHEMA

+

-

+

Non-reserved

+

-

+

ROW

+

Non-reserved (excluding functions and types)

+

Reserved

+

-

+

ROWS

+

Non-reserved

+

Reserved

+

Reserved

+

ROW_COUNT

+

-

+

Non-reserved

+

Non-reserved

+

RULE

+

Non-reserved

+

-

+

-

+

SAVEPOINT

+

Non-reserved

+

Reserved

+

-

+

SCALE

+

-

+

Non-reserved

+

Non-reserved

+

SCHEMA

+

Non-reserved

+

Reserved

+

Reserved

+

SCHEMA_NAME

+

-

+

Non-reserved

+

Non-reserved

+

SCOPE

+

-

+

Reserved

+

-

+

SCROLL

+

Non-reserved

+

Reserved

+

Reserved

+

SEARCH

+

Non-reserved

+

Reserved

+

-

+

SECOND

+

Non-reserved

+

Reserved

+

Reserved

+

SECTION

+

-

+

Reserved

+

Reserved

+

SECURITY

+

Non-reserved

+

Non-reserved

+

-

+

SELECT

+

Reserved

+

Reserved

+

Reserved

+

SELF

+

-

+

Non-reserved

+

-

+

SENSITIVE

+

-

+

Non-reserved

+

-

+

SEQUENCE

+

Non-reserved

+

Reserved

+

-

+

SEQUENCES

+

Non-reserved

+

-

+

-

+

SERIALIZABLE

+

Non-reserved

+

Non-reserved

+

Non-reserved

+

SERVER

+

Non-reserved

+

-

+

-

+

SERVER_NAME

+

-

+

Non-reserved

+

Non-reserved

+

SESSION

+

Non-reserved

+

Reserved

+

Reserved

+

SESSION_USER

+

Reserved

+

Reserved

+

Reserved

+

SET

+

Non-reserved

+

Reserved

+

Reserved

+

SETOF

+

Non-reserved (excluding functions and types)

+

-

+

-

+

SETS

+

-

+

Reserved

+

-

+

SHARE

+

Non-reserved

+

-

+

-

+

SHIPPABLE

+

Non-reserved

+

-

+

-

+

SHOW

+

Non-reserved

+

-

+

-

+

SIMILAR

+

Reserved (functions and types allowed)

+

Non-reserved

+

-

+

SIMPLE

+

Non-reserved

+

Non-reserved

+

-

+

SIZE

+

Non-reserved

+

Reserved

+

Reserved

+

SMALLDATETIME

+

Non-reserved (excluding functions and types)

+

-

+

-

+

SMALLDATETIME_FORMAT

+

Non-reserved

+

-

+

-

+

SMALLINT

+

Non-reserved (excluding functions and types)

+

Reserved

+

Reserved

+

SNAPSHOT

+

Non-reserved

+

-

+

-

+

SOME

+

Reserved

+

Reserved

+

Reserved

+

SOURCE

+

Non-reserved

+

Non-reserved

+

-

+

SPACE

+

-

+

Reserved

+

Reserved

+

SPECIFIC

+

-

+

Reserved

+

-

+

SPECIFICTYPE

+

-

+

Reserved

+

-

+

SPECIFIC_NAME

+

-

+

Non-reserved

+

-

+

SPILL

+

Non-reserved

+

-

+

-

+

SPLIT

+

Non-reserved

+

-

+

-

+

SQL

+

-

+

Reserved

+

Reserved

+

SQLCODE

+

-

+

-

+

Reserved

+

SQLERROR

+

-

+

-

+

Reserved

+

SQLEXCEPTION

+

-

+

Reserved

+

-

+

SQLSTATE

+

-

+

Reserved

+

Reserved

+

SQLWARNING

+

-

+

Reserved

+

-

+

STABLE

+

Non-reserved

+

-

+

-

+

STANDALONE

+

Non-reserved

+

-

+

-

+

START

+

Non-reserved

+

Reserved

+

-

+

STATE

+

-

+

Reserved

+

-

+

STATEMENT

+

Non-reserved

+

Reserved

+

-

+

STATEMENT_ID

+

Non-reserved

+

-

+

-

+

STATIC

+

-

+

Reserved

+

-

+

STATISTICS

+

Non-reserved

+

-

+

-

+

STDIN

+

Non-reserved

+

-

+

-

+

STDOUT

+

Non-reserved

+

-

+

-

+

STORAGE

+

Non-reserved

+

-

+

-

+

STORE

+

Non-reserved

+

-

+

-

+

STRICT

+

Non-reserved

+

-

+

-

+

STRIP

+

Non-reserved

+

-

+

-

+

STRUCTURE

+

-

+

Reserved

+

-

+

STYLE

+

-

+

Non-reserved

+

-

+

SUBCLASS_ORIGIN

+

-

+

Non-reserved

+

Non-reserved

+

SUBLIST

+

-

+

Non-reserved

+

-

+

SUBSTRING

+

Non-reserved (excluding functions and types)

+

Non-reserved

+

Reserved

+

SUM

+

-

+

Non-reserved

+

Reserved

+

SUPERUSER

+

Non-reserved

+

-

+

-

+

SYMMETRIC

+

Reserved

+

Non-reserved

+

-

+

SYNONYM

+

Non-reserved

+

-

+

-

+

SYS_REFCURSOR

+

Non-reserved

+

-

+

-

+

SYSDATE

+

Reserved

+

-

+

-

+

SYSID

+

Non-reserved

+

-

+

-

+

SYSTEM

+

Non-reserved

+

Non-reserved

+

-

+

SYSTEM_USER

+

-

+

Reserved

+

Reserved

+

TABLE

+

Reserved

+

Reserved

+

Reserved

+

TABLES

+

Non-reserved

+

-

+

-

+

TABLE_NAME

+

-

+

Non-reserved

+

Non-reserved

+

TEMP

+

Non-reserved

+

-

+

-

+

TEMPLATE

+

Non-reserved

+

-

+

-

+

TEMPORARY

+

Non-reserved

+

Reserved

+

Reserved

+

TERMINATE

+

-

+

Reserved

+

-

+

TEXT

+

Non-reserved

+

-

+

-

+

THAN

+

Non-reserved

+

Reserved

+

-

+

THEN

+

Reserved

+

Reserved

+

Reserved

+

TIME

+

Non-reserved (excluding functions and types)

+

Reserved

+

Reserved

+

TIME_FORMAT

+

Non-reserved

+

-

+

-

+

TIMESTAMP

+

Non-reserved (excluding functions and types)

+

Reserved

+

Reserved

+

TIMESTAMPDIFF

+

Non-reserved (excluding functions and types)

+

-

+

-

+

TIMESTAMP_FORMAT

+

Non-reserved

+

-

+

-

+

TIMEZONE_HOUR

+

-

+

Reserved

+

Reserved

+

TIMEZONE_MINUTE

+

-

+

Reserved

+

Reserved

+

TINYINT

+

Non-reserved (excluding functions and types)

+

-

+

-

+

TO

+

Reserved

+

Reserved

+

Reserved

+

TRAILING

+

Reserved

+

Reserved

+

Reserved

+

TRANSACTION

+

Non-reserved

+

Reserved

+

Reserved

+

TRANSACTIONS_COMMITTED

+

-

+

Non-reserved

+

-

+

TRANSACTIONS_ROLLED_BACK

+

-

+

Non-reserved

+

-

+

TRANSACTION_ACTIVE

+

-

+

Non-reserved

+

-

+

TRANSFORM

+

-

+

Non-reserved

+

-

+

TRANSFORMS

+

-

+

Non-reserved

+

-

+

TRANSLATE

+

-

+

Non-reserved

+

Reserved

+

TRANSLATION

+

-

+

Reserved

+

Reserved

+

TREAT

+

Non-reserved (excluding functions and types)

+

Reserved

+

-

+

TRIGGER

+

Non-reserved

+

Reserved

+

-

+

TRIGGER_CATALOG

+

-

+

Non-reserved

+

-

+

TRIGGER_NAME

+

-

+

Non-reserved

+

-

+

TRIGGER_SCHEMA

+

-

+

Non-reserved

+

-

+

TRIM

+

Non-reserved (excluding functions and types)

+

Non-reserved

+

Reserved

+

TRUE

+

Reserved

+

Reserved

+

Reserved

+

TRUNCATE

+

Non-reserved

+

-

+

-

+

TRUSTED

+

Non-reserved

+

-

+

-

+

TYPE

+

Non-reserved

+

Non-reserved

+

Non-reserved

+

TYPES

+

Non-reserved

+

-

+

-

+

UESCAPE

+

-

+

-

+

-

+

UNBOUNDED

+

Non-reserved

+

-

+

-

+

UNCOMMITTED

+

Non-reserved

+

Non-reserved

+

Non-reserved

+

UNDER

+

-

+

Reserved

+

-

+

UNENCRYPTED

+

Non-reserved

+

-

+

-

+

UNION

+

Reserved

+

Reserved

+

Reserved

+

UNIQUE

+

Reserved

+

Reserved

+

Reserved

+

UNKNOWN

+

Non-reserved

+

Reserved

+

Reserved

+

UNLIMITED

+

Non-reserved

+

-

+

-

+

UNLISTEN

+

Non-reserved

+

-

+

-

+

UNLOCK

+

Non-reserved

+

-

+

-

+

UNLOGGED

+

Non-reserved

+

-

+

-

+

UNNAMED

+

-

+

Non-reserved

+

Non-reserved

+

UNNEST

+

-

+

Reserved

+

-

+

UNTIL

+

Non-reserved

+

-

+

-

+

UNUSABLE

+

Non-reserved

+

-

+

-

+

UPDATE

+

Non-reserved

+

Reserved

+

Reserved

+

UPPER

+

-

+

Non-reserved

+

Reserved

+

USAGE

+

-

+

Reserved

+

Reserved

+

USER

+

Reserved

+

Reserved

+

Reserved

+

USER_DEFINED_TYPE_CATALOG

+

-

+

Non-reserved

+

-

+

USER_DEFINED_TYPE_NAME

+

-

+

Non-reserved

+

-

+

USER_DEFINED_TYPE_SCHEMA

+

-

+

Non-reserved

+

-

+

USING

+

Reserved

+

Reserved

+

Reserved

+

VACUUM

+

Non-reserved

+

-

+

-

+

VALID

+

Non-reserved

+

-

+

-

+

VALIDATE

+

Non-reserved

+

-

+

-

+

VALIDATION

+

Non-reserved

+

-

+

-

+

VALIDATOR

+

Non-reserved

+

-

+

-

+

VALUE

+

Non-reserved

+

Reserved

+

Reserved

+

VALUES

+

Non-reserved (excluding functions and types)

+

Reserved

+

Reserved

+

VARCHAR

+

Non-reserved (excluding functions and types)

+

Reserved

+

Reserved

+

VARCHAR2

+

Non-reserved (excluding functions and types)

+

-

+

-

+

VARIABLE

+

-

+

Reserved

+

-

+

VARIADIC

+

Reserved

+

-

+

-

+

VARYING

+

Non-reserved

+

Reserved

+

Reserved

+

VCGROUP

+

Non-reserved

+

-

+

-

+

VERBOSE

+

Reserved (functions and types allowed)

+

-

+

-

+

VERIFY

+

Non-reserved

+

-

+

-

+

VERSION

+

Non-reserved

+

-

+

-

+

VIEW

+

Non-reserved

+

Reserved

+

Reserved

+

VOLATILE

+

Non-reserved

+

-

+

-

+

WHEN

+

Reserved

+

Reserved

+

Reserved

+

WHENEVER

+

-

+

Reserved

+

Reserved

+

WHERE

+

Reserved

+

Reserved

+

Reserved

+

WHITESPACE

+

Non-reserved

+

-

+

-

+

WINDOW

+

Reserved

+

-

+

-

+

WITH

+

Reserved

+

Reserved

+

Reserved

+

WITHIN

+

Non-reserved

+

-

+

-

+

WITHOUT

+

Non-reserved

+

Reserved

+

-

+

WORK

+

Non-reserved

+

Reserved

+

Reserved

+

WORKLOAD

+

Non-reserved

+

-

+

-

+

WRAPPER

+

Non-reserved

+

-

+

-

+

WRITE

+

Non-reserved

+

Reserved

+

Reserved

+

XML

+

Non-reserved

+

-

+

-

+

XMLATTRIBUTES

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLCONCAT

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLELEMENT

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLEXISTS

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLFOREST

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLNAMESPACES

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLPARSE

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLPI

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLROOT

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLSERIALIZE

+

Non-reserved (excluding functions and types)

+

-

+

-

+

XMLTABLE

+

Non-reserved (excluding functions and types)

+

-

+

-

+

YEAR

+

Non-reserved

+

Reserved

+

Reserved

+

YES

+

Non-reserved

+

-

+

-

+

ZONE

+

Non-reserved

+

Reserved

+

Reserved

+
+
+
+
+ +
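A keyword that is reserved in GaussDB(DWS) can still be used as an identifier if it is double-quoted. A minimal sketch (the table name keyword_demo is purely illustrative):

-- SELECT is reserved, so it must be quoted to serve as a column name:
CREATE TABLE keyword_demo ("select" int);
INSERT INTO keyword_demo VALUES (1);
SELECT "select" FROM keyword_demo;
DROP TABLE keyword_demo;

Non-reserved keywords (for example, MONTH) can generally be used as identifiers without quoting, although quoting them remains the safer habit.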
+ diff --git a/docs/dws/dev/dws_06_0008.html b/docs/dws/dev/dws_06_0008.html new file mode 100644 index 00000000..e85042c8 --- /dev/null +++ b/docs/dws/dev/dws_06_0008.html @@ -0,0 +1,47 @@ + + +

Data Types

+ diff --git a/docs/dws/dev/dws_06_0009.html b/docs/dws/dev/dws_06_0009.html new file mode 100644 index 00000000..8df6533e --- /dev/null +++ b/docs/dws/dev/dws_06_0009.html @@ -0,0 +1,367 @@ + + +

Numeric Types

Numeric types consist of two-, four-, and eight-byte integers, four- and eight-byte floating-point numbers, and selectable-precision decimals.

For details about numeric operators and functions, see Mathematical Functions and Operators.

GaussDB(DWS) supports integers, arbitrary precision numbers, floating-point types, and serial integers.

Integer Types

The types TINYINT, SMALLINT, INTEGER, BINARY_INTEGER, and BIGINT store whole numbers (numbers without fractional components) of various ranges. Attempting to store a number with a fractional component in any of these types results in an error.

INTEGER is the common choice. Generally, use SMALLINT only if you are sure the values always fit in its range; INTEGER is faster to process. Use BIGINT only when the INTEGER range is not large enough.

Table 1 Integer types

Name | Description | Storage Space | Range
TINYINT | Tiny integer, also called INT1 | 1 byte | 0 ~ 255
SMALLINT | Small integer, also called INT2 | 2 bytes | -32,768 ~ +32,767
INTEGER | Typical choice for integers, also called INT4 | 4 bytes | -2,147,483,648 ~ +2,147,483,647
BINARY_INTEGER | Alias for INTEGER, compatible with Oracle | 4 bytes | -2,147,483,648 ~ +2,147,483,647
BIGINT | Big integer, also called INT8 | 8 bytes | -9,223,372,036,854,775,808 ~ +9,223,372,036,854,775,807

Examples:

Create a table containing TINYINT, INTEGER, and BIGINT data.

CREATE TABLE int_type_t1 
(
    a TINYINT, 
    b TINYINT,
    c INTEGER,
    d BIGINT
);

Insert data.

INSERT INTO int_type_t1 VALUES(100, 10, 1000, 10000);

View data.

SELECT * FROM int_type_t1;
  a  | b  |  c   |   d   
-----+----+------+-------
 100 | 10 | 1000 | 10000
(1 row)
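Values outside a column's range are rejected rather than wrapped or truncated. A quick sketch reusing int_type_t1 above (the exact error text may vary by version):

INSERT INTO int_type_t1 VALUES(256, 10, 1000, 10000);
ERROR:  tinyint out of range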

Arbitrary Precision Types

The NUMBER type can store numbers with a very large number of digits. It is especially recommended for storing monetary amounts and other quantities where exactness is required. However, arbitrary precision numbers require more storage space, and have lower storage and computing efficiency and a poorer compression ratio, than integer types.

The scale of a NUMBER value is the count of decimal digits in the fractional part, to the right of the decimal point. The precision of a NUMBER value is the total count of significant digits in the whole number, that is, the number of digits on both sides of the decimal point. For example, the number 23.5141 has a precision of 6 and a scale of 4. Integers can be considered to have a scale of zero.

When configuring a numeric or decimal column, you are advised to specify both the maximum precision (p) and the maximum scale (s) of the column.

If the scale of a value is greater than the declared scale of the column, the system rounds the value to the declared number of fractional digits. If, after rounding, the number of digits to the left of the decimal point exceeds the declared precision minus the declared scale, an error is reported.

Table 2 Arbitrary precision types

Name | Description | Storage Space | Range
NUMERIC[(p[,s])], DECIMAL[(p[,s])] | The value range of p (precision) is [1,1000], and the value range of s (scale) is [0,p]. | The precision is specified by users. Every four decimal digits occupy two bytes, and an extra eight-byte overhead is added to the entire data. | Up to 131,072 digits before the decimal point, and up to 16,383 digits after the decimal point, when no precision is specified
NUMBER[(p[,s])] | Alias for NUMERIC, compatible with Oracle | The precision is specified by users. Every four decimal digits occupy two bytes, and an extra eight-byte overhead is added to the entire data. | Up to 131,072 digits before the decimal point, and up to 16,383 digits after the decimal point, when no precision is specified

Examples:

Create a table with DECIMAL values.

CREATE TABLE decimal_type_t1 (DT_COL1 DECIMAL(10,4));

Insert data.

INSERT INTO decimal_type_t1 VALUES(123456.122331);

View data. The inserted value has been rounded to four fractional digits.

SELECT * FROM decimal_type_t1;
   dt_col1   
-------------
 123456.1223
(1 row)
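If the digits to the left of the decimal point do not fit, the insert fails instead of being rounded. A quick sketch against decimal_type_t1 above (the error wording may differ slightly by version):

INSERT INTO decimal_type_t1 VALUES(1234567.122331);
ERROR:  numeric field overflow
DETAIL:  A field with precision 10, scale 4 must round to an absolute value less than 10^6.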

Floating-Point Types

The floating-point types are inexact, variable-precision numeric types. They implement IEEE Standard 754 for Binary Floating-Point Arithmetic (single and double precision, respectively), to the extent that the underlying processor, OS, and compiler support it.

Table 3 Floating-point types

Name | Description | Storage Space | Range
REAL, FLOAT4 | Single-precision floating point, inexact | 4 bytes | 6 decimal digits of precision
DOUBLE PRECISION, FLOAT8 | Double-precision floating point, inexact | 8 bytes | 1E-307 ~ 1E+308, 15 decimal digits of precision
FLOAT[(p)] | Floating point, inexact. The value range of precision (p) is [1,53]. NOTE: p is the precision, indicating the total number of digits. | 4 or 8 bytes | REAL or DOUBLE PRECISION is selected as the internal representation based on the precision (p). If no precision is specified, DOUBLE PRECISION is used.
BINARY_DOUBLE | Alias for DOUBLE PRECISION, compatible with Oracle | 8 bytes | 1E-307 ~ 1E+308, 15 decimal digits of precision
DEC[(p[,s])] | The value range of p (precision) is [1,1000], and the value range of s (scale) is [0,p]. NOTE: p indicates the total number of digits, and s indicates the number of fractional digits. | The precision is specified by users. Every four decimal digits occupy two bytes, and an extra eight-byte overhead is added to the entire data. | Up to 131,072 digits before the decimal point, and up to 16,383 digits after the decimal point, when no precision is specified
INTEGER[(p[,s])] | The value range of p (precision) is [1,1000], and the value range of s (scale) is [0,p]. | The precision is specified by users. Every four decimal digits occupy two bytes, and an extra eight-byte overhead is added to the entire data. | Up to 131,072 digits before the decimal point, and up to 16,383 digits after the decimal point, when no precision is specified

Examples:

Create a table with floating-point values.

CREATE TABLE float_type_t2 
(
    FT_COL1 INTEGER,
    FT_COL2 FLOAT4,
    FT_COL3 FLOAT8,
    FT_COL4 FLOAT(3),
    FT_COL5 BINARY_DOUBLE,
    FT_COL6 DECIMAL(10,4),
    FT_COL7 INTEGER(6,3)
) DISTRIBUTE BY HASH ( ft_col1);

Insert data.

INSERT INTO float_type_t2 VALUES(10,10.365456,123456.1234,10.3214, 321.321, 123.123654, 123.123654);

View data.

SELECT * FROM float_type_t2;
 ft_col1 | ft_col2 |   ft_col3   | ft_col4 | ft_col5 | ft_col6  | ft_col7 
---------+---------+-------------+---------+---------+----------+---------
      10 | 10.3655 | 123456.1234 | 10.3214 | 321.321 | 123.1237 | 123.124
(1 row)

Serial Integers

SMALLSERIAL, SERIAL, and BIGSERIAL are not true types, but merely a notational convenience for creating unique identifier columns. An integer column is created, and its default value is set to be read from a sequence. A NOT NULL constraint ensures that NULL cannot be inserted. In most cases you would also want to attach a UNIQUE or PRIMARY KEY constraint to prevent duplicate values from being inserted accidentally. Lastly, the sequence is marked as "owned by" the column, so that it is dropped when the column or table is dropped. Currently, a SERIAL column can be specified only when you create a table; you cannot add a SERIAL column to an existing table. SERIAL columns also cannot be created in temporary tables. Because SERIAL is not a data type, existing columns cannot be converted to it.

Table 4 Serial integers

Name | Description | Storage Space | Range
SMALLSERIAL | Two-byte auto-incrementing integer | 2 bytes | 1 ~ 32,767
SERIAL | Four-byte auto-incrementing integer | 4 bytes | 1 ~ 2,147,483,647
BIGSERIAL | Eight-byte auto-incrementing integer | 8 bytes | 1 ~ 9,223,372,036,854,775,807

Examples:

Create a table with a serial column.

CREATE TABLE smallserial_type_tab(a SMALLSERIAL);

Insert data.

INSERT INTO smallserial_type_tab VALUES(default);

Insert data again.

INSERT INTO smallserial_type_tab VALUES(default);

View data.

SELECT * FROM smallserial_type_tab;  
 a 
---
 1
 2
(2 rows)
+ diff --git a/docs/dws/dev/dws_06_0010.html b/docs/dws/dev/dws_06_0010.html new file mode 100644 index 00000000..3b9dff5d --- /dev/null +++ b/docs/dws/dev/dws_06_0010.html @@ -0,0 +1,46 @@ + + +

Monetary Types

The money type stores a currency amount with fixed fractional precision. The range shown in Table 1 assumes two fractional digits. Input is accepted in a variety of formats, including integer and floating-point literals, as well as typical currency formatting, such as $1,000.00. Output is generally in the latter form, but depends on the locale.

Table 1 Monetary types

Name | Storage Size | Description | Range
money | 8 bytes | Currency amount | -92233720368547758.08 to +92233720368547758.07

Values of the numeric, int, and bigint data types can be cast to money. Conversion from the real and double precision data types can be done by casting to numeric first, for example:

SELECT '12.34'::float8::numeric::money;

However, this is not recommended. Floating-point numbers should not be used to handle money, due to the potential for rounding errors.

A money value can be cast to numeric without loss of precision. Conversion to other types can potentially lose precision, and must also be done in two stages:

SELECT '52093.89'::money::numeric::float8;

When a money value is divided by another money value, the result is double precision (that is, a pure number, not money); the currency units cancel each other out in the division.
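For example (a quick sketch; the displayed precision of the double precision result may vary):

SELECT '100'::money / '7'::money;
     ?column?     
------------------
 14.2857142857143
(1 row)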

+ diff --git a/docs/dws/dev/dws_06_0011.html b/docs/dws/dev/dws_06_0011.html new file mode 100644 index 00000000..2c13d16f --- /dev/null +++ b/docs/dws/dev/dws_06_0011.html @@ -0,0 +1,98 @@ + + +

Boolean Type

Table 1 Boolean type

Name | Description | Storage Space | Value
BOOLEAN | Boolean type | 1 byte | true, false, or null (unknown)

Valid literal values for the "true" state are:

TRUE, 't', 'true', 'y', 'yes', '1'

Valid literal values for the "false" state are:

FALSE, 'f', 'false', 'n', 'no', '0'

TRUE and FALSE are the standard, SQL-compatible spellings and are the recommended forms in SQL statements.

Examples

Boolean values are displayed with the letters t and f.

-- Create a table:
CREATE TABLE bool_type_t1  
(
    BT_COL1 BOOLEAN,
    BT_COL2 TEXT
) DISTRIBUTE BY HASH(BT_COL2);

-- Insert data:
INSERT INTO bool_type_t1 VALUES (TRUE, 'sic est');

INSERT INTO bool_type_t1 VALUES (FALSE, 'non est');

-- View data:
SELECT * FROM bool_type_t1;
 bt_col1 | bt_col2 
---------+---------
 t       | sic est
 f       | non est
(2 rows)

SELECT * FROM bool_type_t1 WHERE bt_col1 = 't';
 bt_col1 | bt_col2 
---------+---------
 t       | sic est
(1 row)

-- Delete the table:
DROP TABLE bool_type_t1;
+ diff --git a/docs/dws/dev/dws_06_0012.html b/docs/dws/dev/dws_06_0012.html new file mode 100644 index 00000000..8e0d09bb --- /dev/null +++ b/docs/dws/dev/dws_06_0012.html @@ -0,0 +1,249 @@ + + +

Character Types

Table 1 lists the character types that can be used in GaussDB(DWS). For string operators and related built-in functions, see Character Processing Functions and Operators.

Table 1 Character types

Name | Description | Length | Storage Space
CHAR(n), CHARACTER(n), NCHAR(n) | Fixed-length character string. If a value is shorter than the declared length, it is padded with spaces. | n indicates the string length. If it is not specified, the default precision 1 is used. The value of n must be less than 10485761. | The maximum size is 10 MB.
VARCHAR(n), CHARACTER VARYING(n) | Variable-length string. | n indicates the byte length. The value of n must be less than 10485761. | The maximum size is 10 MB.
VARCHAR2(n) | Variable-length string; an alias for VARCHAR(n), compatible with Oracle. | n indicates the byte length. The value of n must be less than 10485761. | The maximum size is 10 MB.
NVARCHAR2(n) | Variable-length string. | n indicates the string length in characters. The value of n must be less than 10485761. | The maximum size is 10 MB.
CLOB | Variable-length string; a big text object. An alias for TEXT, compatible with Oracle. | - | The maximum size is 1,073,733,621 bytes (1 GB - 8203 bytes).
TEXT | Variable-length string. | - | The maximum size is 1,073,733,621 bytes (1 GB - 8203 bytes).

  • In addition to the size limitation on each column, the total size of each tuple is limited to 1,073,733,621 bytes (1 GB - 8203 bytes).
  • For string data, you are advised to use variable-length strings and specify a maximum length. To avoid truncation, ensure that the specified maximum length is greater than the maximum number of characters to be stored. You are not advised to use the fixed-length character types CHAR(n), NCHAR(n), and CHARACTER(n) unless you know the data really is fixed-length.

GaussDB(DWS) has two other fixed-length character types, listed in Table 2.

The name type is used only in internal system catalogs as a storage identifier. Its length is 64 bytes (63 characters plus the terminator). This data type is not recommended for common users. When the name type is aligned with other data types (for example, when one branch of a CASE WHEN expression returns name and another branch returns text), values may be silently truncated to 63 characters. If you do not want such truncation, explicitly cast the name value to text.

The "char" type uses only one byte of storage. It is used internally in the system catalogs as a simplistic enumeration type.

Table 2 Special character types

Name | Description | Storage Space
name | Internal type for object names | 64 bytes
"char" | Single-byte internal type | 1 byte
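The 63-character truncation of the name type can be observed directly with a cast; a small sketch, assuming the default 64-byte name length described above:

SELECT length(repeat('a', 100)::name);
 length 
--------
     63
(1 row)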

Length

If a column is defined as char(n) or varchar(n), n indicates the maximum length. Regardless of the type, the length cannot exceed 10485760 bytes (10 MB).

When the data length exceeds the specified length n, the error "value too long" is reported. Alternatively, you can cast the value explicitly so that data exceeding the length is automatically truncated.

Example:

1. Create table t1 and specify the character types of its columns.

CREATE TABLE t1 (a char(5),b varchar(5));

2. An error is reported when the length of data inserted into table t1 exceeds the specified length.

INSERT INTO t1 VALUES('bookstore','123');
ERROR:  value too long for type character(5)
CONTEXT:  referenced column: a

3. Insert data into table t1 with explicit casts, so that over-length data is automatically truncated.

INSERT INTO t1 VALUES('bookstore'::char(5),'12345678'::varchar(5));
INSERT 0 1

SELECT a,b FROM t1;
   a   |   b
-------+-------
 books | 12345
(1 row)

Fixed Length and Variable Length

All character types can be classified into fixed-length strings and variable-length strings. Fixed-length types pad stored values with spaces up to the declared length, whereas variable-length types store only the actual string.

Example:

1. Create table t2 and specify the character types of its columns.

CREATE TABLE t2 (a char(5),b varchar(5));

2. Insert data into table t2 and query the byte length of column a. During table creation, column a was declared as the fixed-length type char(5). Because the inserted data is shorter than 5 bytes, spaces are appended, so the queried length is 5.

INSERT INTO t2 VALUES('abc','abc');
INSERT 0 1

SELECT a,lengthb(a),b FROM t2;
   a   | lengthb |  b
-------+---------+-----
 abc   |       5 | abc
(1 row)

3. After conversion to text, the actual queried length of column a is 3 bytes; the trailing pad spaces are also not significant in comparisons.

SELECT a = b from t2;
 ?column?
----------
 t
(1 row)

SELECT cast(a as text) as val,lengthb(val) FROM t2;
 val | lengthb
-----+---------
 abc |       3
(1 row)

Bytes and Characters

n means different things in VARCHAR2(n) and NVARCHAR2(n): in VARCHAR2(n), n is a length in bytes, whereas in NVARCHAR2(n), n is a length in characters.
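A small sketch of the difference, assuming a UTF-8 database encoding (the table name t3 is purely illustrative; the three-character string below occupies nine bytes):

CREATE TABLE t3 (a VARCHAR2(6), b NVARCHAR2(6));

-- Three characters but nine bytes: fits the character limit, not the byte limit.
INSERT INTO t3(b) VALUES('数据库');   -- succeeds: 3 characters <= 6
INSERT INTO t3(a) VALUES('数据库');   -- fails: 9 bytes > 6

DROP TABLE t3;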

Empty Strings and NULL

In Oracle compatibility mode, empty strings and NULL are not distinguished: when a statement queries or imports data, empty strings are processed as NULL.

Consequently, = '' cannot be used as a query condition, and neither can is ''; they return no result set. The correct usage is is null or is not null.

Example:

1. Create table t4 and specify the character type of its column.

CREATE TABLE t4 (a text);

2. Insert data into table t4. The inserted values include an empty string and NULL.

INSERT INTO t4 VALUES('abc'),(''),(null);
INSERT 0 3

3. Check whether t4 contains null values. The empty string is reported as null as well.

SELECT a,a isnull FROM t4;
  a  | ?column?
-----+----------
     | t
     | t
 abc | f
(3 rows)

SELECT a,a isnull FROM t4 WHERE a is null;
 a | ?column?
---+----------
   | t
   | t
(2 rows)
+ diff --git a/docs/dws/dev/dws_06_0013.html b/docs/dws/dev/dws_06_0013.html new file mode 100644 index 00000000..74d4f8c1 --- /dev/null +++ b/docs/dws/dev/dws_06_0013.html @@ -0,0 +1,100 @@ + + +

Binary Data Types

Table 1 lists the binary data types that can be used in GaussDB(DWS).

Table 1 Binary data types

Name | Description | Storage Space
BLOB | Binary large object. Currently, BLOB supports only the following external access interfaces: DBMS_LOB.GETLENGTH, DBMS_LOB.READ, DBMS_LOB.WRITE, DBMS_LOB.WRITEAPPEND, DBMS_LOB.COPY, and DBMS_LOB.ERASE. For details about the interfaces, see DBMS_LOB. NOTE: Column storage cannot be used for the BLOB type. | The maximum size is 1,073,733,621 bytes (1 GB - 8203 bytes).
RAW | Variable-length hexadecimal string. NOTE: Column storage cannot be used for the RAW type. | 4 bytes plus the actual hexadecimal string. The maximum size is 1,073,733,621 bytes (1 GB - 8203 bytes).
BYTEA | Variable-length binary string. | 4 bytes plus the actual binary string. The maximum size is 1,073,733,621 bytes (1 GB - 8203 bytes).

In addition to the size limitation on each column, the total size of each tuple is limited to 1,073,733,621 bytes (1 GB - 8203 bytes).

Examples

-- Create a table:
CREATE TABLE blob_type_t1 
(
    BT_COL1 INTEGER,
    BT_COL2 BLOB,
    BT_COL3 RAW,
    BT_COL4 BYTEA
) DISTRIBUTE BY REPLICATION;

-- Insert data:
INSERT INTO blob_type_t1 VALUES(10,empty_blob(),
HEXTORAW('DEADBEEF'),E'\\xDEADBEEF');

-- Query data in the table:
SELECT * FROM blob_type_t1;
 bt_col1 | bt_col2 | bt_col3  |  bt_col4   
---------+---------+----------+------------
      10 |         | DEADBEEF | \xdeadbeef
(1 row)

-- Delete the table:
DROP TABLE blob_type_t1;
+ diff --git a/docs/dws/dev/dws_06_0014.html b/docs/dws/dev/dws_06_0014.html new file mode 100644 index 00000000..3cd0da14 --- /dev/null +++ b/docs/dws/dev/dws_06_0014.html @@ -0,0 +1,732 @@ + + +

Date/Time Types

Table 1 lists the date and time types supported by GaussDB(DWS). For the operators and built-in functions of these types, see Date and Time Processing Functions and Operators.

If the time format of another database differs from that of GaussDB(DWS), modify the DateStyle parameter to keep them consistent.

Table 1 Date/Time types

Name | Description | Storage Space
DATE | In Oracle compatibility mode, it is equivalent to timestamp(0) and records the date and time. In other modes, it records only the date. | 8 bytes in Oracle compatibility mode; 4 bytes in other modes
TIME [(p)] [WITHOUT TIME ZONE] | Time of day (no date). p indicates the precision after the decimal point and ranges from 0 to 6. | 8 bytes
TIME [(p)] [WITH TIME ZONE] | Time of day (with time zone). p indicates the precision after the decimal point and ranges from 0 to 6. | 12 bytes
TIMESTAMP [(p)] [WITHOUT TIME ZONE] | Date and time. p indicates the precision after the decimal point and ranges from 0 to 6. | 8 bytes
TIMESTAMP [(p)] [WITH TIME ZONE] | Date and time (with time zone), also called TIMESTAMPTZ. p indicates the precision after the decimal point and ranges from 0 to 6. | 8 bytes
SMALLDATETIME | Date and time (without time zone), with minute-level precision; 31s to 59s are rounded up to the next minute. | 8 bytes
INTERVAL DAY (l) TO SECOND (p) | Time interval (X days X hours X minutes X seconds). l indicates the precision of days and ranges from 0 to 6 (accepted for Oracle syntax compatibility, but the precision does not take effect); p indicates the precision of seconds and ranges from 0 to 6. A trailing 0 in a decimal is not displayed. | 16 bytes
INTERVAL [FIELDS] [(p)] | Time interval. fields can be YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, DAY TO HOUR, DAY TO MINUTE, DAY TO SECOND, HOUR TO MINUTE, HOUR TO SECOND, or MINUTE TO SECOND. p indicates the precision of seconds and ranges from 0 to 6; it takes effect only when fields is SECOND, DAY TO SECOND, HOUR TO SECOND, or MINUTE TO SECOND. A trailing 0 in a decimal is not displayed. | 12 bytes
reltime | Relative time interval, in the format "X years X months X days XX:XX:XX". The Julian calendar is used: a year has 365.25 days and a month has 30 days. The relative time interval is calculated based on the input value, and the output format is POSTGRES. | 4 bytes

For example:

-- Create a table:
CREATE TABLE date_type_tab(coll date);

-- Insert data:
INSERT INTO date_type_tab VALUES (date '12-10-2010');

-- View data:
SELECT * FROM date_type_tab;
        coll         
---------------------
 2010-12-10 00:00:00
(1 row)

-- Delete the tables:
DROP TABLE date_type_tab;

-- Create a table:
CREATE TABLE time_type_tab (da time without time zone ,dai time with time zone,dfgh timestamp without time zone,dfga timestamp with time zone, vbg smalldatetime);

-- Insert data:
INSERT INTO time_type_tab VALUES ('21:21:21','21:21:21 pst','2010-12-12','2013-12-11 pst','2003-04-12 04:05:06');

-- View data:
SELECT * FROM time_type_tab;
    da    |     dai     |        dfgh         |          dfga          |         vbg         
----------+-------------+---------------------+------------------------+---------------------
 21:21:21 | 21:21:21-08 | 2010-12-12 00:00:00 | 2013-12-11 16:00:00+08 | 2003-04-12 04:05:00
(1 row)

-- Delete the tables:
DROP TABLE time_type_tab;

-- Create a table:
CREATE TABLE day_type_tab (a int,b INTERVAL DAY(3) TO SECOND (4)); 

-- Insert data:
INSERT INTO day_type_tab VALUES (1, INTERVAL '3' DAY);

-- View data:
SELECT * FROM day_type_tab;
 a |   b    
---+--------
 1 | 3 days
(1 row)

-- Delete the tables:
DROP TABLE day_type_tab;

-- Create a table:
CREATE TABLE year_type_tab(a int, b interval year (6));

-- Insert data:
INSERT INTO year_type_tab VALUES(1,interval '2' year);

-- View data:
SELECT * FROM year_type_tab;
 a |    b    
---+---------
 1 | 2 years
(1 row)

-- Delete the tables:
DROP TABLE year_type_tab;
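The minute-level rounding of SMALLDATETIME described in Table 1 can be seen with a simple cast; a quick sketch (the result column header may vary):

SELECT '2003-04-12 04:05:31'::smalldatetime;
    smalldatetime    
---------------------
 2003-04-12 04:06:00
(1 row)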

Date Input

Date and time input is accepted in almost any reasonable format, including ISO 8601, SQL-compatible, and traditional POSTGRES. The system also allows you to customize the order of day, month, and year in date input. Set the DateStyle parameter to MDY for month-day-year interpretation, DMY for day-month-year interpretation, or YMD for year-month-day interpretation.

Remember that any date or time literal input needs to be enclosed in single quotes, with the following syntax:

type [ ( p ) ] 'value'

The optional precision p is an integer indicating the number of fractional digits in the seconds column. Table 2 shows some possible inputs for the date type.

Table 2 Date input

Example | Description
1999-01-08 | ISO 8601 (recommended format). January 8, 1999 in any mode
January 8, 1999 | Unambiguous in any date input mode
1/8/1999 | January 8 in MDY mode; August 1 in DMY mode
1/18/1999 | January 18 in MDY mode; rejected in other modes
01/02/03 | January 2, 2003 in MDY mode; February 1, 2003 in DMY mode; February 3, 2001 in YMD mode
1999-Jan-08 | January 8 in any mode
Jan-08-1999 | January 8 in any mode
08-Jan-1999 | January 8 in any mode
99-Jan-08 | January 8 in YMD mode, else error
08-Jan-99 | January 8, except error in YMD mode
Jan-08-99 | January 8, except error in YMD mode
19990108 | ISO 8601. January 8, 1999 in any mode
990108 | ISO 8601. January 8, 1999 in any mode
1999.008 | Year and day of year
J2451187 | Julian date
January 8, 99 BC | Year 99 BC

For example:

-- Create a table:
CREATE TABLE date_type_tab(coll date);

-- Insert data:
INSERT INTO date_type_tab VALUES (date '12-10-2010');

-- View data:
SELECT * FROM date_type_tab;
        coll         
---------------------
 2010-12-10 00:00:00
(1 row)

-- View the date format:
SHOW datestyle;
 DateStyle 
-----------
 ISO, MDY
(1 row)

-- Configure the date format:
SET datestyle='YMD';
SET

-- Insert data:
INSERT INTO date_type_tab VALUES(date '2010-12-11');

-- View data:
SELECT * FROM date_type_tab;
        coll         
---------------------
 2010-12-10 00:00:00
 2010-12-11 00:00:00
(2 rows)

-- Delete the tables:
DROP TABLE date_type_tab;

Times

The time-of-day types are TIME [(p)] [WITHOUT TIME ZONE] and TIME [(p)] [WITH TIME ZONE]. TIME alone is equivalent to TIME WITHOUT TIME ZONE.

If a time zone is specified in the input for TIME WITHOUT TIME ZONE, it is silently ignored.

For details about time input, see Table 3. For details about time zone input, see Table 4.

Table 3 Time input

Example | Description
05:06.8 | ISO 8601
4:05:06 | ISO 8601
4:05 | ISO 8601
40506 | ISO 8601
4:05 AM | Same as 04:05. AM does not affect the value
4:05 PM | Same as 16:05. The input hour must be <= 12
04:05:06.789-8 | ISO 8601
04:05:06-08:00 | ISO 8601
04:05-08:00 | ISO 8601
040506-08 | ISO 8601
04:05:06 PST | Time zone specified by abbreviation
2003-04-12 04:05:06 America/New_York | Time zone specified by full name

Table 4 Time zone input

Example | Description
PST | Abbreviation (for Pacific Standard Time)
America/New_York | Full time zone name
-8:00 | ISO 8601 offset for PST
-800 | ISO 8601 offset for PST
-8 | ISO 8601 offset for PST

For example:

SELECT time '04:05:06';
   time   
----------
 04:05:06
(1 row)

SELECT time '04:05:06 PST';
   time   
----------
 04:05:06
(1 row)

SELECT time with time zone '04:05:06 PST';
   timetz    
-------------
 04:05:06-08
(1 row)

Special Values

The special values supported by GaussDB(DWS) are converted to ordinary date/time values when read. For details, see Table 5.

Table 5 Special values

Input String | Applicable Type | Description
epoch | date, timestamp | 1970-01-01 00:00:00+00 (Unix system time zero)
infinity | timestamp | Later than any other timestamp
-infinity | timestamp | Earlier than any other timestamp
now | date, time, timestamp | Start time of the current transaction
today | date, timestamp | Midnight today
tomorrow | date, timestamp | Midnight tomorrow
yesterday | date, timestamp | Midnight yesterday
allballs | time | 00:00:00.00 UTC
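A quick sketch of how two of these special strings are read:

SELECT 'epoch'::timestamp AS epoch, 'allballs'::time AS allballs;
        epoch        | allballs 
---------------------+----------
 1970-01-01 00:00:00 | 00:00:00
(1 row)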

Interval Input

The input of reltime can be any valid interval in TEXT format. It can be a number (negative numbers and decimals are also allowed) or a specific time, which must be in SQL standard format, ISO 8601 format, or POSTGRES format. The text input must be enclosed in single quotation marks ('').

For details, see Table 6.

Table 6 Interval input

Input | Output | Description
60 | 2 mons | Plain numbers indicate intervals, with day as the default unit. Decimals and negative numbers are allowed; a negative interval syntactically means "how long before".
31.25 | 1 mons 1 days 06:00:00 | (same as above)
-365 | -12 mons -5 days | (same as above)
1 years 1 mons 8 days 12:00:00 | 1 years 1 mons 8 days 12:00:00 | Intervals in POSTGRES format. They can contain both positive and negative numbers and are case-insensitive. The output is a simplified POSTGRES interval converted from the input.
-13 months -10 hours | -1 years -25 days -04:00:00 | (same as above)
-2 YEARS +5 MONTHS 10 DAYS | -1 years -6 mons -25 days -06:00:00 | (same as above)
P-1.1Y10M | -3 mons -5 days -06:00:00 | Intervals in ISO 8601 format. They can contain both positive and negative numbers and are case-insensitive. The output is a simplified POSTGRES interval converted from the input.
-12H | -12:00:00 | (same as above)

For example:

-- Create a table.
CREATE TABLE reltime_type_tab(col1 character(30), col2 reltime);

-- Insert data.
INSERT INTO reltime_type_tab VALUES ('90', '90');
INSERT INTO reltime_type_tab VALUES ('-366', '-366');
INSERT INTO reltime_type_tab VALUES ('1975.25', '1975.25');
INSERT INTO reltime_type_tab VALUES ('-2 YEARS +5 MONTHS 10 DAYS', '-2 YEARS +5 MONTHS 10 DAYS');
INSERT INTO reltime_type_tab VALUES ('30 DAYS 12:00:00', '30 DAYS 12:00:00');
INSERT INTO reltime_type_tab VALUES ('P-1.1Y10M', 'P-1.1Y10M');

-- View data.
SELECT * FROM reltime_type_tab;
              col1              |                col2                 
--------------------------------+-------------------------------------
 1975.25                        | 5 years 4 mons 29 days
 -2 YEARS +5 MONTHS 10 DAYS     | -1 years -6 mons -25 days -06:00:00
 P-1.1Y10M                      | -3 mons -5 days -06:00:00
 -366                           | -1 years -18:00:00
 90                             | 3 mons
 30 DAYS 12:00:00               | 1 mon 12:00:00
(6 rows)

-- Delete tables.
DROP TABLE reltime_type_tab;
+ diff --git a/docs/dws/dev/dws_06_0015.html b/docs/dws/dev/dws_06_0015.html new file mode 100644 index 00000000..d8c78345 --- /dev/null +++ b/docs/dws/dev/dws_06_0015.html @@ -0,0 +1,138 @@ + + +

Geometric Types

Table 1 lists the geometric types that can be used in GaussDB(DWS). The most fundamental type, the point, forms the basis for all of the other types.

Table 1 Geometric types

Name | Storage Space | Description | Representation
point | 16 bytes | Point on a plane | (x,y)
lseg | 32 bytes | Finite line segment | ((x1,y1),(x2,y2))
box | 32 bytes | Rectangular box | ((x1,y1),(x2,y2))
path | 16+16n bytes | Closed path (similar to polygon) | ((x1,y1),...)
path | 16+16n bytes | Open path | [(x1,y1),...]
polygon | 40+16n bytes | Polygon (similar to closed path) | ((x1,y1),...)
circle | 24 bytes | Circle | <(x,y),r> (center point and radius)

A rich set of functions and operators is available in GaussDB(DWS) to perform various geometric operations, such as scaling, translation, rotation, and determining intersections. For details, see Geometric Functions and Operators.

Points

Points are the fundamental two-dimensional building block for geometric types. Values of the point type are specified using either of the following syntaxes:

( x , y )
x , y

where x and y are the respective coordinates, as floating-point numbers.

Points are output using the first syntax.

Line Segments

Line segments (lseg) are represented by pairs of points. Values of the lseg type are specified using any of the following syntaxes:

[ ( x1 , y1 ) , ( x2 , y2 ) ]
( ( x1 , y1 ) , ( x2 , y2 ) )
( x1 , y1 ) , ( x2 , y2 )
x1 , y1   ,   x2 , y2

where (x1,y1) and (x2,y2) are the end points of the line segment.

Line segments are output using the first syntax.

Rectangular Box

Boxes are represented by pairs of points that are opposite corners of the box. Values of the box type are specified using any of the following syntaxes:

( ( x1 , y1 ) , ( x2 , y2 ) )
( x1 , y1 ) , ( x2 , y2 )
x1 , y1   ,   x2 , y2

where (x1,y1) and (x2,y2) are any two opposite corners of the box.

Boxes are output using the second syntax.

Any two opposite corners can be supplied on input, but the values will be reordered as needed to store the upper-right and lower-left corners.
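A quick sketch of the corner reordering on input:

SELECT box '((0,1),(1,0))';
     box     
-------------
 (1,1),(0,0)
(1 row)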

Path

Paths are represented by lists of connected points. Paths can be open, where the first and last points in the list are considered not connected, or closed, where the first and last points are considered connected.

Values of the path type are specified using any of the following syntaxes:

[ ( x1 , y1 ) , ... , ( xn , yn ) ]
( ( x1 , y1 ) , ... , ( xn , yn ) )
( x1 , y1 ) , ... , ( xn , yn )
( x1 , y1   , ... ,   xn , yn )
x1 , y1   , ... ,   xn , yn

where the points are the end points of the line segments comprising the path. Square brackets ([]) indicate an open path, while parentheses (()) indicate a closed path. When the outermost parentheses are omitted, as in the third through fifth syntaxes, a closed path is assumed.

Paths are output using the first or second syntax.

Polygons

Polygons are represented by lists of points (the vertexes of the polygon). Polygons are very similar to closed paths, but are stored differently and have their own set of support functions.

Values of the polygon type are specified using any of the following syntaxes:

( ( x1 , y1 ) , ... , ( xn , yn ) )
( x1 , y1 ) , ... , ( xn , yn )
( x1 , y1   , ... ,   xn , yn )
x1 , y1   , ... ,   xn , yn

where the points are the end points of the line segments comprising the boundary of the polygon.

Polygons are output using the first syntax.

Circle

Circles are represented by a center point and a radius. Values of the circle type are specified using any of the following syntaxes:

< ( x , y ) , r >
( ( x , y ) , r )
( x , y ) , r
x , y   , r

where (x,y) is the center point and r is the radius of the circle.

Circles are output using the first syntax.
+ diff --git a/docs/dws/dev/dws_06_0016.html b/docs/dws/dev/dws_06_0016.html new file mode 100644 index 00000000..ac9208a6 --- /dev/null +++ b/docs/dws/dev/dws_06_0016.html @@ -0,0 +1,167 @@ + + +

Network Address Types

GaussDB(DWS) offers data types to store IPv4, IPv6, and MAC addresses.

It is better to use these types instead of plain text types to store network addresses, because they offer input error checking and specialized operators and functions. For details, see Network Address Functions and Operators.

Table 1 Network address types

Name | Storage Space | Description
cidr | 7 or 19 bytes | IPv4 or IPv6 networks
inet | 7 or 19 bytes | IPv4 or IPv6 hosts and networks
macaddr | 6 bytes | MAC addresses

When sorting inet or cidr data types, IPv4 addresses always sort before IPv6 addresses, including IPv4 addresses encapsulated in or mapped to IPv6 addresses, such as ::10.2.3.4 or ::ffff:10.4.3.2.

cidr

The cidr type (Classless Inter-Domain Routing) holds an IPv4 or IPv6 network specification. The format for specifying networks is address/y, where address is the network represented as an IPv4 or IPv6 address and y is the number of bits in the netmask. If y is omitted, it is calculated using assumptions from the older classful network numbering system, except that it will be at least large enough to include all of the octets written in the input.

Table 2 cidr type input examples

cidr Input | cidr Output | abbrev(cidr)
192.168.100.128/25 | 192.168.100.128/25 | 192.168.100.128/25
192.168/24 | 192.168.0.0/24 | 192.168.0/24
192.168/25 | 192.168.0.0/25 | 192.168.0.0/25
192.168.1 | 192.168.1.0/24 | 192.168.1/24
192.168 | 192.168.0.0/24 | 192.168.0/24
10.1.2 | 10.1.2.0/24 | 10.1.2/24
10.1 | 10.1.0.0/16 | 10.1/16
10 | 10.0.0.0/8 | 10/8
10.1.2.3/32 | 10.1.2.3/32 | 10.1.2.3/32
2001:4f8:3:ba::/64 | 2001:4f8:3:ba::/64 | 2001:4f8:3:ba::/64
2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128 | 2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128 | 2001:4f8:3:ba:2e0:81ff:fe22:d1f1
::ffff:1.2.3.0/120 | ::ffff:1.2.3.0/120 | ::ffff:1.2.3/120
::ffff:1.2.3.0/128 | ::ffff:1.2.3.0/128 | ::ffff:1.2.3.0/128

inet

The inet type holds an IPv4 or IPv6 host address and, optionally, its subnet, all in one field. The subnet is represented by the number of network address bits present in the host address (the "netmask"). If the netmask is 32 and the address is IPv4, the value does not indicate a subnet, only a single host. In IPv6, the address length is 128 bits, so 128 bits specify a unique host address.

The input format for this type is address/y, where address is an IPv4 or IPv6 address and y is the number of bits in the netmask. If the /y portion is missing, the netmask is 32 for IPv4 and 128 for IPv6, so the value represents just a single host. On display, the /y portion is suppressed if the netmask specifies a single host.

The essential difference between the inet and cidr data types is that inet accepts values with nonzero bits to the right of the netmask, whereas cidr does not.
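A quick sketch of that difference (the exact error wording may vary):

SELECT '192.168.0.1/24'::inet;   -- accepted: host bits to the right of the mask may be nonzero
SELECT '192.168.0.1/24'::cidr;
ERROR:  invalid cidr value: "192.168.0.1/24"
DETAIL:  Value has bits set to right of mask.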

macaddr

The macaddr type stores MAC addresses, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). Input is accepted in the following formats:

'08:00:2b:01:02:03'
'08-00-2b-01-02-03'
'08002b:010203'
'08002b-010203'
'0800.2b01.0203'
'08002b010203'

These examples all specify the same address. Both uppercase and lowercase are accepted for the digits a through f. Output is always in the first of the forms shown.
+ diff --git a/docs/dws/dev/dws_06_0017.html b/docs/dws/dev/dws_06_0017.html new file mode 100644 index 00000000..d49f635d --- /dev/null +++ b/docs/dws/dev/dws_06_0017.html @@ -0,0 +1,76 @@ + + +

Bit String Types

Bit strings are strings of 1s and 0s. They can be used to store bit masks.

GaussDB(DWS) supports two SQL bit types: bit(n) and bit varying(n), where n is a positive integer.

bit data must match the length n exactly; it is an error to attempt to store a shorter or longer bit string. bit varying data is of variable length, up to the maximum length n; longer strings are rejected. Writing bit without a length is equivalent to bit(1), while bit varying without a length specification means unlimited length.

If one explicitly casts a bit-string value to bit(n), it will be truncated or zero-padded on the right to be exactly n bits, without raising an error.

Similarly, if one explicitly casts a bit-string value to bit varying(n), it will be truncated on the right if it is longer than n bits.
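A quick sketch of both cast behaviors:

SELECT B'101'::bit(5) AS padded, B'10101'::bit(3) AS truncated;
 padded | truncated 
--------+-----------
 10100  | 101
(1 row)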
+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
-- Create a table:
+CREATE TABLE bit_type_t1 
+(
+    BT_COL1 INTEGER,
+    BT_COL2 BIT(3),
+    BT_COL3 BIT VARYING(5)
+) DISTRIBUTE BY REPLICATION;
+
+--Insert data:
+INSERT INTO bit_type_t1 VALUES(1, B'101', B'00');
+
+-- Specify the type length. An error is reported if an inserted string exceeds this length.
+INSERT INTO bit_type_t1 VALUES(2, B'10', B'101');
+ERROR:  bit string length 2 does not match type bit(3)
+CONTEXT:  referenced column: bt_col2
+
+-- With an explicit cast, the data is converted (truncated or zero-padded) to the specified length.
+INSERT INTO bit_type_t1 VALUES(2, B'10'::bit(3), B'101');
+
+-- View data:
+SELECT * FROM bit_type_t1;
+ bt_col1 | bt_col2 | bt_col3 
+---------+---------+---------
+       1 | 101     | 00
+       2 | 100     | 101
+(2 rows)
+
+-- Delete the tables:
+DROP TABLE bit_type_t1;
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0018.html b/docs/dws/dev/dws_06_0018.html new file mode 100644 index 00000000..0096325d --- /dev/null +++ b/docs/dws/dev/dws_06_0018.html @@ -0,0 +1,212 @@ + + +

Text Search Types

+

GaussDB(DWS) offers two data types that are designed to support full text search. The tsvector type represents a document in a form optimized for text search. The tsquery type similarly represents a text query.

+

tsvector

The tsvector type represents a retrieval unit, usually a textual column within a row of a database table, or a combination of such columns. A tsvector value is a sorted list of distinct lexemes, which are words that have been normalized to merge different variants of the same word. Sorting and deduplication are done automatically during input. The to_tsvector function is used to parse and normalize a document string.

+

When a string is cast to tsvector, the entries are automatically deduplicated and sorted during input. For example:

+
1
+2
+3
+4
+5
SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector;
+                      tsvector                      
+----------------------------------------------------
+ 'a' 'and' 'ate' 'cat' 'fat' 'mat' 'on' 'rat' 'sat'
+(1 row)
+
+ +
+

As the preceding example shows, tsvector splits a string at whitespace, and the resulting lexemes are sorted alphabetically. To represent lexemes containing whitespace or punctuation, surround them with quotes:

+
1
+2
+3
+4
+5
SELECT $$the lexeme '    ' contains spaces$$::tsvector;
+                 tsvector                  
+-------------------------------------------
+ '    ' 'contains' 'lexeme' 'spaces' 'the'
+(1 row)
+
+ +
+

Use double dollar signs ($$) to mark entries containing single quotation marks (').

+
1
+2
+3
+4
+5
SELECT $$the lexeme 'Joe''s' contains a quote$$::tsvector;
+                    tsvector                    
+------------------------------------------------
+ 'Joe''s' 'a' 'contains' 'lexeme' 'quote' 'the'
+(1 row)
+
+ +
+

Optionally, integer positions can be attached to lexemes:

+
1
+2
+3
+4
+5
SELECT 'a:1 fat:2 cat:3 sat:4 on:5 a:6 mat:7 and:8 ate:9 a:10 fat:11 rat:12'::tsvector;
+                                   tsvector                                    
+-------------------------------------------------------------------------------
+ 'a':1,6,10 'and':8 'ate':9 'cat':3 'fat':2,11 'mat':7 'on':5 'rat':12 'sat':4
+(1 row)
+
+ +
+

A position normally indicates the source word's location in the document. Positional information can be used for proximity ranking. Position values range from 1 to 16383; larger values are silently capped at 16383. Duplicate positions for the same lexeme are discarded.

+

Lexemes that have positions can further be labeled with a weight, which can be A, B, C, or D. D is the default and hence is not shown on output:

+
1
+2
+3
+4
+5
SELECT 'a:1A fat:2B,4C cat:5D'::tsvector;
+          tsvector          
+----------------------------
+ 'a':1A 'cat':5 'fat':2B,4C
+(1 row)
+
+ +
+

Weights are typically used to reflect document structure, for example, by marking title words differently from body words. Text search ranking functions can assign different priorities to the different weight markers.

+

The following example shows standard usage of the tsvector type:

+
1
+2
+3
+4
+5
SELECT 'The Fat Rats'::tsvector;
+      tsvector      
+--------------------
+ 'Fat' 'Rats' 'The'
+(1 row)
+
+ +
+

For most English-text-searching applications the above words would be considered non-normalized; they should usually be passed through to_tsvector to normalize them appropriately for searching:

+
1
+2
+3
+4
+5
SELECT to_tsvector('english', 'The Fat Rats');
+   to_tsvector   
+-----------------
+ 'fat':2 'rat':3
+(1 row)
+
+ +
+
+

tsquery

The tsquery type represents a retrieval condition. A tsquery value stores lexemes that are to be searched for, and combines them honoring the Boolean operators & (AND), | (OR), and ! (NOT). Parentheses can be used to enforce grouping of the operators. The to_tsquery and plainto_tsquery functions will normalize lexemes before the lexemes are converted to the tsquery type.

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
SELECT 'fat & rat'::tsquery;
+    tsquery    
+---------------
+ 'fat' & 'rat'
+(1 row)
+
+SELECT 'fat & (rat | cat)'::tsquery;
+          tsquery          
+---------------------------
+ 'fat' & ( 'rat' | 'cat' )
+(1 row)
+
+SELECT 'fat & rat & ! cat'::tsquery;
+        tsquery         
+------------------------
+ 'fat' & 'rat' & !'cat'
+(1 row)
+
+ +
+

In the absence of parentheses, ! (NOT) binds most tightly, and & (AND) binds more tightly than | (OR).

+

Lexemes in a tsquery can be labeled with one or more weight letters, which restrict them to match only tsvector lexemes with matching weights:

+
1
+2
+3
+4
+5
SELECT 'fat:ab & cat'::tsquery;
+     tsquery      
+------------------
+ 'fat':AB & 'cat'
+(1 row)
+
+ +
+

Also, lexemes in a tsquery can be labeled with * to specify prefix matching:

+
1
+2
+3
+4
+5
SELECT 'super:*'::tsquery;
+  tsquery  
+-----------
+ 'super':*
+(1 row)
+
+ +
+

This query will match any word in a tsvector that begins with "super".

+

Note that prefixes are first processed by text search configurations, which means the following example returns true:

+
1
+2
+3
+4
+5
SELECT to_tsvector( 'postgraduate' ) @@ to_tsquery( 'postgres:*' ) AS RESULT;
+  result  
+----------
+ t
+(1 row)
+
+ +
+

because postgres gets stemmed to postgr:

+
1
+2
+3
+4
+5
SELECT to_tsquery('postgres:*');
+ to_tsquery 
+------------
+ 'postgr':*
+(1 row)
+
+ +
+

which then matches postgraduate.

+

'Fat:ab & Cats' is normalized to the tsquery type as follows:

+
1
+2
+3
+4
+5
SELECT to_tsquery('Fat:ab & Cats');
+    to_tsquery    
+------------------
+ 'fat':AB & 'cat'
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0019.html b/docs/dws/dev/dws_06_0019.html new file mode 100644 index 00000000..cb4a20d9 --- /dev/null +++ b/docs/dws/dev/dws_06_0019.html @@ -0,0 +1,20 @@ + + +

UUID Type

+

The data type UUID stores Universally Unique Identifiers (UUID) as defined by RFC 4122, ISO/IEC 9834-8:2005, and related standards. This identifier is a 128-bit quantity that is generated by an algorithm chosen to make it very unlikely that the same identifier will be generated by anyone else in the known universe using the same algorithm.

+

Therefore, for distributed systems, these identifiers provide a better uniqueness guarantee than sequence generators, which are only unique within a single database.

+

A UUID is written as a sequence of lower-case hexadecimal digits, in several groups separated by hyphens, specifically a group of 8 digits followed by three groups of 4 digits followed by a group of 12 digits, for a total of 32 digits representing the 128 bits. An example of a UUID in this standard form is:

+
a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+

GaussDB(DWS) also accepts the following alternative forms for input: use of upper-case digits, the standard format surrounded by braces, omitting some or all hyphens, and adding a hyphen after any group of four digits. For example:

+
A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11
+{a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11}
+a0eebc999c0b4ef8bb6d6bb9bd380a11
+a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
+

Output is always in the standard form.

+
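For example, an alternative input form is normalized to the standard form on output (a minimal sketch):
SELECT '{A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11}'::uuid AS result;
                result
--------------------------------------
 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
(1 row)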
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0020.html b/docs/dws/dev/dws_06_0020.html new file mode 100644 index 00000000..8297aab4 --- /dev/null +++ b/docs/dws/dev/dws_06_0020.html @@ -0,0 +1,12 @@ + + +

JSON Types

+

JSON data types are for storing JavaScript Object Notation (JSON) data. Such data can also be stored as TEXT, but the JSON data type has the advantage of checking that each stored value is a valid JSON value.

+

For functions that support the JSON data type, see JSON Functions.

+
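For example, the following minimal sketch shows the validity check performed at input time (the exact error text may vary):
SELECT '{"name": "Tom", "tags": [1, 2]}'::json AS result;
              result
----------------------------------
 {"name": "Tom", "tags": [1, 2]}
(1 row)

SELECT '{"name": }'::json;
ERROR:  invalid input syntax for type json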
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0021.html b/docs/dws/dev/dws_06_0021.html new file mode 100644 index 00000000..fffc972a --- /dev/null +++ b/docs/dws/dev/dws_06_0021.html @@ -0,0 +1,259 @@ + + +

HLL Data Types

+

HyperLogLog (HLL) is an approximation algorithm for efficiently counting the number of distinct values in a data set. It features fast computing and low space usage: you only need to store the HLL data structure, not the data set itself. When new data is added to the data set, hash the data and insert the result into an HLL; the final distinct count can then be obtained from the HLL.

+

Table 1 compares HLL with other algorithms.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Comparison between HLL and other algorithms

Item

+

Sorting Algorithm

+

Hash Algorithm

+

HLL

+

Time complexity

+

O(nlogn)

+

O(n)

+

O(n)

+

Space complexity

+

O(n)

+

O(n)

+

1280 bytes

+

Error rate

+

0

+

0

+

≈2%

+

Storage space requirement

+

Size of raw data

+

Size of raw data

+

1280 bytes

+
+
+

HLL has advantages over the other algorithms in computing speed and storage space requirements. In terms of time complexity, the sorting algorithm needs O(nlogn) time for sorting, while the hash algorithm and HLL need O(n) time for a full table scan. In terms of storage space, the sorting and hash algorithms need to store the raw data before collecting statistics, whereas the HLL algorithm stores only the HLL data structure rather than the raw data, occupying a fixed space of only 1280 bytes.

+
  • In default specifications, the maximum number of distinct values is 1.6e+12, and the maximum error rate is only 2.3%. If a calculation result exceeds the maximum number, the error rate of the calculation result will increase, or the calculation will fail and an error will be reported.
  • When using this feature for the first time, you need to evaluate the distinct values of the service, properly select configuration parameters, and perform verification to ensure that the accuracy meets requirements.
    • When default parameter configuration is used, the calculated number of distinct values is 1.6e+12. If the calculated result is NaN, you need to adjust log2m and regwidth to accommodate more distinct values.
    • The hash algorithm has an extremely low probability of collision. However, you are still advised to select 2 or 3 hash seeds for verification when using the hash algorithm for the first time. If there is only a small difference between the distinct values, you can select any one of the seeds as the hash seed.
    +
+
+

Table 2 describes main HLL data structures.

+ +
+ + + + + + + +
Table 2 Main HLL data structures

Data Type

+

Description

+

hll

+

An hll value always occupies 1280 bytes and can be directly used to calculate the number of distinct values.

+
+
+

The following describes HLL application scenarios.

+ +
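A typical usage sketch follows (the table name helloworld is illustrative), based on the hll_empty, hll_add, hll_hash_text, and hll_cardinality functions described in HLL Functions and Operators:
-- Create a table with an hll column and count distinct hashed values:
CREATE TABLE helloworld (id integer, set hll);
INSERT INTO helloworld VALUES (1, hll_empty());
UPDATE helloworld SET set = hll_add(set, hll_hash_text('hello')) WHERE id = 1;
UPDATE helloworld SET set = hll_add(set, hll_hash_text('world')) WHERE id = 1;
SELECT hll_cardinality(set) FROM helloworld WHERE id = 1;
 hll_cardinality
-----------------
               2
(1 row)
DROP TABLE helloworld;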
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0022.html b/docs/dws/dev/dws_06_0022.html new file mode 100644 index 00000000..93aeac9f --- /dev/null +++ b/docs/dws/dev/dws_06_0022.html @@ -0,0 +1,236 @@ + + +

Object Identifier Types

+

Object identifiers (OIDs) are used internally by GaussDB(DWS) as primary keys for various system catalogs. OIDs are not added to user-created tables by the system. The OID type represents an object identifier.

+

The OID type is currently implemented as an unsigned four-byte integer, so it is not large enough to provide database-wide uniqueness in large databases. Therefore, using a user-created table's OID column as a primary key is discouraged.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Object identifier types

Name

+

Reference

+

Description

+

Examples

+

OID

+

-

+

Numeric object identifier

+

564182

+

CID

+

-

+

A command identifier. This is the data type of the system columns cmin and cmax. Command identifiers are 32-bit quantities.

+

-

+

XID

+

-

+

A transaction identifier. This is the data type of the system columns xmin and xmax. Transaction identifiers are also 32-bit quantities.

+

-

+

TID

+

-

+

A row identifier. This is the data type of the system column ctid. A row ID is a pair (block number, tuple index within block) that identifies the physical location of the row within its table.

+

-

+

REGCONFIG

+

pg_ts_config

+

Text search configuration

+

english

+

REGDICTIONARY

+

pg_ts_dict

+

Text search dictionary

+

simple

+

REGOPER

+

pg_operator

+

Operator name

+

+

+

REGOPERATOR

+

pg_operator

+

Operator with argument types

+

*(integer,integer) or -(NONE,integer)

+

REGPROC

+

pg_proc

+

Function name

+

sum

+

REGPROCEDURE

+

pg_proc

+

Function with argument types

+

sum(int4)

+

REGCLASS

+

pg_class

+

Relation name

+

pg_type

+

REGTYPE

+

pg_type

+

Data type name

+

integer

+
+
+

The OID type is used for columns in database system catalogs.

+

For example:

+
1
+2
+3
+4
+5
SELECT oid FROM pg_class WHERE relname = 'pg_type';
+ oid  
+------
+ 1247
+(1 row)
+
+ +
+

REGCLASS, an alias type for OID, allows simplified lookup of OID values.

+

For example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
SELECT attrelid,attname,atttypid,attstattarget FROM pg_attribute WHERE attrelid = 'pg_type'::REGCLASS;
+ attrelid |  attname   | atttypid | attstattarget 
+----------+------------+----------+---------------
+     1247 | xc_node_id     |       23 |             0
+     1247 | tableoid       |       26 |             0
+     1247 | cmax           |       29 |             0
+     1247 | xmax           |       28 |             0
+     1247 | cmin           |       29 |             0
+     1247 | xmin           |       28 |             0
+     1247 | oid            |       26 |             0
+     1247 | ctid           |       27 |             0
+     1247 | typname        |       19 |            -1
+     1247 | typnamespace   |       26 |            -1
+     1247 | typowner       |       26 |            -1
+     1247 | typlen         |       21 |            -1
+     1247 | typbyval       |       16 |            -1
+     1247 | typtype        |       18 |            -1
+     1247 | typcategory    |       18 |            -1
+     1247 | typispreferred |       16 |            -1
+     1247 | typisdefined   |       16 |            -1
+     1247 | typdelim       |       18 |            -1
+     1247 | typrelid       |       26 |            -1
+     1247 | typelem        |       26 |            -1
+     1247 | typarray       |       26 |            -1
+     1247 | typinput       |       24 |            -1
+     1247 | typoutput      |       24 |            -1
+     1247 | typreceive     |       24 |            -1
+     1247 | typsend        |       24 |            -1
+     1247 | typmodin       |       24 |            -1
+     1247 | typmodout      |       24 |            -1
+     1247 | typanalyze     |       24 |            -1
+     1247 | typalign       |       18 |            -1
+     1247 | typstorage     |       18 |            -1
+     1247 | typnotnull     |       16 |            -1
+     1247 | typbasetype    |       26 |            -1
+     1247 | typtypmod      |       23 |            -1
+     1247 | typndims       |       23 |            -1
+     1247 | typcollation   |       26 |            -1
+     1247 | typdefaultbin  |      194 |            -1
+     1247 | typdefault     |       25 |            -1
+     1247 | typacl         |     1034 |            -1
+(38 rows)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0023.html b/docs/dws/dev/dws_06_0023.html new file mode 100644 index 00000000..d30d72ec --- /dev/null +++ b/docs/dws/dev/dws_06_0023.html @@ -0,0 +1,127 @@ + + +

Pseudo-Types

+

GaussDB(DWS) has a number of special-purpose entries that are collectively called pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to declare a function's argument or result type.

+

Each of the available pseudo-types is useful in situations where a function's behavior does not correspond to simply taking or returning a value of a specific SQL data type. Table 1 lists all pseudo-types.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Pseudo-Types

Name

+

Description

+

any

+

Indicates that a function accepts any input data type.

+

anyelement

+

Indicates that a function accepts any data type.

+

anyarray

+

Indicates that a function accepts any array data type.

+

anynonarray

+

Indicates that a function accepts any non-array data type.

+

anyenum

+

Indicates that a function accepts any enum data type.

+

anyrange

+

Indicates that a function accepts any range data type.

+

cstring

+

Indicates that a function accepts or returns a null-terminated C string.

+

internal

+

Indicates that a function accepts or returns a server-internal data type.

+

language_handler

+

Indicates that a procedural language call handler is declared to return language_handler.

+

fdw_handler

+

Indicates that a foreign-data wrapper handler is declared to return fdw_handler.

+

record

+

Identifies a function returning an unspecified row type.

+

trigger

+

Indicates that a trigger function is declared to return trigger.

+

void

+

Indicates that a function returns no value.

+

opaque

+

Indicates an obsolete type name that formerly served all the above purposes.

+
+
+

Functions coded in C (whether built in or dynamically loaded) can be declared to accept or return any of these pseudo data types. It is up to the function author to ensure that the function will behave safely when a pseudo-type is used as an argument type.

+

Functions coded in procedural languages can use pseudo-types only as allowed by their implementation languages. At present the procedural languages all forbid use of a pseudo-type as argument type, and allow only void and record as a result type. Some also support polymorphic functions using the anyelement, anyarray, anynonarray, anyenum, and anyrange types.

+

The internal pseudo-type is used to declare functions that are meant only to be called internally by the database system, and not by direct call in an SQL query. If a function has at least one internal-type argument, it cannot be called from SQL. You are not advised to create any function that is declared to return internal unless the function has at least one internal argument.

+

For example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
-- Create or replace the showall() function:
+CREATE OR REPLACE FUNCTION showall() RETURNS SETOF record
+AS $$ SELECT count(*) from tpcds.store_sales where ss_customer_sk = 9692; $$
+LANGUAGE SQL;
+
+-- Invoke the showall() function:
+SELECT showall();
+ showall 
+---------
+ (35)
+(1 row)
+
+-- Delete the function:
+DROP FUNCTION showall();
+
+ +
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0024.html b/docs/dws/dev/dws_06_0024.html new file mode 100644 index 00000000..a54bac2b --- /dev/null +++ b/docs/dws/dev/dws_06_0024.html @@ -0,0 +1,219 @@ + + +

Data Types Supported by Column-Store Tables

+

Table 1 lists the data types supported by column-store tables.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Data types supported by column-store tables

Category

+

Data Type

+

Length

+

Supported

+

Numeric types

+

smallint

+

2

+

Yes

+

integer

+

4

+

Yes

+

bigint

+

8

+

Yes

+

decimal

+

Variable length

+

Yes

+

numeric

+

Variable length

+

Yes

+

real

+

4

+

Yes

+

double precision

+

8

+

Yes

+

smallserial

+

2

+

Yes

+

serial

+

4

+

Yes

+

bigserial

+

8

+

Yes

+

Monetary types

+

money

+

8

+

Yes

+

Character types

+

character varying(n), varchar(n)

+

Variable length

+

Yes

+

character(n), char(n)

+

n

+

Yes

+

character, char

+

1

+

Yes

+

text

+

Variable length

+

Yes

+

nvarchar2

+

Variable length

+

Yes

+

name

+

64

+

No

+

Date/time types

+

timestamp with time zone

+

8

+

Yes

+

timestamp without time zone

+

8

+

Yes

+

date

+

4

+

Yes

+

time without time zone

+

8

+

Yes

+

time with time zone

+

12

+

Yes

+

interval

+

16

+

Yes

+

Large objects

+

clob

+

Variable length

+

Yes

+

blob

+

Variable length

+

No

+

Others

+

...

+

...

+

No

+
+
+
+
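For reference, a minimal sketch of declaring a column-store table using types listed in the table above (the table name cstore_t1 is illustrative):
CREATE TABLE cstore_t1
(
    c1 INTEGER,
    c2 VARCHAR(10),
    c3 TIMESTAMP WITHOUT TIME ZONE
) WITH (ORIENTATION = COLUMN);

DROP TABLE cstore_t1;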
+ +
+ diff --git a/docs/dws/dev/dws_06_0025.html b/docs/dws/dev/dws_06_0025.html new file mode 100644 index 00000000..98cbd1dc --- /dev/null +++ b/docs/dws/dev/dws_06_0025.html @@ -0,0 +1,78 @@ + + +

XML

+

The XML data type stores Extensible Markup Language (XML) formatted data. Such data can also be stored as text, but the advantage of the XML data type is that it checks whether each stored value is a well-formed XML value. XML can store well-formed documents and content fragments defined by XML standards. A content fragment can have multiple top-level elements or character nodes.

+

For functions that support the XML data type, see XML Functions.

+

Configuring XML Parameters

The syntax is as follows:

+
1
+2
SET XML OPTION { DOCUMENT | CONTENT };
+SET xmloption TO { DOCUMENT | CONTENT };
+
+ +
+

When a string value is converted to or from the XML type without using the XMLPARSE or XMLSERIALIZE function, the XML OPTION session parameter determines whether the value is treated as DOCUMENT or CONTENT.

+

The default value is CONTENT, indicating that all types of XML data are allowed.

+

Example:

+
1
+2
+3
+4
SET XML OPTION DOCUMENT;
+SET
+SET xmloption TO DOCUMENT;
+SET
+
+ +
+
+

Configuring Binary Data Encoding Format

Syntax:

+
+
1
SET xmlbinary TO { base64 | hex };
+
+ +
+

Example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
SET xmlbinary TO base64;
+SET
+
+SELECT xmlelement(name foo, bytea 'bar');
+xmlelement
+-----------------
+<foo>YmFy</foo>
+(1 row)
+
+SET xmlbinary TO hex;
+SET
+
+SELECT xmlelement(name foo, bytea 'bar');
+xmlelement
+-------------------
+<foo>626172</foo>
+(1 row)
+
+ +
+

Accessing XML Value

The XML data type is special in that it provides no comparison operators, because there is no well-defined, general comparison algorithm for XML data. Consequently, you cannot retrieve data rows by comparing an XML column with a search value. An XML data entry is therefore typically stored with a separate key field, such as an ID, for retrieval. Alternatively, you can convert XML values into character strings and compare those, but this approach is not generally applicable to XML value comparison.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0026.html b/docs/dws/dev/dws_06_0026.html new file mode 100644 index 00000000..daa0efaf --- /dev/null +++ b/docs/dws/dev/dws_06_0026.html @@ -0,0 +1,167 @@ + + +

Constant and Macro

+

Table 1 lists the constants and macros that can be used in GaussDB(DWS).

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Constants and macros

Parameter

+

Description

+

Examples

+

CURRENT_CATALOG

+

Specifies the current database.

+
1
+2
+3
+4
+5
SELECT CURRENT_CATALOG;
+current_database
+------------------
+gaussdb
+(1 row)
+
+ +
+

CURRENT_ROLE

+

Current role

+
1
+2
+3
+4
SELECT CURRENT_ROLE;
+current_user
+--------------
+(1 row)
+
+ +
+

CURRENT_SCHEMA

+

Current schema

+
1
+2
+3
+4
+5
SELECT CURRENT_SCHEMA;
+current_schema
+----------------
+public
+(1 row)
+
+ +
+

CURRENT_USER

+

Current user

+
1
+2
+3
+4
SELECT CURRENT_USER;
+current_user
+--------------
+(1 row)
+
+ +
+

LOCALTIMESTAMP

+

Current session time (without time zone)

+
1
+2
+3
+4
+5
SELECT LOCALTIMESTAMP;
+         timestamp
+----------------------------
+2015-10-10 15:37:30.968538
+(1 row)
+
+ +
+

NULL

+

Indicates a null (unknown) value.

+

-

+

+

SESSION_USER

+

Current system user

+
1
+2
+3
+4
SELECT SESSION_USER;
+session_user
+--------------
+(1 row)
+
+ +
+

SYSDATE

+

Current system date

+
1
+2
+3
+4
+5
SELECT SYSDATE;
+sysdate
+---------------------
+2015-10-10 15:48:53
+(1 row)
+
+ +
+

USER

+

Current user, also called CURRENT_USER

+
1
+2
+3
+4
SELECT USER;
+current_user
+--------------
+(1 row)
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0027.html b/docs/dws/dev/dws_06_0027.html new file mode 100644 index 00000000..b3876486 --- /dev/null +++ b/docs/dws/dev/dws_06_0027.html @@ -0,0 +1,73 @@ + + +

Functions and Operators

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0028.html b/docs/dws/dev/dws_06_0028.html new file mode 100644 index 00000000..9ea45806 --- /dev/null +++ b/docs/dws/dev/dws_06_0028.html @@ -0,0 +1,96 @@ + + +

Logical Operators

+

The usual logical operators are AND, OR, and NOT. SQL uses a three-valued logic system with true, false, and null, where null represents "unknown". The operator precedence, from highest to lowest, is NOT > AND > OR.

+

Table 1 lists operation rules, where a and b represent logical expressions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Operation rules

a

+

b

+

a AND b Result

+

a OR b Result

+

NOT a Result

+

TRUE

+

TRUE

+

TRUE

+

TRUE

+

FALSE

+

TRUE

+

FALSE

+

FALSE

+

TRUE

+

FALSE

+

TRUE

+

NULL

+

NULL

+

TRUE

+

FALSE

+

FALSE

+

FALSE

+

FALSE

+

FALSE

+

TRUE

+

FALSE

+

NULL

+

FALSE

+

NULL

+

TRUE

+

NULL

+

NULL

+

NULL

+

NULL

+

NULL

+
+
+

The operators AND and OR are commutative, that is, you can swap the left and right operands without affecting the result. The example below shows the three-valued behavior with NULL operands.

+
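A minimal sketch of the NULL rows in the table above (null results display as empty):
SELECT (TRUE OR NULL) AS or_result, (TRUE AND NULL) AS and_result;
 or_result | and_result
-----------+------------
 t         |
(1 row)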
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0029.html b/docs/dws/dev/dws_06_0029.html new file mode 100644 index 00000000..7dcb5215 --- /dev/null +++ b/docs/dws/dev/dws_06_0029.html @@ -0,0 +1,54 @@ + + +

Comparison Operators

+

Comparison operators are available for all data types and return Boolean values.

+

All comparison operators are binary operators. Only data types that are the same or can be implicitly converted can be compared using comparison operators.

+

Table 1 describes comparison operators provided by GaussDB(DWS).

+ +
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 Comparison operators

Operators

+

Description

+

<

+

Less than

+

>

+

Greater than

+

<=

+

Less than or equal to

+

>=

+

Greater than or equal to

+

=

+

Equality

+

<> or !=

+

Inequality

+
+
+

Comparison operators are available for all relevant data types. All comparison operators are binary operators that return values of the Boolean type. Expressions like 1 < 2 < 3 are invalid, because there is no comparison operator to compare a Boolean value with 3. A simple example follows.

+
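A minimal sketch of comparison results:
SELECT 2 < 3 AS lt, 2 <> 3 AS ne, 2 >= 3 AS ge;
 lt | ne | ge
----+----+----
 t  | t  | f
(1 row)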
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0030.html b/docs/dws/dev/dws_06_0030.html new file mode 100644 index 00000000..bf898517 --- /dev/null +++ b/docs/dws/dev/dws_06_0030.html @@ -0,0 +1,1649 @@ + + +

Character Processing Functions and Operators

+

String functions and operators provided by GaussDB(DWS) are used for concatenating strings with each other, concatenating strings with non-string data, and matching string patterns.

+ + + +
  • For a string containing newline characters, for example, a string consisting of a newline character and a space, the values of length and lengthb in GaussDB(DWS) are both 2, as the example below shows.
  • In GaussDB(DWS), n of the CHAR(n) type indicates the number of characters. Therefore, for multiple-octet coded character sets, the length returned by the LENGTHB function may be longer than n.
+
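A minimal sketch of the newline note above, using the escape-string syntax E'\n ' for a newline followed by a space:
SELECT length(E'\n ') AS len, lengthb(E'\n ') AS lenb;
 len | lenb
-----+------
   2 |    2
(1 row)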
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0031.html b/docs/dws/dev/dws_06_0031.html new file mode 100644 index 00000000..bd367d48 --- /dev/null +++ b/docs/dws/dev/dws_06_0031.html @@ -0,0 +1,179 @@ + + +

Binary String Functions and Operators

+

String operators

SQL defines some string functions that use keywords, rather than commas, to separate arguments.

+ +
+

Other Binary String Functions

GaussDB(DWS) also provides the common syntax used for invoking functions.

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0032.html b/docs/dws/dev/dws_06_0032.html new file mode 100644 index 00000000..6401ae89 --- /dev/null +++ b/docs/dws/dev/dws_06_0032.html @@ -0,0 +1,164 @@ + + +

Bit String Functions and Operators

+

Bit string operators

Aside from the usual comparison operators, the following operators can be used. Bit string operands of &, |, and # must be of equal length. When bit shifting, the original length of the string is preserved by zero padding (if necessary).

+ +
+

The following SQL-standard functions work on bit strings as well as character strings: length, bit_length, octet_length, position, substring, and overlay.

+

The following functions work on bit strings as well as binary strings: get_bit and set_bit. When working with a bit string, these functions number the first (leftmost) bit of the string as bit 0.

+
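A minimal sketch of the bitwise operators on equal-length bit strings (shifts preserve the original length by zero padding):
SELECT B'10001' & B'01101' AS band,
       B'10001' | B'01101' AS bor,
       B'10001' # B'01101' AS bxor,
       B'10001' << 2 AS lshift;
 band  |  bor  | bxor  | lshift
-------+-------+-------+--------
 00001 | 11101 | 11100 | 00100
(1 row)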
In addition, it is possible to convert between integral values and type bit. For example:
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
SELECT 44::bit(10) AS RESULT;
+   result
+------------
+ 0000101100
+(1 row)
+
+SELECT 44::bit(3) AS RESULT;
+ result 
+--------
+ 100
+(1 row)
+
+SELECT cast(-44 as bit(12)) AS RESULT;
+    result    
+--------------
+ 111111010100
+(1 row)
+
+SELECT '1110'::bit(4)::integer AS RESULT;
+ result 
+--------
+     14
+(1 row)
+
+ +
+
+

Casting to just "bit" means casting to bit(1), and so will deliver only the least significant bit of the integer.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0033.html b/docs/dws/dev/dws_06_0033.html new file mode 100644 index 00000000..a7f21fdb --- /dev/null +++ b/docs/dws/dev/dws_06_0033.html @@ -0,0 +1,334 @@ + + +

Pattern Matching Operators

+

There are three separate approaches to pattern matching provided by the database: the traditional SQL LIKE operator, the more recent SIMILAR TO operator, and POSIX-style regular expressions. Besides these basic operators, functions can be used to extract or replace matching substrings and to split a string at matching locations.

+ +
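A minimal sketch contrasting the three approaches (all three predicates match here):
SELECT 'abc' LIKE 'a%' AS like_result,
       'abc' SIMILAR TO '(a|b)%' AS similar_result,
       'abc' ~ '^a' AS regexp_result;
 like_result | similar_result | regexp_result
-------------+----------------+---------------
 t           | t              | t
(1 row)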
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0034.html b/docs/dws/dev/dws_06_0034.html new file mode 100644 index 00000000..6b658bcd --- /dev/null +++ b/docs/dws/dev/dws_06_0034.html @@ -0,0 +1,844 @@ + + +

Mathematical Functions and Operators

+

Numeric operators

+
+

Numeric operation functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0035.html b/docs/dws/dev/dws_06_0035.html new file mode 100644 index 00000000..5bcaba80 --- /dev/null +++ b/docs/dws/dev/dws_06_0035.html @@ -0,0 +1,1463 @@ + + +

Date and Time Processing Functions and Operators

+

Date and Time Operators

When using date/time operators, add explicit type prefixes to the corresponding operands to ensure that the database parses the operands as you expect and that no unexpected results occur.

+

For example, the following statement may produce unexpected results because the second operand has no explicit data type.

+
1
SELECT date '2001-10-01' - '7' AS RESULT;
+
+ +
+
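With explicit type prefixes, the intent is unambiguous (a minimal sketch):
SELECT date '2001-10-01' - integer '7' AS RESULT;
       result
---------------------
 2001-09-24 00:00:00
(1 row)

SELECT date '2001-10-01' - interval '7 days' AS RESULT;
       result
---------------------
 2001-09-24 00:00:00
(1 row)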
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Time and date operators

Operators

+

Examples

+

+

+

Add an integer to a date to obtain the date 7 days later.

+
1
+2
+3
+4
+5
SELECT date '2001-09-28' + integer '7' AS RESULT;
+       result        
+---------------------
+ 2001-10-05 00:00:00
+(1 row)
+
+ +
+

Add an interval to a date to obtain the time 1 hour later.

+
1
+2
+3
+4
+5
SELECT date '2001-09-28' + interval '1 hour' AS RESULT;
+       result        
+---------------------
+ 2001-09-28 01:00:00
+(1 row)
+
+ +
+

Add a time to a date to obtain a specific timestamp.

+
1
+2
+3
+4
+5
SELECT date '2001-09-28' + time '03:00' AS RESULT;
+       result        
+---------------------
+ 2001-09-28 03:00:00
+(1 row)
+
+ +
+

Add an interval to a date to obtain the time one month later.

+

If the result of the addition or subtraction falls beyond the valid day range of the month, it is rounded to the last day of that month.

+
1
+2
+3
+4
+5
SELECT date '2021-01-31' + interval '1 month' AS RESULT;
+       result
+---------------------
+ 2021-02-28 00:00:00
+(1 row)
+
+ +
+
1
+2
+3
+4
+5
SELECT date '2021-02-28' + interval '1 month' AS RESULT;
+       result
+---------------------
+ 2021-03-28 00:00:00
+(1 row)
+
+ +
+

Add two intervals to obtain the sum.

+
1
+2
+3
+4
+5
SELECT interval '1 day' + interval '1 hour' AS RESULT;
+     result     
+----------------
+ 1 day 01:00:00
+(1 row)
+
+ +
+

Add an interval to a timestamp to obtain the time 23 hours later.

+
1
+2
+3
+4
+5
SELECT timestamp '2001-09-28 01:00' + interval '23 hours' AS RESULT;
+       result        
+---------------------
+ 2001-09-29 00:00:00
+(1 row)
+
+ +
+

Add an interval to a time to obtain the time three hours later.

+
1
+2
+3
+4
+5
SELECT time '01:00' + interval '3 hours' AS RESULT;
+  result  
+----------
+ 04:00:00
+(1 row)
+
+ +
+

-

+

Subtract a date from another to obtain the difference.

+
1
+2
+3
+4
+5
SELECT date '2001-10-01' - date '2001-09-28' AS RESULT;
+ result 
+--------
+ 3 days
+(1 row)
+
+ +
+

Subtract an integer from a date; the result is of the timestamp type.

+
1
+2
+3
+4
+5
SELECT date '2001-10-01' - integer '7' AS RESULT;
+       result        
+---------------------
+ 2001-09-24 00:00:00
+(1 row)
+
+ +
+

Subtract an interval from a date to obtain the resulting timestamp.

+
1
+2
+3
+4
+5
SELECT date '2001-09-28' - interval '1 hour' AS RESULT;
+       result        
+---------------------
+ 2001-09-27 23:00:00
+(1 row)
+
+ +
+

Subtract a time from another time to obtain the time difference.

+
1
+2
+3
+4
+5
SELECT time '05:00' - time '03:00' AS RESULT;
+  result  
+----------
+ 02:00:00
+(1 row)
+
+ +
+

Subtract an interval from a time to obtain the resulting time.

+
1
+2
+3
+4
+5
SELECT time '05:00' - interval '2 hours' AS RESULT;
+  result  
+----------
+ 03:00:00
+(1 row)
+
+ +
+

Subtract an interval from a timestamp to obtain the resulting timestamp.

+
1
+2
+3
+4
+5
SELECT timestamp '2001-09-28 23:00' - interval '23 hours' AS RESULT;
+       result        
+---------------------
+ 2001-09-28 00:00:00
+(1 row)
+
+ +
+

Subtract an interval from another interval to obtain the time difference.

+
1
+2
+3
+4
+5
SELECT interval '1 day' - interval '1 hour' AS RESULT;
+  result  
+----------
+ 23:00:00
+(1 row)
+
+ +
+

Subtract a timestamp from another timestamp to obtain the time difference.

+
1
+2
+3
+4
+5
SELECT timestamp '2001-09-29 03:00' - timestamp '2001-09-27 12:00' AS RESULT;
+     result     
+----------------
+ 1 day 15:00:00
+(1 row)
+
+ +
+

Obtain the time one day earlier.

+
1
+2
+3
+4
+5
select now() - interval '1 day'AS RESULT;
+           result
+-------------------------------
+ 2022-08-08 01:46:15.555406+00
+(1 row)
+
+ +
+

*

+

Multiply an interval by a quantity:

+
1
+2
+3
+4
+5
SELECT 900 * interval '1 second' AS RESULT;
+  result  
+----------
+ 00:15:00
+(1 row)
+
+ +
+
1
+2
+3
+4
+5
SELECT 21 * interval '1 day' AS RESULT;
+ result  
+---------
+ 21 days
+(1 row)
+
+ +
+
1
+2
+3
+4
+5
SELECT double precision '3.5' * interval '1 hour' AS RESULT;
+  result  
+----------
+ 03:30:00
+(1 row)
+
+ +
+

/

+

Divide an interval by a quantity to obtain a time segment.

+
1
+2
+3
+4
+5
SELECT interval '1 hour' / double precision '1.5' AS RESULT;
+  result  
+----------
+ 00:40:00
+(1 row)
+
+ +
+
+
+

Time/Date functions

+
+ +

EXTRACT

EXTRACT(field FROM source)

+

The extract function retrieves subfields such as year or hour from date/time values. source must be a value expression of type timestamp, time, or interval. (Expressions of type date are cast to timestamp and can therefore be used as well.) field is an identifier or string that selects which field to extract from the source value. The extract function returns values of type double precision. The following are valid field names:

+ +
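For example (a minimal sketch):
SELECT EXTRACT(hour FROM timestamp '2001-02-16 20:38:40') AS RESULT;
 result
--------
     20
(1 row)

SELECT EXTRACT(year FROM date '2001-02-16') AS RESULT;
 result
--------
   2001
(1 row)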
+

date_part

The date_part function is modeled on the traditional Ingres equivalent to the SQL-standard function extract:

+

date_part('field', source)

+

Note that the field must be a string, rather than a name. The valid field names are the same as those for extract. For details, see EXTRACT.

+

For example:

+
1
+2
+3
+4
+5
SELECT date_part('day', TIMESTAMP '2001-02-16 20:38:40');
+ date_part 
+-----------
+        16
+(1 row)
+
+ +
+
1
+2
+3
+4
+5
SELECT date_part('hour', INTERVAL '4 hours 3 minutes');
+ date_part 
+-----------
+         4
+(1 row)
+
+ +
+
+

date_format

date_format(timestamp, fmt)

+

Converts a date into a string in the format specified by fmt.

+

For example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
SELECT date_format('2009-10-04 22:23:00', '%M %D %W');
+    date_format
+--------------------
+ October 4th Sunday
+(1 row)
+SELECT date_format('2021-02-20 08:30:45', '%Y-%m-%d %H:%i:%S');
+     date_format
+---------------------
+ 2021-02-20 08:30:45
+(1 row)
+SELECT date_format('2021-02-20 18:10:15', '%r-%T');
+     date_format
+----------------------
+ 06:10:15 PM-18:10:15
+(1 row)
+
+ +
+

The following table describes the patterns of date parameter values. They can be used for the date_format, time_format, str_to_date, str_to_time, and from_unixtime functions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Output formats of date_format

Format

+

Description

+

Value

+

%a

+

Abbreviated week name

+

Sun...Sat

+

%b

+

Abbreviated month name

+

Jan...Dec

+

%c

+

Month

+

0...12

+

%D

+

Date with a suffix

+

0th, 1st, 2nd, 3rd, ...

+

%d

+

Day in a month (two digits)

+

00...31

+

%e

+

Day in a month

+

0...31

+

%f

+

Microsecond

+

000000...999999

+

%H

+

Hour, in 24-hour format

+

00...23

+

%h

+

Hour, in 12-hour format

+

01...12

+

%I

+

Hour, in 12-hour format, same as %h

+

01...12

+

%i

+

Minute

+

00...59

+

%j

+

Day in a year

+

001...366

+

%k

+

Hour, in 24-hour format, same as %H

+

0...23

+

%l

+

Hour, in 12-hour format, same as %h

+

1...12

+

%M

+

Month name

+

January...December

+

%m

+

Month (two digits)

+

00...12

+

%p

+

Morning and afternoon

+

AM PM

+

%r

+

Time, in 12-hour format

+

hh::mm::ss AM/PM

+

%S

+

Second

+

00...59

+

%s

+

Second, same as %S

+

00...59

+

%T

+

Time, in 24-hour format

+

hh::mm::ss

+

%U

+

Week (Sunday is the first day of a week)

+

00...53

+

%u

+

Week (Monday is the first day of a week)

+

00...53

+

%V

+

Week (Sunday is the first day of a week). It is used together with %X.

+

01...53

+

%v

+

Week (Monday is the first day of a week). It is used together with %x.

+

01...53

+

%W

+

Week name

+

Sunday...Saturday

+

%w

+

Day of a week. The value is 0 for Sunday.

+

0...6

+

%X

+

Year (four digits). It is used together with %V. Sunday is the first day of a week.

+

-

+

%x

+

Year (four digits). It is used together with %v. Monday is the first day of a week.

+

-

+

%Y

+

Year (four digits)

+

-

+

%y

+

Year (two digits)

+

-

+

%%

+

Character '%'

+

Character '%'

+

%x

+

'x': any character apart from the preceding ones

+

Character 'x'

+
+
+

In the preceding table, %U, %u, %V, %v, %X, and %x are not supported currently.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0036.html b/docs/dws/dev/dws_06_0036.html new file mode 100644 index 00000000..eff228f5 --- /dev/null +++ b/docs/dws/dev/dws_06_0036.html @@ -0,0 +1,846 @@ + + +

Type Conversion Functions

+

Type Conversion Functions

+

The following table describes the value formats of the to_number function.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Template patterns for numeric formatting

Schema

+

Description

+

9

+

Value with specified digits

+

0

+

Values with leading zeros

+

Period (.)

+

Decimal point

+

Comma (,)

+

Group (thousand) separator

+

PR

+

Negative values in angle brackets

+

S

+

Sign anchored to number (uses locale)

+

L

+

Currency symbol (uses locale)

+

D

+

Decimal point (uses locale)

+

G

+

Group separator (uses locale)

+

MI

+

Minus sign in the specified position (if the number is less than 0)

+

PL

+

Plus sign in the specified position (if the number is greater than 0)

+

SG

+

Plus or minus sign in the specified position

+

RN

+

Roman numerals (the input values are between 1 and 3999)

+

TH or th

+

Ordinal number suffix

+

V

+

Shifts specified number of digits (decimal)

+
+
+
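A minimal to_number sketch combining several of the patterns above (group separator, decimal point, and trailing sign):
SELECT to_number('12,454.8-', '99G999D9S') AS RESULT;
  result
----------
 -12454.8
(1 row)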

The following table describes the patterns of date and time values. They can be used for the to_date, to_timestamp, and to_char functions, and the nls_timestamp_format parameter.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Schemas for formatting date and time

Type

+

Schema

+

Description

+

Hour

+

HH

+

Number of hours in one day (01-12)

+

HH12

+

Number of hours in one day (01-12)

+

HH24

+

Number of hours in one day (00-23)

+

Minute

+

MI

+

Minute (00-59)

+

Second

+

SS

+

Second (00-59)

+

FF

+

Microsecond (000000-999999)

+

SSSSS

+

Second after midnight (0-86399)

+

Morning and afternoon

+

AM or A.M.

+

Morning identifier

+

PM or P.M.

+

Afternoon identifier

+

Year

+

+

+

Y,YYY

+

Year with comma (with four digits or more)

+

SYYYY

+

Year with four digits BC

+

YYYY

+

Year (with four digits or more)

+

YYY

+

Last three digits of a year

+

YY

+

Last two digits of a year

+

Y

+

Last one digit of a year

+

IYYY

+

ISO year (with four digits or more)

+

IYY

+

Last three digits of an ISO year

+

IY

+

Last two digits of an ISO year

+

I

+

Last one digit of an ISO year

+

RR

+

Last two digits of a year (A year of the 20th century can be stored in the 21st century.)

+

The returned year complies with the following rules:

+
  • If the range of the input two-digit year is between 00 and 49:

    If the last two digits of the current year are between 00 and 49, the first two digits of the returned year are the same as the first two digits of the current year.

    +

    If the last two digits of the current year are between 50 and 99, the first two digits of the returned year equal to the first two digits of the current year plus 1.

    +
  • If the range of the input two-digit year is between 50 and 99:

    If the last two digits of the current year are between 00 and 49, the first two digits of the returned year equal to the first two digits of the current year minus 1.

    +

    If the last two digits of the current year are between 50 and 99, the first two digits of the returned year are the same as the first two digits of the current year.

    +
+

RRRR

+

Accepts a year with four or two digits. If two digits are provided, the result is the same as for RR; if four digits are provided, the result is the same as for YYYY.

+
  • BC or B.C.
  • AD or A.D.
+

Era indicator Before Christ (BC) and After Christ (AD)

+

Month

+

MONTH

+

Full name of a month in uppercase (blank-padded to 9 characters)

+

MON

+

Month in abbreviated format in uppercase (with three characters)

+

MM

+

Month (01-12)

+

RM

+

Month in Roman numerals (I-XII; I=JAN) and uppercase

+

Day

+

DAY

+

Full name of a day in uppercase (blank-padded to 9 characters)

+

DY

+

Day in abbreviated format in uppercase (with three characters)

+

DDD

+

Day in a year (001-366)

+

DD

+

Day in a month (01-31)

+

D

+

Day in a week (1-7).

+

Week

+

W

+

Week in a month (1-5) (The first week starts from the first day of the month.)

+

WW

+

Week in a year (1-53) (The first week starts from the first day of the year.)

+

IW

+

Week in an ISO year (The first Thursday is in the first week.)

+

Century

+

CC

+

Century (with two digits) (The 21st century starts from 2001-01-01.)

+

Julian date

+

J

+

Julian date (starting from January 1 of 4712 BC)

+

Quarter

+

Q

+

Quarter

+
+
+
+
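A minimal sketch using the patterns above with to_char and to_date (the displayed result format may vary with session settings):
SELECT to_char(timestamp '2021-02-20 18:10:15', 'YYYY-MM-DD HH24:MI:SS') AS RESULT;
       result
---------------------
 2021-02-20 18:10:15
(1 row)

SELECT to_date('05 Dec 2000', 'DD Mon YYYY') AS RESULT;
       result
---------------------
 2000-12-05 00:00:00
(1 row)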
+ +
+ diff --git a/docs/dws/dev/dws_06_0037.html b/docs/dws/dev/dws_06_0037.html new file mode 100644 index 00000000..796d9f6a --- /dev/null +++ b/docs/dws/dev/dws_06_0037.html @@ -0,0 +1,919 @@ + + +

Geometric Functions and Operators

+

Geometric Operators

+
+

Geometric Functions

+
+

Geometric Type Conversion Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0038.html b/docs/dws/dev/dws_06_0038.html new file mode 100644 index 00000000..f47ebdc3 --- /dev/null +++ b/docs/dws/dev/dws_06_0038.html @@ -0,0 +1,440 @@ + + +

Network Address Functions and Operators

+

cidr and inet Operators

The operators <<, <<=, >>, and >>= test for subnet inclusion. They consider only the network parts of the two addresses (ignoring any host part) and determine whether one network is identical to or a subnet of the other.

+ +
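For example, a host address is contained in a network (a minimal sketch):
SELECT inet '192.168.1.5' << inet '192.168.1/24' AS RESULT;
 result
--------
 t
(1 row)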
+

cidr and inet Functions

The abbrev, host, and text functions are primarily intended to offer alternative display formats.

+ +
+

Any cidr value can be cast to inet implicitly or explicitly; therefore, the functions shown above as operating on inet also work on cidr values. An inet value can be cast to cidr. After the conversion, any bits to the right of the subnet mask are silently zeroed to create a valid cidr value. In addition, you can cast a text string to inet or cidr using normal casting syntax. For example, inet(expression) or colname::cidr.

+
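For example, casting inet to cidr zeroes the host bits (a minimal sketch):
SELECT inet '192.168.1.5/24'::cidr AS RESULT;
     result
----------------
 192.168.1.0/24
(1 row)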

macaddr Functions

The function trunc(macaddr) returns a MAC address with the last 3 bytes set to zero.

+

trunc(macaddr)

+

Description: Sets last 3 bytes to zero.

+

Return type: macaddr

+

For example:

+
1
+2
+3
+4
+5
SELECT trunc(macaddr '12:34:56:78:90:ab') AS RESULT;
+      result       
+-------------------
+ 12:34:56:00:00:00
+(1 row)
+
+ +
+
+

The macaddr type also supports the standard relational operators (such as > and <=) for lexicographical ordering, and the bitwise arithmetic operators (~, & and |) for NOT, AND and OR.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0039.html b/docs/dws/dev/dws_06_0039.html new file mode 100644 index 00000000..ab0ca9d0 --- /dev/null +++ b/docs/dws/dev/dws_06_0039.html @@ -0,0 +1,554 @@ + + +

Text Search Functions and Operators

+

Text Search Operators

+
+

In addition to the preceding operators, the ordinary B-tree comparison operators (including = and <) are defined for types tsvector and tsquery.

+

Text search functions

+
+

Text Search Debugging Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0040.html b/docs/dws/dev/dws_06_0040.html new file mode 100644 index 00000000..3d89f363 --- /dev/null +++ b/docs/dws/dev/dws_06_0040.html @@ -0,0 +1,46 @@ + + +

UUID Functions

+

UUID functions are used to generate UUID data (see UUID Type).

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0041.html b/docs/dws/dev/dws_06_0041.html new file mode 100644 index 00000000..44c2cb69 --- /dev/null +++ b/docs/dws/dev/dws_06_0041.html @@ -0,0 +1,42 @@ + + +

JSON Functions

+

JSON functions are used to generate JSON data (see JSON Types).

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0042.html b/docs/dws/dev/dws_06_0042.html new file mode 100644 index 00000000..34f5a134 --- /dev/null +++ b/docs/dws/dev/dws_06_0042.html @@ -0,0 +1,907 @@ + + +

HLL Functions and Operators

+

Hash Functions

+
+ +

If parameters with the same numeric value are hashed as different data types, the hash results will differ, because hash functions select different calculation policies for each type, as the example below shows.

+
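For example (a minimal sketch; actual hash values are omitted because they are implementation-specific):
-- Hashing the same numeric value as different types yields different results:
SELECT hll_hash_integer(1) AS h_int;
SELECT hll_hash_bigint(1) AS h_bigint;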
+ +

Precision Functions

HLL supports the explicit, sparse, and full modes. The explicit and sparse modes excel when the data scale is small and produce almost no errors in calculation results. As the number of distinct values increases, the full mode becomes more suitable, although it introduces some error. The following functions are used to view precision parameters in HLLs.

+
+ +

Aggregation Functions

+
+ +

Functional Functions

+
+ +

Built-in Functions

HLL has a series of built-in functions for internal data processing. Generally, users do not need to know how to use these functions. For details, see Table 1.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Built-in functions

Function

+

Description

+

hll_in

+

Receives hll data in string format.

+

hll_out

+

Sends hll data in string format.

+

hll_recv

+

Receives hll data in bytea format.

+

hll_send

+

Sends hll data in bytea format.

+

hll_trans_in

+

Receives hll_trans_type data in string format.

+

hll_trans_out

+

Sends hll_trans_type data in string format.

+

hll_trans_recv

+

Receives hll_trans_type data in bytea format.

+

hll_trans_send

+

Sends hll_trans_type data in bytea format.

+

hll_typmod_in

+

Receives typmod data.

+

hll_typmod_out

+

Sends typmod data.

+

hll_hashval_in

+

Receives hll_hashval data.

+

hll_hashval_out

+

Sends hll_hashval data.

+

hll_add_trans0

+

Works similarly to hll_add, and is used on DNs in the first phase of distributed aggregation operations.

+

hll_union_trans

+

Works similarly to hll_union, and is used on DNs in the first phase of distributed aggregation operations.

+

hll_union_collect

+

Works similarly to hll_union, and is used on CNs in the second phase of distributed aggregation operations to summarize the results of each DN.

+

hll_pack

+

Is used on the third phase of CNs in distributed aggregation operations to convert a user-defined type hll_trans_type to the hll type.

+

hll

+

Converts a hll type to another hll type. Input parameters can be specified.

+

hll_hashval

+

Converts the bigint type to the hll_hashval type.

+

hll_hashval_int4

+

Converts the int4 type to the hll_hashval type.

+
+
+

Operators

+
+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0043.html b/docs/dws/dev/dws_06_0043.html new file mode 100644 index 00000000..bd9c9f50 --- /dev/null +++ b/docs/dws/dev/dws_06_0043.html @@ -0,0 +1,123 @@ + + +

SEQUENCE Functions

+

Sequence functions provide a simple, multiuser-safe method for obtaining sequence values from sequence objects.

+
  • The hybrid data warehouse (standalone) does not support SEQUENCE and related functions.
+
+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0044.html b/docs/dws/dev/dws_06_0044.html new file mode 100644 index 00000000..378fe924 --- /dev/null +++ b/docs/dws/dev/dws_06_0044.html @@ -0,0 +1,387 @@ + + +

Array Functions and Operators

+

Array Operators

+
+

Array comparisons compare the array contents element-by-element, using the default B-tree comparison function for the element data type. In multidimensional arrays, the elements are accessed in row-major order. If the contents of two arrays are equal but the dimensionality is different, the first difference in the dimensionality information determines the sort order.

+

Array Functions

+
+

In string_to_array, if the delimiter parameter is NULL, each character in the input string will become a separate element in the resulting array. If the delimiter is an empty string, then the entire input string is returned as a one-element array. Otherwise the input string is split at each occurrence of the delimiter string.

+

In string_to_array, if the null-string parameter is omitted or NULL, none of the substrings of the input will be replaced by NULL.

+

In array_to_string, if the null-string parameter is omitted or NULL, any null elements in the array are simply skipped and not represented in the output string.

+
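A minimal sketch of the two functions, using the optional null-string argument of array_to_string to render a null element as '*':
SELECT string_to_array('a,b,c', ',') AS arr,
       array_to_string(ARRAY[1, 2, NULL, 3], ',', '*') AS str;
   arr   |   str
---------+---------
 {a,b,c} | 1,2,*,3
(1 row)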
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0045.html b/docs/dws/dev/dws_06_0045.html new file mode 100644 index 00000000..5fecd956 --- /dev/null +++ b/docs/dws/dev/dws_06_0045.html @@ -0,0 +1,389 @@ + + +

Range Functions and Operators

+

Range Operators

+
+

The simple comparison operators <, >, <=, and >= compare the lower bounds first, and only if those are equal, compare the upper bounds.

+

The <<, >>, and -|- operators always return false when an empty range is involved; that is, an empty range is not considered to be either before or after any other range.

+

The union and difference operators will fail if the resulting range would need to contain two disjoint sub-ranges.

+

Range Functions

+
+

The lower and upper functions return null if the range is empty or the requested bound is infinite. The lower_inc, upper_inc, lower_inf, and upper_inf functions all return false for an empty range.

+
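A minimal sketch using int4range (assumed available here, as in PostgreSQL): an overlap test, the bound accessors, and an empty range:
SELECT int4range(1, 10) && int4range(5, 15) AS overlaps,
       lower(int4range(1, 10)) AS lo,
       upper(int4range(1, 10)) AS hi,
       isempty(int4range(1, 1)) AS empty;
 overlaps | lo | hi | empty
----------+----+----+-------
 t        |  1 | 10 | t
(1 row)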
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0046.html b/docs/dws/dev/dws_06_0046.html new file mode 100644 index 00000000..e377790a --- /dev/null +++ b/docs/dws/dev/dws_06_0046.html @@ -0,0 +1,933 @@ + + +

Aggregate Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0047.html b/docs/dws/dev/dws_06_0047.html new file mode 100644 index 00000000..9c713e7d --- /dev/null +++ b/docs/dws/dev/dws_06_0047.html @@ -0,0 +1,309 @@ + + +

Window Functions

+

Unlike regular aggregate functions, which group multiple rows into a single output row, window functions perform a calculation across a set of rows related to the current row and return a value for each row.

+ +

Syntax of a Window Function

1
function_name ([expression [, expression ... ]]) OVER ( window_definition )
function_name ([expression [, expression ... ]]) OVER window_name
function_name ( * ) OVER ( window_definition )
function_name ( * ) OVER window_name
+
+ +
+

window_definition is defined as follows:

+
1
[ existing_window_name ]
[ PARTITION BY expression [, ...] ]
[ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]
[ frame_clause ]
+
+ +
+

frame_clause is defined as follows:

+
1
[ RANGE | ROWS ] frame_start
[ RANGE | ROWS ] BETWEEN frame_start AND frame_end
+
+ +
+

You can use RANGE and ROWS to specify the window frame. ROWS specifies the window in physical units (rows). RANGE specifies the window as a logical offset.

+

In RANGE and ROWS, you can use BETWEEN frame_start AND frame_end to specify the window's first and last rows. If frame_end is left blank, it defaults to CURRENT ROW.

+

The value options of BETWEEN frame_start AND frame_end are as follows:

+ +

frame_start cannot be UNBOUNDED FOLLOWING, frame_end cannot be UNBOUNDED PRECEDING, and frame_end cannot be earlier than frame_start. For example, RANGE BETWEEN CURRENT ROW AND value PRECEDING is not allowed.

+
+

Window Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0048.html b/docs/dws/dev/dws_06_0048.html new file mode 100644 index 00000000..f5b9612d --- /dev/null +++ b/docs/dws/dev/dws_06_0048.html @@ -0,0 +1,584 @@ + + +

Security Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0049.html b/docs/dws/dev/dws_06_0049.html new file mode 100644 index 00000000..86c5c204 --- /dev/null +++ b/docs/dws/dev/dws_06_0049.html @@ -0,0 +1,178 @@ + + +

Set Returning Functions

+

Series Generating Functions

+
+

When step is positive, zero rows are returned if start is greater than stop. Conversely, when step is negative, zero rows are returned if start is less than stop. Zero rows are also returned for NULL inputs. It is an error for step to be zero.

+

For example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
SELECT * FROM generate_series(2,4);
+ generate_series
+-----------------
+               2
+               3
+               4
+(3 rows)
+
+SELECT * FROM generate_series(5,1,-2);
+ generate_series
+-----------------
+               5
+               3
+               1
+(3 rows)
+
+SELECT * FROM generate_series(4,3);
+ generate_series
+-----------------
+(0 rows)
+
+-- this example relies on the date-plus-integer operator
+SELECT current_date + s.a AS dates FROM generate_series(0,14,7) AS s(a);
+   dates
+------------
+ 2017-06-02
+ 2017-06-09
+ 2017-06-16
+(3 rows)
+
+SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, '2008-03-04 12:00', '10 hours');
+   generate_series   
+---------------------
+ 2008-03-01 00:00:00
+ 2008-03-01 10:00:00
+ 2008-03-01 20:00:00
+ 2008-03-02 06:00:00
+ 2008-03-02 16:00:00
+ 2008-03-03 02:00:00
+ 2008-03-03 12:00:00
+ 2008-03-03 22:00:00
+ 2008-03-04 08:00:00
+(9 rows)
+
+ +
+

Subscript Generating Functions

+
+

generate_subscripts is a function that generates the set of valid subscripts for the specified dimension of the given array. Zero rows are returned for arrays that do not have the requested dimension, or for NULL arrays (but valid subscripts are returned for NULL array elements). For example:

+
1
+2
+3
+4
+5
+6
+7
+8
+9
-- basic usage
+SELECT generate_subscripts('{NULL,1,NULL,2}'::int[], 1) AS s;
+ s 
+---
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+ +
+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
-- unnest a 2D array
+CREATE OR REPLACE FUNCTION unnest2(anyarray)
+RETURNS SETOF anyelement AS $$
+SELECT $1[i][j]
+   FROM generate_subscripts($1,1) g1(i),
+        generate_subscripts($1,2) g2(j);
+$$ LANGUAGE sql IMMUTABLE;
+
+SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]);
+ unnest2 
+---------
+       1
+       2
+       3
+       4
+(4 rows)
+
+-- Delete the function:
+DROP FUNCTION unnest2;
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0050.html b/docs/dws/dev/dws_06_0050.html new file mode 100644 index 00000000..19b0fbc1 --- /dev/null +++ b/docs/dws/dev/dws_06_0050.html @@ -0,0 +1,241 @@ + + +

Conditional Expression Functions

+

Conditional Expression Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0051.html b/docs/dws/dev/dws_06_0051.html new file mode 100644 index 00000000..87b53b1c --- /dev/null +++ b/docs/dws/dev/dws_06_0051.html @@ -0,0 +1,802 @@ + + +

System Information Functions

+

Session Information Functions

+
+

Access Privilege Inquiry Functions

+
+

Schema Visibility Inquiry Functions

Each function performs the visibility check for one type of database object. For functions and operators, an object in the search path is visible if there is no object of the same name and argument data type(s) earlier in the path. For operator classes, both name and associated index access method are considered.

+

All these functions require OIDs to identify the objects to be checked. If you want to test an object by name, it is convenient to use the OID alias types (regclass, regtype, regprocedure, regoperator, regconfig, or regdictionary).
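
For example, a minimal sketch (mytab is a hypothetical table assumed to be in the current search path):

-- test visibility by name using an OID alias type
SELECT pg_table_is_visible('mytab'::regclass);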

+

For example, a table is said to be visible if its containing schema is in the search path and no table of the same name appears earlier in the search path. This is equivalent to the statement that the table can be referenced by name without explicit schema qualification. For example, to list the names of all visible tables:

+
1
SELECT relname FROM pg_class WHERE pg_table_is_visible(oid);
+
+ +
+ +
+

System Catalog Information Functions

+
+

Comment Information Functions

+
+

Transaction IDs and Snapshots

The following functions provide server transaction information in an exportable form. The main use of these functions is to determine which transactions were committed between two snapshots.

+ +

+

The internal transaction ID type (xid) is 32 bits wide and wraps around every 4 billion transactions. txid_snapshot, the data type used by these functions, stores information about transaction ID visibility at a particular moment in time. Table 1 describes its components.

+ +
+ + + + + + + + + + + + + +
Table 1 Snapshot components

Name

+

Description

+

xmin

+

Earliest transaction ID (txid) that is still active. All earlier transactions will either be committed and visible, or rolled back.

+

xmax

+

First as-yet-unassigned txid. All txids greater than or equal to this are not yet started as of the time of the snapshot, so they are invisible.

+

xip_list

+

Active txids at the time of the snapshot. The list includes only active txids between xmin and xmax; there might be active txids higher than xmax. A txid that satisfies xmin <= txid < xmax and does not appear in this list had already completed at the time of the snapshot, and is therefore either visible or dead according to its commit status. The list does not include the txids of subtransactions.

+
+
+

txid_snapshot's textual representation is xmin:xmax:xip_list.

+

For example: 10:20:10,14,15 means xmin=10, xmax=20, xip_list=10, 14, 15.
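
For example, a minimal sketch (assuming the standard txid functions are available):

SELECT txid_current();           -- ID of the current transaction
SELECT txid_current_snapshot();  -- e.g. 10:20:10,14,15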

+
+

Computing Node Group Function

pv_compute_pool_workload()

+

Description: Queries the load status of a computing Node Group.

+

Return type: void

+

For example:

+
1
+2
+3
+4
+5
+6
SELECT * from pv_compute_pool_workload();
+ nodename  | rpinuse | maxrp | nodestate
+-----------+---------+-------+-----------
+ datanode1 |       0 |  1000 | normal
+ datanode2 |       0 |  1000 | normal
+(2 rows)
+
+ +
+
+

Lock Information Function

pgxc_get_lock_conflicts()

+

Description: Obtains information about conflicting locks in the cluster. A lock conflict occurs when one lock is waiting for another lock or is being waited for by another lock.

+

Return type: setof record
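
For example, a minimal invocation (the rows returned depend on the lock waits currently present in the cluster):

SELECT * FROM pgxc_get_lock_conflicts();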

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0052.html b/docs/dws/dev/dws_06_0052.html new file mode 100644 index 00000000..25de7144 --- /dev/null +++ b/docs/dws/dev/dws_06_0052.html @@ -0,0 +1,35 @@ + + +

System Administration Functions

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0053.html b/docs/dws/dev/dws_06_0053.html new file mode 100644 index 00000000..923ebf27 --- /dev/null +++ b/docs/dws/dev/dws_06_0053.html @@ -0,0 +1,46 @@ + + +

Configuration Settings Functions

+

Configuration setting functions are used for querying and modifying configuration parameters during running.

+ +
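
For example, a minimal sketch using the standard access functions (the parameter names shown are ordinary server parameters):

SELECT current_setting('search_path');                   -- read a parameter
SELECT set_config('log_min_messages', 'notice', false);  -- change it for the current session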
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0054.html b/docs/dws/dev/dws_06_0054.html new file mode 100644 index 00000000..6fa283de --- /dev/null +++ b/docs/dws/dev/dws_06_0054.html @@ -0,0 +1,170 @@ + + +

Universal File Access Functions

+

Universal file access functions provide local access interfaces for files on a database server. Only files in the database cluster directory and the log_directory directory can be accessed. Use a relative path for files in the cluster directory, and a path matching the log_directory configuration setting for log files. Only database system administrators can use these functions.

+ +
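
For example, a minimal sketch (assuming the standard file access functions are available):

SELECT pg_ls_dir('.');                           -- list files in the cluster data directory
SELECT pg_read_file('postgresql.conf', 0, 200);  -- read the first 200 bytes of a file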
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0055.html b/docs/dws/dev/dws_06_0055.html new file mode 100644 index 00000000..5844270d --- /dev/null +++ b/docs/dws/dev/dws_06_0055.html @@ -0,0 +1,65 @@ + + +

Server Signaling Functions

+

Server signaling functions send control signals to other server processes. Only system administrators can use these functions.

+ +
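
For example, a minimal sketch (assuming the standard signaling functions; the process ID 12345 is hypothetical):

SELECT pg_cancel_backend(12345);     -- cancel the current query of the given backend
SELECT pg_terminate_backend(12345);  -- terminate the given backend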
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0056.html b/docs/dws/dev/dws_06_0056.html new file mode 100644 index 00000000..3e8a829c --- /dev/null +++ b/docs/dws/dev/dws_06_0056.html @@ -0,0 +1,318 @@ + + +

Backup and Restoration Control Functions

+

Backup Control Functions

Backup control functions help online backup.

+ +
+ +

Restoration Control Functions

Restoration control functions provide information about the status of standby nodes. These functions may be executed both during restoration and in normal running.

+ +

Restoration control functions control restoration processes. These functions may be executed only during restoration.

+ +

While restoration is paused, no further database changes are applied. In hot standby mode, all new queries will see the same consistent snapshot of the database, and no further query conflicts will be generated until restoration is resumed.

+

If streaming replication is disabled, the paused state may continue indefinitely without problems. While streaming replication is in progress, WAL records will continue to be received and will eventually fill the available disk space; how quickly this happens depends on the duration of the pause, the rate of WAL generation, and the amount of available disk space.

+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0057.html b/docs/dws/dev/dws_06_0057.html new file mode 100644 index 00000000..4b53a5c5 --- /dev/null +++ b/docs/dws/dev/dws_06_0057.html @@ -0,0 +1,15 @@ + + +

Snapshot Synchronization Functions

+

Snapshot synchronization functions save the current snapshot and return its identifier.

+

pg_export_snapshot()

+

Description: Saves the current snapshot and returns its identifier.

+

Return type: text

+

Note: pg_export_snapshot saves the current snapshot and returns a text string identifying it. This string must be passed to clients that want to import the snapshot. A snapshot is imported by executing SET TRANSACTION SNAPSHOT snapshot_id;, which is possible only when the transaction is at the REPEATABLE READ isolation level. Note that the output of this function cannot be used directly as the input of SET TRANSACTION SNAPSHOT.
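
For example, a minimal sketch (the format of the returned identifier is implementation-specific):

SELECT pg_export_snapshot();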

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0058.html b/docs/dws/dev/dws_06_0058.html new file mode 100644 index 00000000..033b01b5 --- /dev/null +++ b/docs/dws/dev/dws_06_0058.html @@ -0,0 +1,107 @@ + + +

Database Object Functions

+

Database Object Size Functions

Database object size functions calculate the actual disk space used by database objects.

+ +
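
For example, a minimal sketch (assuming the standard size functions are available; mytab is a hypothetical table):

SELECT pg_size_pretty(pg_total_relation_size('mytab'));  -- table size including indexes
SELECT pg_database_size(current_database());             -- size of the current database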
+

Database Object Position Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0059.html b/docs/dws/dev/dws_06_0059.html new file mode 100644 index 00000000..85bc3314 --- /dev/null +++ b/docs/dws/dev/dws_06_0059.html @@ -0,0 +1,65 @@ + + +

Advisory Lock Functions

+

Advisory lock functions manage advisory locks. These functions are only for internal use currently.

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0060.html b/docs/dws/dev/dws_06_0060.html new file mode 100644 index 00000000..d131600d --- /dev/null +++ b/docs/dws/dev/dws_06_0060.html @@ -0,0 +1,582 @@ + + +

Residual File Management Functions

+

Functions for Obtaining the Residual File List

+ +

+
+

Functions for Verifying Residual Files

+ + + +
+

Functions for Deleting Residual Files

+ + +
+

Using the Residual File Management Function:

Procedure:

+
  1. Call the pgxc_get_residualfiles() function to obtain the name of the database that has residual files.
  2. Go to the databases where residual files exist and call the pgxc_verify_residualfiles() function to verify the residual files recorded in the current database.
  3. Call the pgxc_rm_residualfiles() function to delete all the verified residual files.
+

The pgxc residual file management function only operates on the CN and the current primary DN, and does not verify or clear residual files on the standby DN. Therefore, after the primary DN is cleared, you need to clear residual files on the standby DN or build the standby DN in a timely manner. This prevents residual files on the standby DN from being copied back to the primary DN due to incremental build after a primary/standby switchover.

+
+

Example:

+

The following example uses two user-created databases, db1 and db2.

+

+
+
  1. Run the following command to obtain all residual file records of the cluster on the CNs:
    1
    db1=# select * from pgxc_get_residualfiles() order by 4, 6; -- order by is optional.
    +
    + +
    +

    +

    In the current cluster:

    +
    • Residual file records exist in the db1 and db2 databases on the dn_6001_6002 node (active node instance).
    • Residual files are displayed in the residualfile column.
    • The filepath column lists the files that record residual files. These files are stored in the pg_residualfiles directory under the instance data directory.
    +
  2. Call the pgxc_verify_residualfiles() function to verify the db1 database.
    1
    db1=# select * from pgxc_verify_residualfiles();
    +
    + +
    +

    +

    Verification functions are at the database level. Therefore, when a verification function is called in the db1 database, it only verifies residual files in db1.

    +

    You can call the get function again to check whether the verification is complete.

    +
    1
    db1=# select * from pgxc_get_residualfiles() order by 4, 6;
    +
    + +
    +

    +

    As shown in the preceding figure, the residual files in the db1 database have been verified, and the residual files in the db2 database are not verified.

    +
  3. Call the pgxc_rm_residualfiles() function to delete residual files.
    1
    db1=# select * from pgxc_rm_residualfiles();
    +
    + +
    +

    +
  4. Call the pgxc_get_residualfiles() function again to check the deletion result.

    +

    The result shows that the residual files in the db1 database are deleted (isdeleted is marked as t) and the residual files in the db2 database are not deleted.

    +

    In addition, nine query results are displayed. Compared with the previous query results, a record for the residual file ending with 9438 is missing. This is because the record file that records the residual file ending with 9438 contains only one record, which is deleted in step 3. If all residual files in a record file are deleted, the record file is also deleted. Deleted files are backed up in the pg_residualfiles/backup directory.

    +

    +
  5. To delete files from the db2 database, you need to call the verify function in the db2 database and then call the rm function.
    1. Go to the db2 database and call the verification function.

      +

      Query the verification result:

      +

      +
    2. Call the deletion function:

      +
    3. Query the deletion result:

      +

      All residual files recorded in the record file whose name ends with 8342 have been deleted, so the record file is deleted and backed up in the backup directory. As a result, no records are found.

      +

      +
    +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0061.html b/docs/dws/dev/dws_06_0061.html new file mode 100644 index 00000000..b00964d3 --- /dev/null +++ b/docs/dws/dev/dws_06_0061.html @@ -0,0 +1,694 @@ + + +

Replication Functions

+

Replication functions synchronize logs and data between instances. They are statistical and operational methods provided by the system to implement high availability (HA).

+

Except for statistics queries, replication functions are internal functions. You are not advised to use them directly.

+
+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0062.html b/docs/dws/dev/dws_06_0062.html new file mode 100644 index 00000000..963bc86e --- /dev/null +++ b/docs/dws/dev/dws_06_0062.html @@ -0,0 +1,294 @@ + + +

Other Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0063.html b/docs/dws/dev/dws_06_0063.html new file mode 100644 index 00000000..470b3498 --- /dev/null +++ b/docs/dws/dev/dws_06_0063.html @@ -0,0 +1,821 @@ + + +

Resource Management Functions

+

This section describes the functions of the resource management module.

+ + + + + +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0064.html b/docs/dws/dev/dws_06_0064.html new file mode 100644 index 00000000..74fe2eef --- /dev/null +++ b/docs/dws/dev/dws_06_0064.html @@ -0,0 +1,38 @@ + + +

Data Redaction Functions

+

Data redaction functions are used to mask and protect sensitive data. Generally, you are advised to bind these functions to the columns to be redacted by using the data redaction syntax, rather than using them directly in query statements.

+ +

Redaction functions are recommended if you want to create redaction policies.

+

For details about how to use data redaction functions, see the examples in "Database Security Management > Managing Users and Their Permissions > Data Redaction" in the Developer Guide.

+
+

User-Defined Redaction Functions

You can use the PL/pgSQL language to customize redaction functions.

+
User-defined redaction functions must meet the following requirements:
  • The return type must be the same as the data type of the redacted column.
  • The functions can be pushed down.
  • In addition to the redaction format, only one column can be specified in the argument list for data redaction.
  • The functions only implement the formatting for specific data types and do not involve complex association operations with other table objects.
+
+

If either of the first two requirements is not met, an error will be reported when you create a redaction policy. If either of the last two requirements is not met, unexpected problems may occur in query execution results.
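
For example, a minimal sketch of such a function (the name mask_tail4 and the masking rule are hypothetical) that satisfies the above requirements for a text column:

CREATE OR REPLACE FUNCTION mask_tail4(col text)
RETURNS text  -- same type as the redacted column
AS $$
BEGIN
    -- replace all but the last four characters with 'x'
    RETURN repeat('x', greatest(length(col) - 4, 0)) || right(col, 4);
END;
$$ LANGUAGE plpgsql IMMUTABLE;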

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0065.html b/docs/dws/dev/dws_06_0065.html new file mode 100644 index 00000000..1d3ca2ae --- /dev/null +++ b/docs/dws/dev/dws_06_0065.html @@ -0,0 +1,519 @@ + + +

Statistics Information Functions

+

Statistics information functions are divided into two categories: functions that access databases, which use the OID of a table or index to identify the object for which statistics are reported; and functions that access servers, which are identified by a server process ID whose value ranges from 1 to the number of currently active servers.

+ + + + + +
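
For example, a minimal sketch (assuming the standard per-table statistics access functions are available; mytab is a hypothetical table):

SELECT pg_stat_get_numscans('mytab'::regclass);  -- number of sequential scans of mytab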
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0066.html b/docs/dws/dev/dws_06_0066.html new file mode 100644 index 00000000..4841a3ef --- /dev/null +++ b/docs/dws/dev/dws_06_0066.html @@ -0,0 +1,57 @@ + + +

Trigger Functions

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0067.html b/docs/dws/dev/dws_06_0067.html new file mode 100644 index 00000000..ce3a241c --- /dev/null +++ b/docs/dws/dev/dws_06_0067.html @@ -0,0 +1,388 @@ + + +

XML Functions

+

Generating XML Content

+

Description: Generates an XML value from character data.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlparse(document '<foo>bar</foo>');
+xmlparse
+----------------
+<foo>bar</foo>
+(1 row)
+
+ +
+
+ +

Description: Generates a string from XML values.

+

Return type: type, which can be character, character varying, or text (or its alias)

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlserialize(content 'good' AS CHAR(10));
+xmlserialize
+--------------
+good
+(1 row)
+
+ +
+ +

Description: Creates an XML comment that uses the specified text as the content. The text cannot contain two consecutive hyphens (--) or end with a hyphen (-). If the parameter is null, the result is also null.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlcomment('hello');
+xmlcomment
+--------------
+<!--hello-->
+(1 row)
+
+ +
+ +

Description: Concatenates a list of XML values into a single value. Null values are ignored. If all parameters are null, the result is also null.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlconcat('<abc/>', '<bar>foo</bar>');
+xmlconcat
+----------------------
+<abc/><bar>foo</bar>
+(1 row)
+
+ +
+

Note: If the input XML declarations specify the same XML version, the result uses that version; otherwise, the result carries no version. If all input XML values have the standalone attribute set to yes, the result's standalone attribute is yes. If at least one input's standalone attribute is no, the result's standalone attribute is no. Otherwise, the result contains no standalone attribute.

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlconcat('<?xml version="1.1"?><foo/>', '<?xml version="1.1" standalone="no"?><bar/>');
+xmlconcat
+-----------------------------------
+<?xml version="1.1"?><foo/><bar/>
+(1 row)
+
+ +
+ +

Description: Generates an XML element with the given name, attribute, and content.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlelement(name foo, xmlattributes(current_date as bar), 'cont', 'ent');
+xmlelement
+-------------------------------------
+<foo bar="2020-08-15">content</foo>
+(1 row)
+
+ +
+ +

Description: Generates an XML forest (sequence) of an element with a given name and content.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlforest('abc' AS foo, 123 AS bar);
+xmlforest
+------------------------------
+<foo>abc</foo><bar>123</bar>
+(1 row)
+
+ +
+ +

Description: Creates an XML processing instruction. The content cannot contain the character sequence of ?>.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlpi(name php, 'echo "hello world";');
+xmlpi
+-----------------------------
+<?php echo "hello world";?>
+(1 row)
+
+ +
+ +

Description: Modifies the attributes of the root node of an XML value. If a version is specified, it replaces the value in the version declaration of the root node. If a standalone value is specified, it replaces the standalone value in the root node.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlroot(xmlparse(document '<?xml version="1.0" standalone="no"?><content>abc</content>'), version '1.1', standalone yes);
+xmlroot
+--------------------------------------------------------------
+<?xml version="1.1" standalone="yes"?><content>abc</content>
+(1 row)
+
+ +
+ +

Description: The xmlagg function is an aggregate function that concatenates input values.

+

Return type: XML

+

Example:

+
1
+2
+3
+4
+5
+6
+7
+8
CREATE TABLE test (y int, x xml);
+INSERT INTO test VALUES (1, '<foo>abc</foo>');
+INSERT INTO test VALUES (2, '<bar/>');
+SELECT xmlagg(x) FROM test;
+xmlagg
+----------------------
+<foo>abc</foo><bar/>
+(1 row)
+
+ +
+

To determine the concatenation sequence, you can add an ORDER BY clause for an aggregate call, for example:

+
1
+2
+3
+4
+5
SELECT xmlagg(x ORDER BY y DESC) FROM test;
+xmlagg
+----------------------
+<bar/><foo>abc</foo>
+(1 row)
+
+ +
+

XML Predicates

+

Description: IS DOCUMENT returns true if the XML value of the parameter is a well-formed XML document, false if it is not, and null if the parameter is null.

+

Return type: bool

+
+ +

Description: IS NOT DOCUMENT returns true if the XML value of the parameter is not a well-formed XML document, false if it is, and null if the parameter is null.

+

Return type: bool

+ +

Description: If the xpath expression in the first parameter returns any node, the XMLEXISTS function returns true. Otherwise, the function returns false. (If any parameter is null, the result is null.) The BY REF clause has no effect; it is accepted only for SQL compatibility.

+

Return type: bool

+

Example:

+
1
+2
+3
+4
+5
SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF '<towns><town>Toronto</town><town>Ottawa</town></towns>');
+xmlexists
+-----------
+t
+(1 row)
+
+ +
+ +

Description: Checks whether a text string is well-formed XML and returns a Boolean result. If the xmloption parameter is set to DOCUMENT, it checks for a well-formed document; if set to CONTENT, it checks for well-formed content.

+

Return type: bool

+

Example:

+
1
+2
+3
+4
+5
SELECT xml_is_well_formed('<abc/>');
+xml_is_well_formed
+--------------------
+t
+(1 row)
+
+ +
+ +

Description: Checks whether a text string is a well-formed XML document and returns a Boolean result.

+

Return type: bool

+

Example:

+
1
+2
+3
+4
+5
SELECT xml_is_well_formed_document('<test:foo xmlns:test="http://test.com/test">bar</test:foo>');
+xml_is_well_formed_document
+-----------------------------
+t
+(1 row)
+
+ +
+ +

Description: Checks whether a text string is well-formed XML content and returns a Boolean result.

+

Return type: bool

+

Example:

+
1
+2
+3
+4
+5
SELECT xml_is_well_formed_content('content');
+xml_is_well_formed_content
+----------------------------
+t
+(1 row)
+
+ +
+

Processing XML

+

Description: Returns an array of XML values corresponding to the set of nodes produced by the xpath expression. If the xpath expression returns a scalar value instead of a set of nodes, an array of individual elements is returned. The second parameter xml must be a complete XML document, which must have a root node element. The third parameter is an array map of a namespace. The array should be a two-dimensional text array, and the length of the second dimension should be 2. (It should be an array of arrays, each containing exactly two elements). The first element of each array item is the alias of the namespace name, and the second element is the namespace URI. The alias provided in this array does not have to be the same as the alias used in the XML document itself. In other words, in the context of both XML documents and xpath functions, aliases are local.

+

Return type: XML value array

+

Example:

+
1
+2
+3
+4
+5
SELECT xpath('/my:a/text()', '<my:a xmlns:my="http://example.com">test</my:a>', ARRAY[ARRAY['my', 'http://example.com']]);
+xpath
+--------
+{test}
+(1 row)
+
+ +
+
+ +

Description: The xpath_exists function is a special form of the xpath function. This function does not return an XML value that satisfies the xpath function; it returns a Boolean value indicating whether the query is satisfied. This function is equivalent to the standard XMLEXISTS predicate, but it also provides support for a namespace mapping parameter.

+

Return type: bool

+

Example:

+
1
+2
+3
+4
+5
SELECT xpath_exists('/my:a/text()', '<my:a xmlns:my="http://example.com">test</my:a>', ARRAY[ARRAY['my', 'http://example.com']]);
+xpath_exists
+--------------
+t
+(1 row)
+
+ +
+ +

Description: Generates a table from the input XML data, an XPath expression, and a set of column definitions. XMLTABLE is syntactically similar to a function, but it can appear only as a table in the FROM clause of a query.

+

Return value: setof record

+

Syntax:

+
1
+2
+3
+4
+5
+6
XMLTABLE ( [ XMLNAMESPACES ( namespace_uri AS namespace_name [,  ...] ), ]
+                row_expression PASSING [ BY  { REF | VALUE } ]
+document_expression [ BY  { REF | VALUE } ]
+COLUMNS name  { type  [ PATH column_expression  ] [ DEFAULT default_expression ] [ NOT NULL | NULL ] | FOR ORDINALITY }
+[, ...]
+)
+
+ +
+

Parameter:

+ +

Example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
SELECT * FROM XMLTABLE('/ROWS/ROW'
+PASSING '<ROWS><ROW id="1"><COUNTRY_ID>AU</COUNTRY_ID><COUNTRY_NAME>Australia</COUNTRY_NAME></ROW><ROW id="2"><COUNTRY_ID>FR</COUNTRY_ID><COUNTRY_NAME>France</COUNTRY_NAME></ROW><ROW id="3"><COUNTRY_ID>SG</COUNTRY_ID><COUNTRY_NAME>Singapore</COUNTRY_NAME></ROW></ROWS>'
+COLUMNS id INT PATH '@id',
+_id FOR ORDINALITY,
+country_id TEXT PATH 'COUNTRY_ID',
+country_name TEXT PATH 'COUNTRY_NAME' NOT NULL);
+id  |   _id  | country_id | country_name
+----+-----+---------------+--------------
+  1 |      1 | AU         | Australia
+  2 |      2 | FR         | France
+  3 |      3 | SG         | Singapore
+(3 rows)
+
+ +
+

Mapping a Table to XML

+

Description: Maps the contents of a table to XML values.

+

Return type: XML

+
+ +

Description: Maps a relational table schema to an XML schema document.

+

Return type: XML

+ +

Description: Maps a relational table to XML values and schema documents.

+

Return type: XML

+ +

Description: Maps the contents of an SQL query to XML values.

+

Return type: XML

+ +

Description: Maps an SQL query into an XML schema document.

+

Return type: XML

+ +

Description: Maps SQL queries to XML values and schema documents.

+

Return type: XML

+ +

Description: Maps a cursor query to an XML value.

+

Return type: XML

+ +

Description: Maps a cursor query to an XML schema document.

+

Return type: XML

+ +

Description: Maps a table in a schema to an XML value.

+

Return type: XML

+ +

Description: Maps a table in a schema to an XML schema document.

+

Return type: XML

+ +

Description: Maps a table in a schema to an XML value and a schema document.

+

Return type: XML

+ +

Description: Maps a database table to an XML value.

+

Return type: XML

+ +

Description: Maps a database table to an XML schema document.

+

Return type: XML

+ +

Description: Maps database tables to XML values and schema documents.

+

Return type: XML

+

The parameters for mapping a table to an XML value are described as follows:

+
  • tbl: table name.
  • nulls: indicates whether the output contains null values. If the value is true, the null value in the column is <columnname xsi:nil="true"/>. If the value is false, the columns containing null values are omitted from the output.
  • tableforest: If this parameter is set to true, XML fragments are generated. If this parameter is set to false, XML files are generated.
  • targetns: specifies the XML namespace of the desired result. If this parameter is not specified, an empty string is passed.
  • query: SQL query statement
  • cursor: cursor name
  • count: amount of data obtained from the cursor
  • schema: schema name
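
For example, a minimal sketch of mapping a table to an XML value (mytab is a hypothetical table; nulls is true, tableforest is false, and the target namespace is empty):

SELECT table_to_xml('mytab'::regclass, true, false, '');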
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0068.html b/docs/dws/dev/dws_06_0068.html new file mode 100644 index 00000000..f04efcb4 --- /dev/null +++ b/docs/dws/dev/dws_06_0068.html @@ -0,0 +1,144 @@ + + +

Call Stack Recording Functions

+

The pv_memory_profiling(type int) and environment variable MALLOC_CONF are used by GaussDB(DWS) to control the enabling and disabling of the memory allocation call stack recording module and the output of the memory call stack. The following figure illustrates the process.

+

+

MALLOC_CONF

The environment variable MALLOC_CONF is used to enable the monitoring module. It is in the ${BIGDATA_HOME}/mppdb/.mppdbgs_profile file and is enabled by default. Note the following points:

+ +

Commands for enabling and disabling MALLOC_CONF:

+ +
+

pv_memory_profiling (type int)

Parameter description: Controls the backtrace recording and output of memory allocation functions such as malloc in the kernel.

+

Value range: an integer from 0 to 3.

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Values and descriptions of pv_memory_profiling

pv_memory_profiling

+

Value

+

Description

+

0

+

Disables the memory trace function and does not record information of call stacks such as malloc.

+

1

+

Enables the memory trace function to record information of call stacks such as malloc.

+

2

+

Outputs trace logs of call stacks such as malloc.

+
  • Output path: /proc/pid/cwd directory. pid indicates the ID of the GaussDB process.
  • Output log name format: jeprof.<pid>.*.heap, where pid indicates the ID of the GaussDB process and * indicates the unique sequence number of the output trace log, for example, jeprof.195473.0.u0.heap.
+

3

+

Outputs memory statistics.

+
  • Output path: /proc/pid/cwd directory. pid indicates the ID of the GaussDB process.
  • Log name format: Node name + Process ID + Time + heap_stats + .out. You can use vim to open the file.
+
+
+

Return type: Boolean

+

Note:

+ +
+

Outputting Memory Call Stack Information

Procedure:

+
  1. Execute the following statement to output the memory call stack information and output the trace file in the directory where the GaussDB process is located:

    1
    select * from pv_memory_profiling(2);
    +
    + +
    +

  2. Use the jeprof tool provided by jemalloc to parse log information.

    Method 1: Output in text format.
    jeprof --text --show_bytes $GAUSSHOME/bin/gaussdb trace file 1 >prof.txt
    +
    +

    Method 2: Export the report in PDF format.

    +
    jeprof --pdf --show_bytes $GAUSSHOME/bin/gaussdb trace file 1 > prof.pdf
    +
    • To parse the memory call stack information, you need to use the GaussDB source code for analysis. You need to send the trace file to R&D engineers for analysis.
    • To analyze the trace file, you need to use the jeprof tool, which is generated by jemalloc. The Perl environment is required for using the tool. To generate PDF calling diagrams, you need to install the Graphviz tool that matches the OS.
    +
    +

+
+

Example

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
-- Log in as the system administrator, set environment variables, and start the database.
+export MALLOC_CONF=prof:true
+
+-- Disable the memory trace recording function when the database is running.
+select pv_memory_profiling(0);
+pv_memory_profiling
+----------------------------
+t
+(1 row)
+
+-- Enable the memory trace recording function when the database is running.
+select pv_memory_profiling(1);
+pv_memory_profiling
+----------------------------
+t
+(1 row)
+
+-- Output memory trace records.
+select pv_memory_profiling(2);
+pv_memory_profiling
+----------------------------
+t
+(1 row)
+
+-- Generate the trace file in text or PDF format in the directory where the GaussDB process is located.
+jeprof --text --show_bytes $GAUSSHOME/bin/gaussdb trace file 1 >prof.txt
+jeprof --pdf --show_bytes $GAUSSHOME/bin/gaussdb trace file 1 > prof.pdf
+
+-- Output memory statistics.
+-- Execute the following statement to generate the memory statistics file in the directory where the GaussDB process is located. The file can be directly read.
+select pv_memory_profiling(3);
+pv_memory_profiling
+----------------------------
+t
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0069.html b/docs/dws/dev/dws_06_0069.html new file mode 100644 index 00000000..de09035e --- /dev/null +++ b/docs/dws/dev/dws_06_0069.html @@ -0,0 +1,23 @@ + + +

Expressions

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0070.html b/docs/dws/dev/dws_06_0070.html new file mode 100644 index 00000000..56640f68 --- /dev/null +++ b/docs/dws/dev/dws_06_0070.html @@ -0,0 +1,146 @@ + + +

Simple Expressions

+

Logical Expressions

Logical Operators lists the operators and calculation rules of logical expressions.

+
+

Comparative Expressions

Comparison Operators lists the common comparative operators.

+

In addition to comparison operators, you can also use the following constructs:

+ +
+

Examples

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
SELECT 2 BETWEEN 1 AND 3 AS RESULT;
+ result 
+----------
+ t
+(1 row)
+
+SELECT 2 >= 1 AND 2 <= 3 AS RESULT;
+ result 
+----------
+ t
+(1 row)
+
+SELECT 2 NOT BETWEEN 1 AND 3 AS RESULT;
+ result 
+----------
+ f
+(1 row)
+
+SELECT 2 < 1 OR 2 > 3 AS RESULT;
+ result 
+----------
+ f
+(1 row)
+
+SELECT 2+2 IS NULL AS RESULT;
+ result 
+----------
+ f
+(1 row)
+
+SELECT 2+2 IS NOT NULL AS RESULT;
+ result 
+----------
+ t
+(1 row)
+
+SELECT 2+2 ISNULL AS RESULT;
+ result 
+----------
+ f
+(1 row)
+
+SELECT 2+2 NOTNULL AS RESULT;
+ result 
+----------
+ t
+(1 row)
+
+SELECT 2+2 IS DISTINCT FROM NULL AS RESULT;
+ result 
+----------
+ t
+(1 row)
+
+SELECT 2+2 IS NOT DISTINCT FROM NULL AS RESULT;
+ result  
+----------
+ f
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0071.html b/docs/dws/dev/dws_06_0071.html new file mode 100644 index 00000000..279bd658 --- /dev/null +++ b/docs/dws/dev/dws_06_0071.html @@ -0,0 +1,252 @@ + + +

Conditional Expressions

+

Data that meets the requirements specified by conditional expressions are filtered during SQL statement execution.

+

Conditional expressions include the following types:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0072.html b/docs/dws/dev/dws_06_0072.html new file mode 100644 index 00000000..41294038 --- /dev/null +++ b/docs/dws/dev/dws_06_0072.html @@ -0,0 +1,176 @@ + + +

Subquery Expressions

+

Subquery expressions include the following types:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0073.html b/docs/dws/dev/dws_06_0073.html new file mode 100644 index 00000000..36195349 --- /dev/null +++ b/docs/dws/dev/dws_06_0073.html @@ -0,0 +1,96 @@ + + +

Array Expressions

+

IN

expression IN (value [, ...])

+

The right-hand parentheses contain an expression list. The result of the left-hand expression is compared with each element of the list. IN returns true if the left-hand result equals any element in the list, and false if no element matches.

+

Example:

+
1
+2
+3
+4
+5
SELECT 8000+500 IN (10000, 9000) AS RESULT;
+  result 
+----------
+ f
+(1 row)
+
+ +
+
+

If the left-hand expression yields null, or if there is no matching element and at least one right-hand expression yields null, the result of IN is null rather than false. This is consistent with SQL's Boolean rules for null values.

+
+

NOT IN

expression NOT IN (value [, ...])

+

The right-hand parentheses contain an expression list. The result of the left-hand expression is compared with each element of the list. NOT IN returns true if no element in the list equals the left-hand result, and false if any element matches.

+

Example:

+
1
+2
+3
+4
+5
SELECT 8000+500 NOT IN (10000, 9000) AS RESULT;
+  result 
+----------
+ t
+(1 row)
+
+ +
+

If the left-hand expression yields null, or if there is no matching element and at least one right-hand expression yields null, the result of NOT IN is null rather than false. This is consistent with SQL's Boolean rules for null values.

+
+

In all situations, X NOT IN Y is equivalent to NOT(X IN Y).

+
+

ANY/SOME (array)

expression operator ANY (array expression)

+

expression operator SOME (array expression)

+
1
+2
+3
+4
+5
SELECT 8000+500 < SOME (array[10000,9000]) AS RESULT;
+  result 
+----------
+ t
+(1 row)
+
+ +
+
1
+2
+3
+4
+5
SELECT 8000+500 < ANY (array[10000,9000]) AS RESULT;
+  result 
+----------
+ t
+(1 row)
+
+ +
+

The right-hand parentheses contain an array expression, which must yield an array value. The left-hand expression is evaluated and compared with each element of the array using the given operator, which must yield a Boolean result. ANY returns true if any comparison yields true, and false if no comparison yields true (including the case where the array has zero elements).

+ +

If no comparison yields true and the array contains at least one null element, the result of ANY is null rather than false. This is consistent with SQL's Boolean rules for null values.

+
+

SOME is a synonym of ANY.

+
+

ALL (array)

expression operator ALL (array expression)

+

The right-hand parentheses contain an array expression, which must yield an array value. The left-hand expression is evaluated and compared with each element of the array using the given operator, which must yield a Boolean result. ALL returns true if all comparisons yield true (including the case where the array has zero elements), and false if any comparison yields false.

+ +

If the array expression yields a null array, the result of ALL will be null. If the left-hand expression yields null, the result of ALL is ordinarily null (though a non-strict comparison operator could possibly yield a different result). Also, if the right-hand array contains any null elements and no false comparison result is obtained, the result of ALL will be null, not true (again, assuming a strict comparison operator). This method is consistent with the Boolean rules used when SQL statements return empty values.

+
1
+2
+3
+4
+5
SELECT 8000+500 < ALL (array[10000,9000]) AS RESULT;
+  result
+----------
+ t
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0074.html b/docs/dws/dev/dws_06_0074.html new file mode 100644 index 00000000..7c83ecdb --- /dev/null +++ b/docs/dws/dev/dws_06_0074.html @@ -0,0 +1,28 @@ + + +

Row Expressions

+

Syntax:

+

row_constructor operator row_constructor

+

Both sides of a row expression are row constructors. The two row values must have the same number of fields, and they are compared field by field. Row comparison allows the operators =, <>, <, <=, >, and >=, or a similar operator.

+

The = and <> operators work slightly differently from the others. Two rows are equal if all their corresponding fields are non-null and equal; they are unequal if any corresponding fields are non-null and unequal; otherwise, the comparison result is null.

+

For the <, <=, >, and >= operators, the fields in the rows are compared from left to right until a pair of fields that are unequal or null is found. If that pair contains at least one null value, the comparison result is null; otherwise, the comparison of that pair of fields determines the final result.

+

For example:

+
1
+2
+3
+4
+5
SELECT ROW(1,2,NULL) < ROW(1,3,0) AS RESULT;
+  result
+----------
+ t
+(1 row)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0075.html b/docs/dws/dev/dws_06_0075.html new file mode 100644 index 00000000..7963fd78 --- /dev/null +++ b/docs/dws/dev/dws_06_0075.html @@ -0,0 +1,23 @@ + + +

Type Conversion

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0076.html b/docs/dws/dev/dws_06_0076.html new file mode 100644 index 00000000..c5bb738f --- /dev/null +++ b/docs/dws/dev/dws_06_0076.html @@ -0,0 +1,66 @@ + + +

Overview

+

Context

SQL is a typed language. That is, every data item has an associated data type which determines its behavior and allowed usage. GaussDB(DWS) has an extensible type system that is more general and flexible than other SQL implementations. Hence, most type conversion behavior in GaussDB(DWS) is governed by general rules. This allows the use of mixed-type expressions.

+

The GaussDB(DWS) scanner/parser divides lexical elements into five fundamental categories: integers, floating-point numbers, strings, identifiers, and keywords. Constants of most non-numeric types are first classified as strings. The SQL language definition allows specifying type names with constant strings. For example, the query:

+
1
+2
+3
+4
+5
SELECT text 'Origin' AS "label", point '(0,0)' AS "value";
+ label  | value
+--------+-------
+ Origin | (0,0)
+(1 row)
+
+ +
+

has two literal constants, of type text and point. If a type is not specified for a string literal, then the placeholder type unknown is assigned initially.

+

There are four fundamental SQL constructs requiring distinct type conversion rules in the GaussDB(DWS) parser:

+ +

The system catalog pg_cast stores information about which conversions, or casts, exist between which data types, and how to perform those conversions. For details, see PG_CAST.

+

The return type and conversion behavior of an expression are determined during semantic analysis. Data types are divided into several basic type categories, including boolean, numeric, string, bitstring, datetime, timespan, geometric, and network. Within each category there can be one or more preferred types, which are preferred when there is a choice of possible types. With careful selection of preferred types and available implicit casts, it is possible to ensure that ambiguous expressions (those with multiple candidate parsing solutions) can be resolved in a useful way.

+

All type conversion rules are designed based on the following principles:

+ +
+

Converting Empty Strings to Numeric Values in TD-Compatible Mode

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0077.html b/docs/dws/dev/dws_06_0077.html new file mode 100644 index 00000000..107367f4 --- /dev/null +++ b/docs/dws/dev/dws_06_0077.html @@ -0,0 +1,97 @@ + + +

Operators

+

Operator Type Resolution

  1. Select the operators to be considered from the pg_operator system catalog. Considered operators are those with the matching name and argument count. If the search path contains multiple operators with identical argument types, only the one appearing earliest in the path is considered.
  2. Look for the best match.
    1. Discard candidate operators for which the input types do not match and cannot be converted (using an implicit conversion) to match. unknown literals are assumed to be convertible to anything for this purpose. If only one candidate remains, use it; else continue to the next step.

      +
    2. Run through all candidates and keep those with the most exact matches on input types. Domains are considered the same as their base type for this purpose. Keep all candidates if there are no exact matches. If only one candidate remains, use it; else continue to the next step.

      +
    3. Run through all candidates and keep those that accept preferred types (of the input data type's type category) at the most positions where type conversion will be required. Keep all candidates if none accepts preferred types. If only one candidate remains, use it; else continue to the next step.

      +
    4. If any input arguments are of unknown types, check the type categories accepted at those argument positions by the remaining candidates. At each position, select the string category if any candidate accepts that category. (This bias towards string is appropriate since an unknown-type literal looks like a string.) Otherwise, if all the remaining candidates accept the same type category, select that category; otherwise fail because the correct choice cannot be deduced without more clues. Now discard candidates that do not accept the selected type category. Furthermore, if any candidate accepts a preferred type in that category, discard candidates that accept non-preferred types for that argument. Keep all candidates if none survives these tests. If only one candidate remains, use it; else continue to the next step.

      +
    5. If there are both unknown and known-type arguments, and all the known-type arguments have the same type, assume that the unknown arguments are also of that type, and check which candidates can accept that type at the unknown-argument positions. If exactly one candidate passes this test, use it. Otherwise, an error is reported.

      +
    +
+
+

Examples

Example 1: factorial operator type resolution. There is only one factorial operator (postfix !) defined in the system catalog, and it takes an argument of type bigint. The scanner assigns an initial type of bigint to the argument in this query expression:

+
1
+2
+3
+4
+5
+6
SELECT 40 ! AS "40 factorial";
+
+                   40 factorial
+--------------------------------------------------
+ 815915283247897734345611269596115894272000000000
+(1 row)
+
+ +
+

So the parser does a type conversion on the operand and the query is equivalent to:

+
1
SELECT CAST(40 AS bigint) ! AS "40 factorial";
+
+ +
+

Example 2: string concatenation operator type resolution. A string-like syntax is used for working with string types and for working with complex extension types. Strings with unspecified type are matched with likely operator candidates. An example with one unspecified argument:

+
1
+2
+3
+4
+5
SELECT text 'abc' || 'def' AS "text and unknown";
+ text and unknown
+------------------
+ abcdef
+(1 row)
+
+ +
+

In this example, the parser looks for an operator whose parameters are of the text type. Such an operator is found.

+

Here is a concatenation of two values of unspecified types:

+
1
+2
+3
+4
+5
SELECT 'abc' || 'def' AS "unspecified";
+ unspecified
+-------------
+ abcdef
+(1 row)
+
+ +
+

In this case there is no initial hint for which type to use, since no types are specified in the query. So, the parser looks for all candidate operators and finds that there are candidates accepting both string-category and bit-string-category inputs. Since string category is preferred when available, that category is selected, and then the preferred type for strings, text, is used as the specific type to resolve the unknown-type literals.

+
+

Example 3: absolute-value and negation operator type resolution. The GaussDB(DWS) operator catalog has several entries for the prefix operator @. All the entries implement absolute-value operations for various numeric data types. One of these entries is for type float8, which is the preferred type in the numeric category. Therefore, GaussDB(DWS) will use that entry when faced with an unknown input:

+
1
+2
+3
+4
+5
SELECT @ '-4.5' AS "abs";
+ abs
+-----
+ 4.5
+(1 row)
+
+ +
+

Here the system has implicitly resolved the unknown-type literal as type float8 before applying the chosen operator.

+

Example 4: array inclusion operator type resolution. The following is an example of resolving an operator with one known and one unknown input:

+
1
+2
+3
+4
+5
SELECT array[1,2] <@ '{1,2,3}' as "is subset";
+ is subset
+-----------
+ t
+(1 row)
+
+ +
+

In the pg_operator table of GaussDB(DWS), several entries correspond to the infix operator <@, but the only two that may accept an integer array on the left-hand side are array inclusion (anyarray <@ anyarray) and range inclusion (anyelement <@ anyrange). Because none of these polymorphic pseudo-types (see Pseudo-Types) is considered preferred, the parser cannot resolve the ambiguity on that basis. However, 2.e tells it to assume that the unknown-type literal is of the same type as the other input, that is, integer array. Now only one of the two operators can match, so array inclusion is selected. (If you select range inclusion, an error will be reported because the string does not have the right format to be a range literal.)

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0078.html b/docs/dws/dev/dws_06_0078.html new file mode 100644 index 00000000..73907024 --- /dev/null +++ b/docs/dws/dev/dws_06_0078.html @@ -0,0 +1,97 @@ + + +

Functions

+

Function Type Resolution

  1. Select the functions to be considered from the pg_proc system catalog. If a non-schema-qualified function name was used, the functions in the current search path are considered. If a qualified function name was given, only functions in the specified schema are considered.

If the search path contains multiple functions with identical argument types, only the one appearing earliest in the path is considered.

    +
  2. Check for a function accepting exactly the input argument types. If the function exists, use it. Cases involving unknown will never find a match at this step.
  3. If no exact match is found, see if the function call appears to be a special type conversion request.
  4. Look for the best match.
    1. Discard candidate functions for which the input types do not match and cannot be converted (using an implicit conversion) to match. unknown literals are assumed to be convertible to anything for this purpose. If only one candidate remains, use it; else continue to the next step.
    2. Run through all candidates and keep those with the most exact matches on input types. Domains are considered the same as their base type for this purpose. Keep all candidates if none has exact matches. If only one candidate remains, use it; else continue to the next step.
    3. Run through all candidates and keep those that accept preferred types at the most positions where type conversion will be required. Keep all candidates if none accepts preferred types. If only one candidate remains, use it; else continue to the next step.
    4. If any input arguments are of unknown types, check the type categories accepted at those argument positions by the remaining candidates. At each position, select the string category if any candidate accepts that category. (This bias towards string is appropriate since an unknown-type literal looks like a string.) Otherwise, if all the remaining candidates accept the same type category, select that category; otherwise fail because the correct choice cannot be deduced without more clues. Now discard candidates that do not accept the selected type category. Furthermore, if any candidate accepts a preferred type in that category, discard candidates that accept non-preferred types for that argument. Keep all candidates if none survives these tests. If only one candidate remains, use it; else continue to the next step.
    5. If there are both unknown and known-type arguments, and all the known-type arguments have the same type, assume that the unknown arguments are also of that type, and check which candidates can accept that type at the unknown-argument positions. If exactly one candidate passes this test, use it. Otherwise, fail.
    +
+
+

Examples

Example 1: rounding function argument type resolution. There is only one round function that takes two arguments; it takes a first argument of type numeric and a second argument of type integer. So the following query automatically converts the first argument of type integer to numeric:

+
1
+2
+3
+4
+5
SELECT round(4, 4);
+ round
+--------
+ 4.0000
+(1 row)
+
+ +
+

That query is converted by the parser to:

+
1
SELECT round(CAST (4 AS numeric), 4);
+
+ +
+

Since numeric constants with decimal points are initially assigned the type numeric, the following query will require no type conversion and therefore might be slightly more efficient:

+
1
SELECT round(4.0, 4);
+
+ +
+

Example 2: substring function type resolution. There are several substr functions, one of which takes types text and integer. If called with a string constant of unspecified type, the system chooses the candidate function that accepts an argument of the preferred category string (namely of type text).

+
1
+2
+3
+4
+5
SELECT substr('1234', 3);
+ substr
+--------
+     34
+(1 row)
+
+ +
+

If the string is declared to be of type varchar, as might be the case if it comes from a table, then the parser will try to convert it to become text:

+
1
+2
+3
+4
+5
SELECT substr(varchar '1234', 3);
+ substr
+--------
+     34
+(1 row)
+
+ +
+

This is transformed by the parser to effectively become:

+
1
SELECT substr(CAST (varchar '1234' AS text), 3);
+
+ +
+

The parser learns from the pg_cast catalog that text and varchar are binary-compatible, meaning that one can be passed to a function that accepts the other without doing any physical conversion. Therefore, no type conversion is inserted in this case.

+
+

And, if the function is called with an argument of type integer, the parser will try to convert that to text:

+
1
+2
+3
+4
+5
SELECT substr(1234, 3);
+substr
+--------
+ 34
+(1 row)
+
+ +
+

This is transformed by the parser to effectively become:

+
1
+2
+3
+4
+5
SELECT substr(CAST (1234 AS text), 3);
+ substr
+--------
+     34
+(1 row)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0079.html b/docs/dws/dev/dws_06_0079.html new file mode 100644 index 00000000..390fd158 --- /dev/null +++ b/docs/dws/dev/dws_06_0079.html @@ -0,0 +1,54 @@ + + +

Value Storage

+

Value Storage Type Resolution

  1. Search for an exact match with the target column.
  2. Try to convert the expression to the target type. This will succeed if there is a registered cast between the two types. If the expression is an unknown-type literal, the content of the literal string will be fed to the input conversion routine for the target type.
  3. Check to see if there is a sizing cast for the target type. A sizing cast is a cast from that type to itself. If one is found in the pg_cast catalog, apply it to the expression before storing into the destination column. The implementation function for such a cast always takes an extra parameter of type integer. The parameter receives the destination column's atttypmod value (typically its declared length, although the interpretation of atttypmod varies for different data types), and may take a third boolean parameter that says whether the cast is explicit or implicit. The cast function is responsible for applying any length-dependent semantics such as size checking or truncation.
+
+

Examples

The following example uses character storage type conversion. For a target column declared as character(20), the statement below shows that the stored value is sized correctly:
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
CREATE TABLE x1
+(
+    customer_sk             integer,
+    customer_id             char(20),
+    first_name              char(6),
+    last_name               char(8)
+)
+with (orientation = column,compression=middle)
+distribute by hash (last_name);
+
+INSERT INTO x1(customer_sk, customer_id, first_name) VALUES (3769, 'abcdef', 'Grace');
+
+SELECT customer_id, octet_length(customer_id) FROM x1;
+     customer_id      | octet_length 
+----------------------+--------------
+ abcdef               |           20
+(1 row)
+DROP TABLE x1;
+
+ +
+
+

What has really happened here is that the unknown-type literal 'abcdef' is resolved to bpchar ("blank-padded char", the internal name of the character data type) to match the target column type; because the literal has unknown type, its string content is fed directly to the input conversion routine for that type. Then the sizing function bpchar(bpchar, integer, boolean) is found in the system catalog and applied to the value together with the declared column length. This type-specific function performs the required length check and adds the padding spaces.

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0080.html b/docs/dws/dev/dws_06_0080.html new file mode 100644 index 00000000..9577293b --- /dev/null +++ b/docs/dws/dev/dws_06_0080.html @@ -0,0 +1,169 @@ + + +

UNION, CASE, and Related Constructs

+

SQL UNION constructs must match up possibly dissimilar types to become a single result set. Since all query results from a SELECT UNION statement must appear in a single set of columns, the types of the results of each SELECT clause must be matched up and converted to a uniform set. Similarly, the result expressions of a CASE construct must be converted to a common type so that the CASE expression as a whole has a known output type. The same holds for ARRAY constructs, and for the GREATEST and LEAST functions.

+
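For instance, a minimal sketch (not part of the original example set): a CASE whose branches return integer and numeric resolves the whole expression to numeric:
SELECT CASE WHEN true THEN 1 ELSE 2.5 END AS result;
 result
--------
      1
(1 row)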

Type Resolution for UNION, CASE, and Related Constructs

+
+

Type Resolution for CASE, COALESCE, IF, and IFNULL in TD-Compatible Mode

+
+

Type Resolution for CASE, COALESCE, IF, and IFNULL in MySQL-Compatible Mode

+
+

Examples

Example 1: Type resolution with unknown types in a union. Here, the unknown-type literal 'b' will be resolved to type text.

+
SELECT text 'a' AS "text" UNION SELECT 'b';
+ text
+------
+ a
+ b
+(2 rows)
+
+ +
+

Example 2: Type resolution in a simple union. The literal 1.2 is of type numeric, and the integer value 1 can be cast implicitly to numeric, so that type is used.

+
SELECT 1.2 AS "numeric" UNION SELECT 1;
+ numeric
+---------
+       1
+     1.2
+(2 rows)
+
+ +
+

Example 3: Type resolution in a transposed union. Here, since type real cannot be implicitly cast to integer, but integer can be implicitly cast to real, the union result type is resolved as real.

+
SELECT 1 AS "real" UNION SELECT CAST('2.2' AS REAL);
+ real
+------
+    1
+  2.2
+(2 rows)
+
+ +
+

Example 4: Type resolution in the COALESCE function with input values of types int and varchar. Type resolution fails in ORA-compatible mode. The types are resolved as type varchar in TD-compatible mode, and as type text in MySQL-compatible mode.

+

Create the ora_db, td_db, and mysql_db databases by setting dbcompatibility to ORA, TD, and MySQL, respectively.

+
CREATE DATABASE ora_db dbcompatibility = 'ORA';
+CREATE DATABASE td_db dbcompatibility = 'TD';
+CREATE DATABASE mysql_db dbcompatibility = 'MySQL';
+
+ +
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0081.html b/docs/dws/dev/dws_06_0081.html new file mode 100644 index 00000000..592d5a37 --- /dev/null +++ b/docs/dws/dev/dws_06_0081.html @@ -0,0 +1,31 @@ + + +

Full Text Search

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0082.html b/docs/dws/dev/dws_06_0082.html new file mode 100644 index 00000000..3ec84e65 --- /dev/null +++ b/docs/dws/dev/dws_06_0082.html @@ -0,0 +1,21 @@ + + +

Introduction

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0083.html b/docs/dws/dev/dws_06_0083.html new file mode 100644 index 00000000..f4af52f3 --- /dev/null +++ b/docs/dws/dev/dws_06_0083.html @@ -0,0 +1,27 @@ + + +

Full-Text Retrieval

+

Textual search operators have been used in databases for years. GaussDB(DWS) has ~, ~*, LIKE, and ILIKE operators for textual data types, but they lack many essential properties required by modern information systems. Full text search supplements them with preprocessing, indexes, and dictionaries.

+

The hybrid data warehouse (standalone) does not support full-text search.

+
+
These basic operators lack the following essential properties required by information systems:
• There is no linguistic support, even for English. Regular expressions cannot easily handle derived words, for example, satisfies and satisfy.
• They provide no ordering (ranking) of search results.
• They tend to be slow because there is no index support, so they must process all documents for every search.
+
Full text indexing allows documents to be preprocessed and an index saved for later rapid searching. Preprocessing includes:
• Parsing documents into tokens. Each token is classified (for example, as a word, number, or email address) so that different classes of tokens can be processed differently.
• Converting tokens into lexemes. A lexeme is a normalized token, so that different forms of the same word are made alike (for example, by folding case and removing suffixes).
• Storing preprocessed documents in a form optimized for searching, for example, as a sorted array of normalized lexemes.
+

Dictionaries allow fine-grained control over how tokens are normalized. With appropriate dictionaries, you can define stop words that should not be indexed.

+

A data type tsvector is provided for storing preprocessed documents, along with a type tsquery for storing query conditions. For details, see Text Search Types. For details about the functions and operators available for these data types, see Text Search Functions and Operators. The match operator @@, which is the most important among those functions and operators, is introduced in Basic Text Matching.

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0084.html b/docs/dws/dev/dws_06_0084.html new file mode 100644 index 00000000..102c22d8 --- /dev/null +++ b/docs/dws/dev/dws_06_0084.html @@ -0,0 +1,40 @@ + + +

What Is a Document?

+

A document is the unit of searching in a full text search system; for example, a magazine article or email message. The text search engine must be able to parse documents and store associations of lexemes (keywords) with their parent document. Later, these associations are used to search for documents that contain query words.

+

For searches within GaussDB(DWS), a document is normally a textual column within a row of a database table, or possibly a combination (concatenation) of such columns, perhaps stored in several tables or obtained dynamically. In other words, a document can be constructed from different parts for indexing and it might not be stored anywhere as a whole. For example:

+
SELECT d_dow || '-' || d_dom || '-' || d_fy_week_seq  AS identify_serials FROM tpcds.date_dim WHERE d_fy_week_seq = 1;
+identify_serials 
+------------------
+ 5-6-1
+ 0-8-1
+ 2-3-1
+ 3-4-1
+ 4-5-1
+ 1-2-1
+ 6-7-1
+(7 rows) 
+
+ +
+

Actually, in these example queries, coalesce should be used to prevent a single NULL attribute from causing a NULL result for the whole document.

+
+
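For instance, a sketch of the same query with each attribute wrapped in coalesce (assuming the same tpcds.date_dim columns), so that a single NULL attribute cannot null out the whole concatenated document:
SELECT coalesce(d_dow::text, '') || '-' || coalesce(d_dom::text, '') || '-' || coalesce(d_fy_week_seq::text, '') AS identify_serials
FROM tpcds.date_dim
WHERE d_fy_week_seq = 1;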

Another possibility is to store the documents as simple text files in the file system. In this case, the database can be used to store the full text index and to execute searches, and some unique identifier can be used to retrieve the document from the file system. However, retrieving files from outside the database requires system administrator permissions or special function support, so this is less convenient than keeping all the data inside the database. Also, keeping everything inside the database allows easy access to document metadata to assist in indexing and display.

+

For text search purposes, each document must be reduced to the preprocessed tsvector format. Searching and relevance-based ranking are performed entirely on the tsvector representation of a document. The original text is retrieved only when the document has been selected for display to a user. We therefore often speak of the tsvector as being the document, but it is only a compact representation of the full document.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0085.html b/docs/dws/dev/dws_06_0085.html new file mode 100644 index 00000000..53346915 --- /dev/null +++ b/docs/dws/dev/dws_06_0085.html @@ -0,0 +1,74 @@ + + +

Basic Text Matching

+

Full text search in GaussDB(DWS) is based on the match operator @@, which returns true if a tsvector (document) matches a tsquery (query). It does not matter which data type is written first:

+
SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector @@ 'cat & rat'::tsquery AS RESULT;
+ result
+----------
+ t
+(1 row)
+
+ +
+
SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::tsvector AS RESULT;
+ result
+----------
+ f
+(1 row) 
+
+ +
+

As the above example suggests, a tsquery is not raw text, any more than a tsvector is. A tsquery contains search terms, which must be already-normalized lexemes, and may combine multiple terms using AND, OR, and NOT operators. For details, see Text Search Types. There are functions to_tsquery and plainto_tsquery that are helpful in converting user-written text into a proper tsquery, for example by normalizing words appearing in the text. Similarly, to_tsvector is used to parse and normalize a document string. So in practice a text search match would look more like this:

+
SELECT to_tsvector('fat cats ate fat rats') @@ to_tsquery('fat & rat') AS RESULT;
+result
+----------
+ t
+(1 row)
+
+ +
+

Observe that this match would not succeed if written as follows:

+
SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat') AS RESULT;
+result
+----------
+ f
+(1 row)
+
+ +
+

In the preceding match, no normalization of the word rats will occur. Therefore, rats does not match rat.

+

The @@ operator also supports text input, allowing explicit conversion of a text string to tsvector or tsquery to be skipped in simple cases. The variants available are:

+
tsvector @@ tsquery
+tsquery  @@ tsvector
+text @@ tsquery
+text @@ text
+
+ +
+

We already saw the first two of these. The form text @@ tsquery is equivalent to to_tsvector(text) @@ tsquery. The form text @@ text is equivalent to to_tsvector(text) @@ plainto_tsquery(text).

+
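For instance, assuming default_text_search_config is an English configuration, a small sketch of the text @@ text form, which applies both conversions implicitly:
SELECT 'fat cats ate fat rats' @@ 'fat rat' AS result;
 result
--------
 t
(1 row)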
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0086.html b/docs/dws/dev/dws_06_0086.html new file mode 100644 index 00000000..c633594f --- /dev/null +++ b/docs/dws/dev/dws_06_0086.html @@ -0,0 +1,15 @@ + + +

Configurations

+

Full text search functionality includes the ability to do many more things: skip indexing certain words (stop words), process synonyms, and use sophisticated parsing, for example, parse based on more than just white space. This functionality is controlled by text search configurations. GaussDB(DWS) comes with predefined configurations for many languages, and you can easily create your own configurations. (The \dF command of gsql shows all available configurations.)

+

During installation an appropriate configuration is selected and default_text_search_config is set accordingly in postgresql.conf. If you are using the same text search configuration for the entire cluster you can use the value in postgresql.conf. To use different configurations throughout the cluster but the same configuration within any one database, use ALTER DATABASE ... SET. Otherwise, you can set default_text_search_config in each session.

+
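A minimal sketch of the per-database and per-session settings (mydb is a hypothetical database name):
-- Per-database default:
ALTER DATABASE mydb SET default_text_search_config = 'pg_catalog.english';
-- Per-session override:
SET default_text_search_config = 'pg_catalog.english';
SHOW default_text_search_config;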

Each text search function that depends on a configuration has an optional argument, so that the configuration to use can be specified explicitly. default_text_search_config is used only when this argument is omitted.

+

To make it easier to build custom text search configurations, a configuration is built up from simpler database objects. GaussDB(DWS)'s text search facility provides the following types of configuration-related database objects:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0087.html b/docs/dws/dev/dws_06_0087.html new file mode 100644 index 00000000..253aa20d --- /dev/null +++ b/docs/dws/dev/dws_06_0087.html @@ -0,0 +1,19 @@ + + +

Tables and Indexes

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0088.html b/docs/dws/dev/dws_06_0088.html new file mode 100644 index 00000000..c15ccbe2 --- /dev/null +++ b/docs/dws/dev/dws_06_0088.html @@ -0,0 +1,132 @@ + + +

Searching a Table

+

It is possible to do a full text search without an index.

+ +
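For example, a sketch of a sequential-scan search against the tsearch.pgweb table used throughout this chapter; no index is required:
SELECT title
FROM tsearch.pgweb
WHERE to_tsvector('english', body) @@ to_tsquery('english', 'science');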
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0089.html b/docs/dws/dev/dws_06_0089.html new file mode 100644 index 00000000..897b3403 --- /dev/null +++ b/docs/dws/dev/dws_06_0089.html @@ -0,0 +1,70 @@ + + +

Creating an Index

+

You can create a GIN index to speed up text searches:

+
CREATE INDEX pgweb_idx_1 ON tsearch.pgweb USING gin(to_tsvector('english', body));
+
+ +
+

The to_tsvector() function accepts one or two arguments.

+

If the one-argument version is used, the system builds the tsvector using the configuration specified by default_text_search_config.

+

To create an index, the two-argument version must be used, or the index content may be inconsistent. Only text search functions that specify a configuration name can be used in expression indexes, because the index content must not depend on default_text_search_config. If it did, different index entries could contain tsvectors created with different text search configurations, and there would be no way to guess which was which. It would be impossible to dump and restore such an index correctly.

+

Because the two-argument version of to_tsvector was used in the index above, only a query reference that uses the two-argument version of to_tsvector with the same configuration name will use that index. That is, WHERE to_tsvector('english', body) @@ 'a & b' can use the index, but WHERE to_tsvector(body) @@ 'a & b' cannot. This ensures that an index will be used only with the same configuration used to create the index entries.

+

More complex expression indexes can be set up with other configurations. For example, an index using the zhparser configuration for Chinese text:

+
CREATE INDEX pgweb_idx_2 ON tsearch.pgweb USING gin(to_tsvector('zhparser', body));
+
+ +
+

In this example, zhparser supports only the UTF-8 or GBK database encoding format. If the SQL_ASCII encoding is used, an error will be reported.

+
+

If the configuration name is instead supplied by another column, for example, to_tsvector(config_name, body), where config_name is a column in the pgweb table, the same index can mix configurations while recording which configuration was used for each index entry. This would be useful, for example, if the document collection contained documents in different languages. Again, queries that are meant to use the index must be phrased to match, that is, WHERE to_tsvector(config_name, body) @@ 'a & b' must match the to_tsvector call in the index.

+

Indexes can even concatenate columns:

+
CREATE INDEX pgweb_idx_3 ON tsearch.pgweb USING gin(to_tsvector('english', title || ' ' || body));
+
+ +
+

Another approach is to create a separate tsvector column to hold the output of to_tsvector. This example is a concatenation of title and body, using coalesce to ensure that one column will still be indexed when the other is NULL:

+
ALTER TABLE tsearch.pgweb ADD COLUMN textsearchable_index_col tsvector;
+UPDATE tsearch.pgweb SET textsearchable_index_col = to_tsvector('english', coalesce(title,'') || ' ' || coalesce(body,''));
+
+ +
+

Then, create a GIN index to speed up the search:

+
CREATE INDEX textsearch_idx_4 ON tsearch.pgweb USING gin(textsearchable_index_col);
+
+ +
+

Now you are ready to perform a fast full text search:

+
SELECT title 
+FROM tsearch.pgweb 
+WHERE textsearchable_index_col @@ to_tsquery('science & Computer') 
+ORDER BY last_mod_date DESC 
+LIMIT 10; 
+
+ title  
+--------
+ Computer science
+
+(1 row)
+
+ +
+

One advantage of the separate-column approach over an expression index is that it is unnecessary to explicitly specify the text search configuration in queries in order to use the index. As shown in the preceding example, the query can depend on default_text_search_config. Another advantage is that searches will be faster, since it will not be necessary to redo the to_tsvector calls to verify index matches. The expression-index approach is simpler to set up, however, and it requires less disk space since the tsvector representation is not stored explicitly.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0090.html b/docs/dws/dev/dws_06_0090.html new file mode 100644 index 00000000..54f849d2 --- /dev/null +++ b/docs/dws/dev/dws_06_0090.html @@ -0,0 +1,46 @@ + + +

Constraints on Index Use

+

The following is an example of using an index. Run the following statements in a database that uses the UTF-8 or GBK encoding:

+
create table table1 (c_int int,c_bigint bigint,c_varchar varchar,c_text text) with(orientation=row);
+
+create text search configuration ts_conf_1(parser=POUND);
+create text search configuration ts_conf_2(parser=POUND) with(split_flag='%');
+
+set default_text_search_config='ts_conf_1';
+create index idx1 on table1 using gin(to_tsvector(c_text));
+
+set default_text_search_config='ts_conf_2';
+create index idx2 on table1 using gin(to_tsvector(c_text));
+
+select c_varchar,to_tsvector(c_varchar) from table1 where to_tsvector(c_text) @@ plainto_tsquery('¥#@……&**') and to_tsvector(c_text) @@ 
+plainto_tsquery('Company') and c_varchar is not null order by 1 desc limit 3;
+
+ +
+

In this example, table1 has two GIN indexes created on the same column c_text, idx1 and idx2, but these two indexes are created under different settings of default_text_search_config. Differences between this example and the scenario where one table has common indexes created on the same column are as follows:

+ +

As a result, using idx1 and idx2 for the same query returns different results.

+

Constraints

In the preceding example, when:

+
+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0091.html b/docs/dws/dev/dws_06_0091.html new file mode 100644 index 00000000..6ed698ab --- /dev/null +++ b/docs/dws/dev/dws_06_0091.html @@ -0,0 +1,21 @@ + + +

Controlling Text Search

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0092.html b/docs/dws/dev/dws_06_0092.html new file mode 100644 index 00000000..e6bd989c --- /dev/null +++ b/docs/dws/dev/dws_06_0092.html @@ -0,0 +1,55 @@ + + +

Parsing Documents

+

GaussDB(DWS) provides function to_tsvector for converting a document to the tsvector data type.

+
to_tsvector([ config regconfig, ] document text) returns tsvector
+
+ +
+

to_tsvector parses a textual document into tokens, reduces the tokens to lexemes, and returns a tsvector, which lists the lexemes together with their positions in the document. The document is processed according to the specified or default text search configuration. Here is a simple example:

+
SELECT to_tsvector('english', 'a fat  cat sat on a mat - it ate a fat rats');
+                  to_tsvector
+-----------------------------------------------------
+ 'ate':9 'cat':3 'fat':2,11 'mat':7 'rat':12 'sat':4
+
+ +
+

In the preceding example we see that the resulting tsvector does not contain the words a, on, or it, the word rats became rat, and the punctuation sign (-) was ignored.

+

The to_tsvector function internally calls a parser which breaks the document text into tokens and assigns a type to each token. For each token, a list of dictionaries is consulted, and the list can vary depending on the token type. The first dictionary that recognizes the token emits one or more normalized lexemes to represent the token. For example:

+ +

The choices of parser, dictionaries and which types of tokens to index are determined by the selected text search configuration. It is possible to have many different configurations in the same database, and predefined configurations are available for various languages. In our example we used the default configuration english for the English language.

+

The function setweight can be used to label the entries of a tsvector with a given weight, where a weight is one of the letters A, B, C, or D. This is typically used to mark entries coming from different parts of a document, such as title versus body. Later, this information can be used for ranking of search results.

+

Because to_tsvector(NULL) will return NULL, you are advised to use coalesce whenever a column might be NULL. Here is the recommended method for creating a tsvector from a structured document:

+
CREATE TABLE tsearch.tt (id int, title text, keyword text, abstract text, body text, ti tsvector);
+
+INSERT INTO tsearch.tt(id, title, keyword, abstract, body) VALUES (1, 'book', 'literature', 'Ancient poetry','Tang poem Song iambic verse');
+
+UPDATE tsearch.tt SET ti =
+    setweight(to_tsvector(coalesce(title,'')), 'A')    ||
+    setweight(to_tsvector(coalesce(keyword,'')), 'B')  ||
+    setweight(to_tsvector(coalesce(abstract,'')), 'C') ||
+    setweight(to_tsvector(coalesce(body,'')), 'D');
+DROP TABLE tsearch.tt;
+
+ +
+

Here we have used setweight to label the source of each lexeme in the finished tsvector, and then merged the labeled tsvector values using the tsvector concatenation operator ||. For details about these operations, see Manipulating tsvector.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0093.html b/docs/dws/dev/dws_06_0093.html new file mode 100644 index 00000000..24306c5f --- /dev/null +++ b/docs/dws/dev/dws_06_0093.html @@ -0,0 +1,81 @@ + + +

Parsing Queries

+

GaussDB(DWS) provides functions to_tsquery and plainto_tsquery for converting a query to the tsquery data type. to_tsquery offers access to more features than plainto_tsquery, but is less forgiving about its input.

+
to_tsquery([ config regconfig, ] querytext text) returns tsquery
+

to_tsquery creates a tsquery value from querytext, which must consist of single tokens separated by the Boolean operators & (AND), | (OR), and ! (NOT). These operators can be grouped using parentheses. In other words, the input to to_tsquery must already follow the general rules for tsquery input, as described in Text Search Types. The difference is that while basic tsquery input takes the tokens at face value, to_tsquery normalizes each token to a lexeme using the specified or default configuration, and discards any tokens that are stop words according to the configuration. For example:

+
SELECT to_tsquery('english', 'The & Fat & Rats');
+   to_tsquery   
+---------------
+ 'fat' & 'rat'
+(1 row)
+
+ +
+

As in basic tsquery input, weight(s) can be attached to each lexeme to restrict it to match only tsvector lexemes of those weight(s). For example:

+
SELECT to_tsquery('english', 'Fat | Rats:AB');
+    to_tsquery    
+------------------
+ 'fat' | 'rat':AB
+(1 row)
+
+ +
+

Also, the asterisk (*) can be attached to a lexeme to specify prefix matching:

+
SELECT to_tsquery('supern:*A & star:A*B');
+        to_tsquery        
+--------------------------
+ 'supern':*A & 'star':*AB
+(1 row)
+
+ +
+

Such a lexeme will match any word in a tsvector that begins with the given string (and, if weights are attached, has one of those weights).

+
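For instance, a small sketch assuming an English configuration: to_tsvector reduces postgraduate to the lexeme postgradu, to_tsquery reduces postgres to the prefix postgr, and the prefix matches:
SELECT to_tsvector('postgraduate') @@ to_tsquery('postgres:*') AS result;
 result
--------
 t
(1 row)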
plainto_tsquery([ config regconfig, ] querytext text) returns tsquery
+

plainto_tsquery transforms unformatted text querytext to tsquery. The text is parsed and normalized much as for to_tsvector, then the & (AND) Boolean operator is inserted between surviving words.

+

For example:

+
SELECT plainto_tsquery('english', 'The Fat Rats');
+ plainto_tsquery 
+-----------------
+ 'fat' & 'rat'
+(1 row)
+
+ +
+

Note that plainto_tsquery cannot recognize Boolean operators, weight labels, or prefix-match labels in its input:

+
SELECT plainto_tsquery('english', 'The Fat & Rats:C');
+   plainto_tsquery   
+---------------------
+ 'fat' & 'rat' & 'c'
+(1 row)
+
+ +
+

Here, all the input punctuation was discarded as being space symbols.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0094.html b/docs/dws/dev/dws_06_0094.html new file mode 100644 index 00000000..50ae0738 --- /dev/null +++ b/docs/dws/dev/dws_06_0094.html @@ -0,0 +1,126 @@ + + +

Ranking Search Results

+

Ranking attempts to measure how relevant documents are to a particular query, so that when there are many matches the most relevant ones can be shown first. GaussDB(DWS) provides two predefined ranking functions, which take into account lexical, proximity, and structural information; that is, they consider how often the query terms appear in the document, how close together the terms are in the document, and how important is the part of the document where they occur. However, the concept of relevancy is vague and application-specific. Different applications might require additional information for ranking, for example, document modification time. The built-in ranking functions are only examples. You can write your own ranking functions and/or combine their results with additional factors to fit your specific needs.

+

The two ranking functions currently available are:

+
ts_rank([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) returns float4
+
+ +
+

Ranks vectors based on the frequency of their matching lexemes.

+
ts_rank_cd([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) returns float4
+
+ +
+

This function requires positional information in its input. Therefore, it will not work on "stripped" tsvector values; for such input it always returns zero.

+

For both these functions, the optional weights argument offers the ability to weigh word instances more or less heavily depending on how they are labeled. The weight array specifies how heavily to weigh each category of word, in the order:

+
{D-weight, C-weight, B-weight, A-weight}
+

If no weights are provided, then these defaults are used: {0.1, 0.2, 0.4, 1.0}

+

Typically weights are used to mark words from special areas of the document, like the title or an initial abstract, so they can be treated with more or less importance than words in the document body.

+
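For instance, a sketch that passes the default weight array explicitly; the entries are given in the order {D, C, B, A}:
SELECT ts_rank('{0.1, 0.2, 0.4, 1.0}'::float4[],
               setweight(to_tsvector('english', 'cats'), 'A'),
               to_tsquery('english', 'cat')) AS rank;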

Since a longer document has a greater chance of containing a query term it is reasonable to take into account document size. For example, a hundred-word document with five instances of a search word is probably more relevant than a thousand-word document with five instances. Both ranking functions take an integer normalization option that specifies whether and how a document's length should impact its rank. The integer option controls several behaviors, so it is a bit mask: you can specify one or more behaviors using a vertical bar (|) (for example, 2|4).

+ +

If more than one flag bit is specified, the transformations are applied in the order listed.

+

It is important to note that the ranking functions do not use any global information, so it is impossible to produce a fair normalization to 1% or 100% as sometimes desired. Normalization option 32 (rank/(rank+1)) can be applied to scale all ranks into the range zero to one, but of course this is just a cosmetic change; it will not affect the ordering of the search results.

+

The following example selects the top 10 matches. Run the following statements in a database that uses the UTF-8 or GBK encoding:

+
SELECT id, title, ts_rank_cd(to_tsvector(body), query) AS rank 
+FROM tsearch.pgweb, to_tsquery('science') query 
+WHERE query @@ to_tsvector(body) 
+ORDER BY rank DESC 
+LIMIT 10;
+ id |  title  | rank 
+----+---------+------
+ 11 | Philology  |   .2
+  2 | Mathematics |   .1
+ 12 | Geography  |   .1
+ 13 | Computer science  |   .1
+(4 rows)
+
+ +
+

This is the same example using normalized ranking:

+
SELECT id, title, ts_rank_cd(to_tsvector(body), query, 32 /* rank/(rank+1) */ ) AS rank 
+FROM tsearch.pgweb, to_tsquery('science') query 
+WHERE  query @@ to_tsvector(body) 
+ORDER BY rank DESC 
+LIMIT 10;
+ id |  title  |   rank   
+----+---------+----------
+ 11 | Philology  |  .166667
+  2 | Mathematics | .0909091
+ 12 | Geography  | .0909091
+ 13 | Computer science  | .0909091
+(4 rows)
+
+ +
+

The following example sorts query by Chinese word segmentation:

+
CREATE TABLE tsearch.ts_zhparser(id int, body text);
+INSERT INTO tsearch.ts_zhparser VALUES (1, 'sort');
+INSERT INTO tsearch.ts_zhparser VALUES(2, 'sort query');
+INSERT INTO tsearch.ts_zhparser VALUES(3, 'query sort');
+-- Accurate match
+SELECT id, body, ts_rank_cd (to_tsvector ('zhparser', body), query) AS rank FROM tsearch.ts_zhparser, to_tsquery ('sort') query WHERE query @@ to_tsvector (body);
+ id | body | rank 
+----+------+------
+  1 | sort |   .1
+(1 row)
+
+-- Fuzzy match
+SELECT id, body, ts_rank_cd (to_tsvector ('zhparser', body), query) AS rank FROM tsearch.ts_zhparser, to_tsquery ('sort') query WHERE query @@ to_tsvector ('zhparser',body);
+ id |   body   | rank 
+----+----------+------
+  3 | query sort |   .1
+  1 | sort     |   .1
+  2 | sort query |   .1
+(3 rows)
+
+ +
+

Ranking can be expensive since it requires consulting the tsvector of each matching document, which can be I/O bound and therefore slow. Unfortunately, it is almost impossible to avoid since practical queries often result in large numbers of matches.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0095.html b/docs/dws/dev/dws_06_0095.html new file mode 100644 index 00000000..1052f228 --- /dev/null +++ b/docs/dws/dev/dws_06_0095.html @@ -0,0 +1,87 @@ + + +

Highlighting Results

+

To present search results it is ideal to show a part of each document and how it is related to the query. Usually, search engines show fragments of the document with marked search terms. GaussDB(DWS) provides function ts_headline that implements this functionality.

+
ts_headline([ config regconfig, ] document text, query tsquery [, options text ]) returns text
+
+ +
+

ts_headline accepts a document along with a query, and returns an excerpt from the document in which terms from the query are highlighted. The configuration to be used to parse the document can be specified by config. If config is omitted, the default_text_search_config configuration is used.

+

If an options string is specified it must consist of a comma-separated list of one or more option=value pairs. The available options are:

+ + + + + +

Any unspecified options receive these defaults:

+
StartSel=<b>, StopSel=</b>,
+MaxWords=35, MinWords=15, ShortWord=3, HighlightAll=FALSE,
+MaxFragments=0, FragmentDelimiter=" ... "
+
+ +
+

For example:

+
SELECT ts_headline('english',
+'The most common type of search
+is to find all documents containing given query terms
+and return them in order of their similarity to the
+query.',
+to_tsquery('english', 'query & similarity'));
+                        ts_headline                         
+------------------------------------------------------------
+ containing given <b>query</b> terms
+ and return them in order of their <b>similarity</b> to the
+ <b>query</b>.
+(1 row)
+
+SELECT ts_headline('english',
+'The most common type of search
+is to find all documents containing given query terms
+and return them in order of their similarity to the
+query.',
+to_tsquery('english', 'query & similarity'),
+'StartSel = <, StopSel = >');
+                      ts_headline                      
+-------------------------------------------------------
+ containing given <query> terms
+ and return them in order of their <similarity> to the
+ <query>.
+(1 row)
+
+ +
+

ts_headline uses the original document, not a tsvector summary, so it can be slow and should be used with care.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0096.html b/docs/dws/dev/dws_06_0096.html new file mode 100644 index 00000000..3dbf8119 --- /dev/null +++ b/docs/dws/dev/dws_06_0096.html @@ -0,0 +1,21 @@ + + +

Additional Features

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0097.html b/docs/dws/dev/dws_06_0097.html new file mode 100644 index 00000000..5894132e --- /dev/null +++ b/docs/dws/dev/dws_06_0097.html @@ -0,0 +1,19 @@ + + +

Manipulating tsvector

+

GaussDB(DWS) provides functions and operators that can be used to manipulate documents that are already in tsvector type.

+ +
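For example, a small sketch of the concatenation operator ||, which merges two tsvector values and shifts the positions of the second operand:
SELECT 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector;
         ?column?
---------------------------
 'a':1 'b':2,5 'c':3 'd':4
(1 row)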
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0098.html b/docs/dws/dev/dws_06_0098.html new file mode 100644 index 00000000..d2b9c70c --- /dev/null +++ b/docs/dws/dev/dws_06_0098.html @@ -0,0 +1,57 @@ + + +

Manipulating Queries

+

GaussDB(DWS) provides functions and operators that can be used to manipulate queries that are already in tsquery type.

+ + + + + +
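For example, a small sketch of the && operator, which ANDs two existing tsquery values:
SELECT 'fat | rat'::tsquery && 'cat'::tsquery;
         ?column?
---------------------------
 ( 'fat' | 'rat' ) & 'cat'
(1 row)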
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0099.html b/docs/dws/dev/dws_06_0099.html new file mode 100644 index 00000000..7e6d8f59 --- /dev/null +++ b/docs/dws/dev/dws_06_0099.html @@ -0,0 +1,85 @@ + + +

Rewriting Queries

+

The ts_rewrite family of functions searches a given tsquery for occurrences of a target subquery, and replace each occurrence with a substitute subquery. In essence this operation is a tsquery specific version of substring replacement. A target and substitute combination can be thought of as a query rewrite rule. A collection of such rewrite rules can be a powerful search aid. For example, you can expand the search using synonyms (that is, new york, big apple, nyc, gotham) or narrow the search to direct the user to some hot topic.

+ +
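For example, a small sketch of the simplest form, which takes literal target and substitute tsquery values:
SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'c'::tsquery);
 ts_rewrite
------------
 'b' & 'c'
(1 row)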
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0100.html b/docs/dws/dev/dws_06_0100.html new file mode 100644 index 00000000..fb1d6b5e --- /dev/null +++ b/docs/dws/dev/dws_06_0100.html @@ -0,0 +1,63 @@ + + +

Gathering Document Statistics

+

The function ts_stat is useful for checking your configuration and for finding stop-word candidates.

+
ts_stat(sqlquery text, [ weights text, ]
+        OUT word text, OUT ndoc integer,
+        OUT nentry integer) returns setof record
+
+ +
+

sqlquery is a text value containing an SQL query which must return a single tsvector column. ts_stat executes the query and returns statistics about each distinct lexeme (word) contained in the tsvector data. The columns returned are:
• word text: the value of a lexeme
• ndoc integer: the number of documents (tsvectors) the word occurred in
• nentry integer: the total number of occurrences of the word

+ +

If weights are supplied, only occurrences having one of those weights are counted. For example, to find the ten most frequent words in a document collection:

+
SELECT * FROM ts_stat('SELECT to_tsvector(''english'', sr_reason_sk) FROM tpcds.store_returns WHERE sr_customer_sk < 10') ORDER BY nentry DESC, ndoc DESC, word LIMIT 10;
+   word | ndoc | nentry 
+------+------+--------
+ 32   |    2 |      2
+ 33   |    2 |      2
+ 1    |    1 |      1
+ 10   |    1 |      1
+ 13   |    1 |      1
+ 14   |    1 |      1
+ 15   |    1 |      1
+ 17   |    1 |      1
+ 20   |    1 |      1
+ 22   |    1 |      1
+(10 rows)
+
+ +
+

The same, but counting only word occurrences with weight A:

+
SELECT * FROM ts_stat('SELECT to_tsvector(''english'', sr_reason_sk) FROM tpcds.store_returns WHERE sr_customer_sk < 10', 'a') ORDER BY nentry DESC, ndoc DESC, word LIMIT 10;
+ word | ndoc | nentry 
+------+------+--------
+(0 rows)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0101.html b/docs/dws/dev/dws_06_0101.html new file mode 100644 index 00000000..3353f926 --- /dev/null +++ b/docs/dws/dev/dws_06_0101.html @@ -0,0 +1,484 @@ + + +

Parsers

+

Text search parsers are responsible for splitting raw document text into tokens and identifying each token's type, where the set of types is defined by the parser itself. Note that a parser does not modify the text at all — it simply identifies plausible word boundaries. Because of this limited scope, there is less need for application-specific custom parsers than there is for custom dictionaries.

+

Currently, GaussDB(DWS) provides the following built-in parsers: pg_catalog.default for English configuration, and pg_catalog.ngram, pg_catalog.zhparser, and pg_catalog.pound for full text search in texts containing Chinese, or both Chinese and English.

+

The built-in parser is named pg_catalog.default. It recognizes 23 token types, shown in Table 1.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Default parser's token types

Alias           | Description                               | Examples
----------------+-------------------------------------------+----------------------------------------------------------
asciiword       | Word, all ASCII letters                   | elephant
word            | Word, all letters                         | mañana
numword         | Word, letters and digits                  | beta1
asciihword      | Hyphenated word, all ASCII                | up-to-date
hword           | Hyphenated word, all letters              | lógico-matemática
numhword        | Hyphenated word, letters and digits       | postgresql-beta1
hword_asciipart | Hyphenated word part, all ASCII           | postgresql in the context postgresql-beta1
hword_part      | Hyphenated word part, all letters         | lógico or matemática in the context lógico-matemática
hword_numpart   | Hyphenated word part, letters and digits  | beta1 in the context postgresql-beta1
email           | Email address                             | foo@example.com
protocol        | Protocol head                             | http://
url             | URL                                       | example.com/stuff/index.html
host            | Host                                      | example.com
url_path        | URL path                                  | /stuff/index.html, in the context of a URL
file            | File or path name                         | /usr/local/foo.txt, if not within a URL
sfloat          | Scientific notation                       | -1.23E+56
float           | Decimal notation                          | -1.234
int             | Signed integer                            | -1234
uint            | Unsigned integer                          | 1234
version         | Version number                            | 8.3.0
tag             | XML tag                                   | <a href="dictionaries.html">
entity          | XML entity                                | &amp;
blank           | Space symbols                             | (any whitespace or punctuation not otherwise recognized)

Note: The parser's notion of a "letter" is determined by the database's locale setting, specifically lc_ctype. Words containing only the basic ASCII letters are reported as a separate token type, since it is sometimes useful to distinguish them. In most European languages, token types word and asciiword should be treated alike.

+

email does not support all valid email characters as defined by RFC 5322. Specifically, the only non-alphanumeric characters supported for email user names are period, dash, and underscore.

+
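For example, a sketch with a hypothetical address showing that period, dash, underscore, and digits are accepted in the user name and the whole string is reported as one email token:
SELECT alias, token FROM ts_debug('english', 'first.last-name_1@example.com');
 alias |             token
-------+-------------------------------
 email | first.last-name_1@example.com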

It is possible for the parser to identify overlapping tokens in the same piece of text. As an example, a hyphenated word will be reported both as the entire word and as each component:

+
SELECT alias, description, token FROM ts_debug('english','foo-bar-beta1');
+      alias      |               description                |     token     
+-----------------+------------------------------------------+---------------
+ numhword        | Hyphenated word, letters and digits      | foo-bar-beta1
+ hword_asciipart | Hyphenated word part, all ASCII          | foo
+ blank           | Space symbols                            | -
+ hword_asciipart | Hyphenated word part, all ASCII          | bar
+ blank           | Space symbols                            | -
+ hword_numpart   | Hyphenated word part, letters and digits | beta1
+
+ +
+

This behavior is desirable since it allows searches to work for both the whole compound word and for components. Here is another instructive example:

+
SELECT alias, description, token FROM ts_debug('english','http://example.com/stuff/index.html');
+  alias   |  description  |            token             
+----------+---------------+------------------------------
+ protocol | Protocol head | http://
+ url      | URL           | example.com/stuff/index.html
+ host     | Host          | example.com
+ url_path | URL path      | /stuff/index.html
+
+ +
+

N-gram is a mechanical word segmentation method that applies to Chinese segmentation scenarios where no semantic analysis is required. The N-gram method guarantees the completeness of the segmentation; however, to cover all the possibilities, it also adds unnecessary words to the index, resulting in a large number of index entries. N-gram supports Chinese encodings, including GBK and UTF-8. The six built-in token types are shown in Table 2.

+ +
+ + + + + + + + + + + + + + + + + + + + + + +
Table 2 Token types

Alias       | Description
------------+----------------
zh_words    | chinese words
en_word     | english word
numeric     | numeric data
alnum       | alnum string
grapsymbol  | graphic symbol
multisymbol | multiple symbol

Zhparser is a dictionary-based semantic word segmentation method. Its underlying implementation calls the Simple Chinese Word Segmentation (SCWS) algorithm (https://github.com/hightman/scws), which applies to Chinese segmentation scenarios. SCWS is a mechanical Chinese word segmentation engine based on term frequency and dictionaries. It can split a whole paragraph of Chinese text into words. Both Chinese encoding formats, GBK and UTF-8, are supported. The 26 built-in token types are shown in Table 3.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Token types

Alias | Description
------+----------------------------
A     | Adjective
B     | Differentiation
C     | Conjunction
D     | Adverb
E     | Exclamation
F     | Position
G     | Lexeme
H     | Preceding element
I     | Idiom
J     | Acronyms and abbreviations
K     | Subsequent element
L     | Common words
M     | Numeral
N     | Noun
O     | Onomatopoeia
P     | Preposition
Q     | Quantifiers
R     | Pronoun
S     | Space
T     | Time
U     | Auxiliary word
V     | Verb
W     | Punctuation
X     | Unknown
Y     | Interjection
Z     | Status words

Pound segments words in a fixed format. It is used to segment, without semantic analysis, Chinese and English words that are separated by fixed separators. It supports Chinese encodings (GBK and UTF-8) and English encodings (ASCII). Pound has six preconfigured token types (listed in Table 4) and supports five separators (listed in Table 5). The default separator is #, and the maximum length of a token is 256 characters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + +
Table 4 Token types

Alias       | Description
------------+----------------
zh_words    | chinese words
en_word     | english word
numeric     | numeric data
alnum       | alnum string
grapsymbol  | graphic symbol
multisymbol | multiple symbol
+ +
+ + + + + + + + + + + + + + + + + + + +
Table 5 Separator types

Delimiter | Description
----------+--------------------
@         | Special character
#         | Special character
$         | Special character
%         | Special character
/         | Special character
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0102.html b/docs/dws/dev/dws_06_0102.html new file mode 100644 index 00000000..a2a6f4a5 --- /dev/null +++ b/docs/dws/dev/dws_06_0102.html @@ -0,0 +1,28 @@ + + +

Dictionaries

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0103.html b/docs/dws/dev/dws_06_0103.html new file mode 100644 index 00000000..535543ab --- /dev/null +++ b/docs/dws/dev/dws_06_0103.html @@ -0,0 +1,28 @@ + + +

Overview

+

A dictionary is used to define stop words, that is, words to be ignored in full-text retrieval.

+

A dictionary can also be used to normalize words so that different derived forms of the same word will match. A normalized word is called a lexeme.

+
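For example, a small sketch using ts_lexize with the predefined english_stem dictionary, which normalizes an inflected word to its lexeme:
SELECT ts_lexize('english_stem', 'stars');
 ts_lexize
-----------
 {star}
(1 row)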

In addition to improving retrieval quality, normalization and removal of stop words can reduce the size of the tsvector representation of a document, thereby improving performance. Normalization and removal of stop words do not always have linguistic meaning. Users can define normalization and removal rules in dictionary definition files based on application environments.

+

A dictionary is a program that receives a token as input and returns:
• An array of lexemes if the input token is known to the dictionary (note that one token can produce more than one lexeme)
• An empty array if the dictionary knows the token, but it is a stop word
• NULL if the dictionary does not recognize the input token

+ +

GaussDB(DWS) provides predefined dictionaries for many languages and also provides five predefined dictionary templates, Simple, Synonym, Thesaurus, Ispell, and Snowball. These templates can be used to create new dictionaries with custom parameters.

+

When using full-text retrieval, you are advised to:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0104.html b/docs/dws/dev/dws_06_0104.html new file mode 100644 index 00000000..c6a05bbe --- /dev/null +++ b/docs/dws/dev/dws_06_0104.html @@ -0,0 +1,43 @@ + + +

Stop Words

+

Stop words are words that are very common, appear in almost every document, and have no discrimination value. Therefore, they can be ignored in the context of full text searching. Each type of dictionary treats stop words differently. For example, Ispell dictionaries first normalize words and then check the list of stop words, while Snowball dictionaries first check the list of stop words.

+

For example, every English text contains words like a and the, so it is useless to store them in an index. However, stop words affect the positions in tsvector, which in turn affect ranking.

+
SELECT to_tsvector('english','in the list of stop words');
+        to_tsvector
+----------------------------
+ 'list':3 'stop':5 'word':6
+
+ +
+

The missing positions 1, 2, and 4 are because of stop words. Ranks calculated for documents with and without stop words are quite different:

+
SELECT ts_rank_cd (to_tsvector('english','in the list of stop words'), to_tsquery('list & stop'));
+ ts_rank_cd
+------------
+        .05
+
+SELECT ts_rank_cd (to_tsvector('english','list stop words'), to_tsquery('list & stop'));
+ ts_rank_cd
+------------
+         .1
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0105.html b/docs/dws/dev/dws_06_0105.html new file mode 100644 index 00000000..c6678d71 --- /dev/null +++ b/docs/dws/dev/dws_06_0105.html @@ -0,0 +1,77 @@ + + +

Simple Dictionary

+

A Simple dictionary operates by converting the input token to lower case and checking it against a list of stop words. If the token is found in the list, an empty array will be returned, causing the token to be discarded. If it is not found, the lower-cased form of the word is returned as the normalized lexeme. In addition, you can set Accept to false for Simple dictionaries (default: true) to report non-stop-words as unrecognized, allowing them to be passed on to the next dictionary in the list.

+

Precautions

+
+

Procedure

  1. Create a Simple dictionary.

    CREATE TEXT SEARCH DICTIONARY public.simple_dict (
    +     TEMPLATE = pg_catalog.simple,
    +     STOPWORDS = english
    +);
    +
    + +
    +

    english.stop is the full name of a file of stop words. For details about the syntax and parameters for creating a Simple dictionary, see CREATE TEXT SEARCH DICTIONARY.

    +

  2. Use the Simple dictionary.

    SELECT ts_lexize('public.simple_dict','YeS');
    + ts_lexize 
    +-----------
    + {yes}
    +(1 row)
    +
    +SELECT ts_lexize('public.simple_dict','The');
    + ts_lexize 
    +-----------
    + {}
    +(1 row)
    +
    + +
    +

  3. Set Accept=false so that the Simple dictionary returns NULL instead of a lower-cased non-stop word.

    ALTER TEXT SEARCH DICTIONARY public.simple_dict ( Accept = false );
    +SELECT ts_lexize('public.simple_dict','YeS');
    + ts_lexize 
    +-----------
    +
    +(1 row)
    +
    +SELECT ts_lexize('public.simple_dict','The');
    + ts_lexize 
    +-----------
    + {}
    +(1 row)
    +
    + +
    +

+
+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0106.html b/docs/dws/dev/dws_06_0106.html new file mode 100644 index 00000000..2f74ece7 --- /dev/null +++ b/docs/dws/dev/dws_06_0106.html @@ -0,0 +1,191 @@ + + +

Synonym Dictionary

+

A synonym dictionary is used to define, identify, and convert synonyms of tokens. Phrases are not supported (use the thesaurus dictionary in Thesaurus Dictionary).

+

Examples

+
+
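A minimal sketch, reusing the pg_dict synonym file shown in Configuration Examples (it maps postgres, pgsql, and postgresql to pg; the OBS path is a placeholder):
CREATE TEXT SEARCH DICTIONARY pg_dict (
    TEMPLATE = synonym,
    SYNONYMS = pg_dict,
    FILEPATH = 'obs://bucket01/obs.xxx.xxx.com accesskey=xxxxx secretkey=xxxxx region=xx-xx-xx'
);

SELECT ts_lexize('pg_dict', 'postgres');
 ts_lexize
-----------
 {pg}
(1 row)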

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0107.html b/docs/dws/dev/dws_06_0107.html new file mode 100644 index 00000000..4417f109 --- /dev/null +++ b/docs/dws/dev/dws_06_0107.html @@ -0,0 +1,109 @@ + + +

Thesaurus Dictionary

+

A thesaurus dictionary (sometimes abbreviated as TZ) is a collection of words that include relationships between words and phrases, such as broader terms (BT), narrower terms (NT), preferred terms, non-preferred terms, and related terms. A thesaurus dictionary replaces all non-preferred terms by one preferred term and, optionally, preserves the original terms for indexing as well. A thesaurus dictionary is an extension of the synonym dictionary with added phrase support.

+

Precautions

+
+

Procedure

  1. Create a TZ named thesaurus_astro.

    thesaurus_astro is a simple astronomical TZ that defines two astronomical word combinations (word+synonym).
    supernovae stars : sn 
    +crab nebulae : crab
    +
    + +
    +
    +

    Run the following statement to create the TZ:

    +
    CREATE TEXT SEARCH DICTIONARY thesaurus_astro (
    +    TEMPLATE = thesaurus,
    +    DictFile = thesaurus_astro,
    +    Dictionary = pg_catalog.english_stem,
    +    FILEPATH =  'obs://bucket01/obs.xxx.xxx.com accesskey=xxxxx secretkey=xxxxx region=xx-xx-xx'
    +);
    +
    + +
    +

    The full name of the dictionary file is thesaurus_astro.ths, and the dictionary is stored in 'obs://bucket01/obs.xxx.xxx.com accesskey=xxxxx secretkey=xxxxx region=xx-xx-xx'. pg_catalog.english_stem is the subdictionary (a Snowball English stemmer) used for input normalization. The subdictionary has its own configuration (for example, stop words), which is not shown here. For details about the syntax and parameters for creating a TZ, see CREATE TEXT SEARCH DICTIONARY.

    +

  2. Bind the TZ to the desired token types in the text search configuration.

    ALTER TEXT SEARCH CONFIGURATION english
    +    ALTER MAPPING FOR asciiword, asciihword, hword_asciipart
    +    WITH thesaurus_astro, english_stem;
    +
    + +
    +

  3. Use the TZ.

    • Test the TZ.
The ts_lexize function is not very useful for testing the TZ because the function processes its input as a single token. Instead, you can use the plainto_tsquery, to_tsvector, or to_tsquery functions, which break their input strings into multiple tokens.
      SELECT plainto_tsquery('english','supernova star');
      + plainto_tsquery 
      +-----------------
      + 'sn'
      +(1 row)
      +
      +SELECT to_tsvector('english','supernova star');
      + to_tsvector 
      +-------------
      + 'sn':1
      +(1 row)
      +
      +SELECT to_tsquery('english','''supernova star''');
      + to_tsquery 
      +------------
      + 'sn'
      +(1 row)
      +
      + +
      +
      +

      supernova star matches supernovae stars in thesaurus_astro because the english_stem stemmer is specified in the thesaurus_astro definition. The stemmer removed e and s.

      +
    • To index the original phrase, include it in the right-hand part of the definition.
      supernovae stars : sn supernovae stars
      +
      +ALTER TEXT SEARCH DICTIONARY thesaurus_astro (
      +    DictFile = thesaurus_astro,
      +    FILEPATH = 'file:///home/dicts/');
      +
      +SELECT plainto_tsquery('english','supernova star');
      +       plainto_tsquery       
      +-----------------------------
      + 'sn' & 'supernova' & 'star'
      +(1 row)
      +
      + +
      +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0108.html b/docs/dws/dev/dws_06_0108.html new file mode 100644 index 00000000..85fe5d94 --- /dev/null +++ b/docs/dws/dev/dws_06_0108.html @@ -0,0 +1,49 @@ + + +

Ispell Dictionary

+

The Ispell dictionary template supports morphological dictionaries, which can normalize many different linguistic forms of a word into the same lexeme. For example, an English Ispell dictionary can match all declensions and conjugations of the search term bank, such as banking, banked, banks, banks', and bank's.

+

GaussDB(DWS) does not provide any predefined Ispell dictionaries or dictionary files. The .dict files and .affix files support multiple open-source dictionary formats, including Ispell, MySpell, and Hunspell.

+

Procedure

  1. Obtain the dictionary definition file (.dict) and affix file (.affix).

    You can use an open-source dictionary. The name extensions of the open-source dictionary may be .aff and .dic. In this case, you need to change them to .affix and .dict. In addition, for some dictionary files (for example, Norwegian dictionary files), you need to run the following commands to convert the character encoding to UTF-8:

    +
    iconv -f ISO_8859-1 -t UTF-8 -o nn_no.affix nn_NO.aff 
    +iconv -f ISO_8859-1 -t UTF-8 -o nn_no.dict nn_NO.dic
    +
    + +
    +

  2. Create an Ispell dictionary.

    CREATE TEXT SEARCH DICTIONARY norwegian_ispell (
    +    TEMPLATE = ispell,
    +    DictFile = nn_no,
    +    AffFile = nn_no,
    +    FilePath = 'obs://bucket_name/path accesskey=ak secretkey=sk region=rg'
    +);
    +
    + +
    +

The full names of the Ispell dictionary files are nn_no.dict and nn_no.affix, and the dictionary is stored at the OBS path specified by FilePath. For details about the syntax and parameters for creating an Ispell dictionary, see CREATE TEXT SEARCH DICTIONARY.

    +

  3. Use the Ispell dictionary to split compound words.

    SELECT ts_lexize('norwegian_ispell', 'sjokoladefabrikk');
    +      ts_lexize      
    +---------------------
    + {sjokolade,fabrikk}
    +(1 row)
    +
    + +
    +

MySpell does not support compound words. Hunspell supports compound words, but GaussDB(DWS) supports only the basic compound word operations of Hunspell. Generally, an Ispell dictionary recognizes a limited set of words, so it should be followed by another, broader dictionary, for example, a Snowball dictionary, which recognizes everything.

    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0109.html b/docs/dws/dev/dws_06_0109.html new file mode 100644 index 00000000..ca4fd4dc --- /dev/null +++ b/docs/dws/dev/dws_06_0109.html @@ -0,0 +1,14 @@ + + +

Snowball Dictionary

+

A Snowball dictionary is based on a project by Martin Porter and is used for stem analysis, providing stemming algorithms for many languages. GaussDB(DWS) provides predefined Snowball dictionaries of many languages. You can query the PG_TS_DICT system catalog to view the predefined Snowball dictionaries and supported stemming algorithms.

+
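For example, a sketch of such a query; it assumes the predefined Snowball dictionaries follow the usual *_stem naming convention:
-- List the predefined Snowball stemmer dictionaries
SELECT dictname FROM pg_ts_dict WHERE dictname LIKE '%_stem' ORDER BY dictname;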

A Snowball dictionary recognizes everything, no matter whether it is able to simplify the word. Therefore, it should be placed at the end of the dictionary list. It is useless to place it before any other dictionary because a token will never pass it through to the next dictionary.

+

For details about the syntax of Snowball dictionaries, see CREATE TEXT SEARCH DICTIONARY.

+

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0110.html b/docs/dws/dev/dws_06_0110.html new file mode 100644 index 00000000..2c15731a --- /dev/null +++ b/docs/dws/dev/dws_06_0110.html @@ -0,0 +1,144 @@ + + +

Configuration Examples

+

A text search configuration specifies the following components required for converting a document into a tsvector:
• A parser, which breaks the text into tokens
• Dictionaries, which convert each token into a normalized lexeme

+ +

Each time the to_tsvector or to_tsquery function is invoked, a text search configuration is required to specify the processing procedure. The GUC parameter default_text_search_config specifies the default text search configuration, which will be used if the text search function does not explicitly specify one.

+

GaussDB(DWS) provides some predefined text search configurations. You can also create user-defined text search configurations. In addition, to facilitate the management of text search objects, multiple gsql meta-commands are provided to display related information. For details, see "Meta-Command Reference" in the Tool Guide.

+

Procedure

  1. Create a text search configuration ts_conf by copying the predefined text search configuration english.

    CREATE TEXT SEARCH CONFIGURATION ts_conf ( COPY = pg_catalog.english );
    +CREATE TEXT SEARCH CONFIGURATION
    +
    + +
    +

  2. Create a Synonym dictionary.

    Assume that the definition file pg_dict.syn of the Synonym dictionary contains the following contents:
    postgres    pg 
    +pgsql       pg 
    +postgresql  pg
    +
    + +
    +
    +

    Run the following statement to create the Synonym dictionary:

    +
    CREATE TEXT SEARCH DICTIONARY pg_dict (
    +     TEMPLATE = synonym,
    +     SYNONYMS = pg_dict,
    +     FILEPATH =  'obs://bucket01/obs.xxx.xxx.com accesskey=xxxxx secretkey=xxxxx region=xx-xx-xx'
    + );
    +
    + +
    +

  3. Create an Ispell dictionary english_ispell (the dictionary definition file is from the open source dictionary).

    CREATE TEXT SEARCH DICTIONARY english_ispell (
    +    TEMPLATE = ispell,
    +    DictFile = english,
    +    AffFile = english,
    +    StopWords = english,
    +    FILEPATH =   'obs://bucket01/obs.xxx.xxx.com accesskey=xxxxx secretkey=xxxxx region=xx-xx-xx'
    +);
    +
    + +
    +

  4. Modify the text search configuration ts_conf and change the dictionary list for tokens of certain types. For details about token types, see Parsers.

    ALTER TEXT SEARCH CONFIGURATION ts_conf
    +    ALTER MAPPING FOR asciiword, asciihword, hword_asciipart,
    +                      word, hword, hword_part
    +    WITH pg_dict, english_ispell, english_stem;
    +
    + +
    +

  5. In the text search configuration, configure tokens of certain types so that they are neither indexed nor searched.

    ALTER TEXT SEARCH CONFIGURATION ts_conf
    +    DROP MAPPING FOR email, url, url_path, sfloat, float;
    +
    + +
    +

  6. Use the text search debugging function ts_debug() to test the text search configuration ts_conf.

    SELECT * FROM ts_debug('ts_conf', '
    +PostgreSQL, the highly scalable, SQL compliant, open source object-relational
    +database management system, is now undergoing beta testing of the next
    +version of our software.
    +');
    +
    + +
    +

  7. You can set the default text search configuration of the current session to ts_conf. This setting is valid only for the current session.

     1
    + 2
    + 3
    + 4
    + 5
    + 6
    + 7
    + 8
    + 9
    +10
    +11
    +12
    +13
    +14
    +15
    +16
    +17
    +18
    +19
    +20
    +21
    +22
    +23
    +24
    +25
    +26
    +27
    \dF+ ts_conf
    +      Text search configuration "public.ts_conf"
    +Parser: "pg_catalog.default"
    +      Token      |            Dictionaries             
    +-----------------+-------------------------------------
    + asciihword      | pg_dict,english_ispell,english_stem
    + asciiword       | pg_dict,english_ispell,english_stem
    + file            | simple
    + host            | simple
    + hword           | pg_dict,english_ispell,english_stem
    + hword_asciipart | pg_dict,english_ispell,english_stem
    + hword_numpart   | simple
    + hword_part      | pg_dict,english_ispell,english_stem
    + int             | simple
    + numhword        | simple
    + numword         | simple
    + uint            | simple
    + version         | simple
    + word            | pg_dict,english_ispell,english_stem
    +
    +SET default_text_search_config = 'public.ts_conf';
    +SET
    +SHOW default_text_search_config;
    + default_text_search_config 
    +----------------------------
    + public.ts_conf
    +(1 row)
    +
    + +
    +

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0111.html b/docs/dws/dev/dws_06_0111.html new file mode 100644 index 00000000..c0f48dcc --- /dev/null +++ b/docs/dws/dev/dws_06_0111.html @@ -0,0 +1,19 @@ + + +

Testing and Debugging Text Search

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0112.html b/docs/dws/dev/dws_06_0112.html new file mode 100644 index 00000000..090ec70b --- /dev/null +++ b/docs/dws/dev/dws_06_0112.html @@ -0,0 +1,91 @@ + + +

Testing a Configuration

+

The function ts_debug allows easy testing of a text search configuration.

+
1
+2
+3
+4
+5
+6
+7
+8
ts_debug([ config regconfig, ] document text,
+         OUT alias text,
+         OUT description text,
+         OUT token text,
+         OUT dictionaries regdictionary[],
+         OUT dictionary regdictionary,
+         OUT lexemes text[])
+         returns setof record
+
+ +
+

ts_debug displays information about every token of the document, as produced by the parser and processed by the configured dictionaries. It uses the configuration specified by config, or default_text_search_config if that argument is omitted.

+

ts_debug returns one row for each token identified in the text by the parser. The columns returned are:

  • alias: short name of the token type
  • description: description of the token type
  • token: text of the token
  • dictionaries: the dictionaries selected by the configuration for this token type
  • dictionary: the dictionary that recognized the token, or NULL if none did
  • lexemes: the lexemes produced by the dictionary that recognized the token, or NULL if none did; an empty array ({}) means the token was recognized as a stop word

Here is a simple example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
SELECT * FROM ts_debug('english','a fat  cat sat on a mat - it ate a fat rats');
+   alias   |   description   | token |  dictionaries  |  dictionary  | lexemes 
+-----------+-----------------+-------+----------------+--------------+---------
+ asciiword | Word, all ASCII | a     | {english_stem} | english_stem | {}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | fat   | {english_stem} | english_stem | {fat}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | cat   | {english_stem} | english_stem | {cat}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | sat   | {english_stem} | english_stem | {sat}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | on    | {english_stem} | english_stem | {}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | a     | {english_stem} | english_stem | {}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | mat   | {english_stem} | english_stem | {mat}
+ blank     | Space symbols   |       | {}             |              | 
+ blank     | Space symbols   | -     | {}             |              | 
+ asciiword | Word, all ASCII | it    | {english_stem} | english_stem | {}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | ate   | {english_stem} | english_stem | {ate}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | a     | {english_stem} | english_stem | {}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | fat   | {english_stem} | english_stem | {fat}
+ blank     | Space symbols   |       | {}             |              | 
+ asciiword | Word, all ASCII | rats  | {english_stem} | english_stem | {rat}
+(24 rows)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0113.html b/docs/dws/dev/dws_06_0113.html new file mode 100644 index 00000000..05e558db --- /dev/null +++ b/docs/dws/dev/dws_06_0113.html @@ -0,0 +1,103 @@ + + +

Testing a Parser

+

The ts_parse function allows direct testing of a text search parser.

+
1
+2
ts_parse(parser_name text, document text,
+         OUT tokid integer, OUT token text) returns setof record
+
+ +
+

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token. For example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
SELECT * FROM ts_parse('default', '123 - a number');
+ tokid | token
+-------+--------
+    22 | 123
+    12 |
+    12 | -
+     1 | a
+    12 |
+     1 | number
+(6 rows)
+
+ +
+
1
+2
ts_token_type(parser_name text, OUT tokid integer,
+              OUT alias text, OUT description text) returns setof record
+
+ +
+

ts_token_type returns a table which describes each type of token the specified parser can recognize. For each token type, the table gives the integer tokid that the parser uses to label a token of that type, the alias that names the token type in configuration commands, and a short description. For example:

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
SELECT * FROM ts_token_type('default');
+ tokid |      alias      |               description                
+-------+-----------------+------------------------------------------
+     1 | asciiword       | Word, all ASCII
+     2 | word            | Word, all letters
+     3 | numword         | Word, letters and digits
+     4 | email           | Email address
+     5 | url             | URL
+     6 | host            | Host
+     7 | sfloat          | Scientific notation
+     8 | version         | Version number
+     9 | hword_numpart   | Hyphenated word part, letters and digits
+    10 | hword_part      | Hyphenated word part, all letters
+    11 | hword_asciipart | Hyphenated word part, all ASCII
+    12 | blank           | Space symbols
+    13 | tag             | XML tag
+    14 | protocol        | Protocol head
+    15 | numhword        | Hyphenated word, letters and digits
+    16 | asciihword      | Hyphenated word, all ASCII
+    17 | hword           | Hyphenated word, all letters
+    18 | url_path        | URL path
+    19 | file            | File or path name
+    20 | float           | Decimal notation
+    21 | int             | Signed integer
+    22 | uint            | Unsigned integer
+    23 | entity          | XML entity
+(23 rows)
+
+ +
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0114.html b/docs/dws/dev/dws_06_0114.html new file mode 100644 index 00000000..f6f49ce5 --- /dev/null +++ b/docs/dws/dev/dws_06_0114.html @@ -0,0 +1,35 @@ + + +

Testing a Dictionary

+

The ts_lexize function facilitates dictionary testing.

+

ts_lexize(dict regdictionary, token text) returns text[]

ts_lexize returns an array of lexemes if the input token is known to the dictionary, an empty array if the token is known to the dictionary but is a stop word, or NULL if it is an unknown word.

+

For example:

+
1
+2
+3
+4
+5
+6
+7
+8
+9
SELECT ts_lexize('english_stem', 'stars');
+ ts_lexize
+-----------
+ {star}
+
+SELECT ts_lexize('english_stem', 'a');
+ ts_lexize
+-----------
+ {}
+
+ +
+

The ts_lexize function expects a single token, not text.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0115.html b/docs/dws/dev/dws_06_0115.html new file mode 100644 index 00000000..33179218 --- /dev/null +++ b/docs/dws/dev/dws_06_0115.html @@ -0,0 +1,12 @@ + + +

Limitations

+

The current limitations of GaussDB(DWS)'s full text search are:

+ +
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0116.html b/docs/dws/dev/dws_06_0116.html new file mode 100644 index 00000000..eed863fe --- /dev/null +++ b/docs/dws/dev/dws_06_0116.html @@ -0,0 +1,23 @@ + + +

System Operation

+

GaussDB(DWS) runs SQL statements to perform different system operations, such as setting variables, displaying the execution plan, and collecting garbage data.

+

Setting Variables

For details about how to set various parameters for a session or transaction, see SET.
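For example, to set the schema search path for the current session:

SET search_path TO public;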

+
+

Displaying the Execution Plan

For details about how to display the execution plan that GaussDB(DWS) makes for SQL statements, see EXPLAIN.
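For example, to display the plan chosen for a simple catalog query:

EXPLAIN SELECT * FROM pg_class WHERE relname = 'pg_type';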

+
+

Specifying a Checkpoint in Transaction Logs

By default, checkpoints are scheduled periodically in the transaction log (WAL). CHECKPOINT forces an immediate checkpoint when the command is issued, without waiting for a regular checkpoint scheduled by the system. For details, see CHECKPOINT.
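For example, to force an immediate checkpoint:

CHECKPOINT;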

+
+

Collecting Garbage Data

For details about how to collect garbage data and analyze a database as required, see VACUUM.
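For example, to reclaim storage space in the current database:

VACUUM;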

+
+

Collecting Statistics

For details about how to collect statistics on tables in databases, see ANALYZE | ANALYSE.
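For example, to collect statistics for all tables in the current database:

ANALYZE;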

+
+

Setting the Constraint Check Mode for the Current Transaction

For details about how to set the constraint check mode for the current transaction, see SET CONSTRAINTS.
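For example, to defer the checking of all deferrable constraints until the current transaction commits:

START TRANSACTION;
SET CONSTRAINTS ALL DEFERRED;
COMMIT;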

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0117.html b/docs/dws/dev/dws_06_0117.html new file mode 100644 index 00000000..7cd2b33f --- /dev/null +++ b/docs/dws/dev/dws_06_0117.html @@ -0,0 +1,21 @@ + + +

Controlling Transactions

+

A transaction is a user-defined sequence of database operations, which form an integral unit of work.

+

Starting Transactions

GaussDB(DWS) starts a transaction using START TRANSACTION and BEGIN. For details, see START TRANSACTION and BEGIN.
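A minimal sketch:

START TRANSACTION;
SELECT 1;   -- statements executed inside the transaction
COMMIT;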

+
+

Setting Transactions

GaussDB(DWS) sets a transaction using SET TRANSACTION or SET LOCAL TRANSACTION. For details, see SET TRANSACTION.
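For example (the transaction characteristics must be set before the first query in the transaction block):

START TRANSACTION;
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT 1;
COMMIT;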

+
+

Submitting Transactions

GaussDB(DWS) commits all operations of a transaction using COMMIT or END. For details, see COMMIT | END.

+
+

Rolling Back Transactions

If a fault occurs during a transaction and the transaction cannot proceed, the system performs rollback to cancel all the completed database operations related to the transaction. For details, see ROLLBACK.
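A minimal sketch (the table name t_rollback_demo is an illustrative assumption):

START TRANSACTION;
CREATE TABLE t_rollback_demo(c1 int);   -- illustrative table
ROLLBACK;                               -- t_rollback_demo is not created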

+

If an execution request (not in a transaction block) received in the database contains multiple statements, the statements will be packed into a transaction. If one of the statements fails, the entire request will be rolled back.
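For instance (an illustrative sketch; the table t_multi and its unique constraint are assumptions):

-- Sent to the database as one request, not inside a transaction block:
INSERT INTO t_multi VALUES (1); INSERT INTO t_multi VALUES (1);
-- If the second INSERT violates a unique constraint on t_multi,
-- the first INSERT is rolled back as well.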

+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0118.html b/docs/dws/dev/dws_06_0118.html new file mode 100644 index 00000000..21ab420c --- /dev/null +++ b/docs/dws/dev/dws_06_0118.html @@ -0,0 +1,197 @@ + + + +

DDL Syntax

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0119.html b/docs/dws/dev/dws_06_0119.html new file mode 100644 index 00000000..abe9fadc --- /dev/null +++ b/docs/dws/dev/dws_06_0119.html @@ -0,0 +1,509 @@ + + +

DDL Syntax Overview

+

Data definition language (DDL) is used to define or modify an object in a database, such as a table, index, or view.

+

GaussDB(DWS) does not support DDL if its CN is unavailable. For example, if a CN in the cluster is faulty, creating a database or table will fail.

+
+

Defining a Database

A database is the warehouse for organizing, storing, and managing data. Defining a database includes: creating a database, altering the database attributes, and dropping the database. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 1 SQL statements for defining a database

Function

+

SQL Statement

+

Create a database

+

CREATE DATABASE

+

Alter database attributes

+

ALTER DATABASE

+

Delete a database

+

DROP DATABASE

+
+
+
+

Defining a Schema

A schema is the set of a group of database objects and is used to control the access to the database objects. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 2 SQL statements for defining a schema

Function

+

SQL Statement

+

Create a schema

+

CREATE SCHEMA

+

Alter schema attributes

+

ALTER SCHEMA

+

Delete a schema

+

DROP SCHEMA

+
+
+
+

Defining a Table

A table is a special data structure in a database and is used to store data objects and the relationship between data objects. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + + + + +
Table 3 SQL statements for defining a table

Function

+

SQL Statement

+

Create a table

+

CREATE TABLE

+

Alter table attributes

+

ALTER TABLE

+

Delete a table

+

DROP TABLE

+

Delete all the data from a table

+

TRUNCATE

+
+
+
+

Defining a Partitioned Table

A partitioned table stores data in multiple physical partitions based on a partition key while presenting a single logical table. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + + + + + + + +
Table 4 SQL statements for defining a partitioned table

Function

+

SQL Statement

+

Create a partitioned table

+

CREATE TABLE PARTITION

+

Create a partition

+

ALTER TABLE PARTITION

+

Alter partitioned table attributes

+

ALTER TABLE PARTITION

+

Delete a partition

+

ALTER TABLE PARTITION

+

Delete a partitioned table

+

DROP TABLE

+
+
+
+

Defining an Index

An index indicates the sequence of values in one or more columns in the database table. The database index is a data structure that improves the speed of data access to specific information in a database table. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + + + + +
Table 5 SQL statements for defining an index

Function

+

SQL Statement

+

Create an index

+

CREATE INDEX

+

Alter index attributes

+

ALTER INDEX

+

Delete an index

+

DROP INDEX

+

Rebuild an index

+

REINDEX

+
+
+
+

Defining a Role

A role is used to manage rights. For database security, all management and operation rights can be assigned to different roles. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 6 SQL statements for defining a role

Function

+

SQL Statement

+

Create a role

+

CREATE ROLE

+

Alter role attributes

+

ALTER ROLE

+

Delete a role

+

DROP ROLE

+
+
+
+

Defining a User

A user is used to log in to a database. Different rights can be assigned to users for managing data accesses and operations of users. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 7 SQL statements for defining a user

Function

+

SQL Statement

+

Create a user

+

CREATE USER

+

Alter user attributes

+

ALTER USER

+

Delete a user

+

DROP USER

+
+
+
+

Defining a Redaction Policy

Data redaction is to protect sensitive data by masking or changing data. You can create a data redaction policy for a specific table object and specify the effective scope of the policy. You can also add, modify, and delete redaction columns. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 8 SQL statements for managing redaction policies

Function

+

SQL Statement

+

Create a data redaction policy

+

CREATE REDACTION POLICY

+

Modify a data redaction policy applied to a specified table

+

ALTER REDACTION POLICY

+

Delete a data redaction policy applied to a specified table

+

DROP REDACTION POLICY

+
+
+
+

Defining Row-Level Access Control

Row-level access control policies control the visibility of rows in database tables. In this way, the same SQL query may return different results for different users. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 9 SQL statements for row-level access control

Function

+

SQL Statement

+

Create a row-level access control policy

+

CREATE ROW LEVEL SECURITY POLICY

+

Modify an existing row-level access control policy

+

ALTER ROW LEVEL SECURITY POLICY

+

Delete a row-level access control policy from a table

+

DROP ROW LEVEL SECURITY POLICY

+
+
+
+

Defining a Stored Procedure

A stored procedure is a set of SQL statements for achieving specific functions and is stored in the database after compiling. Users can specify a name and provide parameters (if necessary) to execute the stored procedure. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + +
Table 10 SQL statements for defining a stored procedure

Function

+

SQL Statement

+

Create a stored procedure

+

CREATE PROCEDURE

+

Delete a stored procedure

+

DROP PROCEDURE

+
+
+
+

Defining a Function

In GaussDB(DWS), a function is similar to a stored procedure, which is a set of SQL statements. The function and stored procedure are used the same. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 11 SQL statements for defining a function

Function

+

SQL Statement

+

Create a function

+

CREATE FUNCTION

+

Alter function attributes

+

ALTER FUNCTION

+

Delete a function

+

DROP FUNCTION

+
+
+
+

Defining a View

A view is a virtual table exported from one or several basic tables. The view is used to control data accesses for users. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + +
Table 12 SQL statements for defining a view

Function

+

SQL Statement

+

Create a view

+

CREATE VIEW

+

Delete a view

+

DROP VIEW

+
+
+
+

Defining a Cursor

To process SQL statements, a stored procedure allocates a memory segment to store context information; a cursor is a handle or pointer to that context region. With a cursor, a stored procedure can control the processing of the context region. The following table lists the related SQL statements; a usage sketch follows the table.

+ +
+ + + + + + + + + + + + + + + + +
Table 13 SQL statements for defining a cursor

Function

+

SQL Statement

+

Create a cursor

+

CURSOR

+

Move a cursor

+

MOVE

+

Extract data from a cursor

+

FETCH

+

Close a cursor

+

CLOSE

+
+
+
+
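The following sketch shows the cursor life cycle (the cursor name cur1 is illustrative; cursors must be used inside a transaction block):

START TRANSACTION;
CURSOR cur1 FOR SELECT * FROM pg_tables;   -- create the cursor
FETCH FORWARD 2 FROM cur1;                 -- extract two rows
MOVE FORWARD 1 FROM cur1;                  -- skip one row
CLOSE cur1;                                -- release the cursor
COMMIT;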

Altering or Ending a Session

A session is a connection established between the user and the database. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + +
Table 14 SQL statements related to sessions

Function

+

SQL Statement

+

Alter a session

+

ALTER SESSION

+

End a session

+

ALTER SYSTEM KILL SESSION

+
+
+
+

Defining a Resource Pool

A resource pool is a system catalog used by the resource load management module to specify attributes related to resource management, such as Cgroups. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 15 SQL statements for defining a resource pool

Function

+

SQL Statement

+

Create a resource pool

+

CREATE RESOURCE POOL

+

Change resource attributes

+

ALTER RESOURCE POOL

+

Delete a resource pool

+

DROP RESOURCE POOL

+
+
+
+

Defining Synonyms

A synonym is a special database object, provided for Oracle compatibility, that stores the mapping between one database object and another. Currently, synonyms can be associated only with the following database objects: tables, views, functions, and stored procedures. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 16 SQL statements for managing synonyms

Function

+

SQL Statement

+

Creating a synonym

+

CREATE SYNONYM

+

Modifying a synonym

+

ALTER SYNONYM

+

Deleting a synonym

+

DROP SYNONYM

+
+
+
+

Defining Text Search Configuration

A text search configuration specifies a text search parser that can divide a string into tokens, plus dictionaries that can be used to determine which tokens are of interest for searching. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 17 SQL statements for configuring text search

Function

+

SQL Statement

+

Create a text search configuration

+

CREATE TEXT SEARCH CONFIGURATION

+

Modify a text search configuration

+

ALTER TEXT SEARCH CONFIGURATION

+

Delete a text search configuration

+

DROP TEXT SEARCH CONFIGURATION

+
+
+
+

Defining a Full-text Retrieval Dictionary

A dictionary is used to identify and process specific words during full-text retrieval. Dictionaries are created by using predefined templates (defined in the PG_TS_TEMPLATE system catalog). Dictionaries of the Simple, Ispell, Synonym, Thesaurus, and Snowball types can be created. The following table lists the related SQL statements.

+ +
+ + + + + + + + + + + + + +
Table 18 SQL statements for a full-text search dictionary

Function

+

SQL Statement

+

Create a full-text retrieval dictionary

+

CREATE TEXT SEARCH DICTIONARY

+

Modify a full-text retrieval dictionary

+

ALTER TEXT SEARCH DICTIONARY

+

Delete a full-text retrieval dictionary

+

DROP TEXT SEARCH DICTIONARY

+
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0120.html b/docs/dws/dev/dws_06_0120.html new file mode 100644 index 00000000..afad4f60 --- /dev/null +++ b/docs/dws/dev/dws_06_0120.html @@ -0,0 +1,116 @@ + + +

ALTER DATABASE

+

Function

This command is used to modify the attributes of a database, including the database name, owner, maximum number of connections, and object isolation attribute.

+
+

Important Notes

+
+

Syntax

+
+ +

Parameter Description

+
  • Modifies the default tablespace of a database by moving all the tables or indexes from the old tablespace to the new one. This operation does not affect the tables or indexes in other non-default tablespaces.
  • The modified database session parameter values will take effect in the next session.
+
+
+

Examples

Modify the maximum number of connections allowed for the music database.

+
1
ALTER DATABASE music CONNECTION LIMIT= 10;
+
+ +
+

Change the name of the music database to music1.

+
1
ALTER DATABASE music RENAME TO music1;
+
+ +
+

Change the owner of the music1 database.

+
1
ALTER DATABASE music1 OWNER TO tom;
+
+ +
+

Modify the tablespace of the music1 database.

+
1
ALTER DATABASE music1 SET TABLESPACE PG_DEFAULT;
+
+ +
+

Disable the default index scan on the music1 database.

+
1
ALTER DATABASE music1 SET enable_indexscan TO off;
+
+ +
+

Reset the enable_indexscan parameter of the music1 database.

+
1
ALTER DATABASE music1 RESET enable_indexscan;
+
+ +
+
+

Links

CREATE DATABASE, DROP DATABASE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0123.html b/docs/dws/dev/dws_06_0123.html new file mode 100644 index 00000000..039968eb --- /dev/null +++ b/docs/dws/dev/dws_06_0123.html @@ -0,0 +1,43 @@ + + +

ALTER FOREIGN TABLE (for GDS)

+

Function

ALTER FOREIGN TABLE modifies a foreign table.

+
+

Precautions

None

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Modify the attributes of the foreign table customer_ft by deleting the mode option.

+
1
ALTER FOREIGN TABLE customer_ft options(drop mode);
+
+ +
+
+

Helpful Links

CREATE FOREIGN TABLE (for GDS Import and Export), DROP FOREIGN TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0124.html b/docs/dws/dev/dws_06_0124.html new file mode 100644 index 00000000..195e6dca --- /dev/null +++ b/docs/dws/dev/dws_06_0124.html @@ -0,0 +1,103 @@ + + +

ALTER FOREIGN TABLE (for HDFS or OBS)

+

Function

ALTER FOREIGN TABLE modifies an HDFS or OBS foreign table.

+
+

Precautions

None

+
+

Syntax

+ +
+ +

Parameter Description

+

For details on how to modify other parameters in the foreign table, such as IF EXISTS, see Parameter Description in ALTER TABLE.

+
+

Examples

Change the type of the r_name column to text in the ft_region foreign table.

+
1
ALTER FOREIGN TABLE ft_region ALTER r_name TYPE TEXT;
+
+ +
+
Run the following command to mark the r_name column of the ft_region foreign table as not null:
1
ALTER FOREIGN TABLE ft_region ALTER r_name SET NOT NULL;
+
+ +
+
+
+

Links

CREATE FOREIGN TABLE (SQL on OBS or Hadoop), DROP FOREIGN TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0126.html b/docs/dws/dev/dws_06_0126.html new file mode 100644 index 00000000..dfd270c4 --- /dev/null +++ b/docs/dws/dev/dws_06_0126.html @@ -0,0 +1,128 @@ + + +

ALTER FUNCTION

+

Function

ALTER FUNCTION modifies the attributes of a customized function.

+
+

Precautions

Only the owner of a function or a system administrator can run this statement. If a function involves operations on temporary tables, the ALTER FUNCTION cannot be used.

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Alter the execution rule of the func_add_sql function to IMMUTABLE (that is, the same result is returned if the parameter remains unchanged):

+
1
ALTER FUNCTION func_add_sql(INTEGER, INTEGER) IMMUTABLE;
+
+ +
+

Change the name of the func_add_sql function to add_two_number.

+
1
ALTER FUNCTION func_add_sql(INTEGER, INTEGER) RENAME TO add_two_number;
+
+ +
+

Change the owner of the func_add_sql function to dbadmin.

+
1
ALTER FUNCTION add_two_number(INTEGER, INTEGER) OWNER TO dbadmin;
+
+ +
+
+

Helpful Links

CREATE FUNCTION, DROP FUNCTION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0127.html b/docs/dws/dev/dws_06_0127.html new file mode 100644 index 00000000..ef4f892c --- /dev/null +++ b/docs/dws/dev/dws_06_0127.html @@ -0,0 +1,39 @@ + + +

ALTER GROUP

+

Function

ALTER GROUP modifies the attributes of a user group.

+
+

Precautions

ALTER GROUP is an alias for ALTER ROLE. It is not a standard SQL command and is not recommended; use ALTER ROLE directly instead.

+
+

Syntax

+ +
+

Parameter Description

See the Example in ALTER ROLE.

+
+

Helpful Links

CREATE GROUP, DROP GROUP, ALTER ROLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0128.html b/docs/dws/dev/dws_06_0128.html new file mode 100644 index 00000000..e88935a5 --- /dev/null +++ b/docs/dws/dev/dws_06_0128.html @@ -0,0 +1,102 @@ + + +

ALTER INDEX

+

Function

ALTER INDEX modifies the definition of an existing index.

+

There are several sub-forms:

+ +
+

Precautions

+
+

Syntax

+ + + + + +
+

Parameter Description

+
+

Examples

Rename the ds_ship_mode_t1_index1 index to ds_ship_mode_t1_index5.

+
1
ALTER INDEX tpcds.ds_ship_mode_t1_index1 RENAME TO ds_ship_mode_t1_index5;
+
+ +
+

Set the ds_ship_mode_t1_index2 index as unusable.

+
1
ALTER INDEX tpcds.ds_ship_mode_t1_index2 UNUSABLE;
+
+ +
+

Rebuild the ds_ship_mode_t1_index2 index.

+
1
ALTER INDEX tpcds.ds_ship_mode_t1_index2 REBUILD;
+
+ +
+

Rename a partitioned table index.

+
1
ALTER INDEX tpcds.ds_customer_address_p1_index2 RENAME PARTITION CA_ADDRESS_SK_index1 TO CA_ADDRESS_SK_index4;
+
+ +
+
+

Links

CREATE INDEX, DROP INDEX, REINDEX

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0129.html b/docs/dws/dev/dws_06_0129.html new file mode 100644 index 00000000..bdf8b3c1 --- /dev/null +++ b/docs/dws/dev/dws_06_0129.html @@ -0,0 +1,29 @@ + + +

ALTER LARGE OBJECT

+

Function

ALTER LARGE OBJECT modifies the definition of a large object. It can only assign a new owner to a large object.

+
+

Precautions

Only the administrator or the owner of the to-be-modified large object can run ALTER LARGE OBJECT.

+
+

Syntax

1
+2
ALTER LARGE OBJECT large_object_oid 
+    OWNER TO new_owner;
+
+ +
+
+

Parameter Description

+
+

Examples

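A minimal illustrative sketch (the large object OID 12345 and the role lo_admin are assumptions, not objects defined elsewhere in this guide):

ALTER LARGE OBJECT 12345 OWNER TO lo_admin;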

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0132.html b/docs/dws/dev/dws_06_0132.html new file mode 100644 index 00000000..ecb67a13 --- /dev/null +++ b/docs/dws/dev/dws_06_0132.html @@ -0,0 +1,98 @@ + + +

ALTER REDACTION POLICY

+

Function

ALTER REDACTION POLICY modifies a data redaction policy applied to a specified table.

+
+

Precautions

Only the owner of the table to which the redaction policy is applied has the permission to modify the redaction policy.

+
+

Syntax

+
+ +

Parameter Description

+
+

Examples

Modify the expression for the data redaction policy to take effect for all users.

+
1
ALTER REDACTION POLICY mask_emp ON emp WHEN (1=1);
+
+ +
+

Disable the redaction policy.

+
1
ALTER REDACTION POLICY mask_emp ON emp DISABLE;
+
+ +
+

Enable the redaction policy again.

+
1
ALTER REDACTION POLICY mask_emp ON emp ENABLE;
+
+ +
+

Change the redaction policy name to mask_emp_new.

+
1
ALTER REDACTION POLICY mask_emp ON emp RENAME TO mask_emp_new;
+
+ +
+

Add a column with the redaction policy used.

+
1
ALTER REDACTION POLICY mask_emp_new ON emp ADD COLUMN name WITH mask_partial(name, '*', 1, length(name));
+
+ +
+

Modify the redaction policy for the name column. Use the MASK_FULL function to redact all data in the name column.

+
1
ALTER REDACTION POLICY mask_emp_new ON emp MODIFY COLUMN name WITH mask_full(name);
+
+ +
+

Delete an existing column where the redaction policy is used.

+
1
ALTER REDACTION POLICY mask_emp_new ON emp DROP COLUMN name;
+
+ +
+
+

Helpful Links

CREATE REDACTION POLICY, DROP REDACTION POLICY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0133.html b/docs/dws/dev/dws_06_0133.html new file mode 100644 index 00000000..29cbb368 --- /dev/null +++ b/docs/dws/dev/dws_06_0133.html @@ -0,0 +1,53 @@ + + +

ALTER RESOURCE POOL

+

Function

ALTER RESOURCE POOL changes the Cgroup of a resource pool.

+
+

Precautions

Users having the ALTER permission can modify resource pools.

+
+

Syntax

1
+2
ALTER RESOURCE POOL pool_name
+    WITH ({MEM_PERCENT= pct | CONTROL_GROUP="group_name" | ACTIVE_STATEMENTS=stmt | MAX_DOP = dop | MEMORY_LIMIT='memory_size' | io_limits=io_limits | io_priority='io_priority'}[, ... ]);
+
+ +
+
+

Parameter Description

+

The settings of io_limits and io_priority are valid only for complex jobs, such as batch import (using INSERT INTO SELECT, COPY FROM, or CREATE TABLE AS), complex queries involving over 500 MB data on each DN, and VACUUM FULL.

+
+
+

Examples

Specify "High" Timeshare Workload under "DefaultClass" as the Cgroup for a resource pool.

+
+
1
ALTER RESOURCE POOL pool1 WITH (CONTROL_GROUP="High");
+
+ +
+

Helpful Links

CREATE RESOURCE POOL, DROP RESOURCE POOL

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0134.html b/docs/dws/dev/dws_06_0134.html new file mode 100644 index 00000000..a0368934 --- /dev/null +++ b/docs/dws/dev/dws_06_0134.html @@ -0,0 +1,129 @@ + + +

ALTER ROLE

+

Function

ALTER ROLE changes the attributes of a role.

+
+

Important Notes

None

+
+

Syntax

+
+

Parameters

+

For details about other parameters, see Parameter Description in CREATE ROLE.

+
+

Example

Change the password of role manager.

+
1
ALTER ROLE manager IDENTIFIED BY '{password}' REPLACE '{old_password}';
+
+ +
+

Alter role manager to a system administrator.

+
1
ALTER ROLE manager SYSADMIN;
+
+ +
+

Modify the fulluser information of the LDAP authentication role.

+
1
ALTER ROLE role2 WITH LOGIN AUTHINFO 'ldapcn=role2,cn=user2,dc=func,dc=com' PASSWORD DISABLE;
+
+ +
+

Change the validity period of the login password of the role to 90 days.

+
1
ALTER ROLE role3 PASSWORD EXPIRATION 90;
+
+ +
+
+

Links

CREATE ROLE, DROP ROLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0135.html b/docs/dws/dev/dws_06_0135.html new file mode 100644 index 00000000..e588ac33 --- /dev/null +++ b/docs/dws/dev/dws_06_0135.html @@ -0,0 +1,52 @@ + + +

ALTER ROW LEVEL SECURITY POLICY

+

Function

ALTER ROW LEVEL SECURITY POLICY modifies an existing row-level access control policy, including the policy name and the users and expressions affected by the policy.

+
+

Precautions

Only the table owner or administrators can perform this operation.

+
+

Syntax

1
+2
+3
+4
+5
ALTER [ ROW LEVEL SECURITY ] POLICY [ IF EXISTS ] policy_name ON table_name RENAME TO new_policy_name
+
+ALTER [ ROW LEVEL SECURITY ] POLICY policy_name ON table_name
+    [ TO { role_name | PUBLIC } [, ...] ]
+    [ USING ( using_expression ) ]
+
+ +
+
+

Parameter Description

+
+

Examples

Change the name of the all_data_rls policy.

+
1
ALTER ROW LEVEL SECURITY POLICY all_data_rls ON all_data RENAME TO all_data_new_rls;
+
+ +
+

Change the users affected by the row-level access control policy.

+
1
ALTER ROW LEVEL SECURITY POLICY all_data_new_rls ON all_data TO alice, bob;
+
+ +
+

Modify the expression defined for the access control policy.

+
1
ALTER ROW LEVEL SECURITY POLICY all_data_new_rls ON all_data USING (id > 100 AND role = current_user);
+
+ +
+
+

Helpful Links

CREATE ROW LEVEL SECURITY POLICY, DROP ROW LEVEL SECURITY POLICY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0136.html b/docs/dws/dev/dws_06_0136.html new file mode 100644 index 00000000..2822f71b --- /dev/null +++ b/docs/dws/dev/dws_06_0136.html @@ -0,0 +1,61 @@ + + +

ALTER SCHEMA

+

Function

ALTER SCHEMA changes the attributes of a schema.

+
+

Precautions

Only the owner of the schema or a system administrator can run this statement.

+
+

Syntax

+
+ +

Parameter Description

+
+

Examples

Rename the ds schema to ds_new.

+
1
ALTER SCHEMA ds RENAME TO ds_new;
+
+ +
+

Change the owner of ds_new to jack.

+
1
ALTER SCHEMA ds_new OWNER TO jack;
+
+ +
+
+

Helpful Links

CREATE SCHEMA, DROP SCHEMA

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0137.html b/docs/dws/dev/dws_06_0137.html new file mode 100644 index 00000000..8de70a57 --- /dev/null +++ b/docs/dws/dev/dws_06_0137.html @@ -0,0 +1,58 @@ + + +

ALTER SEQUENCE

+

Function

ALTER SEQUENCE modifies the parameters of an existing sequence.

+
+

Precautions

+
+

Syntax

Change the maximum value or owning column of the sequence.

+
1
+2
+3
ALTER SEQUENCE [ IF EXISTS ] name 
+    [ MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE ]
+    [ OWNED BY { table_name.column_name | NONE } ] ;
+
+ +
+

Change the owner of a sequence.

+
1
ALTER SEQUENCE [ IF EXISTS ] name OWNER TO new_owner;
+
+ +
+
+

Parameter Description

+
+

Examples

Modify the maximum value of serial to 200.

+
1
ALTER SEQUENCE serial MAXVALUE 200;
+
+ +
+

Create a table that uses the sequence to generate default values for a column.

+
1
CREATE TABLE T1(C1 bigint default nextval('serial'));
+
+ +
+

Change the owning column of the serial sequence to T1.C1.

+
1
ALTER SEQUENCE serial OWNED BY T1.C1;
+
+ +
+

+
+

Helpful Links

CREATE SEQUENCE, DROP SEQUENCE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0138.html b/docs/dws/dev/dws_06_0138.html new file mode 100644 index 00000000..158faff8 --- /dev/null +++ b/docs/dws/dev/dws_06_0138.html @@ -0,0 +1,79 @@ + + +

ALTER SERVER

+

Function

ALTER SERVER adds, modifies, or deletes the parameters of an existing server. You can query existing servers from the pg_foreign_server system catalog.

+
+

Precautions

Only the owner of a server or a system administrator can run this statement.

+
+

Syntax

+
1
+2
ALTER SERVER server_name [ VERSION 'new_version' ]
+    [ OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ] ) ];
+
+ +
+

In OPTIONS, ADD, SET, and DROP are operations to be executed. If these operations are not specified, the ADD operation will be performed by default. option and value are corresponding operation parameters.

+

Currently, only SET is supported on an HDFS server. ADD and DROP are not supported; their syntax is retained for future use.

+ +
1
+2
ALTER SERVER server_name 
+    OWNER TO new_owner;
+
+ +
+ +
1
+2
ALTER SERVER server_name 
+    RENAME TO new_name;
+
+ +
+ +
1
ALTER SERVER server_name REFRESH OPTIONS;
+
+ +
+
+

Parameter Description

The server parameters to be modified are as follows:

+ +
+

Examples

Change the address of the hdfs_server server.

+
ALTER SERVER hdfs_server OPTIONS ( SET address '10.10.0.110:25000,10.10.0.120:25000');
+

Change the hdfscfgpath of the hdfs_server server.

+
ALTER SERVER hdfs_server OPTIONS ( SET hdfscfgpath '/opt/bigdata/hadoop');
+

+
+

Helpful Links

CREATE SERVER DROP SERVER

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0139.html b/docs/dws/dev/dws_06_0139.html new file mode 100644 index 00000000..847051a4 --- /dev/null +++ b/docs/dws/dev/dws_06_0139.html @@ -0,0 +1,71 @@ + + +

ALTER SESSION

+

Function

ALTER SESSION defines or modifies the conditions or parameters that affect the current session. Modified session parameters are kept until the current session is disconnected.

+
+

Precautions

+
+

Syntax

+
+

Parameter Description

To modify the description of parameters related to the session, see Parameter Description of the SET syntax.

+
+

Examples

Create the ds schema.

+
CREATE SCHEMA ds;
+

Set the search path of the schema.

+
SET SEARCH_PATH TO ds, public;
+

Set the date/time output style to the traditional postgres format (day before month).

+
SET DATESTYLE TO postgres, dmy;
+

Set the character encoding of the current session to UTF8.

+
ALTER SESSION SET NAMES 'UTF8';
+

Set the time zone to that of Berkeley, California.

+
SET TIME ZONE 'PST8PDT';
+

Set the time zone to Italy.

+
SET TIME ZONE 'Europe/Rome';
+

Set the current schema.

+
ALTER SESSION SET CURRENT_SCHEMA TO tpcds;
+

Set XML OPTION to DOCUMENT.

+
ALTER SESSION SET XML OPTION DOCUMENT;
+

Create the role joe, and set the session role to joe.

+
CREATE ROLE joe WITH PASSWORD '{password}';
+ALTER SESSION SET SESSION AUTHORIZATION joe PASSWORD '{password}';
+

Switch to the default user.

+
ALTER SESSION SET SESSION AUTHORIZATION default;
+

+
+

Helpful Links

SET

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0140.html b/docs/dws/dev/dws_06_0140.html new file mode 100644 index 00000000..2616235d --- /dev/null +++ b/docs/dws/dev/dws_06_0140.html @@ -0,0 +1,47 @@ + + +

ALTER SYNONYM

+

Function

ALTER SYNONYM is used to modify the attribute of a synonym.

+
+

Precautions

+
+

Syntax

1
+2
ALTER SYNONYM synonym_name
+    OWNER TO new_owner;
+
+ +
+
+

Parameter Description

+ +
+

Examples

Create synonym t1.

+
1
CREATE OR REPLACE SYNONYM t1 FOR ot.t1;
+
+ +
+

Create user u1.

+
1
CREATE USER u1 PASSWORD '{password}';
+
+ +
+

Change the owner of the synonym t1 to u1.

+
1
ALTER SYNONYM t1 OWNER TO u1;
+
+ +
+

+
+

Helpful Links

CREATE SYNONYM and DROP SYNONYM

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0141.html b/docs/dws/dev/dws_06_0141.html new file mode 100644 index 00000000..9a439214 --- /dev/null +++ b/docs/dws/dev/dws_06_0141.html @@ -0,0 +1,41 @@ + + +

ALTER SYSTEM KILL SESSION

+

Function

ALTER SYSTEM KILL SESSION ends a session.

+
+

Precautions

None

+
+

Syntax

1
ALTER SYSTEM KILL SESSION 'session_sid, serial' [ IMMEDIATE ];
+
+ +
+
+

Parameter Description

+
+

Examples

Query session information.

+
SELECT sid,serial#,username FROM V$SESSION;
+
+       sid       | serial# | username 
+-----------------+---------+----------
+ 140131075880720 |       0 | 
+ 140131025549072 |       0 | 
+ 140131073779472 |       0 | 
+ 140131071678224 |       0 | 
+ 140131125774096 |       0 | 
+ 140131127875344 |       0 | 
+ 140131113629456 |       0 | 
+ 140131094742800 |       0 | 
+(8 rows)
+

End the session whose SID is 140131075880720.

+
ALTER SYSTEM KILL SESSION '140131075880720,0' IMMEDIATE;
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0142.html b/docs/dws/dev/dws_06_0142.html new file mode 100644 index 00000000..177566cc --- /dev/null +++ b/docs/dws/dev/dws_06_0142.html @@ -0,0 +1,412 @@ + + +

ALTER TABLE

+

Function

ALTER TABLE is used to modify tables, including modifying table definitions, renaming tables, renaming specified columns in tables, renaming table constraints, setting table schemas, enabling or disabling row-level access control, and adding or updating multiple columns.

+
+

Important Notes

+
+ +

Syntax

+
+ +

Parameter Description

+
+

Example 1: Operations on Tables

Move a table to another schema.

+
1
ALTER TABLE tpcds.warehouse_t19 SET SCHEMA joe;
+
+ +
+

Rename an existing table. Note that the new table name cannot be prefixed with the schema name of the original table.

+
1
ALTER TABLE joe.warehouse_t19 RENAME TO warehouse_t23;
+
+ +
+

Change the distribution mode of the tpcds.warehouse_t22 table to REPLICATION.

+
1
ALTER TABLE tpcds.warehouse_t22 DISTRIBUTE BY REPLICATION;
+
+ +
+

Change the distribution column of the tpcds.warehouse_t22 table to W_WAREHOUSE_SK.

+
1
ALTER TABLE tpcds.warehouse_t22 DISTRIBUTE BY HASH(W_WAREHOUSE_SK);
+
+ +
+

Switch the storage format of a column-store table.

+
1
ALTER TABLE tpcds.warehouse_t18 SET (COLVERSION = 1.0);
+
+ +
+

Disable the delta table function of the column-store table.

+
1
ALTER TABLE tpcds.warehouse_t21 SET (ENABLE_DELTA = OFF);
+
+ +
+

Disable the SKIP_FPI_HINT function of the table.

+
1
ALTER TABLE tpcds.warehouse_t22 SET (SKIP_FPI_HINT = FALSE);
+
+ +
+

Change the data temperature for a single table.

+
1
ALTER TABLE tpcds.warehouse_t23 REFRESH STORAGE;
+
+ +
+

Change the data temperature for multiple tables in batches.

+
SELECT pg_refresh_storage();
+
+

Example 2: Operations on Table Constraints

Create an index ds_warehouse_t1_index1 for the table tpcds.warehouse_t1. Then add primary key constraints, and rename the created index.

+
1
+2
CREATE UNIQUE INDEX ds_warehouse_t1_index1 ON tpcds.warehouse_t1(W_WAREHOUSE_SK);
+ALTER TABLE tpcds.warehouse_t1 ADD CONSTRAINT ds_warehouse_t1_index2 PRIMARY KEY USING INDEX ds_warehouse_t1_index1;
+
+ +
+

Delete the primary key ds_warehouse_t1_index2 from the table tpcds.warehouse_t1.

+
1
ALTER TABLE tpcds.warehouse_t1 DROP CONSTRAINT ds_warehouse_t1_index2;
+
+ +
+

If no partial clusters have been specified in a column-store table, add a partial cluster to the table.

+
1
ALTER TABLE tpcds.warehouse_t17 ADD PARTIAL CLUSTER KEY(W_WAREHOUSE_SK);
+
+ +
+

Delete a partial cluster column from the column-store table.

+
1
ALTER TABLE tpcds.warehouse_t17 DROP CONSTRAINT warehouse_t17_cluster;
+
+ +
+

Add a Not-Null constraint to an existing column.

+
1
ALTER TABLE tpcds.warehouse_t19 ALTER COLUMN W_GOODS_CATEGORY SET NOT NULL;
+
+ +
+

Remove Not-Null constraints from an existing column.

+
1
ALTER TABLE tpcds.warehouse_t19 ALTER COLUMN W_GOODS_CATEGORY DROP NOT NULL;
+
+ +
+

Add a check constraint to the tpcds.warehouse_t19 table.

+
1
ALTER TABLE tpcds.warehouse_t19 ADD CONSTRAINT W_CONSTR_KEY4 CHECK (W_STATE <> '');
+
+ +
+
+

Example 3: Operations on Columns

Add a primary key to the tpcds.warehouse_t1 table.

+
1
ALTER TABLE tpcds.warehouse_t1 ADD PRIMARY KEY(W_WAREHOUSE_SK);
+
+ +
+

Add a varchar column to the tpcds.warehouse_t19 table.

+
1
ALTER TABLE tpcds.warehouse_t19 ADD W_GOODS_CATEGORY varchar(30);
+
+ +
+

Use one statement to alter the types of two existing columns.

+
1
+2
+3
ALTER TABLE tpcds.warehouse_t19
+ALTER COLUMN W_GOODS_CATEGORY TYPE varchar(80),
+ALTER COLUMN W_STREET_NAME TYPE varchar(100);
+
+ +
+

This statement is equivalent to the preceding statement.

+
1
ALTER TABLE tpcds.warehouse_t19 MODIFY (W_GOODS_CATEGORY varchar(30), W_STREET_NAME varchar(60));
+
+ +
+

Delete a column from the tpcds.warehouse_t23 table.

+
1
ALTER TABLE tpcds.warehouse_t23 DROP COLUMN W_STREET_NAME;
+
+ +
+
+

Links

CREATE TABLE, DROP TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0143.html b/docs/dws/dev/dws_06_0143.html new file mode 100644 index 00000000..5ebc74e1 --- /dev/null +++ b/docs/dws/dev/dws_06_0143.html @@ -0,0 +1,243 @@ + + +

ALTER TABLE PARTITION

+

Function

ALTER TABLE PARTITION modifies table partitioning, including adding, deleting, splitting, merging partitions, and modifying partition attributes.

+
+

Precautions

+ +
+

Syntax

+
+

Parameter Description

+
+

Example

Delete partition P8.

+
1
ALTER TABLE tpcds.web_returns_p1 DROP PARTITION P8;
+
+ +
+

Add partition P8, with WR_RETURNED_DATE_SK values ranging from 2453005 to 2453105.

+
1
ALTER TABLE tpcds.web_returns_p1 ADD PARTITION P8 VALUES LESS THAN (2453105);
+
+ +
+

Add partition P9, with WR_RETURNED_DATE_SK values ranging from 2453105 to MAXVALUE.

+
1
ALTER TABLE tpcds.web_returns_p1 ADD PARTITION P9 VALUES LESS THAN (MAXVALUE);
+
+ +
+

Rename the P7 partition as P10.

+
1
ALTER TABLE tpcds.web_returns_p1 RENAME PARTITION P7 TO P10;
+
+ +
+

Rename the P6 partition as P11.

+
1
ALTER TABLE tpcds.web_returns_p1 RENAME PARTITION FOR (2452639) TO P11;
+
+ +
+

Query rows in the P10 partition.

+
1
+2
+3
+4
+5
SELECT count(*) FROM tpcds.web_returns_p1 PARTITION (P10);
+ count  
+--------
+ 9362
+(1 row)
+
+ +
+
+

Split the P8 partition at 2453010.

+
1
+2
+3
+4
+5
ALTER TABLE tpcds.web_returns_p2 SPLIT PARTITION P8 AT (2453010) INTO
+(
+        PARTITION P9,
+        PARTITION P10
+); 
+
+ +
+

Merge the P6 and P7 partitions into one.

+
1
ALTER TABLE tpcds.web_returns_p2 MERGE PARTITIONS P6, P7 INTO PARTITION P8;
+
+ +
+

Modify the migration attribute of a partitioned table.

+
1
ALTER TABLE tpcds.web_returns_p2 DISABLE ROW MOVEMENT;
+
+ +
+
+

Add partitions [5000, 5300), [5300, 5600), [5600, 5900), and [5900, 6000).

+
1
ALTER TABLE tpcds.startend_pt ADD PARTITION p6 START(5000) END(6000) EVERY(300);
+
+ +
+

Add the partition p7, specified by MAXVALUE.

+
1
ALTER TABLE tpcds.startend_pt ADD PARTITION p7 END(MAXVALUE);
+
+ +
+

Rename the partition where 5950 is located to p71.

+
1
ALTER TABLE tpcds.startend_pt RENAME PARTITION FOR(5950) TO p71;
+
+ +
+

Split the partition [4000, 5000) where 4500 is located into partitions with a width of 250 each.

+
1
ALTER TABLE tpcds.startend_pt SPLIT PARTITION FOR(4500) INTO(PARTITION q1 START(4000) END(5000) EVERY(250));
+
+ +
+

Links

CREATE TABLE PARTITION, DROP TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0145.html b/docs/dws/dev/dws_06_0145.html new file mode 100644 index 00000000..49b8006d --- /dev/null +++ b/docs/dws/dev/dws_06_0145.html @@ -0,0 +1,153 @@ + + +

ALTER TEXT SEARCH CONFIGURATION

+

Function

ALTER TEXT SEARCH CONFIGURATION modifies the definition of a text search configuration. You can modify its mappings from token types to dictionaries, change the configuration's name or owner, or modify the parameters.

+

The ADD MAPPING FOR form installs a list of dictionaries to be consulted for the specified token types; an error will be generated if there is already a mapping for any of the token types.

+

The ALTER MAPPING FOR form removes existing mapping for those token types and then adds specified mappings.

+

The ALTER MAPPING REPLACE ... WITH ... and ALTER MAPPING FOR ... REPLACE ... WITH ... forms replace old_dictionary with new_dictionary. Note that the update succeeds only if pg_ts_config_map contains tuples corresponding to maptokentype and old_dictionary; if the update fails, no message is returned.

+

The DROP MAPPING FOR form deletes all dictionaries for the specified token types in the text search configuration. If IF EXISTS is not specified and a token type mapping named in DROP MAPPING FOR does not exist in the text search configuration, the database reports an error.

+
+

Important Notes

+
+

Syntax

+
1
+2
ALTER TEXT SEARCH CONFIGURATION name 
+    ADD MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ];
+
+ +
+ +
1
+2
ALTER TEXT SEARCH CONFIGURATION name 
+    ALTER MAPPING FOR token_type [, ... ] REPLACE old_dictionary WITH new_dictionary;
+
+ +
+ +
1
+2
ALTER TEXT SEARCH CONFIGURATION name
+    ALTER MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ];
+
+ +
+ +
1
+2
ALTER TEXT SEARCH CONFIGURATION name
+    ALTER MAPPING REPLACE old_dictionary WITH new_dictionary;
+
+ +
+ +
1
+2
ALTER TEXT SEARCH CONFIGURATION name
+    DROP MAPPING [ IF EXISTS ] FOR token_type [, ... ];
+
+ +
+ +
1
ALTER TEXT SEARCH CONFIGURATION name OWNER TO new_owner;
+
+ +
+ +
1
ALTER TEXT SEARCH CONFIGURATION name RENAME TO new_name;
+
+ +
+ +
1
ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA new_schema;
+
+ +
+ +
1
ALTER TEXT SEARCH CONFIGURATION name SET ( { configuration_option = value } [, ...] );
+
+ +
+ +
1
ALTER TEXT SEARCH CONFIGURATION name RESET ( {configuration_option} [, ...] );
+
+ +
+
+

Parameter description

+
+

Examples

Add a type mapping for the text search type ngram1.

+
1
ALTER TEXT SEARCH CONFIGURATION ngram1 ADD MAPPING FOR multisymbol WITH simple;
+
+ +
+

Change the owner of text search configuration.

+
1
ALTER TEXT SEARCH CONFIGURATION ngram1 OWNER TO joe;
+
+ +
+

Modify the schema of text search configuration.

+
1
ALTER TEXT SEARCH CONFIGURATION ngram1 SET SCHEMA joe;
+
+ +
+

Rename a text search configuration.

+
1
ALTER TEXT SEARCH CONFIGURATION joe.ngram1 RENAME TO ngram_1;
+
+ +
+

Delete type mapping.

+
1
ALTER TEXT SEARCH CONFIGURATION joe.ngram_1 DROP MAPPING IF EXISTS FOR multisymbol;
+
+ +
+

Add text search configuration string mapping.

+
1
ALTER TEXT SEARCH CONFIGURATION english_1 ADD MAPPING FOR word WITH simple,english_stem;
+
+ +
+

Add text search configuration string mapping.

+
1
ALTER TEXT SEARCH CONFIGURATION english_1 ADD MAPPING FOR email WITH english_stem, french_stem;
+
+ +
+

Modify text search configuration string mapping.

+
1
ALTER TEXT SEARCH CONFIGURATION english_1 ALTER MAPPING REPLACE french_stem with german_stem;
+
+ +
+

Query information about the text search configuration.

+
1
+2
+3
+4
+5
+6
+7
+8
SELECT b.cfgname,a.maptokentype,a.mapseqno,a.mapdict,c.dictname FROM pg_ts_config_map a,pg_ts_config b, pg_ts_dict c WHERE a.mapcfg=b.oid AND a.mapdict=c.oid AND b.cfgname='english_1' ORDER BY 1,2,3,4,5;
+  cfgname  | maptokentype | mapseqno | mapdict |   dictname   
+-----------+--------------+----------+---------+--------------
+ english_1 |            2 |        1 |    3765 | simple
+ english_1 |            2 |        2 |   12960 | english_stem
+ english_1 |            4 |        1 |   12960 | english_stem
+ english_1 |            4 |        2 |   12966 | german_stem
+(4 rows)
+
+ +
+
+

Links

CREATE TEXT SEARCH CONFIGURATION, DROP TEXT SEARCH CONFIGURATION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0146.html b/docs/dws/dev/dws_06_0146.html new file mode 100644 index 00000000..d2cb4d07 --- /dev/null +++ b/docs/dws/dev/dws_06_0146.html @@ -0,0 +1,70 @@ + + +

ALTER TEXT SEARCH DICTIONARY

+

Function

ALTER TEXT SEARCH DICTIONARY modifies the definition of a full-text retrieval dictionary, including its parameters, name, owner, and schema.

+
+

Precautions

+
+

Syntax

+ +
+

Parameter Description

+
+

Examples

Modify the definition of stop words in Snowball dictionaries. Retain the values of other parameters.

+
1
ALTER TEXT SEARCH DICTIONARY my_dict ( StopWords = newrussian, FilePath = 'obs://bucket_name/path accesskey=ak secretkey=sk region=rg' );
+
+ +
+

Modify the Language parameter in Snowball dictionaries and delete the definition of stop words.

+
1
ALTER TEXT SEARCH DICTIONARY my_dict ( Language = dutch, StopWords );
+
+ +
+

Update the dictionary definition and do not change any other content.

+
1
ALTER TEXT SEARCH DICTIONARY my_dict ( dummy );
+
+ +
+
+

Helpful Links

CREATE TEXT SEARCH DICTIONARY, DROP TEXT SEARCH DICTIONARY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0147.html b/docs/dws/dev/dws_06_0147.html new file mode 100644 index 00000000..b5ff0fec --- /dev/null +++ b/docs/dws/dev/dws_06_0147.html @@ -0,0 +1,45 @@ + + +

ALTER TRIGGER

+

Function

ALTER TRIGGER modifies the definition of a trigger.

+
+

Precautions

Only the owner of a table where a trigger is created and system administrators can run the ALTER TRIGGER statement.

+
+

Syntax

1
ALTER TRIGGER trigger_name ON table_name RENAME TO new_name;
+
+ +
+
+

Parameter Description

+
+

Examples

Rename the trigger delete_trigger.

+
1
ALTER TRIGGER delete_trigger ON test_trigger_src_tbl RENAME TO delete_trigger_renamed;
+
+ +
+

Disable the trigger insert_trigger.

+
1
ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER insert_trigger;  
+
+ +
+

Disable all triggers on the test_trigger_src_tbl table.

+
1
ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER ALL; 
+
+ +
+
+

Helpful Links

CREATE TRIGGER, DROP TRIGGER, ALTER TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0148.html b/docs/dws/dev/dws_06_0148.html new file mode 100644 index 00000000..777611e4 --- /dev/null +++ b/docs/dws/dev/dws_06_0148.html @@ -0,0 +1,137 @@ + + +

ALTER TYPE

+

Function

ALTER TYPE modifies the definition of a type.

+
+

Syntax

+
+ + + + + + + +

Parameter Description

+ + + + + + + +
+

Examples

Rename the data type.

+
1
ALTER TYPE compfoo RENAME TO compfoo1;
+
+ +
+

Change the owner of the user-defined type compfoo1 to usr1.

+
1
ALTER TYPE compfoo1 OWNER TO usr1;
+
+ +
+

Change the schema of the user-defined type compfoo1 to usr1.

+
1
ALTER TYPE compfoo1 SET SCHEMA usr1;
+
+ +
+

Add the f3 attribute to the compfoo1 data type.

+
1
ALTER TYPE compfoo1 ADD ATTRIBUTE f3 int;
+
+ +
+

Add a tag value to the enumeration type bugstatus.

+
1
ALTER TYPE bugstatus ADD VALUE IF NOT EXISTS 'regress' BEFORE 'closed';
+
+ +
+

Rename a tag value of the enumeration type bugstatus.

+
1
ALTER TYPE bugstatus RENAME VALUE 'create' TO 'new';
+
+ +
+
+

Helpful Links

CREATE TYPE, DROP TYPE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0149.html b/docs/dws/dev/dws_06_0149.html new file mode 100644 index 00000000..a50659b5 --- /dev/null +++ b/docs/dws/dev/dws_06_0149.html @@ -0,0 +1,131 @@ + + +

ALTER USER

+

Function

ALTER USER modifies the attributes of a database user.

+
+

Precautions

Session parameters modified by ALTER USER apply to a specified user and take effect in the next session.

+
+

Syntax

+
+

Parameters

+

For details about other parameters, see "Parameter Description" in CREATE ROLE and ALTER ROLE.

+
+

Example

Change the login password of user jim.

+
1
ALTER USER jim IDENTIFIED BY '{password}' REPLACE '{old_password}';
+
+ +
+

Add the CREATEROLE permission to user jim.

+
1
ALTER USER jim CREATEROLE;
+
+ +
+

Set enable_seqscan to on (the setting will take effect in the next session).

+
1
ALTER USER jim SET enable_seqscan TO on;
+
+ +
+

Reset the enable_seqscan parameter for user jim.

+
1
ALTER USER jim RESET enable_seqscan;
+
+ +
+

Lock the jim account.

+
1
ALTER USER jim ACCOUNT LOCK;
+
+ +
+
+

Links

CREATE ROLE, CREATE USER, DROP USER

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0150.html b/docs/dws/dev/dws_06_0150.html new file mode 100644 index 00000000..4f52369a --- /dev/null +++ b/docs/dws/dev/dws_06_0150.html @@ -0,0 +1,112 @@ + + +

ALTER VIEW

+

Function

ALTER VIEW modifies all auxiliary attributes of a view. (To modify the query definition of a view, use CREATE OR REPLACE VIEW.)

+
+

Precautions

+
+

Syntax

+
+ + +

Parameter Description

+
+

Examples

Rename a view.

+
1
ALTER VIEW tpcds.customer_details_view_v1 RENAME TO customer_details_view_v2;
+
+ +
+

Change the schema of a view.

+
1
ALTER VIEW tpcds.customer_details_view_v2 SET schema public;
+
+ +
+

Rebuild a view.

+
1
ALTER VIEW public.customer_details_view_v2 REBUILD;
+
+ +
+

Rebuild a dependent view.

+
1
ALTER VIEW ONLY public.customer_details_view_v2 REBUILD;
+
+ +
+
+

Helpful Links

CREATE VIEW, DROP VIEW

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0151.html b/docs/dws/dev/dws_06_0151.html new file mode 100644 index 00000000..d0fe3a89 --- /dev/null +++ b/docs/dws/dev/dws_06_0151.html @@ -0,0 +1,54 @@ + + +

CLEAN CONNECTION

+

Function

CLEAN CONNECTION clears database connections when a database is abnormal. You may use this statement to delete a specific user's connections to a specified database.

+
+

Precautions

None

+
+

Syntax

1
+2
+3
+4
CLEAN CONNECTION 
+    TO { COORDINATOR ( nodename [, ... ] ) | NODE ( nodename [, ... ] )| ALL [ CHECK ] [ FORCE ] }
+    [ FOR DATABASE dbname ] 
+    [ TO USER username ];
+
+ +
+
+

Parameter Description

+
+

Examples

Clean connections to nodes dn1 and dn2 for the template1 database.

+
1
CLEAN CONNECTION TO NODE (dn1,dn2) FOR DATABASE template1;
+
+ +
+

Clean user jack's connections to dn1.

+
1
CLEAN CONNECTION TO NODE (dn1) TO USER jack;
+
+ +
+

Delete all connections to the gaussdb database.

+
1
CLEAN CONNECTION TO ALL FORCE FOR DATABASE gaussdb;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0152.html b/docs/dws/dev/dws_06_0152.html new file mode 100644 index 00000000..d2ade484 --- /dev/null +++ b/docs/dws/dev/dws_06_0152.html @@ -0,0 +1,31 @@ + + +

CLOSE

+

Function

CLOSE frees the resources associated with an open cursor.

+
+

Precautions

+
+

Syntax

1
CLOSE { cursor_name | ALL } ;
+
+ +
+
+

Parameter Description

+
+

Example

Close a cursor.

+
1
CLOSE cursor1;
+
+ +
+
+

Links

FETCH, MOVE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0153.html b/docs/dws/dev/dws_06_0153.html new file mode 100644 index 00000000..e5767300 --- /dev/null +++ b/docs/dws/dev/dws_06_0153.html @@ -0,0 +1,107 @@ + + +

CLUSTER

+

Function

Cluster a table according to an index.

+

CLUSTER instructs GaussDB(DWS) to cluster the table specified by table_name based on the index specified by index_name. The index must have been defined on table_name.

+

When a table is clustered, it is physically reordered based on the index information. Clustering is a one-time operation: when the table is subsequently updated, the changes are not clustered. That is, no attempt is made to store new or updated rows according to their index order.

+

When a table is clustered, GaussDB(DWS) records which index the table was clustered by. The form CLUSTER table_name reclusters the table using the same index as before. You can also use the CLUSTER or SET WITHOUT CLUSTER forms of ALTER TABLE to set the index to be used for future cluster operations, or to clear any previous setting.

+

CLUSTER without any parameter reclusters all the previously-clustered tables in the current database that the calling user owns, or all such tables if called by an administrator.

+

When a table is being clustered, an ACCESS EXCLUSIVE lock is acquired on it. This prevents any other database operations (both reads and writes) from operating on the table until the CLUSTER is finished.

+
+

Precautions

Only row-store B-tree indexes support CLUSTER.

+

In cases where you are accessing single rows randomly within a table, the actual order of the data in the table is unimportant. However, if you tend to access some data more than others, and there is an index that groups them together, you will benefit from using CLUSTER. If you are requesting a range of indexed values from a table, or a single indexed value that has multiple rows that match, CLUSTER will help because once the index identifies the table page for the first row that matches, all other rows that match are probably already on the same table page, and so you save disk accesses and speed up the query.

+

When an index scan is used, a temporary copy of the table is created that contains the table data in the index order. Temporary copies of each index on the table are created as well. Therefore, you need free space on disk at least equal to the sum of the table size and the index sizes.

+

Because CLUSTER remembers which indexes are clustered, one can cluster the desired tables manually the first time, then set up a timed task that runs CLUSTER without any parameters, so that those tables are periodically reclustered.

+

Because the optimizer records statistics about the ordering of tables, it is advisable to run ANALYZE on the newly clustered table. Otherwise, the optimizer might make poor choices of query plans.
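
For example, a periodic maintenance pass can pair the two statements once the table has been clustered for the first time (a sketch reusing tpcds.inventory_p1 from the examples below):

CLUSTER tpcds.inventory_p1;   -- reclusters using the previously recorded index
ANALYZE tpcds.inventory_p1;   -- refreshes ordering statistics for the optimizer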

+

CLUSTER cannot be executed in transactions.

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Create a partitioned table.

+
CREATE TABLE tpcds.inventory_p1
+(
+    INV_DATE_SK               INTEGER               NOT NULL,
+    INV_ITEM_SK               INTEGER               NOT NULL,
+    INV_WAREHOUSE_SK          INTEGER               NOT NULL,
+    INV_QUANTITY_ON_HAND      INTEGER
+)
+DISTRIBUTE BY HASH(INV_ITEM_SK)
+PARTITION BY RANGE(INV_DATE_SK)
+(
+        PARTITION P1 VALUES LESS THAN(2451179),
+        PARTITION P2 VALUES LESS THAN(2451544),
+        PARTITION P3 VALUES LESS THAN(2451910),
+        PARTITION P4 VALUES LESS THAN(2452275),
+        PARTITION P5 VALUES LESS THAN(2452640),
+        PARTITION P6 VALUES LESS THAN(2453005),
+        PARTITION P7 VALUES LESS THAN(MAXVALUE)
+);
+
+ +
+

Create an index named ds_inventory_p1_index1.

+
1
CREATE INDEX ds_inventory_p1_index1 ON tpcds.inventory_p1 (INV_ITEM_SK) LOCAL;
+
+ +
+

Cluster the tpcds.inventory_p1 table.

+
1
CLUSTER tpcds.inventory_p1 USING ds_inventory_p1_index1;
+
+ +
+

Cluster the p3 partition.

+
1
CLUSTER tpcds.inventory_p1 PARTITION (p3) USING ds_inventory_p1_index1;
+
+ +
+

Cluster the tables that can be clustered in the database.

+
1
CLUSTER;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0154.html b/docs/dws/dev/dws_06_0154.html new file mode 100644 index 00000000..679b3181 --- /dev/null +++ b/docs/dws/dev/dws_06_0154.html @@ -0,0 +1,118 @@ + + +

COMMENT

+

Function

COMMENT defines or changes the comment of an object.

+
+

Precautions

+
+

Syntax

COMMENT ON
+{
+  AGGREGATE agg_name (agg_type [, ...] ) |
+  CAST (source_type AS target_type) |
+  COLLATION object_name |
+  COLUMN { table_name.column_name | view_name.column_name } |
+  CONSTRAINT constraint_name ON table_name |
+  CONVERSION object_name |
+  DATABASE object_name |
+  DOMAIN object_name |
+  EXTENSION object_name |
+  FOREIGN DATA WRAPPER object_name |
+  FOREIGN TABLE object_name |
+  FUNCTION function_name ( [ {[ argmode ] [ argname ] argtype} [, ...] ] ) |
+  INDEX object_name |
+  LARGE OBJECT large_object_oid |
+  OPERATOR operator_name (left_type, right_type) |
+  OPERATOR CLASS object_name USING index_method |
+  OPERATOR FAMILY object_name USING index_method |
+  [ PROCEDURAL ] LANGUAGE object_name |
+  ROLE object_name |
+  RULE rule_name ON table_name |
+  SCHEMA object_name |
+  SERVER object_name |
+  TABLE object_name |
+  TABLESPACE object_name |
+  TEXT SEARCH CONFIGURATION object_name |
+  TEXT SEARCH DICTIONARY object_name |
+  TEXT SEARCH PARSER object_name |
+  TEXT SEARCH TEMPLATE object_name |
+  TYPE object_name |
+  VIEW object_name
+}
+   IS 'text';
+
+ +
+
+

Parameter Description

+
+

Examples

Add a comment to the customer.c_customer_sk column.

+
1
COMMENT ON COLUMN customer.c_customer_sk IS 'Primary key of customer demographics table.';
+
+ +
+

Add a comment to the tpcds.customer_details_view_v2 view.

+
1
COMMENT ON VIEW tpcds.customer_details_view_v2 IS 'View of customer detail';
+
+ +
+

Add a comment to the customer table.

+
1
COMMENT ON TABLE customer IS 'This is my table';
+
+ +
+
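
To remove an existing comment, set it to NULL (a usage sketch):

COMMENT ON TABLE customer IS NULL;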
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0155.html b/docs/dws/dev/dws_06_0155.html new file mode 100644 index 00000000..5142ddba --- /dev/null +++ b/docs/dws/dev/dws_06_0155.html @@ -0,0 +1,34 @@ + + +

CREATE BARRIER

+

Function

Creates a barrier for cluster nodes. The barrier can be used for data restoration.

+
+

Precautions

Before creating a barrier, ensure that gtm_backup_barrier and enable_cbm_tracking are set to on for CNs and DNs in the cluster.
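
As a quick check, SHOW displays the current value of each parameter on the node you are connected to (a sketch; setting them cluster-wide is normally done through cluster management tools):

SHOW gtm_backup_barrier;
SHOW enable_cbm_tracking;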

+
+

Syntax

1
CREATE BARRIER [ barrier_name  ] ;
+
+ +
+
+

Parameter Description

barrier_name

+

(Optional) Indicates the name of a barrier.

+

Value range: a string. It must comply with the naming convention.

+
+

Examples

Create a barrier without specifying its name.

+
1
CREATE BARRIER;
+
+ +
+

Create a barrier named barrier1.

+
1
CREATE BARRIER 'barrier1';
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0156.html b/docs/dws/dev/dws_06_0156.html new file mode 100644 index 00000000..222101c3 --- /dev/null +++ b/docs/dws/dev/dws_06_0156.html @@ -0,0 +1,91 @@ + + +

CREATE DATABASE

+

Function

CREATE DATABASE creates a database. By default, the new database is created by cloning the standard system database template1. A different template can be specified by using TEMPLATE template_name.

+
+

Precautions

+
+

Syntax

CREATE DATABASE database_name
+    [ [ WITH ] { [ OWNER [=] user_name ] |
+               [ TEMPLATE [=] template ] |
+               [ ENCODING [=] encoding ] |
+               [ LC_COLLATE [=] lc_collate ] |
+               [ LC_CTYPE [=] lc_ctype ] |
+               [ DBCOMPATIBILITY [=] compatibilty_type ] |
+               
+               [ CONNECTION LIMIT [=] connlimit ]}[...] ];
+
+ +
+
+

Parameter Description

+

The following are limitations on character encoding:

+ +
+

Examples

Create database music using GBK (the local encoding type is also GBK).

+
1
CREATE DATABASE music ENCODING 'GBK' template = template0;
+
+ +
+

Create database music2 and specify jim as its owner.

+
1
CREATE DATABASE music2 OWNER jim;
+
+ +
+

Create database music3 using template template0 and specify jim as its owner.

+
1
CREATE DATABASE music3 OWNER jim TEMPLATE template0;
+
+ +
+

Create an Oracle-compatible database named ora_compatible_db.

+
1
CREATE DATABASE ora_compatible_db DBCOMPATIBILITY 'ORA';
+
+ +
+
+

Helpful Links

ALTER DATABASE, DROP DATABASE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0159.html b/docs/dws/dev/dws_06_0159.html new file mode 100644 index 00000000..d3ac866f --- /dev/null +++ b/docs/dws/dev/dws_06_0159.html @@ -0,0 +1,315 @@ + + +

CREATE FOREIGN TABLE (for GDS Import and Export)

+

CREATE FOREIGN TABLE creates a GDS foreign table.

+

Function

CREATE FOREIGN TABLE creates a GDS foreign table in the current database for concurrent data import and export. A GDS foreign table is either read-only or write-only, used for concurrent data import and export, respectively, and is read-only by default.

+
+

Precautions

+
+

Syntax

CREATE FOREIGN TABLE [ IF NOT EXISTS  ] table_name 
+    ( [  { column_name type_name POSITION(offset,length) | LIKE source_table } [, ...]  ] ) 
+    SERVER gsmpp_server 
+    OPTIONS (  { option_name ' value '  }  [, ...] ) 
+    [  { WRITE ONLY  |  READ ONLY  }] 
+    [ WITH error_table_name | LOG INTO error_table_name] 
+    [REMOTE LOG 'name'] 
+    [PER NODE REJECT LIMIT 'value']
+    [ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
+
+ +
+
+

Parameter Overview

CREATE FOREIGN TABLE provides multiple parameters, which are classified as follows:

+ +
+

Parameter Description

+
+

Examples

Create a foreign table customer_ft to import data from GDS server 10.10.123.234 in TEXT format.

+
CREATE FOREIGN TABLE customer_ft
+(
+    c_customer_sk             integer               ,
+    c_customer_id             char(16)              ,
+    c_current_cdemo_sk        integer               ,
+    c_current_hdemo_sk        integer               ,
+    c_current_addr_sk         integer               ,
+    c_first_shipto_date_sk    integer               ,
+    c_first_sales_date_sk     integer               ,
+    c_salutation              char(10)              ,
+    c_first_name              char(20)              ,
+    c_last_name               char(30)              ,
+    c_preferred_cust_flag     char(1)               ,
+    c_birth_day               integer               ,
+    c_birth_month             integer               ,
+    c_birth_year              integer                       ,
+    c_birth_country           varchar(20)                   ,
+    c_login                   char(13)                      ,
+    c_email_address           char(50)                      ,
+    c_last_review_date        char(10)
+)
+    SERVER gsmpp_server
+    OPTIONS
+(
+    location 'gsfs://10.10.123.234:5000/customer1*.dat',
+    FORMAT 'TEXT' ,
+    DELIMITER '|',
+    encoding 'utf8',
+    mode 'Normal')
+READ ONLY;
+
+ +
+

Create a foreign table to import data from GDS servers 192.168.0.90 and 192.168.0.91 in TEXT format. Record errors that occur during data import in foreign_HR_staffS_ft. A maximum of two data format errors are allowed during the data import.

+
CREATE FOREIGN TABLE foreign_HR_staffS_ft
+(
+  staff_ID       NUMBER(6) ,
+  FIRST_NAME     VARCHAR2(20),
+  LAST_NAME      VARCHAR2(25),
+  EMAIL          VARCHAR2(25),
+  PHONE_NUMBER   VARCHAR2(20),
+  HIRE_DATE      DATE,
+  employment_ID  VARCHAR2(10),
+  SALARY         NUMBER(8,2),
+  COMMISSION_PCT NUMBER(2,2),
+  MANAGER_ID     NUMBER(6),
+  section_ID  NUMBER(4)
+) SERVER gsmpp_server OPTIONS (location 'gsfs://192.168.0.90:5000/* | gsfs://192.168.0.91:5000/*', format 'TEXT', delimiter E'\x08',  null '',reject_limit '2') WITH err_HR_staffS_ft;
+
+ +
+
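
The examples above cover the import direction. Exporting goes through a WRITE ONLY table; a minimal sketch (the server address, folder, and column list are hypothetical):

CREATE FOREIGN TABLE customer_export_ft
(
    c_customer_sk    integer,
    c_customer_id    char(16)
)
SERVER gsmpp_server
OPTIONS (location 'gsfs://10.10.123.234:5000/customer_export', FORMAT 'TEXT', DELIMITER '|')
WRITE ONLY;

INSERT INTO customer_export_ft SELECT c_customer_sk, c_customer_id FROM customer;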
+

Helpful Links

ALTER FOREIGN TABLE (for GDS), DROP FOREIGN TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0160.html b/docs/dws/dev/dws_06_0160.html new file mode 100644 index 00000000..f214cecd --- /dev/null +++ b/docs/dws/dev/dws_06_0160.html @@ -0,0 +1,281 @@ + + +

CREATE FOREIGN TABLE (for OBS Import and Export)

+

Function

CREATE FOREIGN TABLE creates a foreign table in the current database for parallel data import and export of OBS data. The server used is gsmpp_server, which is created by the database by default.

+

The hybrid data warehouse (standalone) does not support OBS foreign table import and export.

+
+
+

Precautions

+ +
Table 1 Read and write formats supported by OBS foreign tables

Data Type   | User-built Server        | gsmpp_server
            | READ ONLY | WRITE ONLY  | READ ONLY | WRITE ONLY
ORC         |     √     |     √       |     ×     |     ×
CARBONDATA  |     √     |     ×       |     ×     |     ×
TEXT        |     √     |     ×       |     √     |     √
CSV         |     √     |     ×       |     √     |     √
JSON        |     √     |     ×       |     ×     |     ×
+
+

Syntax

CREATE FOREIGN TABLE [ IF NOT EXISTS  ] table_name 
+( { column_name type_name [column_constraint ]
+    | LIKE source_table | table_constraint [, ...]} [, ...] ) 
+SERVER gsmpp_server 
+OPTIONS (  { option_name ' value '  }  [, ...] ) 
+[  { WRITE ONLY  |  READ ONLY  }] 
+[ WITH error_table_name | LOG INTO error_table_name] 
+[PER NODE REJECT LIMIT 'value']  ;
+
+ +

Parameter Overview

CREATE FOREIGN TABLE provides multiple parameters, which are classified as follows: +
+
+

Parameter Description

+
+

Examples

Create a foreign table to import data in the .txt format from OBS to the OBS_ft table.
DROP FOREIGN TABLE IF EXISTS OBS_ft;
+NOTICE:  foreign table "obs_ft" does not exist, skipping
+DROP FOREIGN TABLE
+
+CREATE FOREIGN TABLE OBS_ft( a int, b int)SERVER gsmpp_server OPTIONS (location 'obs://gaussdbcheck/obs_ddl/test_case_data/txt_obs_informatonal_test001',format 'text',encoding 'utf8',chunksize '32', encrypt 'on',ACCESS_KEY 'access_key_value_to_be_replaced',SECRET_ACCESS_KEY 'secret_access_key_value_to_be_replaced',delimiter E'\x08') read only;
+CREATE FOREIGN TABLE
+
+DROP TABLE row_tbl;
+DROP TABLE
+
+CREATE TABLE row_tbl( a int, b int);
+NOTICE:  The 'DISTRIBUTE BY' clause is not specified. Using 'a' as the distribution column by default.
+HINT:  Please use 'DISTRIBUTE BY' clause to specify suitable data distribution column.
+CREATE TABLE
+
+INSERT INTO row_tbl SELECT * FROM OBS_ft;
+INSERT 0 3
+
+ +
+
+

+
+

Helpful Links

ALTER FOREIGN TABLE (for HDFS or OBS), DROP FOREIGN TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0161.html b/docs/dws/dev/dws_06_0161.html new file mode 100644 index 00000000..af6ecced --- /dev/null +++ b/docs/dws/dev/dws_06_0161.html @@ -0,0 +1,1220 @@ + + +

CREATE FOREIGN TABLE (SQL on OBS or Hadoop)

+

Function

CREATE FOREIGN TABLE creates an HDFS or OBS foreign table in the current database to access or export structured data stored on HDFS or OBS. You can also export data in ORC format to HDFS or OBS.

+

The hybrid data warehouse (standalone) does not support OBS and HDFS foreign table import and export.

+
+
+

Precautions

+
+

Syntax

Create an HDFS foreign table.

+
CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name 
+( [ { column_name type_name 
+    [ { [CONSTRAINT constraint_name] NULL |
+    [CONSTRAINT constraint_name] NOT NULL |
+      column_constraint [...]} ] |
+      table_constraint [, ...]} [, ...] ] ) 
+    SERVER server_name 
+    OPTIONS ( { option_name ' value ' } [, ...] ) 
+    [ {WRITE ONLY | READ ONLY}]
+    DISTRIBUTE BY {ROUNDROBIN | REPLICATION}
+   
+    [ PARTITION BY ( column_name ) [ AUTOMAPPED ] ] ;
+
+ +
+ +
+

Parameter Description

+
+

Informational Constraint

In GaussDB(DWS), whether data complies with informational constraints is left to the user. If the user can ensure that the data source strictly complies with certain constraints, queries on data with such constraints can be accelerated. Foreign tables do not support indexes; informational constraints are used to optimize query plans instead.

+

The restrictions on creating informational constraints for a foreign table are as follows:

+ +
+

Example 1

Example 1: In HDFS, import the TPC-H benchmark test tables part and region using Hive. The path of the part table is /user/hive/warehouse/partition.db/part_4, and that of the region table is /user/hive/warehouse/mppdb.db/region_orc11_64stripe/.

+
  1. Establish HDFS_Server, with HDFS_FDW or DFS_FDW as the foreign data wrapper.
    1
    CREATE SERVER hdfs_server FOREIGN DATA WRAPPER HDFS_FDW OPTIONS (address '10.10.0.100:25000,10.10.0.101:25000',hdfscfgpath '/opt/hadoop_client/HDFS/hadoop/etc/hadoop',type'HDFS');
    +
    + +
    +

    The IP addresses and port numbers of HDFS NameNodes are specified in OPTIONS. 10.10.0.100:25000,10.10.0.101:25000 indicates the IP addresses and port numbers of the primary and standby HDFS NameNodes. It is the recommended format. Two groups of parameter values are separated by commas (,). Take '10.10.0.100:25000' as an example. In this example, the IP address is 10.10.0.100, and the port number is 25000.

    +
    +
  2. Create an HDFS foreign table. The HDFS server associated with the table is hdfs_server, the corresponding file format of the ft_region table on the HDFS server is 'orc', and the file directory in the HDFS file system is '/user/hive/warehouse/mppdb.db/region_orc11_64stripe/'.
+ +
  3. View the created server and foreign table.
    SELECT * FROM pg_foreign_table WHERE ftrelid='ft_region'::regclass;
    + ftrelid | ftserver | ftwriteonly |                                  ftoptions
    +---------+----------+-------------+------------------------------------------------------------------------------
    +   16510 |    16509 | f           | {format=orc,foldername=/user/hive/warehouse/mppdb.db/region_orc11_64stripe/}
    +(1 row)
    +
    +select * from pg_foreign_table where ftrelid='ft_part'::regclass;
    + ftrelid | ftserver | ftwriteonly |                            ftoptions
    +---------+----------+-------------+------------------------------------------------------------------
    +   16513 |    16509 | f           | {format=orc,foldername=/user/hive/warehouse/partition.db/part_4}
    +(1 row)
    +
    + +
    +
+
+

Example 2

Export data from the region table of the TPC-H benchmark test to the /user/hive/warehouse/mppdb.db/regin_orc/ directory of the HDFS file system through an HDFS write-only foreign table.

+
  1. Create an HDFS foreign table. The corresponding foreign data wrapper is HDFS_FDW or DFS_FDW, which is the same as that in Example 1.
  2. Create a write-only HDFS foreign table.
    CREATE FOREIGN TABLE ft_wo_region
    +(
    +    R_REGIONKEY INT4,
    +    R_NAME TEXT,
    +    R_COMMENT TEXT
    +)
    +SERVER
    +    hdfs_server
    +OPTIONS
    +(
    +    FORMAT 'orc',
    +    encoding 'utf8',
    +    FOLDERNAME '/user/hive/warehouse/mppdb.db/regin_orc/'
    +)
    +WRITE ONLY;
    +
    + +
    +
  3. Write data to the HDFS file system through the write-only foreign table.
    1
    INSERT INTO ft_wo_region SELECT * FROM region;
    +
    + +
    +
+
+

Example 3

Perform operations on an HDFS foreign table that includes informational constraints.

+ +
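
A hedged sketch of such a table, reusing hdfs_server from Example 1 (the constraint name, column list, and folder path are illustrative assumptions):

CREATE FOREIGN TABLE ft_region_info
(
    R_REGIONKEY INT4,
    R_NAME TEXT,
    R_COMMENT TEXT,
    CONSTRAINT region_pk PRIMARY KEY (R_REGIONKEY) NOT ENFORCED  -- informational only: not checked, used by the optimizer
)
SERVER hdfs_server
OPTIONS (FORMAT 'orc', FOLDERNAME '/user/hive/warehouse/mppdb.db/region_orc11_64stripe/')
DISTRIBUTE BY ROUNDROBIN;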
+

Example 4

Read data stored in OBS using a foreign table.

+
  1. Create obs_server, with DFS_FDW as the foreign data wrapper.
    CREATE SERVER obs_server FOREIGN DATA WRAPPER DFS_FDW OPTIONS ( 
    +  ADDRESS 'obs.xxx.xxx.com', 
    +   ACCESS_KEY 'xxxxxxxxx', 
    +  SECRET_ACCESS_KEY 'yyyyyyyyyyyyy', 
    +  TYPE 'OBS'
    +);
    +
    + +
    +
    • ADDRESS is the endpoint of OBS. Replace it with the actual endpoint. You can find the domain name by searching for the value of regionCode in the region_map file.
    • ACCESS_KEY and SECRET_ACCESS_KEY are access keys for the cloud account system. Replace the values as needed.
    • TYPE indicates the server type. Retain the value OBS.
    +
    +
  2. Create an OBS foreign table named customer_address, which does not contain partition columns and is associated with an OBS server named obs_server. Files on obs_server are in ORC format and stored in /user/hive/warehouse/mppdb.db/region_orc11_64stripe1/.
    CREATE FOREIGN TABLE customer_address
    +(
    +    ca_address_sk             integer               not null,
    +    ca_address_id             char(16)              not null,
    +    ca_street_number          char(10)                      ,   
    +    ca_street_name            varchar(60)                   ,   
    +    ca_street_type            char(15)                      ,   
    +    ca_suite_number           char(10)                      ,   
    +    ca_city                   varchar(60)                   ,   
    +    ca_county                 varchar(30)                   ,   
    +    ca_state                  char(2)                       ,   
    +    ca_zip                    char(10)                      ,   
    +    ca_country                varchar(20)                   ,   
    +    ca_gmt_offset             decimal(36,33)                  ,   
    +    ca_location_type          char(20)    
    +) 
    +SERVER obs_server OPTIONS (
    +    FOLDERNAME '/user/hive/warehouse/mppdb.db/region_orc11_64stripe1/',
    +    FORMAT 'ORC',
    +    ENCODING 'utf8',
    +    TOTALROWS  '20'
    +)
    +DISTRIBUTE BY roundrobin;
    +
    + +
    +
  3. Query data stored in OBS using a foreign table.
    SELECT COUNT(*) FROM customer_address;
    + count 
    +-------
    +    20
    +(1 row)
    +
    + +
    +
+
+

Example 5

Read a DLI multi-version table using a foreign table. This example is supported only by clusters of version 8.1.1 or later.

+
  1. Create dli_server, with DFS_FDW as the foreign data wrapper.
    CREATE SERVER dli_server FOREIGN DATA WRAPPER DFS_FDW OPTIONS ( 
    +  ADDRESS 'obs.xxx.xxx.com', 
    +  ACCESS_KEY 'xxxxxxxxx', 
    +  SECRET_ACCESS_KEY 'yyyyyyyyyyyyy', 
    +  TYPE 'DLI',
    +  DLI_ADDRESS 'dli.xxx.xxx.com',
    +  DLI_ACCESS_KEY 'xxxxxxxxx',
    +  DLI_SECRET_ACCESS_KEY 'yyyyyyyyyyyyy'
    +);
    +
    + +
    +
    • ADDRESS is the endpoint of OBS. DLI_ADDRESS is the endpoint of DLI. Replace it with the actual endpoint.
    • ACCESS_KEY and SECRET_ACCESS_KEY are access keys for the cloud account system to access OBS. Use the actual value.
    • DLI_ACCESS_KEY and DLI_SECRET_ACCESS_KEY are access keys for the cloud account system to access DLI. Use the actual value.
    • TYPE indicates the server type. Retain the value DLI.
    +
    +
  2. Create the OBS foreign table customer_address for accessing DLI. The table does not contain partition columns, and the DLI server associated with the table is dli_server. In the following command, dli_project_id is xxxxxxxxxxxxxxx, dli_database_name is database123, and dli_table_name is table456. Set their values based on site requirements.
    CREATE FOREIGN TABLE customer_address
    +(
    +    ca_address_sk             integer               not null,
    +    ca_address_id             char(16)              not null,
    +    ca_street_number          char(10)                      ,   
    +    ca_street_name            varchar(60)                   ,   
    +    ca_street_type            char(15)                      ,   
    +    ca_suite_number           char(10)                      ,   
    +    ca_city                   varchar(60)                   ,   
    +    ca_county                 varchar(30)                   ,   
    +    ca_state                  char(2)                       ,   
    +    ca_zip                    char(10)                      ,   
    +    ca_country                varchar(20)                   ,   
    +    ca_gmt_offset             decimal(36,33)                  ,   
    +    ca_location_type          char(20)    
    +) 
    +SERVER dli_server OPTIONS (
    +    FORMAT 'ORC',
    +    ENCODING 'utf8',
    +    DLI_PROJECT_ID 'xxxxxxxxxxxxxxx',
    +    DLI_DATABASE_NAME 'database123',
    +    DLI_TABLE_NAME 'table456'
    +)
    +DISTRIBUTE BY roundrobin;
    +
    + +
    +
  3. Query data in a DLI multi-version table using a foreign table.
    SELECT COUNT(*) FROM customer_address;
    + count 
    +-------
    +    20
    +(1 row)
    +
    + +
    +
+
+

Helpful Links

ALTER FOREIGN TABLE (for HDFS or OBS), DROP FOREIGN TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0163.html b/docs/dws/dev/dws_06_0163.html new file mode 100644 index 00000000..3656128c --- /dev/null +++ b/docs/dws/dev/dws_06_0163.html @@ -0,0 +1,306 @@ + + +

CREATE FUNCTION

+

Function

CREATE FUNCTION creates a function.

+
+

Precautions

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Define the function as SQL query.

+
CREATE FUNCTION func_add_sql(integer, integer) RETURNS integer
+    AS 'select $1 + $2;'
+    LANGUAGE SQL
+    IMMUTABLE
+    RETURNS NULL ON NULL INPUT;
+
+ +
+
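
Calling it returns the sum, or NULL on NULL input (usage sketch):

SELECT func_add_sql(1, 2);   -- returns 3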

Add an integer by parameter name using PL/pgSQL.

+
CREATE OR REPLACE FUNCTION func_increment_plsql(i integer) RETURNS integer AS $$
+        BEGIN
+                RETURN i + 1;
+        END;
+$$ LANGUAGE plpgsql;
+
+ +
+

Return the RECORD type.

+
CREATE OR REPLACE FUNCTION compute(i int, out result_1 bigint, out result_2 bigint)
+returns SETOF RECORD
+as $$
+begin
+    result_1 = i + 1;
+    result_2 = i * 10;
+return next;
+end;
+$$language plpgsql;
+
+ +
+
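
Calling it yields both output columns (usage sketch):

SELECT * FROM compute(5);   -- result_1 = 6, result_2 = 50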

Get a record containing multiple output parameters.

+
1
+2
+3
+4
CREATE FUNCTION func_dup_sql(in int, out f1 int, out f2 text)
+    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
+    LANGUAGE SQL;
+SELECT * FROM func_dup_sql(42);
+
+ +
+

Calculate the sum of two integers and get the result. If the input is null, null will be returned.

+
CREATE FUNCTION func_add_sql2(num1 integer, num2 integer) RETURN integer
+AS
+BEGIN 
+RETURN num1 + num2;
+END;
+/
+
+ +
+

Create an overloaded function with the PACKAGE attribute.

+
CREATE OR REPLACE FUNCTION package_func_overload(col int, col2  int)
+return integer package
+as
+declare
+    col_type text;
+begin
+     col := 122;
+         dbms_output.put_line('two int parameters ' || col2);
+         return 0;
+end;
+/
+
+CREATE OR REPLACE FUNCTION package_func_overload(col int, col2 smallint)
+return integer package
+as
+declare
+    col_type text;
+begin
+     col := 122;
+         dbms_output.put_line('two smallint parameters ' || col2);
+         return 0;
+end;
+/
+
+ +
+
+

Helpful Links

ALTER FUNCTION, DROP FUNCTION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0164.html b/docs/dws/dev/dws_06_0164.html new file mode 100644 index 00000000..db61b7e6 --- /dev/null +++ b/docs/dws/dev/dws_06_0164.html @@ -0,0 +1,84 @@ + + +

CREATE GROUP

+

Function

CREATE GROUP creates a user group.

+
+

Precautions

CREATE GROUP is an alias for CREATE ROLE. It is not a standard SQL command and is not recommended. Use CREATE ROLE directly instead.

+
+

Syntax

1
+2
CREATE GROUP group_name [ [ WITH ] option [ ... ] ] 
+    [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' | DISABLE };
+
+ +
+

The syntax of the optional option clause is as follows:

+
where option can be:
+{SYSADMIN | NOSYSADMIN}
+    | {AUDITADMIN | NOAUDITADMIN}
+    | {CREATEDB | NOCREATEDB}
+    | {USEFT | NOUSEFT}
+    | {CREATEROLE | NOCREATEROLE}
+    | {INHERIT | NOINHERIT}
+    | {LOGIN | NOLOGIN}
+    | {REPLICATION | NOREPLICATION}
+    | {INDEPENDENT | NOINDEPENDENT}
+    | {VCADMIN | NOVCADMIN}
+    | CONNECTION LIMIT connlimit
+    | VALID BEGIN 'timestamp'
+    | VALID UNTIL 'timestamp'
+    | RESOURCE POOL 'respool'
+    | USER GROUP 'groupuser'
+    | PERM SPACE 'spacelimit'
+    | NODE GROUP logic_group_name
+    | IN ROLE role_name [, ...]
+    | IN GROUP role_name [, ...]
+    | ROLE role_name [, ...]
+    | ADMIN role_name [, ...]
+    | USER role_name [, ...]
+    | SYSID uid
+    | DEFAULT TABLESPACE tablespace_name
+    | PROFILE DEFAULT
+    | PROFILE profile_name
+    | PGUSER
+
+ +
+
+

Parameter Description

See Parameter Description in CREATE ROLE.

+
+

Helpful Links

ALTER GROUP, DROP GROUP, CREATE ROLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0165.html b/docs/dws/dev/dws_06_0165.html new file mode 100644 index 00000000..3012162e --- /dev/null +++ b/docs/dws/dev/dws_06_0165.html @@ -0,0 +1,199 @@ + + +

CREATE INDEX

+

Function

CREATE INDEX defines a new index.

+

Indexes are primarily used to enhance database performance (though inappropriate use can result in slower database performance). You are advised to create indexes on:

+ +

Partitioned tables do not support concurrent index creation, partial index creation, or NULLS FIRST.

+
+

Precautions

+
+

Syntax

+
+

Parameters

+
+

Examples

+ +
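
A minimal sketch of a plain B-tree index (the table and column names are assumed from the TPC-DS examples used elsewhere in this guide):

CREATE INDEX ds_customer_index1 ON customer (c_customer_sk);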
+

Links

ALTER INDEX, DROP INDEX

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0168.html b/docs/dws/dev/dws_06_0168.html new file mode 100644 index 00000000..d8d03a67 --- /dev/null +++ b/docs/dws/dev/dws_06_0168.html @@ -0,0 +1,69 @@ + + +

CREATE REDACTION POLICY

+

Function

CREATE REDACTION POLICY creates a data redaction policy for a table.

+
+

Precautions

+
+

Syntax

1
+2
+3
CREATE REDACTION POLICY policy_name ON table_name
+    [ WHEN (when_expression) ]
+    [ ADD COLUMN column_name WITH redaction_function_name ( [ argument [, ...] ] )] [, ... ];
+
+ +
+
+

Parameter Description

+
+

Examples

Create a table object emp as user alice, and insert data into the table.

+
1
+2
CREATE TABLE emp(id int, name varchar(20), salary NUMERIC(10,2));
+INSERT INTO emp VALUES(1, 'July', 1230.10), (2, 'David', 999.99);
+
+ +
+

Create a redaction policy mask_emp for the emp table as user alice to make the salary column invisible to user matu.

+
1
CREATE REDACTION POLICY mask_emp ON emp WHEN(current_user = 'matu') ADD COLUMN salary WITH mask_full(salary);
+
+ +
+

Grant the SELECT permission on the emp table to user matu as user alice.

+
1
GRANT SELECT ON emp TO matu;
+
+ +
+

Switch to user matu.

+
1
SET ROLE matu PASSWORD '{password}';
+
+ +
+

Query the emp table. Data in the salary column has been redacted.

+
1
SELECT * FROM emp;
+
+ +
+
+

Helpful Links

ALTER REDACTION POLICY, DROP REDACTION POLICY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0169.html b/docs/dws/dev/dws_06_0169.html new file mode 100644 index 00000000..d930d821 --- /dev/null +++ b/docs/dws/dev/dws_06_0169.html @@ -0,0 +1,185 @@ + + +

CREATE ROW LEVEL SECURITY POLICY

+

Function

CREATE ROW LEVEL SECURITY POLICY creates a row-level access control policy for a table.

+

The policy takes effect only after row-level access control is enabled (by running ALTER TABLE... ENABLE ROW LEVEL SECURITY).

+

Currently, row-level access control affects the read (SELECT, UPDATE, DELETE) of data tables and does not affect the write (INSERT and MERGE INTO) of data tables. The table owner or system administrators can create an expression in the USING clause. When the client reads the data table, the database server combines the expressions that meet the condition and applies it to the execution plan in the statement rewriting phase of a query. For each tuple in a data table, if the expression returns TRUE, the tuple is visible to the current user; if the expression returns FALSE or NULL, the tuple is invisible to the current user.

+

A row-level access control policy name is specific to a table. A data table cannot have row-level access control policies with the same name. Different data tables can have the same row-level access control policy.

+

Row-level access control policies can be applied to specified operations (SELECT, UPDATE, DELETE, and ALL). ALL indicates that SELECT, UPDATE, and DELETE will be affected. For a new row-level access control policy, the default value ALL will be used if you do not specify the operations that will be affected.

+

Row-level access control policies can be applied to a specified user (role) or to all users (PUBLIC). For a new row-level access control policy, the default value PUBLIC will be used if you do not specify the user that will be affected.

+
+

Precautions

+
+

Syntax

CREATE [ ROW LEVEL SECURITY ] POLICY policy_name ON table_name
+    [ AS { PERMISSIVE | RESTRICTIVE } ]
+    [ FOR { ALL | SELECT | UPDATE | DELETE } ]
+    [ TO { role_name | PUBLIC } [, ...] ]
+    USING ( using_expression )
+
+ +
+
+

Parameter Description

+
+ +

Example 1: Create a row-level access control policy that the current user can only view its own data.

  1. Create users alice and bob.
    CREATE ROLE alice PASSWORD '{password1}'
    +CREATE ROLE bob PASSWORD '{password2}';
    +
  2. Create the data table public.all_data.
    CREATE TABLE public.all_data(id int, role varchar(100), data varchar(100));
    +
  3. Insert data into the table.
    INSERT INTO all_data VALUES(1, 'alice', 'alice data');
    +INSERT INTO all_data VALUES(2, 'bob', 'bob data');
    +INSERT INTO all_data VALUES(3, 'peter', 'peter data');
    +
  4. Grant the read permission for the all_data table to users alice and bob.
    GRANT SELECT ON all_data TO alice, bob;
    +
  5. Enable row-level access control.
    1
    ALTER TABLE all_data ENABLE ROW LEVEL SECURITY;
    +
    + +
    +
  6. Create a row-level access control policy to specify that the current user can view only its own data.
    CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER);
    +
  7. View information about the all_data table.
    \d+ all_data
    +                               Table "public.all_data"
    + Column |          Type          | Modifiers | Storage  | Stats target | Description
    +--------+------------------------+-----------+----------+--------------+-------------
    + id     | integer                |           | plain    |              |
    + role   | character varying(100) |           | extended |              |
    + data   | character varying(100) |           | extended |              |
    +Row Level Security Policies:
    +    POLICY "all_data_rls"
    +      USING (((role)::name = "current_user"()))
    +Has OIDs: no
    +Distribute By: HASH(id)
    +Location Nodes: ALL DATANODES
    +Options: orientation=row, compression=no, enable_rowsecurity=true
    +
  8. Run SELECT.
    SELECT * FROM all_data;
    + id | role  |    data
    +----+-------+------------
    +  1 | alice | alice data
    +  2 | bob   | bob data
    +  3 | peter | peter data
    +(3 rows)
    +
    +EXPLAIN(COSTS OFF) SELECT * FROM all_data;
    +         QUERY PLAN
    +----------------------------
    + Streaming (type: GATHER)
    +   Node/s: All datanodes
    +   ->  Seq Scan on all_data
    +(3 rows)
    +
  9. Switch to the alice user.
    set role alice password '{password1}';
    +
  10. Perform the SELECT operation.
    SELECT * FROM all_data;
    + id | role  |    data
    +----+-------+------------
    +  1 | alice | alice data
    +(1 row)
    +
    +EXPLAIN(COSTS OFF) SELECT * FROM all_data;
    +                           QUERY PLAN
    +----------------------------------------------------------------
    + Streaming (type: GATHER)
    +   Node/s: All datanodes
    +   ->  Seq Scan on all_data
    +         Filter: ((role)::name = 'alice'::name)
    + Notice: This query is influenced by row level security feature
    +(5 rows)
    +
+
+

Helpful Links

DROP ROW LEVEL SECURITY POLICY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0170.html b/docs/dws/dev/dws_06_0170.html new file mode 100644 index 00000000..161ae862 --- /dev/null +++ b/docs/dws/dev/dws_06_0170.html @@ -0,0 +1,145 @@ + + +

CREATE PROCEDURE

+

Function

CREATE PROCEDURE creates a stored procedure.

+
+

Precautions

+
+

Syntax

CREATE [ OR REPLACE ] PROCEDURE procedure_name
+    [ ( {[ argmode ] [ argname ] argtype [ { DEFAULT | := | = } expression ]}[,...]) ]
+    [
+       { IMMUTABLE | STABLE | VOLATILE }
+       | { SHIPPABLE | NOT SHIPPABLE }
+       | {PACKAGE}
+       | [ NOT ] LEAKPROOF
+       | { CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT }
+       | {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER}
+       | COST execution_cost
+       | ROWS result_rows
+       | SET configuration_parameter { [ TO | = ] value | FROM CURRENT }
+    ][ ... ]
+ { IS | AS } 
+plsql_body 
+/
+
+ +
+
+

Parameter Description

+

No specific order is applied to argument_name and argmode. The following order is advised: argument_name, argmode, and argument_type.

+
+
+

Examples

Create a stored procedure.

+
CREATE OR REPLACE PROCEDURE prc_add
+(
+    param1    IN   INTEGER,
+    param2    IN OUT  INTEGER
+)
+AS
+BEGIN
+   param2:= param1 + param2;
+   dbms_output.put_line('result is: '||to_char(param2));
+END;
+/
+
+ +
+

Call the stored procedure.

+
1
SELECT prc_add(2,3);
+
+ +
+

Create a stored procedure whose parameter type is VARIADIC.

+
CREATE OR REPLACE PROCEDURE pro_variadic (var1 VARCHAR2(10) DEFAULT 'hello!',var4 VARIADIC int4[])
+AS
+BEGIN
+    dbms_output.put_line(var1);
+END;
+/
+
+ +
+

Execute the stored procedure.

+
1
SELECT pro_variadic(var1=>'hello', VARIADIC var4=> array[1,2,3,4]);
+
+ +
+

Create a stored procedure with the PACKAGE attribute.

+
create or replace procedure package_func_overload(col int, col2 out varchar)
+package
+as
+declare
+    col_type text;
+begin
+     col2 := '122';
+         dbms_output.put_line('two varchar parameters ' || col2);
+end;
+/
+
+ +
+

+
+

Helpful Links

DROP PROCEDURE, CALL

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0171.html b/docs/dws/dev/dws_06_0171.html new file mode 100644 index 00000000..206fff16 --- /dev/null +++ b/docs/dws/dev/dws_06_0171.html @@ -0,0 +1,82 @@ + + +

CREATE RESOURCE POOL

+

Function

CREATE RESOURCE POOL creates a resource pool and specifies the Cgroup for the resource pool.

+
+

Precautions

Any user with the CREATE permission can create a resource pool.

+
+

Syntax

1
+2
CREATE RESOURCE POOL pool_name
+    [WITH ({MEM_PERCENT=pct | CONTROL_GROUP="group_name" | ACTIVE_STATEMENTS=stmt | MAX_DOP = dop | MEMORY_LIMIT='memory_size' | io_limits=io_limits | io_priority='io_priority' | nodegroup="nodegroupname" | is_foreign=boolean }[, ... ])];
+
+ +
+
+

Parameter Description

+
+

Examples

This example assumes that Cgroups have been created by users in advance.

+

Create a default resource pool, and associate it with the Medium Timeshare Cgroup under Workload under DefaultClass.

+
1
CREATE RESOURCE POOL pool1;
+
+ +
+

Create a resource pool, and associate it with the High Timeshare Cgroup under Workload under DefaultClass.

+
1
CREATE RESOURCE POOL pool2 WITH (CONTROL_GROUP="High");
+
+ +
+

Create a resource pool, and associate it with the Low Timeshare Cgroup under Workload under class1.

+
1
CREATE RESOURCE POOL pool3 WITH (CONTROL_GROUP="class1:Low");
+
+ +
+

Create a resource pool, and associate it with the wg1 Workload Cgroup under class1.

+
1
CREATE RESOURCE POOL pool4 WITH (CONTROL_GROUP="class1:wg1");
+
+ +
+

Create a resource pool, and associate it with the wg2 Workload Cgroup under class1.

+
1
CREATE RESOURCE POOL pool5 WITH (CONTROL_GROUP="class1:wg2:3");
+
+ +
+
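
A resource pool takes effect when it is bound to a user. A usage sketch reusing pool2 from above (the user name is hypothetical; RESOURCE POOL is the role option listed under CREATE ROLE):

CREATE USER user1 PASSWORD '{password}' RESOURCE POOL 'pool2';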

+
+

Helpful Links

ALTER RESOURCE POOL, DROP RESOURCE POOL

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0172.html b/docs/dws/dev/dws_06_0172.html new file mode 100644 index 00000000..b51cb462 --- /dev/null +++ b/docs/dws/dev/dws_06_0172.html @@ -0,0 +1,191 @@ + + +

CREATE ROLE

+

Function

Create a role.

+

A role is an entity that owns database objects and permissions. Depending on the environment, a role can be considered a user, a group, or both.

+
+

Important Notes

+
+

Syntax

1
CREATE ROLE role_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' | DISABLE };
+
+ +
+
The syntax of role information configuration clause option is as follows:
{SYSADMIN | NOSYSADMIN}
+    | {AUDITADMIN | NOAUDITADMIN}
+    | {CREATEDB | NOCREATEDB}
+    | {USEFT | NOUSEFT}
+    | {CREATEROLE | NOCREATEROLE}
+    | {INHERIT | NOINHERIT}
+    | {LOGIN | NOLOGIN}
+    | {REPLICATION | NOREPLICATION}
+    | {INDEPENDENT | NOINDEPENDENT}
+    | {VCADMIN | NOVCADMIN}
+    | CONNECTION LIMIT connlimit
+    | VALID BEGIN 'timestamp'
+    | VALID UNTIL 'timestamp'
+    | RESOURCE POOL 'respool'
+    | USER GROUP 'groupuser'
+    | PERM SPACE 'spacelimit'
+    | TEMP SPACE 'tmpspacelimit'
+    | SPILL SPACE 'spillspacelimit'
+    | NODE GROUP logic_cluster_name
+    | IN ROLE role_name [, ...]
+    | IN GROUP role_name [, ...]
+    | ROLE role_name [, ...]
+    | ADMIN role_name [, ...]
+    | USER role_name [, ...]
+    | SYSID uid
+    | DEFAULT TABLESPACE tablespace_name
+    | PROFILE DEFAULT
+    | PROFILE profile_name
+    | PGUSER
+    | AUTHINFO 'authinfo'
+    | PASSWORD EXPIRATION period
+
+ +
+
+
+

Parameter Description

+
+ + +

Examples

Create a role manager.

+
1
CREATE ROLE manager IDENTIFIED BY '{password}';
+
+ +
+

Create a role that is valid from January 1, 2015 to January 1, 2026.

+
1
CREATE ROLE miriam WITH LOGIN PASSWORD '{password}' VALID BEGIN '2015-01-01' VALID UNTIL '2026-01-01';
+
+ +
+

Create a role. The authentication type is LDAP. Other LDAP authentication information is provided by pg_hba.conf.

+
1
CREATE ROLE role1 WITH LOGIN AUTHINFO 'ldap' PASSWORD DISABLE;
+
+ +
+

Create a role. The authentication type is LDAP. The fulluser information for LDAP authentication is specified during role creation. The fulluser information is case-sensitive and must be enclosed in single quotation marks.

+
1
CREATE ROLE role2 WITH LOGIN AUTHINFO 'ldapcn=role2,cn=user,dc=lework,dc=com' PASSWORD DISABLE;
+
+ +
+

Create a role and set the validity period of the login password to 30 days.

+
1
CREATE ROLE role3 WITH LOGIN PASSWORD '{password}' PASSWORD EXPIRATION 30;
+
+ +
+
+

Links

SET ROLE, ALTER ROLE, DROP ROLE, GRANT, REVOKE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0173.html b/docs/dws/dev/dws_06_0173.html new file mode 100644 index 00000000..0af43de1 --- /dev/null +++ b/docs/dws/dev/dws_06_0173.html @@ -0,0 +1,51 @@ + + +

CREATE SCHEMA

+

Function

CREATE SCHEMA creates a schema.

+

Named objects are accessed either by "qualifying" their names with the schema name as a prefix, or by setting a search path that includes the desired schema(s). When creating named objects, you can also use the schema name as a prefix.
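
A minimal sketch of both access styles (the schema and table names are hypothetical):

CREATE SCHEMA myschema;
CREATE TABLE myschema.t1 (a int);
SELECT * FROM myschema.t1;        -- qualified name
SET search_path TO myschema;
SELECT * FROM t1;                 -- resolved through the search path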

+

Optionally, CREATE SCHEMA can include sub-commands to create objects within the new schema. The sub-commands are treated essentially the same as separate commands issued after the schema is created. If the AUTHORIZATION clause is used, all the created objects are owned by that user.

+
+

Precautions

+
+

Syntax

+
+

Parameter Description

+

If objects in the schema on the current search path are with the same name, specify the schemas different objects are in. You can run the SHOW SEARCH_PATH command to check the schemas on the current search path.

+
+
+

Examples

Create a schema named role1 for the role1 role. The owner of the films and winners tables created by the clause is role1.

+
CREATE SCHEMA AUTHORIZATION role1
+CREATE TABLE films (title text, release date, awards text[])      
+CREATE VIEW winners AS         
+SELECT title, release FROM films WHERE awards IS NOT NULL;
+
+

Helpful Links

ALTER SCHEMA, DROP SCHEMA

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0174.html b/docs/dws/dev/dws_06_0174.html new file mode 100644 index 00000000..73ce3400 --- /dev/null +++ b/docs/dws/dev/dws_06_0174.html @@ -0,0 +1,147 @@ + + +

CREATE SEQUENCE

+

Function

CREATE SEQUENCE adds a sequence to the current database. The owner of a sequence is the user who creates the sequence.

+
+

Precautions

+
+

Syntax

1
+2
+3
+4
CREATE SEQUENCE name [ INCREMENT [ BY ] increment ]
+    [ MINVALUE minvalue | NO MINVALUE | NOMINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE | NOMAXVALUE] 
+    [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE | NOCYCLE ] 
+    [ OWNED BY { table_name.column_name | NONE } ];
+
+ +
+
+

Parameter Description

+
+

Examples

Create an ascending sequence named serial, which starts from 101:

+
1
+2
+3
CREATE SEQUENCE serial
+ START 101
+ CACHE 20;
+
+ +
+

Select the next number from the sequence:

+
1
+2
+3
+4
SELECT nextval('serial');
+ nextval 
+ ---------
+      101
+
+ +
+

Select the next number from the sequence:

+
1
+2
+3
+4
SELECT nextval('serial');
+ nextval 
+ ---------
+      102
+
+ +
+

Create a sequence associated with the table:

+
CREATE TABLE customer_address
+(
+    ca_address_sk             integer               not null,
+    ca_address_id             char(16)              not null,
+    ca_street_number          char(10)                      ,
+    ca_street_name            varchar(60)                   ,
+    ca_street_type            char(15)                      ,
+    ca_suite_number           char(10)                      ,
+    ca_city                   varchar(60)                   ,
+    ca_county                 varchar(30)                   ,
+    ca_state                  char(2)                       ,
+    ca_zip                    char(10)                      ,
+    ca_country                varchar(20)                   ,
+    ca_gmt_offset             decimal(5,2)                  ,
+    ca_location_type          char(20)                     
+) ;
+
+CREATE SEQUENCE serial1
+ START 101
+ CACHE 20
+OWNED BY customer_address.ca_address_sk;
+
+ +
+
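
To make the column actually draw values from serial1, set the sequence as the column default (a usage sketch):

ALTER TABLE customer_address ALTER COLUMN ca_address_sk SET DEFAULT nextval('serial1');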

Use SERIAL to create a serial table serial_table for primary key auto-increment.

+
CREATE TABLE serial_table(a int, b serial);
+INSERT INTO serial_table (a) VALUES (1),(2),(3);
+SELECT * FROM serial_table ORDER BY b;
+ a | b
+---+---
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+ +
+
+

Helpful Links

DROP SEQUENCE ALTER SEQUENCE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0175.html b/docs/dws/dev/dws_06_0175.html new file mode 100644 index 00000000..5473099f --- /dev/null +++ b/docs/dws/dev/dws_06_0175.html @@ -0,0 +1,130 @@ + + +

CREATE SERVER

+

Function

CREATE SERVER creates an external server.

+

An external server stores information of HDFS clusters, OBS servers, DLI connections, or other homogeneous clusters.

+
+

Precautions

By default, only the system administrator can create a foreign server. Otherwise, creating a server requires permissions on the foreign data wrapper being used. Use the following syntax to grant permissions:

+
1
GRANT USAGE ON FOREIGN DATA WRAPPER fdw_name TO username;
+
+ +
+

fdw_name is the name of the foreign data wrapper, and username is the name of the user who creates the server.

+
+

Syntax

1
+2
+3
CREATE SERVER server_name 
+    FOREIGN DATA WRAPPER fdw_name
+    OPTIONS ( { option_name ' value ' } [, ...] ) ;
+
+ +
+
+

Parameter Description

+
+

Examples

Create the hdfs_server server, in which hdfs_fdw is the built-in foreign data wrapper.

+
CREATE SERVER hdfs_server FOREIGN DATA WRAPPER HDFS_FDW OPTIONS 
+   (address '10.10.0.100:25000,10.10.0.101:25000',
+    hdfscfgpath '/opt/hadoop_client/HDFS/hadoop/etc/hadoop', 
+    type 'HDFS'
+) ;
+
+ +
+

Create the obs_server server, in which dfs_fdw is the built-in foreign data wrapper.

+
CREATE SERVER obs_server FOREIGN DATA WRAPPER DFS_FDW OPTIONS ( 
+  address 'obs.xxx.xxx.com', 
+   access_key 'xxxxxxxxx', 
+  secret_access_key 'yyyyyyyyyyyyy', 
+  type 'obs'
+);
+
+ +
+

Create the dli_server server, in which dfs_fdw is the built-in foreign data wrapper.

+
CREATE SERVER dli_server FOREIGN DATA WRAPPER DFS_FDW OPTIONS ( 
+  address 'obs.xxx.xxx.com', 
+  access_key 'xxxxxxxxx', 
+  secret_access_key 'yyyyyyyyyyyyy', 
+  type 'dli',
+  dli_address 'dli.xxx.xxx.com',
+  dli_access_key 'xxxxxxxxx',
+  dli_secret_access_key 'yyyyyyyyyyyyy'
+);
+
+ +
+

Create another server in the homogeneous cluster, where gc_fdw is the foreign data wrapper in the database.

+
CREATE SERVER server_remote FOREIGN DATA WRAPPER GC_FDW OPTIONS 
+   (address '10.10.0.100:25000,10.10.0.101:25000',
+  dbname 'test', 
+  username 'test', 
+  password 'xxxxxxxx'
+);
+
+ +
+

Helpful Links

+

ALTER SERVER DROP SERVER

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0176.html b/docs/dws/dev/dws_06_0176.html new file mode 100644 index 00000000..c481ec4d --- /dev/null +++ b/docs/dws/dev/dws_06_0176.html @@ -0,0 +1,127 @@ + + +

CREATE SYNONYM

+

Function

CREATE SYNONYM is used to create a synonym object. A synonym is an alias of a database object and is used to record the mapping between database object names. You can use synonyms to access associated database objects.

+
+

Precautions

+
+

Syntax

1
+2
CREATE [ OR REPLACE ] SYNONYM synonym_name 
+    FOR object_name;
+
+ +
+
+

Parameter Description

+
+

Examples

Create schema ot.

+
1
CREATE SCHEMA ot;
+
+ +
+

Create table ot.t1 and its synonym t1.

+
1
+2
CREATE TABLE ot.t1(id int, name varchar2(10)) DISTRIBUTE BY hash(id);
+CREATE OR REPLACE SYNONYM t1 FOR ot.t1;
+
+ +
+

Use synonym t1.

+
1
+2
+3
SELECT * FROM t1;
+INSERT INTO t1 VALUES (1, 'ada'), (2, 'bob');
+UPDATE t1 SET t1.name = 'cici' WHERE t1.id = 2;
+
+ +
+

Create synonym v1 and its associated view ot.v_t1.

+
1
+2
CREATE SYNONYM v1 FOR ot.v_t1;
+CREATE VIEW ot.v_t1 AS SELECT * FROM ot.t1;
+
+ +
+

Use synonym v1.

+
1
SELECT * FROM v1;
+
+ +
+

Create overloaded function ot.add and its synonym add.

+
CREATE OR REPLACE FUNCTION ot.add(a integer, b integer) RETURNS integer AS
+$$
+SELECT $1 + $2
+$$
+LANGUAGE sql;
+
+CREATE OR REPLACE FUNCTION ot.add(a decimal(5,2), b decimal(5,2)) RETURNS decimal(5,2) AS
+$$
+SELECT $1 + $2
+$$
+LANGUAGE sql;
+
+CREATE OR REPLACE SYNONYM add FOR ot.add;
+
+ +
+

Use synonym add.

+
1
+2
SELECT add(1,2);
+SELECT add(1.2,2.3);
+
+ +
+

Create stored procedure ot.register and its synonym register.

+
CREATE PROCEDURE ot.register(n_id integer, n_name varchar2(10))
+SECURITY INVOKER
+AS
+BEGIN
+    INSERT INTO ot.t1 VALUES(n_id, n_name);
+END;
+/
+
+CREATE OR REPLACE SYNONYM register FOR ot.register;
+
+ +
+

Use synonym register to invoke the stored procedure.

+
1
CALL register(3,'mia');
+
+ +
+
+

Helpful Links

ALTER SYNONYM DROP SYNONYM

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0177.html b/docs/dws/dev/dws_06_0177.html new file mode 100644 index 00000000..f93e0cae --- /dev/null +++ b/docs/dws/dev/dws_06_0177.html @@ -0,0 +1,1284 @@ + + +

CREATE TABLE

+

Function

CREATE TABLE creates a table in the current database. The table will be owned by the user who created it.

+
+

Precautions

+
+

Syntax

CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name 
+    ({ column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
+        | table_constraint
+        | LIKE source_table [ like_option [...] ] }
+        [, ... ])
+    [ WITH ( {storage_parameter = value} [, ... ] ) ]
+    [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
+    [ COMPRESS | NOCOMPRESS ]
+    
+    [ DISTRIBUTE BY { REPLICATION | { HASH ( column_name [,...] ) } } ]
+    [ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ];
+
+ +
+ + + +
+

Parameter Description

+
+

Using the LIKE Clause to Declare a Table

The new table films_bk automatically inherits all column names, data types, and non-null constraints from the source table films.

+
CREATE TABLE films (
+code        char(5) PRIMARY KEY,
+title       varchar(40) NOT NULL,
+did         integer NOT NULL,
+date_prod   date,
+kind        varchar(10),
+len         interval hour to minute
+);
+CREATE TABLE films_bk LIKE films;
+
+ +
+
+

Creating a Table with Default Columns

Set the default value of the W_STATE column to GA. At the end of the transaction, check for duplicate values in the W_WAREHOUSE_NAME column.

+
CREATE TABLE tpcds.warehouse_t2
+(
+    W_WAREHOUSE_SK            INTEGER                NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)               NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)   UNIQUE DEFERRABLE,
+    W_WAREHOUSE_SQ_FT         INTEGER                        ,
+    W_STREET_NUMBER           CHAR(10)                       ,
+    W_STREET_NAME             VARCHAR(60)                    ,
+    W_STREET_TYPE             CHAR(15)                       ,
+    W_SUITE_NUMBER            CHAR(10)                       ,
+    W_CITY                    VARCHAR(60)                    ,
+    W_COUNTY                  VARCHAR(30)                    ,
+    W_STATE                   CHAR(2)            DEFAULT 'GA',
+    W_ZIP                     CHAR(10)                       ,
+    W_COUNTRY                 VARCHAR(20)                    ,
+    W_GMT_OFFSET              DECIMAL(5,2) 
+);
+
+ +
+
+

Creating a Table with a Fill Factor

Set the fill factor to 70%.

+
CREATE TABLE tpcds.warehouse_t3
+(
+    W_WAREHOUSE_SK            INTEGER                NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)               NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                    ,
+    W_WAREHOUSE_SQ_FT         INTEGER                        ,
+    W_STREET_NUMBER           CHAR(10)                       ,
+    W_STREET_NAME             VARCHAR(60)                    ,
+    W_STREET_TYPE             CHAR(15)                       ,
+    W_SUITE_NUMBER            CHAR(10)                       ,
+    W_CITY                    VARCHAR(60)                    ,
+    W_COUNTY                  VARCHAR(30)                    ,
+    W_STATE                   CHAR(2)                        ,
+    W_ZIP                     CHAR(10)                       ,
+    W_COUNTRY                 VARCHAR(20)                    ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    UNIQUE(W_WAREHOUSE_NAME) WITH(fillfactor=70)
+);
+
+ +
+

Alternatively, use the following syntax to create a table with its fillfactor set to 70%:

+
CREATE TABLE tpcds.warehouse_t4
+(
+    W_WAREHOUSE_SK            INTEGER                NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)               NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)              UNIQUE,
+    W_WAREHOUSE_SQ_FT         INTEGER                        ,
+    W_STREET_NUMBER           CHAR(10)                       ,
+    W_STREET_NAME             VARCHAR(60)                    ,
+    W_STREET_TYPE             CHAR(15)                       ,
+    W_SUITE_NUMBER            CHAR(10)                       ,
+    W_CITY                    VARCHAR(60)                    ,
+    W_COUNTY                  VARCHAR(30)                    ,
+    W_STATE                   CHAR(2)                        ,
+    W_ZIP                     CHAR(10)                       ,
+    W_COUNTRY                 VARCHAR(20)                    ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH(fillfactor=70);
+
+ +
+
+

Creating a Table Whose Data Is Not Written to WALs

Use UNLOGGED to specify that table data is not written to write-ahead logs (WALs).

+
CREATE UNLOGGED TABLE tpcds.warehouse_t5
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+);
+
+ +
+
+

Creating a Table Without Reporting Errors for Duplicate Tables (If Any)

If IF NOT EXISTS is specified, a table will be created if there is no table using the specified name. If there is already a table using the specified name, no error will be reported. A message will be displayed indicating that the table already exists, and the database will skip table creation.

+
CREATE TABLE IF NOT EXISTS tpcds.warehouse_t6
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+);
+
+ +
+
+

Creating a Table with a Primary Key Constraint

Use PRIMARY KEY to declare the primary key.

+
CREATE TABLE tpcds.warehouse_t7
+(
+    W_WAREHOUSE_SK            INTEGER            PRIMARY KEY,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+);
+
+ +
+

Alternatively, use the following syntax to create a table with a primary key constraint:

+
CREATE TABLE tpcds.warehouse_t8
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    PRIMARY KEY(W_WAREHOUSE_SK)
+);
+
+ +
+

Or use the following statement to specify the name of the constraint:

+
CREATE TABLE tpcds.warehouse_t9
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    CONSTRAINT W_CSTR_KEY1 PRIMARY KEY(W_WAREHOUSE_SK)
+);
+
+ +
+
+

Creating a Table with a Compound Primary Key Constraint

Use PRIMARY KEY to declare a compound primary key that spans two columns.

+
CREATE TABLE tpcds.warehouse_t10
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    CONSTRAINT W_CSTR_KEY2 PRIMARY KEY(W_WAREHOUSE_SK, W_WAREHOUSE_ID)
+);
+
+ +
+
+

Creating a Column-store Table

Use ORIENTATION to specify the storage mode of table data.

+
CREATE TABLE tpcds.warehouse_t11
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (ORIENTATION = COLUMN);
+
+ +
+
+

Creating a Column-store Table Using Partial Clustered Storage

When data is imported into a column-store table, it is partially sorted based on one or more columns specified by PARTIAL CLUSTER KEY.

+
CREATE TABLE tpcds.warehouse_t12
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    PARTIAL CLUSTER KEY(W_WAREHOUSE_SK, W_WAREHOUSE_ID)
+) WITH (ORIENTATION = COLUMN);
+
+ +
+
+

Defining a Column-store Table with Compression Enabled

Use the WITH clause to declare the compression level.

+
CREATE TABLE tpcds.warehouse_t17
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (ORIENTATION = COLUMN, COMPRESSION=HIGH);
+
+ +
+
+

Defining a Table with Compression Enabled

When creating a table, specify the keyword COMPRESS.

+
CREATE TABLE tpcds.warehouse_t13
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) COMPRESS;
+
+ +
+
+

Creating a Table that Checks Column Constraints

Use CONSTRAINT to declare a constraint.

+
CREATE TABLE tpcds.warehouse_t19
+(
+    W_WAREHOUSE_SK            INTEGER               PRIMARY KEY CHECK (W_WAREHOUSE_SK > 0),
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)           CHECK (W_WAREHOUSE_NAME IS NOT NULL),
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+);
+
+ +
+
CREATE TABLE tpcds.warehouse_t20
+(
+    W_WAREHOUSE_SK            INTEGER               PRIMARY KEY,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)           CHECK (W_WAREHOUSE_NAME IS NOT NULL),
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    CONSTRAINT W_CONSTR_KEY2 CHECK(W_WAREHOUSE_SK > 0 AND W_WAREHOUSE_NAME IS NOT NULL)  
+);
+
+ +
+
+

Creating a Temporary Table

Specify the TEMP or TEMPORARY keyword to create a temporary table.

+
CREATE TEMPORARY TABLE warehouse_t14
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+);
+
+ +
+

Create a temporary table in a transaction and specify that data of this table is deleted when the transaction is committed.

+
CREATE TEMPORARY TABLE warehouse_t15
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) ON COMMIT DELETE ROWS;
+
+ +
+
+

Creating a Row-store Table

Set ORIENTATION to ROW.

+
CREATE TABLE tpcds.warehouse_t16
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (ORIENTATION = ROW);
+
+ +
+
+

Creating a Column-store Table in a Specified Version

Set COLVERSION to specify the version of the column storage format.

+
CREATE TABLE tpcds.warehouse_t18
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (ORIENTATION = COLUMN, COLVERSION=2.0);
+
+ +
+
+

Creating a Column-store Table with the Delta Table Enabled

Set enable_delta=on to enable the delta table in column-store tables.

+
CREATE TABLE tpcds.warehouse_t21
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (ORIENTATION = COLUMN, ENABLE_DELTA = ON);
+
+ +
+
+

Defining a Table with SKIP_FPI_HINT Enabled

Use the WITH clause to set SKIP_FPI_HINT.

+
CREATE TABLE tpcds.warehouse_t22
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (SKIP_FPI_HINT = TRUE);
+
+ +
+
+

Creating Hot and Cold Tables

Create an OBS tablespace that hot and cold tables depend on.

+
CREATE TABLESPACE obs_location WITH(
+    filesystem = obs, 
+    address = 'obs URL', 
+    access_key = 'xxxxxxxx',  
+    secret_access_key = 'xxxxxxxx', 
+    encrypt = 'on', 
+    storepath = '/obs_bucket/obs_tablespace'
+);
+
+ +
+

Create a hot or cold table. Only column-store partitioned tables are supported.

+
CREATE TABLE tpcds.warehouse_t23
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+)
+WITH (ORIENTATION = COLUMN, cold_tablespace = "obs_location", storage_policy = 'LMT:30')
+DISTRIBUTE BY HASH (W_WAREHOUSE_SK)
+PARTITION BY RANGE(W_WAREHOUSE_SQ_FT)
+(
+    PARTITION P1 VALUES LESS THAN(100000),
+    PARTITION P2 VALUES LESS THAN(200000),
+    PARTITION P3 VALUES LESS THAN(300000),
+    PARTITION P4 VALUES LESS THAN(400000),
+    PARTITION P5 VALUES LESS THAN(500000),
+    PARTITION P6 VALUES LESS THAN(600000),
+    PARTITION P7 VALUES LESS THAN(700000),
+    PARTITION P8 VALUES LESS THAN(MAXVALUE)
+)ENABLE ROW MOVEMENT;
+
+ +
+
+

Creating an Auto-increment Table That Uses UUID as the Primary Key

Set W_UUID to SMALLSERIAL.
CREATE TABLE tpcds.warehouse_t24
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_UUID                    SMALLSERIAL                   ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+) WITH (ORIENTATION = ROW);
+
+ +
+
+
+
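A hedged usage sketch (assuming the table above has been created): rows inserted without a value for W_UUID receive auto-incremented values.

+INSERT INTO tpcds.warehouse_t24 (W_WAREHOUSE_SK, W_WAREHOUSE_ID) VALUES (1, 'W001');
+INSERT INTO tpcds.warehouse_t24 (W_WAREHOUSE_SK, W_WAREHOUSE_ID) VALUES (2, 'W002');
+-- W_UUID is filled automatically: 1, 2, ...
+SELECT W_WAREHOUSE_SK, W_UUID FROM tpcds.warehouse_t24;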

Creating a Table that Uses Hash Distribution

Use DISTRIBUTE BY to specify table distribution across nodes.

+
CREATE TABLE tpcds.warehouse_t25
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2),
+    CONSTRAINT W_CONSTR_KEY3 UNIQUE(W_WAREHOUSE_SK)
+)DISTRIBUTE BY HASH(W_WAREHOUSE_SK);
+
+ +
+
+

Defining a Table with Each Row Stored in All DNs

CREATE TABLE tpcds.warehouse_t26
+(
+    W_WAREHOUSE_SK            INTEGER               NOT NULL,
+    W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
+    W_WAREHOUSE_NAME          VARCHAR(20)                   ,
+    W_WAREHOUSE_SQ_FT         INTEGER                       ,
+    W_STREET_NUMBER           CHAR(10)                      ,
+    W_STREET_NAME             VARCHAR(60)                   ,
+    W_STREET_TYPE             CHAR(15)                      ,
+    W_SUITE_NUMBER            CHAR(10)                      ,
+    W_CITY                    VARCHAR(60)                   ,
+    W_COUNTY                  VARCHAR(30)                   ,
+    W_STATE                   CHAR(2)                       ,
+    W_ZIP                     CHAR(10)                      ,
+    W_COUNTRY                 VARCHAR(20)                   ,
+    W_GMT_OFFSET              DECIMAL(5,2)
+)DISTRIBUTE BY REPLICATION;
+
+ +
+
+

Links

ALTER TABLE, DROP TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0178.html b/docs/dws/dev/dws_06_0178.html new file mode 100644 index 00000000..49c2d907 --- /dev/null +++ b/docs/dws/dev/dws_06_0178.html @@ -0,0 +1,101 @@ + + +

CREATE TABLE AS

+

Function

CREATE TABLE AS creates a table based on the results of a query.

+

It creates a table and fills it with data obtained using SELECT. The table columns have the names and data types associated with the output columns of the SELECT, except that you can override the output column names by giving an explicit list of new column names.

+

CREATE TABLE AS queries the source table once and writes the result into the new table, so the new table does not change when the source table changes. In contrast, a view re-evaluates its defining SELECT statement whenever it is queried.

+
+

Precautions

+
+

Syntax

CREATE [ UNLOGGED ] TABLE table_name
+    [ (column_name [, ...] ) ]
+    [ WITH ( {storage_parameter = value} [, ... ] ) ]
+    [ COMPRESS | NOCOMPRESS ]
+    
+    [ DISTRIBUTE BY { REPLICATION | { [HASH ] ( column_name ) } } ]
+    
+    AS query
+    [ WITH [ NO ] DATA ];
+
+ +
+
+

Parameter Description

+
+

Examples

Create the store_returns_t1 table and populate it with the rows of the store_returns table whose sr_item_sk values are greater than 4795.

+
CREATE TABLE store_returns_t1 AS SELECT * FROM store_returns WHERE sr_item_sk > '4795';
+
+ +
+

Copy store_returns to create the store_returns_t2 table.

+
CREATE TABLE store_returns_t2 AS table store_returns;
+
+ +
+
+
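The syntax above also allows an explicit column list and WITH NO DATA. A minimal sketch combining the two (the new column names are illustrative):

+-- Copy only the structure produced by the query, with renamed columns and no rows.
+CREATE TABLE store_returns_t3 (item_sk, ticket_number) AS
+    SELECT sr_item_sk, sr_ticket_number FROM store_returns
+    WITH NO DATA;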

Helpful Links

CREATE TABLE, SELECT

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0179.html b/docs/dws/dev/dws_06_0179.html new file mode 100644 index 00000000..5c182f66 --- /dev/null +++ b/docs/dws/dev/dws_06_0179.html @@ -0,0 +1,646 @@ + + +

CREATE TABLE PARTITION

+

Function

CREATE TABLE PARTITION creates a partitioned table. Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The logical table is called a partitioned table, and each physical piece is called a partition. Data is stored in these smaller physical pieces, namely, partitions, instead of in the larger logical partitioned table.

+

The common forms of partitioning include range partitioning, hash partitioning, list partitioning, and value partitioning. Currently, row-store and column-store tables support only range partitioning.

+

In range partitioning, the table is partitioned into ranges defined by a key column or set of columns, with no overlap between the ranges of values assigned to different partitions. Each range has a dedicated partition for data storage.

+

A partitioning policy determines how inserted data is mapped to partitions. Currently, range partitioning supports only the range partitioning policy.

+

Range partitioning policy: data is mapped to a created partition based on the partition key value. If the value maps to an existing partition, the data is inserted into that partition; otherwise, an error is returned. This is the most commonly used partitioning policy.

+

Partitioning can provide several benefits:

+ +
+

Precautions

A partitioned table supports unique and primary key constraints. The constraint keys of these constraints must contain all partition keys.

+
+

Syntax

CREATE TABLE [ IF NOT EXISTS ] partition_table_name
+( [ 
+    { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ]
+    | table_constraint
+    | LIKE source_table [ like_option [...] ] }[, ... ]
+] )
+    [ WITH ( {storage_parameter = value} [, ... ] ) ]
+    [ COMPRESS | NOCOMPRESS ]
+    [ TABLESPACE tablespace_name ]
+    [ DISTRIBUTE BY { REPLICATION | { [ HASH ] ( column_name ) } } ]
+    [ TO { GROUP groupname | NODE ( nodename [, ... ] ) } ]
+    PARTITION BY { 
+        {VALUES (partition_key)} |
+        {RANGE (partition_key) ( partition_less_than_item [, ... ] )} |
+        {RANGE (partition_key) ( partition_start_end_item [, ... ] )}
+    } [ { ENABLE | DISABLE } ROW MOVEMENT ]; 
+
+ +
+ + + +
+ +

Parameter Description

+
+ +

Examples

+
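As a minimal sketch consistent with the syntax above (table, column, and partition names are illustrative), the following creates a column-store table with range partitions:

+CREATE TABLE tpcds.web_returns_p1
+(
+    wr_returned_date_sk       INTEGER,
+    wr_item_sk                INTEGER NOT NULL,
+    wr_refunded_customer_sk   INTEGER
+)
+WITH (ORIENTATION = COLUMN)
+DISTRIBUTE BY HASH (wr_item_sk)
+PARTITION BY RANGE (wr_returned_date_sk)
+(
+    PARTITION p2019 VALUES LESS THAN (2458500),
+    PARTITION p2020 VALUES LESS THAN (2458850),
+    PARTITION pmax  VALUES LESS THAN (MAXVALUE)
+);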
+ +

Links

ALTER TABLE PARTITION, DROP TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0182.html b/docs/dws/dev/dws_06_0182.html new file mode 100644 index 00000000..6447064c --- /dev/null +++ b/docs/dws/dev/dws_06_0182.html @@ -0,0 +1,130 @@ + + +

CREATE TEXT SEARCH CONFIGURATION

+

Function

CREATE TEXT SEARCH CONFIGURATION creates a text search configuration. A text search configuration specifies a text search parser that can divide a string into tokens, plus dictionaries that can be used to determine which tokens are of interest for searching.

+
+

Important Notes

+
+

Syntax

CREATE TEXT SEARCH CONFIGURATION name 
+    ( PARSER = parser_name | COPY = source_config )
+    [ WITH ( {configuration_option = value} [, ...] )];
+
+ +
+
+

Parameter Description

+
+

Examples

Create a text search configuration.

+
CREATE TEXT SEARCH CONFIGURATION ngram1 (parser=ngram) WITH (gram_size = 2, grapsymbol_ignore = false);
+
+ +
+

Create a text search configuration.

+
CREATE TEXT SEARCH CONFIGURATION ngram2 (copy=ngram1) WITH (gram_size = 2, grapsymbol_ignore = false);
+
+ +
+

Create a text search configuration.

+
CREATE TEXT SEARCH CONFIGURATION english_1 (parser=default);
+
+ +
+
+
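As a hedged usage sketch, a configuration created this way can be referenced by name in full-text search functions such as to_tsvector:

+SELECT to_tsvector('english_1', 'a fat cat sat on a mat');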

Helpful Links

ALTER TEXT SEARCH CONFIGURATION, DROP TEXT SEARCH CONFIGURATION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0183.html b/docs/dws/dev/dws_06_0183.html new file mode 100644 index 00000000..b736988d --- /dev/null +++ b/docs/dws/dev/dws_06_0183.html @@ -0,0 +1,96 @@ + + +

CREATE TEXT SEARCH DICTIONARY

+

Function

CREATE TEXT SEARCH DICTIONARY creates a full-text search dictionary. A dictionary is used to identify and process specified words during full-text search.

+

Dictionaries are created by using predefined templates (defined in the PG_TS_TEMPLATE system catalog). Five types of dictionaries can be created: Simple, Ispell, Synonym, Thesaurus, and Snowball. Each type is used to handle a different task.

+
+

Precautions

+
+

Syntax

CREATE TEXT SEARCH DICTIONARY name (
+    TEMPLATE = template
+    [, option = value [, ... ]]
+);
+
+ +
+
+

Parameter Description

+
+

Examples

Create an Ispell dictionary english_ispell (the dictionary definition file is from the open source dictionary).
CREATE TEXT SEARCH DICTIONARY english_ispell (
+    TEMPLATE = ispell,
+    DictFile = english,
+    AffFile = english,
+    StopWords = english,
+    FilePath = 'obs://bucket_name/path accesskey=ak secretkey=sk region=rg' 
+);
+
+ +
+
+
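For comparison, a minimal sketch of a Simple dictionary based on the predefined simple template (the template name comes from PG_TS_TEMPLATE; the stop-word file name is an assumption):

+CREATE TEXT SEARCH DICTIONARY public.simple_dict (
+    TEMPLATE = pg_catalog.simple,
+    STOPWORDS = english
+);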

See examples in Configuration Examples.

+
+

Helpful Links

ALTER TEXT SEARCH DICTIONARY, DROP TEXT SEARCH DICTIONARY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0184.html b/docs/dws/dev/dws_06_0184.html new file mode 100644 index 00000000..4d9b829b --- /dev/null +++ b/docs/dws/dev/dws_06_0184.html @@ -0,0 +1,313 @@ + + +

CREATE TRIGGER

+

Function

CREATE TRIGGER creates a trigger. The trigger will be associated with a specified table or view, and will execute a specified function when certain events occur.

+
+

Precautions

+
+

Syntax

CREATE [ CONSTRAINT ] TRIGGER trigger_name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] }
+    ON table_name
+    [ FROM referenced_table_name ]
+    { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }
+    [ FOR [ EACH ] { ROW | STATEMENT } ]
+    [ WHEN ( condition ) ]
+    EXECUTE PROCEDURE function_name ( arguments );
+
+ +
+

Events include:

+
    INSERT
+    UPDATE [ OF column_name [, ... ] ]
+    DELETE
+    TRUNCATE
+
+ +
+
+

Parameter Description

+
+

Examples

Create a source table and a target table.

+
CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT);
+
+ +
+
CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT);
+
+ +
+

Create the trigger function tri_insert_func().

+
CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS
+           $$
+           DECLARE
+           BEGIN
+                   INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3);
+                   RETURN NEW;
+           END
+           $$ LANGUAGE PLPGSQL;
+
+ +
+

Create the trigger function tri_update_func().

+
CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS
+           $$
+           DECLARE
+           BEGIN
+                   UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id1=OLD.id1;
+                   RETURN OLD;
+           END
+           $$ LANGUAGE PLPGSQL;
+
+ +
+

Create the trigger function tri_delete_func().

+
CREATE OR REPLACE FUNCTION tri_delete_func() RETURNS TRIGGER AS
+           $$
+           DECLARE
+           BEGIN
+                   DELETE FROM test_trigger_des_tbl WHERE id1=OLD.id1;
+                   RETURN OLD;
+           END
+           $$ LANGUAGE PLPGSQL;
+
+ +
+

Create an INSERT trigger.

+
CREATE TRIGGER insert_trigger
+           BEFORE INSERT ON test_trigger_src_tbl
+           FOR EACH ROW
+           EXECUTE PROCEDURE tri_insert_func();
+
+ +
+

Create an UPDATE trigger.

+
CREATE TRIGGER update_trigger
+           AFTER UPDATE ON test_trigger_src_tbl  
+           FOR EACH ROW
+           EXECUTE PROCEDURE tri_update_func();
+
+ +
+

Create a DELETE trigger.

+
CREATE TRIGGER delete_trigger
+           BEFORE DELETE ON test_trigger_src_tbl
+           FOR EACH ROW
+           EXECUTE PROCEDURE tri_delete_func();
+
+ +
+
+
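As a usage sketch based on the objects created above, an insert into the source table fires insert_trigger, which copies the row into the target table:

+INSERT INTO test_trigger_src_tbl VALUES (100, 200, 300);
+-- Expected: the row (100, 200, 300) was copied into the target table by the trigger.
+SELECT * FROM test_trigger_des_tbl;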

Helpful Links

ALTER TRIGGER, DROP TRIGGER, ALTER TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0185.html b/docs/dws/dev/dws_06_0185.html new file mode 100644 index 00000000..01787695 --- /dev/null +++ b/docs/dws/dev/dws_06_0185.html @@ -0,0 +1,394 @@ + + +

CREATE TYPE

+

Function

CREATE TYPE defines a new data type in the current database. The user who defines a new data type becomes its owner. Types are designed only for row-store tables.

+

Four kinds of types can be created by using CREATE TYPE: composite types, base types, shell types, and enumerated types.

+ +
+

Precautions

If a schema name is given, the type will be created in the specified schema. Otherwise, it will be created in the current schema. A type name must be different from the name of any existing type or domain in the same schema. (Since tables have associated data types, a type name must also be different from the name of any existing table in the same schema.)

+
+

Syntax

CREATE TYPE name AS
+    ( [ attribute_name data_type [ COLLATE collation ] [, ... ] ] )
+
+CREATE TYPE name (
+    INPUT = input_function,
+    OUTPUT = output_function
+    [ , RECEIVE = receive_function ]
+    [ , SEND = send_function ]
+    [ , TYPMOD_IN = type_modifier_input_function ]
+    [ , TYPMOD_OUT = type_modifier_output_function ]
+    [ , ANALYZE = analyze_function ]
+    [ , INTERNALLENGTH = { internallength | VARIABLE } ]
+    [ , PASSEDBYVALUE ]
+    [ , ALIGNMENT = alignment ]
+    [ , STORAGE = storage ]
+    [ , LIKE = like_type ]
+    [ , CATEGORY = category ]
+    [ , PREFERRED = preferred ]
+    [ , DEFAULT = default ]
+    [ , ELEMENT = element ]
+    [ , DELIMITER = delimiter ]
+    [ , COLLATABLE = collatable ]
+)
+
+CREATE TYPE name
+
+CREATE TYPE name AS ENUM
+    ( [ 'label' [, ... ] ] )
+
+ +
+
+

Parameter Description

Composite types

+ +

Base types

+

When creating a base type, you can place parameters in any order. The input_function and output_function parameters are mandatory, and other parameters are optional.

+ +

Whenever a user-defined type is created, GaussDB(DWS) automatically creates an associated array type whose name consists of the element type name prepended with an underscore (_).

+
+
+

Example

Example 1: Create a composite type, create a table, insert data, and make a query.

+
CREATE TYPE compfoo AS (f1 int, f2 text);
+CREATE TABLE t1_compfoo(a int, b compfoo);
+CREATE TABLE t2_compfoo(a int, b compfoo);
+INSERT INTO t1_compfoo values(1,(1,'demo'));
+INSERT INTO t2_compfoo select * from t1_compfoo;
+SELECT (b).f1 FROM t1_compfoo;
+SELECT * FROM t1_compfoo t1 join t2_compfoo t2 on (t1.b).f1=(t1.b).f1;
+
+ +
+

Example 2: Create an enumeration type and use it in the table definition.

+
CREATE TYPE bugstatus AS ENUM ('create', 'modify', 'closed');
+CREATE TABLE customer (name text,current_bugstatus bugstatus);
+INSERT INTO customer VALUES ('type','create');
+SELECT * FROM customer WHERE current_bugstatus = 'create';
+
+ +
+

Example 3: Compile a .so file and create the shell type.

+
CREATE TYPE complex;
+
+ +
+

This statement creates a placeholder for the type to be created, which can then be referenced when defining its I/O function. Now you can define an I/O function. Note that the function must be declared in NOT FENCED mode when it is created.

+
CREATE FUNCTION
+complex_in(cstring)
+    RETURNS complex
+    AS 'filename'
+    LANGUAGE C IMMUTABLE STRICT not fenced;
+
+CREATE FUNCTION
+complex_out(complex)
+    RETURNS cstring
+    AS 'filename'
+    LANGUAGE C IMMUTABLE STRICT not fenced;
+
+CREATE FUNCTION
+complex_recv(internal)
+    RETURNS complex
+    AS 'filename'
+    LANGUAGE C IMMUTABLE STRICT not fenced;
+
+CREATE FUNCTION
+complex_send(complex)
+    RETURNS bytea
+    AS 'filename'
+    LANGUAGE C IMMUTABLE STRICT not fenced;
+
+ +
+

Finally, provide a complete definition of the data type.

+
CREATE TYPE complex (
+internallength = 16,
+input = complex_in,
+output = complex_out,
+receive = complex_recv,
+send = complex_send,
+alignment = double
+);
+
+ +
+

+

The C functions corresponding to the input, output, receive, and send functions are defined as follows:

+
-- Define a structure body Complex:
+typedef struct Complex {
+    double      x;
+    double      y;
+} Complex;
+
+-- Define an input function:
+PG_FUNCTION_INFO_V1(complex_in);
+
+Datum
+complex_in(PG_FUNCTION_ARGS)
+{
+    char       *str = PG_GETARG_CSTRING(0);
+    double      x,
+                y;
+    Complex    *result;
+
+    if (sscanf(str, " ( %lf , %lf )", &x, &y) != 2)
+        ereport(ERROR,
+                (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+                 errmsg("invalid input syntax for complex: \"%s\"",
+                        str)));
+
+    result = (Complex *) palloc(sizeof(Complex));
+    result->x = x;
+    result->y = y;
+    PG_RETURN_POINTER(result);
+}
+
+-- Define an output function:
+PG_FUNCTION_INFO_V1(complex_out);
+
+Datum
+complex_out(PG_FUNCTION_ARGS)
+{
+        Complex    *complex = (Complex *) PG_GETARG_POINTER(0);
+        char       *result;
+
+        result = (char *) palloc(100);
+        snprintf(result, 100, "(%g,%g)", complex->x, complex->y);
+        PG_RETURN_CSTRING(result);
+}
+
+-- Define a receive function:
+PG_FUNCTION_INFO_V1(complex_recv);
+
+Datum
+complex_recv(PG_FUNCTION_ARGS)
+{
+    StringInfo  buf = (StringInfo) PG_GETARG_POINTER(0);
+    Complex    *result;
+
+    result = (Complex *) palloc(sizeof(Complex));
+    result->x = pq_getmsgfloat8(buf);
+    result->y = pq_getmsgfloat8(buf);
+    PG_RETURN_POINTER(result);
+}
+
+-- Define a send function:
+PG_FUNCTION_INFO_V1(complex_send);
+
+Datum
+complex_send(PG_FUNCTION_ARGS)
+{
+    Complex    *complex = (Complex *) PG_GETARG_POINTER(0);
+    StringInfoData buf;
+
+    pq_begintypsend(&buf);
+    pq_sendfloat8(&buf, complex->x);
+    pq_sendfloat8(&buf, complex->y);
+    PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
+}
+
+ +
+
+

Helpful Links

ALTER TYPE, DROP TYPE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0186.html b/docs/dws/dev/dws_06_0186.html new file mode 100644 index 00000000..0c220186 --- /dev/null +++ b/docs/dws/dev/dws_06_0186.html @@ -0,0 +1,111 @@ + + +

CREATE USER

+

Function

CREATE USER creates a user.

+
+

Important Notes

+
+

Syntax

CREATE USER user_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' | DISABLE };
+
+ +
+

The option clause is used for setting information including permissions and attributes.

+
{SYSADMIN | NOSYSADMIN}
+    | {AUDITADMIN | NOAUDITADMIN}
+    | {CREATEDB | NOCREATEDB}
+    | {USEFT | NOUSEFT}
+    | {CREATEROLE | NOCREATEROLE}
+    | {INHERIT | NOINHERIT}
+    | {LOGIN | NOLOGIN}
+    | {REPLICATION | NOREPLICATION}
+    | {INDEPENDENT | NOINDEPENDENT}
+    | {VCADMIN | NOVCADMIN}
+    | CONNECTION LIMIT connlimit
+    | VALID BEGIN 'timestamp'
+    | VALID UNTIL 'timestamp'
+    | RESOURCE POOL 'respool'
+    | USER GROUP 'groupuser'
+    | PERM SPACE 'spacelimit'
+    | TEMP SPACE 'tmpspacelimit'
+    | SPILL SPACE 'spillspacelimit'
+    | NODE GROUP logic_cluster_name
+    | IN ROLE role_name [, ...]
+    | IN GROUP role_name [, ...]
+    | ROLE role_name [, ...]
+    | ADMIN role_name [, ...]
+    | USER role_name [, ...]
+    | SYSID uid
+    | DEFAULT TABLESPACE tablespace_name
+    | PROFILE DEFAULT
+    | PROFILE profile_name
+    | PGUSER
+    | AUTHINFO 'authinfo'
+    | PASSWORD EXPIRATION period
+
+ +
+
+

Parameters

+

For details on other parameters, see CREATE ROLE Parameter Description.

+
+

Example

Create user jim.

+
CREATE USER jim PASSWORD '{password}';
+
+ +
+

The following statement uses the equivalent IDENTIFIED BY syntax.

+
CREATE USER kim IDENTIFIED BY '{password}';
+
+ +
+

To create a user who has the permission to create databases, add the CREATEDB keyword.

+
CREATE USER dim CREATEDB PASSWORD '{password}';
+
+ +
+
+
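The option list above also includes space quotas. A hedged sketch combining them (the quota values are illustrative):

+CREATE USER tom PASSWORD '{password}' PERM SPACE '4G' TEMP SPACE '2G';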

Links

ALTER USER, CREATE ROLE, DROP USER

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0187.html b/docs/dws/dev/dws_06_0187.html new file mode 100644 index 00000000..a784d6d6 --- /dev/null +++ b/docs/dws/dev/dws_06_0187.html @@ -0,0 +1,77 @@ + + +

CREATE VIEW

+

Function

CREATE VIEW creates a view. A view is a virtual table, not a base table. A database stores only the definition of a view, not its data; the data remains in the original base tables. If data in the base tables changes, the data shown by the view changes accordingly. In this sense, a view is a window through which users can see the data in the database that interests them, along with its changes.

+
+

Precautions

None

+
+

Syntax

CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW view_name [ ( column_name [, ...] ) ]
+    [ WITH ( {view_option_name [= view_option_value]} [, ... ] ) ]
+    AS query;
+
+ +
+
  • You can use WITH (security_barriers) to create a relatively secure view. This prevents attackers from printing hidden base table data by using the RAISE statement of low-cost functions.
  • When the view_independent GUC parameter is enabled, columns can be deleted from common views. Note that if a column-level constraint exists, the corresponding column cannot be deleted.
+
+
+

Parameter Description

+
+

Examples

Create a view consisting of columns whose spcname is pg_default.

+
CREATE VIEW myView AS
+    SELECT * FROM pg_tablespace WHERE spcname = 'pg_default';
+
+ +
+

Run the following command to redefine the existing view myView and create a view consisting of columns whose spcname is pg_global:

+
CREATE OR REPLACE VIEW myView AS
+    SELECT * FROM pg_tablespace WHERE spcname = 'pg_global';
+
+ +
+

Create a view consisting of rows with c_customer_sk smaller than 150.

+
CREATE VIEW tpcds.customer_details_view_v1 AS
+    SELECT * FROM tpcds.customer
+    WHERE c_customer_sk < 150;
+
+ +
+
+

Updatable Views

After the enable_view_update parameter is enabled, simple views that meet all the following conditions can be updated using the INSERT, UPDATE, and DELETE statements:

+ +

If the definition of the updatable view contains a WHERE condition, the condition restricts the UPDATE and DELETE statements from modifying rows on the base table. If the WHERE condition is not met after the UPDATE statement is executed, the updated rows cannot be queried in the view. Similarly, if the WHERE condition is not met after the INSERT statement is executed, the inserted data cannot be queried in the view. To insert, update, or delete data in a view, you must have the corresponding permissions on the view and tables.
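A hedged sketch of updating through a view (assuming enable_view_update is on and tpcds.customer_details_view_v1 from the example above qualifies as a simple view; the column names follow the TPC-DS customer table):

+UPDATE tpcds.customer_details_view_v1 SET c_customer_id = 'NEWID' WHERE c_customer_sk = 100;
+DELETE FROM tpcds.customer_details_view_v1 WHERE c_customer_sk = 101;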

+
+

Helpful Links

ALTER VIEW and DROP VIEW

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0188.html b/docs/dws/dev/dws_06_0188.html new file mode 100644 index 00000000..ab1e609b --- /dev/null +++ b/docs/dws/dev/dws_06_0188.html @@ -0,0 +1,97 @@ + + +

CURSOR

+

Function

CURSOR defines a cursor. This command retrieves a few rows at a time from a query.

+

To process an SQL statement, a stored procedure allocates a memory segment, called a context area, to store execution context. A cursor is a handle or pointer to a context area. With cursors, stored procedures can control how the context area changes as the statement is processed.

+
+

Precautions

+
+

Syntax

CURSOR cursor_name
+    [ BINARY ]  [ NO SCROLL ]  [ { WITH | WITHOUT } HOLD ]
+    FOR query;
+
+ +
+
+

Parameter Description

+
+

Examples

Set up the cursor1 cursor.

+
CURSOR cursor1 FOR SELECT * FROM tpcds.customer_address ORDER BY 1;
+
+ +
+

Set up the cursor cursor2.

+
CURSOR cursor2 FOR VALUES(1,2),(0,3) ORDER BY 1;
+
+ +
+

An example of using the WITH HOLD cursor is as follows:

+

Start a transaction.

+
START TRANSACTION;
+
+ +
+

Set up a WITH HOLD cursor.

+
DECLARE cursor3 CURSOR WITH HOLD FOR SELECT * FROM tpcds.customer_address ORDER BY 1;
+
+ +
+
+

Fetch the first two rows from cursor3.

+
FETCH FORWARD 2 FROM cursor3;
+ ca_address_sk |  ca_address_id   | ca_street_number |   ca_street_name   | ca_street_type  | ca_suite_number |     ca_city     |    ca_county    | ca_state |   ca_zip   |  ca_country   | ca_gmt_offset |   ca_location_type   
+---------------+------------------+------------------+--------------------+-----------------+-----------------+-----------------+-----------------+----------+------------+---------------+---------------+----------------------
+             1 | AAAAAAAABAAAAAAA | 18               | Jackson            | Parkway         | Suite 280       | Fairfield       | Maricopa County | AZ       | 86192      | United States |         -7.00 | condo               
+             2 | AAAAAAAACAAAAAAA | 362              | Washington 6th     | RD              | Suite 80        | Fairview        | Taos County     | NM       | 85709      | United States |         -7.00 | condo               
+(2 rows)
+
+ +
+

End the transaction.

+
END;
+
+ +
+

Fetch the next row from cursor3.

+
FETCH FORWARD 1 FROM cursor3;
+ ca_address_sk |  ca_address_id   | ca_street_number |   ca_street_name   | ca_street_type  | ca_suite_number |     ca_city     |    ca_county    | ca_state |   ca_zip   |  ca_country   | ca_gmt_offset |   ca_location_type   
+---------------+------------------+------------------+--------------------+-----------------+-----------------+-----------------+-----------------+----------+------------+---------------+---------------+----------------------
+             3 | AAAAAAAADAAAAAAA | 585              | Dogwood Washington | Circle          | Suite Q         | Pleasant Valley | York County     | PA       | 12477      | United States |         -5.00 | single family       
+(1 row)
+
+ +
+

Close a cursor.

+
CLOSE cursor3;
+
+ +
+

Helpful Links

FETCH

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0189.html b/docs/dws/dev/dws_06_0189.html new file mode 100644 index 00000000..557327c1 --- /dev/null +++ b/docs/dws/dev/dws_06_0189.html @@ -0,0 +1,32 @@ + + +

DROP DATABASE

+

Function

DROP DATABASE deletes a database.

+
+

Precautions

+
+

Syntax

DROP DATABASE [ IF EXISTS ] database_name;
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the database named music.

+
DROP DATABASE music;
+
+ +
+
+

Links

CREATE DATABASE, ALTER DATABASE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0192.html b/docs/dws/dev/dws_06_0192.html new file mode 100644 index 00000000..4b168ebb --- /dev/null +++ b/docs/dws/dev/dws_06_0192.html @@ -0,0 +1,35 @@ + + +

DROP FOREIGN TABLE

+

Function

DROP FOREIGN TABLE deletes a specified foreign table.

+
+

Precautions

DROP FOREIGN TABLE forcibly deletes a specified table. After a table is deleted, any indexes that exist for the table are deleted as well, and the functions and stored procedures that depend on the table can no longer be run.

+
+

Syntax

DROP FOREIGN TABLE [ IF EXISTS ] 
+    table_name [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the foreign table named customer_ft.

+
DROP FOREIGN TABLE customer_ft;
+
+ +
+
+

Helpful Links

ALTER FOREIGN TABLE (for GDS), ALTER FOREIGN TABLE (for HDFS or OBS), CREATE FOREIGN TABLE (for GDS Import and Export), CREATE FOREIGN TABLE (SQL on OBS or Hadoop)

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0193.html b/docs/dws/dev/dws_06_0193.html new file mode 100644 index 00000000..cd865cac --- /dev/null +++ b/docs/dws/dev/dws_06_0193.html @@ -0,0 +1,38 @@ + + +

DROP FUNCTION

+

Function

DROP FUNCTION deletes an existing function.

+
+

Precautions

If a function involves operations on temporary tables, the function cannot be deleted by running DROP FUNCTION.

+
+

Syntax

DROP FUNCTION [ IF EXISTS ] function_name 
+[ ( [ {[ argmode ] [ argname ] argtype} [, ...] ] ) [ CASCADE | RESTRICT ] ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete a function named add_two_number.

+
DROP FUNCTION add_two_number;
+
+ +
+
+
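When several overloaded functions share a name, the syntax above lets you identify one by its argument types. A hedged sketch (the argument types are assumptions):

+DROP FUNCTION IF EXISTS add_two_number(INTEGER, INTEGER);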

Helpful Links

ALTER FUNCTION, CREATE FUNCTION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0194.html b/docs/dws/dev/dws_06_0194.html new file mode 100644 index 00000000..3591f2c6 --- /dev/null +++ b/docs/dws/dev/dws_06_0194.html @@ -0,0 +1,24 @@ + + +

DROP GROUP

+

Function

DROP GROUP deletes a user group.

+

DROP GROUP is the alias for DROP ROLE.

+
+

Precautions

DROP GROUP is the internal interface encapsulated in the gs_om tool. You are not advised to use this interface, because doing so affects the cluster.

+
+

Syntax

DROP GROUP [ IF EXISTS ] group_name [, ...];
+
+ +
+
+

Parameter Description

See Examples in DROP ROLE.

+
+

Helpful Links

CREATE GROUP, ALTER GROUP, DROP ROLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0195.html b/docs/dws/dev/dws_06_0195.html new file mode 100644 index 00000000..66a3bf1e --- /dev/null +++ b/docs/dws/dev/dws_06_0195.html @@ -0,0 +1,35 @@ + + +

DROP INDEX

+

Function

DROP INDEX deletes an index.

+
+

Precautions

Only the owner of an index or a system administrator can run DROP INDEX command.

+
+

Syntax

DROP INDEX [ IF EXISTS ] 
+    index_name [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the ds_ship_mode_t1_index2 index.

+
DROP INDEX tpcds.ds_ship_mode_t1_index2;
+
+ +
+
+

Helpful Links

ALTER INDEX, CREATE INDEX

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0198.html b/docs/dws/dev/dws_06_0198.html new file mode 100644 index 00000000..87a6905e --- /dev/null +++ b/docs/dws/dev/dws_06_0198.html @@ -0,0 +1,23 @@ + + +

DROP OWNED

+

Function

DROP OWNED deletes the database objects of a database role.

+
+

Important Notes

The role's permissions on all the database objects in the current database and shared objects (databases and tablespaces) are revoked.

+
+

Syntax

DROP OWNED BY name [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+
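As a minimal hedged sketch (jim is the user created in CREATE USER above; CASCADE also drops dependent objects):

+DROP OWNED BY jim CASCADE;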
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0199.html b/docs/dws/dev/dws_06_0199.html new file mode 100644 index 00000000..227c44c4 --- /dev/null +++ b/docs/dws/dev/dws_06_0199.html @@ -0,0 +1,32 @@ + + +

DROP REDACTION POLICY

+

Function

DROP REDACTION POLICY deletes a data redaction policy applied to a specified table.

+
+

Precautions

Only the table owner has the permission to delete a data redaction policy.

+
+

Syntax

DROP REDACTION POLICY [ IF EXISTS ] policy_name ON table_name;
+
+ +
+
+

Parameter Description

+
+

Examples

Delete a data masking policy.

+
DROP REDACTION POLICY mask_emp ON emp;
+
+ +
+
+

Helpful Links

ALTER REDACTION POLICY, CREATE REDACTION POLICY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0200.html b/docs/dws/dev/dws_06_0200.html new file mode 100644 index 00000000..1fa1f41d --- /dev/null +++ b/docs/dws/dev/dws_06_0200.html @@ -0,0 +1,31 @@ + + +

DROP ROW LEVEL SECURITY POLICY

+

Function

DROP ROW LEVEL SECURITY POLICY deletes a row-level access control policy from a table.

+
+

Precautions

Only the table owner or administrators can delete a row-level access control policy from the table.

+
+

Syntax

DROP [ ROW LEVEL SECURITY ] POLICY [ IF EXISTS ] policy_name ON table_name [ CASCADE | RESTRICT ]
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the row-level access control policy.

+
DROP ROW LEVEL SECURITY POLICY all_data_rls ON all_data;
+
+

Helpful Links

ALTER ROW LEVEL SECURITY POLICY, CREATE ROW LEVEL SECURITY POLICY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0201.html b/docs/dws/dev/dws_06_0201.html new file mode 100644 index 00000000..af5f630f --- /dev/null +++ b/docs/dws/dev/dws_06_0201.html @@ -0,0 +1,32 @@ + + +

DROP PROCEDURE

+

Function

DROP PROCEDURE deletes an existing stored procedure.

+
+

Precautions

None.

+
+

Syntax

DROP PROCEDURE [ IF EXISTS  ] procedure_name ;
+
+ +
+
+

Parameter Description

+
+

Examples

Delete a stored procedure.

+
DROP PROCEDURE prc_add;
+
+ +
+
+

Helpful Links

CREATE PROCEDURE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0202.html b/docs/dws/dev/dws_06_0202.html new file mode 100644 index 00000000..73c0381b --- /dev/null +++ b/docs/dws/dev/dws_06_0202.html @@ -0,0 +1,34 @@ + + +

DROP RESOURCE POOL

+

Function

DROP RESOURCE POOL deletes a resource pool.

+

The resource pool cannot be deleted if it is associated with a role.

+
+
+

Precautions

The user must have the DROP permission in order to delete a resource pool.

+
+

Syntax

DROP RESOURCE POOL [ IF EXISTS ] pool_name;
+
+ +
+
+

Parameter Description

+
+ +

A resource pool can be independently deleted only when it is not associated with any users.

+
+

Example

Delete a resource pool.

+
DROP RESOURCE POOL pool1;
+
+

Links

ALTER RESOURCE POOL, CREATE RESOURCE POOL

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0203.html b/docs/dws/dev/dws_06_0203.html new file mode 100644 index 00000000..fb0205ec --- /dev/null +++ b/docs/dws/dev/dws_06_0203.html @@ -0,0 +1,32 @@ + + +

DROP ROLE

+

Function

DROP ROLE deletes a specified role.

+
+

Precautions

If a "role is being used by other users" error is displayed when you run DROP ROLE, it might be that threads cannot respond to signals in a timely manner during the CLEAN CONNECTION process. As a result, connections are not completely cleared. In this case, you need to run CLEAN CONNECTION again.

+
+

Syntax

DROP ROLE [ IF EXISTS ] role_name [, ...];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the manager role.

+
DROP ROLE manager;
+
+ +
+
+

Helpful Links

CREATE ROLE, ALTER ROLE, SET ROLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0204.html b/docs/dws/dev/dws_06_0204.html new file mode 100644 index 00000000..5f424412 --- /dev/null +++ b/docs/dws/dev/dws_06_0204.html @@ -0,0 +1,36 @@ + + +

DROP SCHEMA

+

Function

DROP SCHEMA deletes a schema in a database.

+
+

Precautions

Only a schema owner or a system administrator can run the DROP SCHEMA command.

+ +
+

Syntax

DROP SCHEMA [ IF EXISTS ] schema_name [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+

Do not delete schemas whose names begin with pg_temp or pg_toast_temp. They are internal system schemas, and deleting them may cause unexpected errors.

+
+

A schema that is currently in use cannot be deleted. To delete it, switch to another schema first.

+
+
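A sketch of that workaround, assuming ds_new is the schema currently in use:

SET CURRENT_SCHEMA TO public;  -- switch to another schema first
DROP SCHEMA ds_new CASCADE;    -- CASCADE also drops objects contained in the schema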
+

Example

Delete the ds_new schema.

+
DROP SCHEMA ds_new;
+

+
+

Links

ALTER SCHEMA, CREATE SCHEMA

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0205.html b/docs/dws/dev/dws_06_0205.html new file mode 100644 index 00000000..0771ae52 --- /dev/null +++ b/docs/dws/dev/dws_06_0205.html @@ -0,0 +1,33 @@ + + +

DROP SEQUENCE

+

Function

DROP SEQUENCE deletes a sequence from the current database.

+
+

Precautions

Only a sequence owner or a system administrator can delete a sequence.

+
+

Syntax

DROP SEQUENCE [ IF EXISTS ] {[schema.]sequence_name} [ , ... ] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the sequence.

+
DROP SEQUENCE serial;
+
+ +
+
+

Helpful Links

CREATE SEQUENCE, ALTER SEQUENCE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0206.html b/docs/dws/dev/dws_06_0206.html new file mode 100644 index 00000000..26db102d --- /dev/null +++ b/docs/dws/dev/dws_06_0206.html @@ -0,0 +1,32 @@ + + +

DROP SERVER

+

Function

DROP SERVER deletes an existing data server.

+
+

Precautions

Only the server owner can delete a server.

+
+

Syntax

DROP SERVER [ IF EXISTS ] server_name [ {CASCADE | RESTRICT} ] ;
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the hdfs_server server.

+
DROP SERVER hdfs_server;
+
+ +
+
+

Helpful Links

CREATE SERVER, ALTER SERVER

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0207.html b/docs/dws/dev/dws_06_0207.html new file mode 100644 index 00000000..9f41da07 --- /dev/null +++ b/docs/dws/dev/dws_06_0207.html @@ -0,0 +1,34 @@ + + +

DROP SYNONYM

+

Function

DROP SYNONYM is used to delete a synonym object.

+
+

Precautions

Only a synonym owner or a system administrator can run the DROP SYNONYM command.

+
+

Syntax

DROP SYNONYM [ IF EXISTS ] synonym_name [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete a synonym.

+
DROP SYNONYM t1;
DROP SCHEMA ot CASCADE;
+
+ +
+
+

Helpful Links

ALTER SYNONYM and CREATE SYNONYM

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0208.html b/docs/dws/dev/dws_06_0208.html new file mode 100644 index 00000000..d9c81c40 --- /dev/null +++ b/docs/dws/dev/dws_06_0208.html @@ -0,0 +1,35 @@ + + +

DROP TABLE

+

Function

DROP TABLE deletes a specified table.

+
+

Precautions

+
+

Syntax

DROP TABLE [ IF EXISTS ] 
    { [schema.]table_name } [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Example

Delete the warehouse_t1 table.

+
DROP TABLE tpcds.warehouse_t1;
+
+ +
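A hedged variant of the example above: IF EXISTS makes the statement a no-op when the table is already gone, and CASCADE (both shown in the syntax) also drops dependent objects such as views:

DROP TABLE IF EXISTS tpcds.warehouse_t1 CASCADE;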
+
+

Links

ALTER TABLE, CREATE TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0210.html b/docs/dws/dev/dws_06_0210.html new file mode 100644 index 00000000..dea27d04 --- /dev/null +++ b/docs/dws/dev/dws_06_0210.html @@ -0,0 +1,33 @@ + + +

DROP TEXT SEARCH CONFIGURATION

+

Function

DROP TEXT SEARCH CONFIGURATION deletes an existing text search configuration.

+
+

Precautions

To run the DROP TEXT SEARCH CONFIGURATION command, you must be the owner of the text search configuration.

+
+

Syntax

DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the text search configuration ngram1.

+
DROP TEXT SEARCH CONFIGURATION ngram1;
+
+ +
+
+

Helpful Links

ALTER TEXT SEARCH CONFIGURATION, CREATE TEXT SEARCH CONFIGURATION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0211.html b/docs/dws/dev/dws_06_0211.html new file mode 100644 index 00000000..b1a2d219 --- /dev/null +++ b/docs/dws/dev/dws_06_0211.html @@ -0,0 +1,35 @@ + + +

DROP TEXT SEARCH DICTIONARY

+

Function

DROP TEXT SEARCH DICTIONARY deletes a full-text retrieval dictionary.

+
+

Precautions

+
+

Syntax

DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ]
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the english dictionary.

+
DROP TEXT SEARCH DICTIONARY english;
+
+ +
+
+

Helpful Links

ALTER TEXT SEARCH DICTIONARY, CREATE TEXT SEARCH DICTIONARY

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0212.html b/docs/dws/dev/dws_06_0212.html new file mode 100644 index 00000000..f96e6160 --- /dev/null +++ b/docs/dws/dev/dws_06_0212.html @@ -0,0 +1,35 @@ + + +

DROP TRIGGER

+

Function

DROP TRIGGER deletes a trigger.

+
+

Precautions

Only the owner of a trigger and system administrators can run the DROP TRIGGER statement.

+
+

Syntax

DROP TRIGGER [ IF EXISTS ] trigger_name ON table_name [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the trigger insert_trigger.

+
DROP TRIGGER insert_trigger ON test_trigger_src_tbl;
+
+ +
+
+

Helpful Links

CREATE TRIGGER, ALTER TRIGGER, ALTER TABLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0213.html b/docs/dws/dev/dws_06_0213.html new file mode 100644 index 00000000..dfd5a02e --- /dev/null +++ b/docs/dws/dev/dws_06_0213.html @@ -0,0 +1,32 @@ + + +

DROP TYPE

+

Function

DROP TYPE deletes a user-defined data type. Only the type owner has permission to run this statement.

+
+

Syntax

DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the compfoo type.

+
DROP TYPE compfoo cascade;
+
+ +
+
+

Helpful Links

ALTER TYPE, CREATE TYPE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0214.html b/docs/dws/dev/dws_06_0214.html new file mode 100644 index 00000000..56b03cef --- /dev/null +++ b/docs/dws/dev/dws_06_0214.html @@ -0,0 +1,36 @@ + + +

DROP USER

+

Function

DROP USER deletes a user. Deleting a user will also delete the schema having the same name as the user.

+
+

Precautions

+
+

Syntax

DROP USER [ IF EXISTS ] user_name [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Example

Delete user jim.

+
DROP USER jim CASCADE;
+
+ +
+
+

Links

ALTER USER, CREATE USER

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0215.html b/docs/dws/dev/dws_06_0215.html new file mode 100644 index 00000000..7ab8319b --- /dev/null +++ b/docs/dws/dev/dws_06_0215.html @@ -0,0 +1,38 @@ + + +

DROP VIEW

+

Function

DROP VIEW forcibly deletes an existing view in a database.

+
+

Precautions

Only a view owner or a system administrator can run the DROP VIEW command.

+
+

Syntax

DROP VIEW [ IF EXISTS ] view_name [, ...] [ CASCADE | RESTRICT ];
+
+ +
+
+

Parameter Description

+
+

Examples

Delete the myView view.

+
DROP VIEW myView;
+
+ +
+

Delete the customer_details_view_v2 view.

+
DROP VIEW public.customer_details_view_v2;
+
+ +
+
+

Helpful Links

ALTER VIEW, CREATE VIEW

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0216.html b/docs/dws/dev/dws_06_0216.html new file mode 100644 index 00000000..4fd5a3d0 --- /dev/null +++ b/docs/dws/dev/dws_06_0216.html @@ -0,0 +1,127 @@ + + +

FETCH

+

Function

FETCH retrieves data using a previously-created cursor.

+

A cursor has an associated position, which is used by FETCH. The cursor position can be before the first row of the query result, on any particular row of the result, or after the last row of the result.

+ +
+

Precautions

+
+

Syntax

FETCH [ direction { FROM | IN } ] cursor_name;
+
+ +
+

The optional direction clause takes one of the following values.

+
NEXT
   | PRIOR
   | FIRST
   | LAST
   | ABSOLUTE count
   | RELATIVE count
   | count
   | ALL
   | FORWARD
   | FORWARD count
   | FORWARD ALL
   | BACKWARD
   | BACKWARD count
   | BACKWARD ALL
+
+ +
+
+

Parameter Description

+
+

Examples

Example 1: Run the SELECT statement to read a table using a cursor.

+

Set up the cursor1 cursor.

+
CURSOR cursor1 FOR SELECT * FROM tpcds.customer_address ORDER BY 1;
+
+ +
+

Fetch the first three rows from cursor1.

+
FETCH FORWARD 3 FROM cursor1;
 ca_address_sk |  ca_address_id   | ca_street_number |   ca_street_name   | ca_street_type  | ca_suite_number |     ca_city     |    ca_county    | ca_state |   ca_zip   |  ca_country   | ca_gmt_offset |   ca_location_type   
---------------+------------------+------------------+--------------------+-----------------+-----------------+-----------------+-----------------+----------+------------+---------------+---------------+----------------------
             1 | AAAAAAAABAAAAAAA | 18               | Jackson            | Parkway         | Suite 280       | Fairfield       | Maricopa County | AZ       | 86192      | United States |         -7.00 | condo               
             2 | AAAAAAAACAAAAAAA | 362              | Washington 6th     | RD              | Suite 80        | Fairview        | Taos County     | NM       | 85709      | United States |         -7.00 | condo               
             3 | AAAAAAAADAAAAAAA | 585              | Dogwood Washington | Circle          | Suite Q         | Pleasant Valley | York County     | PA       | 12477      | United States |         -5.00 | single family       
(3 rows)
+
+ +
+
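A sketch of positioned fetches against the same cursor; it assumes cursor1 is still open and allows backward movement:

FETCH ABSOLUTE 2 FROM cursor1;   -- position on and return the second row
FETCH BACKWARD 1 FROM cursor1;   -- step back one row and return it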

Example 2: Use a cursor to read the content in the VALUES clause.

+

Set up the cursor cursor2.

+
CURSOR cursor2 FOR VALUES(1,2),(0,3) ORDER BY 1;
+
+ +
+

Fetch the first two rows from cursor2.

+
FETCH FORWARD 2 FROM cursor2;
 column1 | column2
---------+---------
       0 |       3
       1 |       2
(2 rows)
+
+ +
+
+

Helpful Links

CLOSE, MOVE, CURSOR

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0217.html b/docs/dws/dev/dws_06_0217.html new file mode 100644 index 00000000..02d8d692 --- /dev/null +++ b/docs/dws/dev/dws_06_0217.html @@ -0,0 +1,62 @@ + + +

MOVE

+

Function

MOVE repositions a cursor without retrieving any data. MOVE works exactly like the FETCH command, except it only repositions the cursor and does not return rows.

+
+

Precautions

None

+
+

Syntax

MOVE [ direction [ FROM | IN ] ] cursor_name;
+
+ +
+

The optional direction clause takes one of the following values.

+
NEXT
   | PRIOR
   | FIRST
   | LAST
   | ABSOLUTE count
   | RELATIVE count
   | count
   | ALL
   | FORWARD
   | FORWARD count
   | FORWARD ALL
   | BACKWARD
   | BACKWARD count
   | BACKWARD ALL
+
+ +
+
+

Parameter Description

MOVE command parameters are the same as FETCH command parameters. For details, see Parameter Description in FETCH.

+

On successful completion, a MOVE command returns a command tag of the form MOVE count. The count is the number of rows that a FETCH command with the same parameters would have returned (possibly zero).

+
+
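A minimal sketch combining MOVE with FETCH; it assumes a cursor named cursor1 is open and positioned before the first row:

MOVE FORWARD 3 FROM cursor1;   -- returns the tag MOVE 3 if three rows were skipped
FETCH NEXT FROM cursor1;       -- returns the fourth row of the result set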
+

Examples

Skip the first three rows of cursor1.

+
MOVE FORWARD 3 FROM cursor1;
+
+ +
+
+

Helpful Links

CLOSE, FETCH, CURSOR

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0218.html b/docs/dws/dev/dws_06_0218.html new file mode 100644 index 00000000..a151453e --- /dev/null +++ b/docs/dws/dev/dws_06_0218.html @@ -0,0 +1,56 @@ + + +

REINDEX

+

Function

REINDEX rebuilds an index using the data stored in the index's table, replacing the old copy of the index.

+

There are several scenarios in which REINDEX can be used:

+ +
+

Precautions

Index reconstruction of the REINDEX DATABASE or SYSTEM type cannot be performed in transaction blocks.

+
+

Syntax

+ +
+

Parameter Description

+


+
+
+

Examples

Rebuild a single index.

+
REINDEX INDEX tpcds.tpcds_customer_index1;
+
+ +
+

Rebuild all indexes on the tpcds.customer_t1 table.

+
REINDEX TABLE tpcds.customer_t1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0219.html b/docs/dws/dev/dws_06_0219.html new file mode 100644 index 00000000..56562dbd --- /dev/null +++ b/docs/dws/dev/dws_06_0219.html @@ -0,0 +1,42 @@ + + +

RESET

+

Function

RESET restores run-time parameters to their default values. The default values are the parameter defaults configured in the postgresql.conf configuration file.

+

RESET is an alternative spelling for:

+

SET configuration_parameter TO DEFAULT

+
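For example, the following two statements are equivalent:

RESET timezone;
SET timezone TO DEFAULT;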
+

Precautions

RESET and SET have the same transaction behavior: if the enclosing transaction is rolled back, their effects are rolled back as well.

+
+

Syntax

RESET {configuration_parameter | CURRENT_SCHEMA | TIME ZONE | TRANSACTION ISOLATION LEVEL | SESSION AUTHORIZATION | ALL };
+
+

Parameter Description

+
+

Examples

Reset timezone to the default value.

+
RESET timezone;
+
+ +
+

Set all parameters to their default values.

+
RESET ALL;
+
+ +
+
+

Helpful Links

SET, SHOW

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0220.html b/docs/dws/dev/dws_06_0220.html new file mode 100644 index 00000000..dea2cd72 --- /dev/null +++ b/docs/dws/dev/dws_06_0220.html @@ -0,0 +1,78 @@ + + +

SET

+

Function

SET modifies a run-time parameter.

+
+

Precautions

Most run-time parameters can be modified by executing SET. Some parameters cannot be modified after a server or session starts.

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Configure the search path of the tpcds schema.

+
SET search_path TO tpcds, public;
+
+ +
+

Set the date style to the traditional POSTGRES style (date placed before month).

+
SET datestyle TO postgres;
+
+ +
+
+

Helpful Links

RESET, SHOW

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0221.html b/docs/dws/dev/dws_06_0221.html new file mode 100644 index 00000000..638ce4b1 --- /dev/null +++ b/docs/dws/dev/dws_06_0221.html @@ -0,0 +1,37 @@ + + +

SET CONSTRAINTS

+

Function

SET CONSTRAINTS sets the behavior of constraint checking within the current transaction.

+

IMMEDIATE constraints are checked at the end of each statement. DEFERRED constraints are not checked until transaction commit. Each constraint has its own IMMEDIATE or DEFERRED mode.

+

Upon creation, a constraint is given one of three characteristics: DEFERRABLE INITIALLY DEFERRED, DEFERRABLE INITIALLY IMMEDIATE, or NOT DEFERRABLE. The third class is always IMMEDIATE and is not affected by the SET CONSTRAINTS command. The first two classes start every transaction in the indicated mode, but their behavior can be changed within a transaction by SET CONSTRAINTS.

+

SET CONSTRAINTS with a list of constraint names changes the mode of just those constraints (which must all be deferrable). If multiple constraints match a name, all of them are affected. SET CONSTRAINTS ALL changes the modes of all deferrable constraints.

+

When SET CONSTRAINTS changes the mode of a constraint from DEFERRED to IMMEDIATE, the new mode takes effect retroactively: any outstanding data modifications that would have been checked at the end of the transaction are instead checked during the execution of the SET CONSTRAINTS command. If any such constraint is violated, the SET CONSTRAINTS fails (and does not change the constraint mode). Therefore, SET CONSTRAINTS can be used to force checking of constraints to occur at a specific point in a transaction.

+

Only foreign key constraints are affected by this setting. Check and unique constraints are always checked immediately when a row is inserted or modified.

+
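A sketch of the deferred-check behavior described above; the tables and the deferrable foreign key are illustrative, and the example assumes the deployment enforces deferrable foreign key constraints:

CREATE TABLE t_parent(id int PRIMARY KEY);
CREATE TABLE t_child(pid int REFERENCES t_parent(id) DEFERRABLE INITIALLY IMMEDIATE);
START TRANSACTION;
SET CONSTRAINTS ALL DEFERRED;
INSERT INTO t_child VALUES (1);   -- parent row does not exist yet; the check is postponed
INSERT INTO t_parent VALUES (1);
SET CONSTRAINTS ALL IMMEDIATE;    -- forces the postponed check to run now
COMMIT;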
+

Precautions

SET CONSTRAINTS sets the behavior of constraint checking only within the current transaction. Therefore, if you execute this command outside of a transaction block (START TRANSACTION/COMMIT pair), it will not appear to have any effect.

+
+

Syntax

SET CONSTRAINTS { ALL | { name } [, ...] } { DEFERRED | IMMEDIATE };
+
+ +
+
+

Parameter Description

+
+

Examples

Require all constraints to be checked at transaction commit.

+
SET CONSTRAINTS ALL DEFERRED;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0222.html b/docs/dws/dev/dws_06_0222.html new file mode 100644 index 00000000..c234a489 --- /dev/null +++ b/docs/dws/dev/dws_06_0222.html @@ -0,0 +1,49 @@ + + +

SET ROLE

+

Function

SET ROLE sets the current user identifier of the current session.

+
+

Precautions

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Set the current user to paul.

+
SET ROLE paul PASSWORD '{password}';
+
+ +
+

View the current session user and the current user.

+
SELECT SESSION_USER, CURRENT_USER;
+
+ +
+

Reset the current user.

+
RESET role;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0223.html b/docs/dws/dev/dws_06_0223.html new file mode 100644 index 00000000..ce48e097 --- /dev/null +++ b/docs/dws/dev/dws_06_0223.html @@ -0,0 +1,53 @@ + + +

SET SESSION AUTHORIZATION

+

Function

SET SESSION AUTHORIZATION sets the session user identifier and the current user identifier of the current SQL session to a specified user.

+
+

Precautions

The session user identifier can be changed only if the initial session user has system administrator rights. Otherwise, the command is accepted only if the user name it specifies is the authenticated user name.

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Set the current user to paul.

+
SET SESSION AUTHORIZATION paul password '{password}';
+
+ +
+

View the current session user and the current user.

+
SELECT SESSION_USER, CURRENT_USER;
+
+ +
+

Reset the current user.

+
RESET SESSION AUTHORIZATION;
+
+ +
+
+

Helpful Links

SET ROLE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0224.html b/docs/dws/dev/dws_06_0224.html new file mode 100644 index 00000000..c6580ad6 --- /dev/null +++ b/docs/dws/dev/dws_06_0224.html @@ -0,0 +1,55 @@ + + +

SHOW

+

Function

SHOW shows the current value of a run-time parameter. You can use the SET statement to set these parameters.

+
+

Precautions

Some parameters that can be viewed by SHOW are read-only. You can view but cannot modify their values.

+
+

Syntax

SHOW
  {
    configuration_parameter |
    CURRENT_SCHEMA |
    TIME ZONE |
    TRANSACTION ISOLATION LEVEL |
    SESSION AUTHORIZATION |
    ALL
  };
+
+ +
+
+

Parameter Description

See Parameter Description in RESET.

+
+

Examples

Show the value of timezone.

+
SHOW timezone;
+
+ +
+

Show the current setting of the DateStyle parameter.

+
SHOW DateStyle;
+
+ +
+

Show the current setting of all parameters.

+
SHOW ALL;
+
+ +
+
+

Helpful Links

SET, RESET

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0225.html b/docs/dws/dev/dws_06_0225.html new file mode 100644 index 00000000..820267ca --- /dev/null +++ b/docs/dws/dev/dws_06_0225.html @@ -0,0 +1,66 @@ + + +

TRUNCATE

+

Function

TRUNCATE quickly removes all rows from a database table.

+

It has the same effect as an unqualified DELETE on each table, but it is faster since it does not actually scan the tables. This is most useful on large tables.

+

TRUNCATE obtains an ACCESS EXCLUSIVE lock on each table it operates on, which blocks all other concurrent operations on that table. If concurrent access to the table is required, use the DELETE command instead.

+
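A sketch of the trade-off described above, using the table from the examples below:

DELETE FROM tpcds.customer_address;     -- row-by-row removal; concurrent access keeps working
TRUNCATE TABLE tpcds.customer_address;  -- reclaims storage immediately, but takes ACCESS EXCLUSIVE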
+

Precautions

+
+

Syntax

+
TRUNCATE [ TABLE ] [ ONLY ] {[[database_name.]schema_name.]table_name [ * ]} [, ... ]
    [ CONTINUE IDENTITY ] [ CASCADE | RESTRICT ];
+
+ +
+ +
ALTER TABLE [ IF EXISTS ] { [ ONLY ] [[database_name.]schema_name.]table_name
                           | table_name *
                           | ONLY ( table_name ) }
    TRUNCATE PARTITION { partition_name
                       | FOR ( partition_value [, ...] ) } ;
+
+ +
+
+

Parameter Description

+
+

Examples

Clear the p1 partition of the customer_address table.

+
ALTER TABLE tpcds.customer_address TRUNCATE PARTITION p1;
+
+ +
+

Clear a partitioned table.

+
TRUNCATE TABLE tpcds.customer_address;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0226.html b/docs/dws/dev/dws_06_0226.html new file mode 100644 index 00000000..f5cc121e --- /dev/null +++ b/docs/dws/dev/dws_06_0226.html @@ -0,0 +1,88 @@ + + +

VACUUM

+

Function

VACUUM reclaims storage space occupied by tables or B-tree indexes. In normal database operation, rows that have been deleted or obsoleted by an update are not physically removed from their table; they remain present until a VACUUM is done. Therefore, it is necessary to execute VACUUM periodically, especially on frequently-updated tables.

+

+
+

Precautions

+
+

Syntax

+
+

Parameter Description

+
+

Examples

Vacuum all tables in the current database.

+
VACUUM;
+
+ +
+

Reclaim the space of partition P2 of the tpcds.web_returns_p1 table without updating statistics.

+
VACUUM FULL tpcds.web_returns_p1 PARTITION(P2);
+
+ +
+

Reclaim the tpcds.web_returns_p1 table and update statistics.

+
VACUUM FULL ANALYZE tpcds.web_returns_p1;
+
+ +
+

Vacuum all tables in the current database and collect statistics for the query optimizer.

+
VACUUM ANALYZE;
+
+ +
+

Vacuum only the reason table, print a detailed activity report, and update its statistics.

+
VACUUM (VERBOSE, ANALYZE) reason;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0227.html b/docs/dws/dev/dws_06_0227.html new file mode 100644 index 00000000..8084be19 --- /dev/null +++ b/docs/dws/dev/dws_06_0227.html @@ -0,0 +1,39 @@ + + + +

DML Syntax

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0228.html b/docs/dws/dev/dws_06_0228.html new file mode 100644 index 00000000..6cb7e316 --- /dev/null +++ b/docs/dws/dev/dws_06_0228.html @@ -0,0 +1,25 @@ + + +

DML Syntax Overview

+

Data Manipulation Language (DML) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data.

+

Inserting Data

Inserting data refers to adding one or multiple records to a database table. For details, see INSERT.

+
+

Updating Data

Modifying data refers to modifying one or multiple records in a database table. For details, see UPDATE.

+
+

Querying Data

The database query statement SELECT is used to retrieve the required information from a database. For details, see SELECT.

+
+

Deleting Data

For details about how to delete data that meets specified conditions from a table, see DELETE.

+
+

Copying Data

GaussDB(DWS) provides a statement for copying data between tables and files. For details, see COPY.

+
+

Locking a Table

GaussDB(DWS) provides multiple lock modes to control concurrent accesses to table data. For details, see LOCK.

+
+

Invoking a Function

GaussDB(DWS) provides three statements for invoking functions, all of which share the same syntax structure. For details, see CALL.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0229.html b/docs/dws/dev/dws_06_0229.html new file mode 100644 index 00000000..c6008449 --- /dev/null +++ b/docs/dws/dev/dws_06_0229.html @@ -0,0 +1,153 @@ + + +

CALL

+

Function

CALL calls defined functions or stored procedures.

+
+

Precautions

None

+
+

Syntax

CALL [schema.] { func_name | procedure_name } ( param_expr );
+
+ +
+
+

Parameter Description

+
+

Examples

Create the func_add_sql function to compute the sum of two integers and return the result.
CREATE FUNCTION func_add_sql(num1 integer, num2 integer) RETURN integer
AS
BEGIN
RETURN num1 + num2;
END;
/
+
+ +
+
+
Call the function by passing parameter values in order.
CALL func_add_sql(1, 3);
+
+ +
+
+
Call the function using named parameter notation.
CALL func_add_sql(num1 => 1,num2 => 3);
CALL func_add_sql(num2 := 2, num1 := 3);
+
+ +
+
+
Delete the function.
DROP FUNCTION func_add_sql;
+
+ +
+
+
Create a function with output parameters.
CREATE FUNCTION func_increment_sql(num1 IN integer, num2 IN integer, res OUT integer)
RETURN integer
AS
BEGIN
res := num1 + num2;
END;
/
+
+ +
+
+
Set output parameters to constants.
CALL func_increment_sql(1,2,1);
+
+ +
+
+
Set output parameters to variables.
DECLARE
res int;
BEGIN
func_increment_sql(1, 2, res);
dbms_output.put_line(res);
END;
/
+
+ +
+
+
Create overloaded functions.
create or replace procedure package_func_overload(col int, col2 out int) package
as
declare
    col_type text;
begin
     col := 122;
         dbms_output.put_line('two out parameters ' || col2);
end;
/
+
+ +
+
create or replace procedure package_func_overload(col int, col2 out varchar) package
as
declare
    col_type text;
begin
     col2 := '122';
         dbms_output.put_line('two varchar parameters ' || col2);
end;
/
+
+ +
+
+
Call a function.
call package_func_overload(1, 'test');
call package_func_overload(1, 1);
+
+ +
+
+
Delete a function.
DROP FUNCTION func_increment_sql;
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0230.html b/docs/dws/dev/dws_06_0230.html new file mode 100644 index 00000000..3b7be03d --- /dev/null +++ b/docs/dws/dev/dws_06_0230.html @@ -0,0 +1,409 @@ + + +

COPY

+

Function

COPY copies data between tables and files.

+

COPY FROM copies data from a file to a table. COPY TO copies data from a table to a file.

+
+

Important Notes

+
+ +

Syntax

+
+

Parameter Description

+
+

Examples

Copy data from the tpcds.ship_mode table to the /home/omm/ds_ship_mode.dat file.
COPY tpcds.ship_mode TO '/home/omm/ds_ship_mode.dat';
+
+ +
+
+
Write tpcds.ship_mode as output to stdout.
COPY tpcds.ship_mode TO stdout;
+
+ +
+
+
Create the tpcds.ship_mode_t1 table.
CREATE TABLE tpcds.ship_mode_t1
(
    SM_SHIP_MODE_SK           INTEGER               NOT NULL,
    SM_SHIP_MODE_ID           CHAR(16)              NOT NULL,
    SM_TYPE                   CHAR(30)                      ,
    SM_CODE                   CHAR(10)                      ,
    SM_CARRIER                CHAR(20)                      ,
    SM_CONTRACT               CHAR(20)
)
WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE)
DISTRIBUTE BY HASH(SM_SHIP_MODE_SK );
+
+ +
+
+
Copy data from stdin to the tpcds.ship_mode_t1 table.
COPY tpcds.ship_mode_t1 FROM stdin;
+
+ +
+
+

Copy data from the /home/omm/ds_ship_mode.dat file to the tpcds.ship_mode_t1 table.

+
COPY tpcds.ship_mode_t1 FROM '/home/omm/ds_ship_mode.dat';
+
+ +
+

Copy data from the /home/omm/ds_ship_mode.dat file to the tpcds.ship_mode_t1 table, with the import format set to TEXT (format 'text'), the delimiter set to '\t' (delimiter E'\t'), excessive columns ignored (ignore_extra_data 'true'), and characters not escaped (noescaping 'true').

+
COPY tpcds.ship_mode_t1 FROM '/home/omm/ds_ship_mode.dat' WITH(format 'text', delimiter E'\t', ignore_extra_data 'true', noescaping 'true');
+
+ +
+

Copy data from the /home/omm/ds_ship_mode.dat file to the tpcds.ship_mode_t1 table, with the import format set to FIXED, fixed-length format specified (FORMATTER(SM_SHIP_MODE_SK(0, 2), SM_SHIP_MODE_ID(2,16), SM_TYPE(18,30), SM_CODE(50,10), SM_CARRIER(61,20), SM_CONTRACT(82,20))), excessive columns ignored (ignore_extra_data), and headers included (header).

+
COPY tpcds.ship_mode_t1 FROM '/home/omm/ds_ship_mode.dat' FIXED FORMATTER(SM_SHIP_MODE_SK(0, 2), SM_SHIP_MODE_ID(2,16), SM_TYPE(18,30), SM_CODE(50,10), SM_CARRIER(61,20), SM_CONTRACT(82,20)) header ignore_extra_data;
+
+ +
+

Delete the tpcds.ship_mode_t1 table.

+
DROP TABLE tpcds.ship_mode_t1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0231.html b/docs/dws/dev/dws_06_0231.html new file mode 100644 index 00000000..fa82fa22 --- /dev/null +++ b/docs/dws/dev/dws_06_0231.html @@ -0,0 +1,75 @@ + + +

DELETE

+

Function

DELETE deletes rows that satisfy the WHERE clause from the specified table. If the WHERE clause does not exist, all rows in the table will be deleted. The result is a valid, but an empty table.

+
+

Precautions

+
+

Syntax

[ WITH [ RECURSIVE ] with_query [, ...] ]
DELETE FROM [ ONLY ] table_name [ * ] [ [ AS ] alias ]
    [ USING using_list ]
    [ WHERE condition | WHERE CURRENT OF cursor_name ]
    [ RETURNING { * | { output_expr [ [ AS ] output_name ] } [, ...] } ];
+
+ +
+
+

Parameter Description

+
+

Examples

Create the tpcds.customer_address_bak table.

+
CREATE TABLE tpcds.customer_address_bak AS TABLE tpcds.customer_address;
+
+ +
+

Delete rows whose ca_address_sk is less than 14888 from the tpcds.customer_address_bak table.

+
DELETE FROM tpcds.customer_address_bak WHERE ca_address_sk < 14888;
+
+ +
+

Delete the rows whose ca_address_sk is 14891, 14893, or 14895 from tpcds.customer_address_bak.

+
DELETE FROM tpcds.customer_address_bak WHERE ca_address_sk in (14891,14893,14895);
+
+ +
+

Delete all data in the tpcds.customer_address_bak table.

+
DELETE FROM tpcds.customer_address_bak;
+
+ +
+

Use a WITH subquery that deletes all rows from the row-store table tpcds.warehouse_t30 and returns them as the temporary table temp_t, and then query all data in temp_t.

+
WITH temp_t AS (DELETE FROM tpcds.warehouse_t30 RETURNING *) SELECT * FROM temp_t ORDER BY 1;
+
+ +
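The USING clause in the syntax above allows deleting by join. A minimal sketch against the tables from these examples (the filter column ca_state is taken from the table definition and the value is illustrative):

DELETE FROM tpcds.customer_address_bak b
    USING tpcds.customer_address a
    WHERE b.ca_address_sk = a.ca_address_sk
      AND a.ca_state = 'AZ';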
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0232.html b/docs/dws/dev/dws_06_0232.html new file mode 100644 index 00000000..b255fa38 --- /dev/null +++ b/docs/dws/dev/dws_06_0232.html @@ -0,0 +1,260 @@ + + +

EXPLAIN

+

Function

EXPLAIN shows the execution plan of an SQL statement.

+

The execution plan shows how the tables referenced by the SQL statement will be scanned, for example, by plain sequential scan or index scan. If multiple tables are referenced, the execution plan also shows what join algorithms will be used to bring together the required rows from each input table.

+

The most critical part of the display is the estimated statement execution cost, which is the planner's guess at how long it will take to run the statement.

+

The ANALYZE option causes the statement to be executed, not only planned. Then actual runtime statistics are added to the display, including the total elapsed time expended within each plan node (in milliseconds) and the total number of rows it actually returned. This is useful to check whether the planner's estimates are close to reality.

+
+

Precautions

The statement is executed when the ANALYZE option is used. To use EXPLAIN ANALYZE on an INSERT, UPDATE, DELETE, CREATE TABLE AS, or EXECUTE statement without letting the command affect your data, use this approach:

+
START TRANSACTION;
EXPLAIN ANALYZE ...;
ROLLBACK;
+
+ +
+
+

Syntax

+
+

Parameter Description

+
+

Examples

Create the tpcds.customer_address_p1 table.

+
CREATE TABLE tpcds.customer_address_p1 AS TABLE tpcds.customer_address;
+
+ +
+

Change the value of explain_perf_mode to normal.

+
SET explain_perf_mode=normal;
+
+ +
+

Display an execution plan for simple queries in the table.

+
EXPLAIN SELECT * FROM tpcds.customer_address_p1;
                               QUERY PLAN
----------------------------------------------------------------------------
 Data Node Scan on "__REMOTE_FQS_QUERY__"  (cost=0.00..0.00 rows=0 width=0)
   Node/s: All datanodes
(2 rows)
+
+ +
+

Generate an execution plan in JSON format (assume explain_perf_mode is set to normal).

+
EXPLAIN(FORMAT JSON) SELECT * FROM tpcds.customer_address_p1;
                    QUERY PLAN
---------------------------------------------------
 [                                                +
   {                                              +
     "Plan": {                                    +
       "Node Type": "Data Node Scan",             +
       "RemoteQuery name": "__REMOTE_FQS_QUERY__",+
       "Alias": "__REMOTE_FQS_QUERY__",           +
       "Startup Cost": 0.00,                      +
       "Total Cost": 0.00,                        +
       "Plan Rows": 0,                            +
       "Plan Width": 0,                           +
       "Nodes": "All datanodes"                   +
     }                                            +
   }                                              +
 ]
(1 row)
+
+ +
+

If there is an index and we use a query with an indexable WHERE condition, EXPLAIN might show a different plan.

+
EXPLAIN SELECT * FROM tpcds.customer_address_p1 WHERE ca_address_sk=10000;
                                  QUERY PLAN
------------------------------------------------------------------------------
 Data Node Scan on "__REMOTE_LIGHT_QUERY__"  (cost=0.00..0.00 rows=0 width=0)
   Node/s: datanode2
(2 rows)
+
+ +
+

Generate an execution plan in YAML format (assume explain_perf_mode is set to normal).

+
EXPLAIN(FORMAT YAML) SELECT * FROM tpcds.customer_address_p1 WHERE ca_address_sk=10000;
                   QUERY PLAN
------------------------------------------------
 - Plan:                                       +
     Node Type: "Data Node Scan"               +
     RemoteQuery name: "__REMOTE_LIGHT_QUERY__"+
     Alias: "__REMOTE_LIGHT_QUERY__"           +
     Startup Cost: 0.00                        +
     Total Cost: 0.00                          +
     Plan Rows: 0                              +
     Plan Width: 0                             +
     Nodes: "datanode2"
(1 row)
+
+ +
+

Here is an example of an execution plan with cost estimates suppressed.

+
EXPLAIN(COSTS FALSE)SELECT * FROM tpcds.customer_address_p1 WHERE ca_address_sk=10000;
                 QUERY PLAN
--------------------------------------------
 Data Node Scan on "__REMOTE_LIGHT_QUERY__"
   Node/s: datanode2
(2 rows)
+
+ +
+

Here is an example of an execution plan for a query that uses an aggregate function.

+
EXPLAIN SELECT SUM(ca_address_sk) FROM tpcds.customer_address_p1 WHERE ca_address_sk<10000;
                                      QUERY PLAN
---------------------------------------------------------------------------------------
 Aggregate  (cost=18.19..14.32 rows=1 width=4)
   ->  Streaming (type: GATHER)  (cost=18.19..14.32 rows=3 width=4)
         Node/s: All datanodes
         ->  Aggregate  (cost=14.19..14.20 rows=3 width=4)
               ->  Seq Scan on customer_address_p1  (cost=0.00..14.18 rows=10 width=4)
                     Filter: (ca_address_sk < 10000)
(6 rows)
+
+ +
+

Delete the tpcds.customer_address_p1 table.

+
DROP TABLE tpcds.customer_address_p1;
+
+ +
+
+

Helpful Links

ANALYZE | ANALYSE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0233.html b/docs/dws/dev/dws_06_0233.html new file mode 100644 index 00000000..b433773a --- /dev/null +++ b/docs/dws/dev/dws_06_0233.html @@ -0,0 +1,111 @@ + + +

EXPLAIN PLAN

+

Function

You can run the EXPLAIN PLAN statement to save the information about an execution plan to the PLAN_TABLE table. Different from the EXPLAIN statement, EXPLAIN PLAN only stores plan information and does not print it on the screen.

+
+

Syntax

EXPLAIN PLAN
[ SET STATEMENT_ID = string ]
FOR statement ;
+
+ +
+
+

Parameter Description

+
+

Precautions

+
+

Example 1

You can perform the following steps to collect execution plans of SQL statements by running EXPLAIN PLAN:

+
  1. Import TPC-H sample data.
  2. Run the EXPLAIN PLAN statement.

    After the EXPLAIN PLAN statement is executed, plan information is automatically stored in PLAN_TABLE. INSERT, UPDATE, and ANALYZE cannot be performed on PLAN_TABLE.

    +

    For details about PLAN_TABLE, see the PLAN_TABLE system view.

    +
    +
    explain plan set statement_id='TPCH-Q4' for
    select
    o_orderpriority,
    count(*) as order_count
    from
    orders
    where
    o_orderdate >= '1993-07-01'::date
    and o_orderdate < '1993-07-01'::date + interval '3 month'
    and exists (
    select
    *
    from
    lineitem
    where
    l_orderkey = o_orderkey
    and l_commitdate < l_receiptdate
    )
    group by
    o_orderpriority
    order by
    o_orderpriority;
    +
    + +
    +

  3. Query PLAN_TABLE.

    SELECT * FROM PLAN_TABLE;
    +
    + +
    +

    +

  4. Delete data from PLAN_TABLE.

    DELETE FROM PLAN_TABLE WHERE xxx;
    +
    + +
    +

+
+

Example 2

For a query that cannot be pushed down, only such information as REMOTE_QUERY and CTE can be collected from PLAN_TABLE after EXPLAIN PLAN is executed.

+
The optimizer generates a plan for pushing down statements. In this case, only REMOTE_QUERY can be collected.
  explain plan set statement_id = 'test remote query' for
  select
  current_user
  from
  customer;
+
+ +
+
+
Query PLAN_TABLE.
SELECT * FROM PLAN_TABLE;
+
+ +
+
+

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0234.html b/docs/dws/dev/dws_06_0234.html new file mode 100644 index 00000000..b2a4b066 --- /dev/null +++ b/docs/dws/dev/dws_06_0234.html @@ -0,0 +1,281 @@ + + +

LOCK

+

Function

LOCK TABLE obtains a table-level lock.

+

GaussDB(DWS) always tries to select the lock mode with minimum constraints when automatically requesting a lock for a command referenced by a table. Use LOCK if you need a stricter lock mode. For example, suppose an application runs a transaction at the Read Committed isolation level and needs to ensure that data in a table remains stable for the duration of the transaction. To achieve this, you could obtain SHARE lock mode over the table before the query. This prevents concurrent data changes and ensures that subsequent reads of the table see a stable view of committed data, because the SHARE lock mode conflicts with the ROW EXCLUSIVE lock acquired by writers, and your LOCK TABLE name IN SHARE MODE statement will wait until any concurrent holders of ROW EXCLUSIVE mode locks commit or roll back. Therefore, once you obtain the lock, there are no uncommitted writes outstanding; furthermore, none can begin until you release the lock.

+
+

Precautions

+
+

Syntax

LOCK [ TABLE ] {[ ONLY ] name [, ...]| {name [ * ]} [, ...]}
    [ IN {ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE} MODE ]
    [ NOWAIT ];
+
+ +
+
+

Parameter Description

+
Table 1 Lock mode conflicts (X indicates a conflict; - indicates the modes are compatible)

 Requested Mode \ Current Mode | ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE
 ------------------------------+--------------+-----------+---------------+------------------------+-------+---------------------+-----------+-----------------
 ACCESS SHARE                  | -            | -         | -             | -                      | -     | -                   | -         | X
 ROW SHARE                     | -            | -         | -             | -                      | -     | -                   | X         | X
 ROW EXCLUSIVE                 | -            | -         | -             | -                      | X     | X                   | X         | X
 SHARE UPDATE EXCLUSIVE        | -            | -         | -             | X                      | X     | X                   | X         | X
 SHARE                         | -            | -         | X             | X                      | -     | X                   | X         | X
 SHARE ROW EXCLUSIVE           | -            | -         | X             | X                      | X     | X                   | X         | X
 EXCLUSIVE                     | -            | X         | X             | X                      | X     | X                   | X         | X
 ACCESS EXCLUSIVE              | X            | X         | X             | X                      | X     | X                   | X         | X
+
+

LOCK parameters are as follows:

+ +
+
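NOWAIT (shown in the syntax above) makes LOCK report an error instead of waiting when the requested mode conflicts with an existing lock. A minimal sketch:

START TRANSACTION;
LOCK TABLE tpcds.reason IN ACCESS EXCLUSIVE MODE NOWAIT;  -- fails immediately if the lock cannot be granted
COMMIT;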

Examples

Obtain a SHARE lock on a primary key table when going to perform inserts into a foreign key table.

+
START TRANSACTION;

LOCK TABLE tpcds.reason IN SHARE MODE;

SELECT r_reason_desc FROM tpcds.reason WHERE r_reason_sk=5;
 r_reason_desc
---------------
 Parts missing
(1 row)

COMMIT;
+
+ +
+

Obtain a SHARE ROW EXCLUSIVE lock on a primary key table when going to perform a delete operation.

+
CREATE TABLE tpcds.reason_t1 AS TABLE tpcds.reason;

START TRANSACTION;

LOCK TABLE tpcds.reason_t1 IN SHARE ROW EXCLUSIVE MODE;

DELETE FROM tpcds.reason_t1 WHERE r_reason_desc IN(SELECT r_reason_desc FROM tpcds.reason_t1 WHERE r_reason_sk < 6 );

DELETE FROM tpcds.reason_t1 WHERE r_reason_sk = 7;

COMMIT;
+
+ +
+

Delete the tpcds.reason_t1 table.

+
DROP TABLE tpcds.reason_t1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0235.html b/docs/dws/dev/dws_06_0235.html new file mode 100644 index 00000000..6a4c2f5c --- /dev/null +++ b/docs/dws/dev/dws_06_0235.html @@ -0,0 +1,163 @@ + + +

MERGE INTO

+

Function

The MERGE INTO statement conditionally matches data in a target table with data in a source table. If a row matches, UPDATE is executed on the target table; if it does not match, INSERT is executed. This syntax conveniently runs UPDATE and INSERT in a single statement.

+
+

Precautions

+
+

Syntax

MERGE INTO table_name [ [ AS ] alias ]
USING { { table_name | view_name } | subquery } [ [ AS ] alias ]
ON ( condition )
[
  WHEN MATCHED THEN
  UPDATE SET { column_name = { expression | DEFAULT } |
          ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]
  [ WHERE condition ]
]
[
  WHEN NOT MATCHED THEN
  INSERT { DEFAULT VALUES |
  [ ( column_name [, ...] ) ] VALUES ( { expression | DEFAULT } [, ...] ) [, ...] [ WHERE condition ] }
];
+
+ +
+
+

Parameter Description

+
+

Examples

Create the target table products and source table newproducts, and insert data to them.

+
CREATE TABLE products
(
product_id INTEGER,
product_name VARCHAR2(60),
category VARCHAR2(60)
);

INSERT INTO products VALUES (1501, 'vivitar 35mm', 'electrncs');
INSERT INTO products VALUES (1502, 'olympus is50', 'electrncs');
INSERT INTO products VALUES (1600, 'play gym', 'toys');
INSERT INTO products VALUES (1601, 'lamaze', 'toys');
INSERT INTO products VALUES (1666, 'harry potter', 'dvd');

CREATE TABLE newproducts
(
product_id INTEGER,
product_name VARCHAR2(60),
category VARCHAR2(60)
);

INSERT INTO newproducts VALUES (1502, 'olympus camera', 'electrncs');
INSERT INTO newproducts VALUES (1601, 'lamaze', 'toys');
INSERT INTO newproducts VALUES (1666, 'harry potter', 'toys');
INSERT INTO newproducts VALUES (1700, 'wait interface', 'books');
+
+ +
+

Run MERGE INTO.

+
MERGE INTO products p
USING newproducts np
ON (p.product_id = np.product_id)
WHEN MATCHED THEN
  UPDATE SET p.product_name = np.product_name, p.category = np.category WHERE p.product_name != 'play gym'
WHEN NOT MATCHED THEN
  INSERT VALUES (np.product_id, np.product_name, np.category) WHERE np.category = 'books';
MERGE 4
+
+ +
+

Query updates.

+
SELECT * FROM products ORDER BY product_id;
 product_id |  product_name  | category  
------------+----------------+-----------
       1501 | vivitar 35mm   | electrncs
       1502 | olympus camera | electrncs
       1600 | play gym       | toys
       1601 | lamaze         | toys
       1666 | harry potter   | toys
       1700 | wait interface | books
(6 rows)
+
+ +
+

Delete a table.

+
DROP TABLE products;
DROP TABLE newproducts;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0236.html b/docs/dws/dev/dws_06_0236.html new file mode 100644 index 00000000..ea179c50 --- /dev/null +++ b/docs/dws/dev/dws_06_0236.html @@ -0,0 +1,228 @@ + + +

INSERT

+

Function

INSERT inserts new rows into a table.

+
+

Precautions

+
+

Syntax

[ WITH [ RECURSIVE ] with_query [, ...] ]
INSERT [ IGNORE | OVERWRITE ] INTO table_name [ AS alias ] [ ( column_name [, ...] ) ]
    { DEFAULT VALUES
    | VALUES {( { expression | DEFAULT } [, ...] ) }[, ...]
    | query }
    [ ON DUPLICATE KEY duplicate_action | ON CONFLICT [ conflict_target ] conflict_action ]
    [ RETURNING {* | {output_expression [ [ AS ] output_name ] }[, ...]} ];

where duplicate_action can be:

    UPDATE { column_name = { expression | DEFAULT } |
             ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] )
           } [, ...]

and conflict_target can be one of:

    ( { index_column_name | ( index_expression ) } [ COLLATE collation ] [ opclass ] [, ...] ) [ WHERE index_predicate ]
    ON CONSTRAINT constraint_name

and conflict_action is one of:

    DO NOTHING
    DO UPDATE SET { column_name = { expression | DEFAULT } |
                    ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] )
                  } [, ...]
              [ WHERE condition ]
+ +
+
+

Parameter Description

+
+

Examples

Create the reason_t1 table.

+
CREATE TABLE reason_t1
(
    TABLE_SK          INTEGER               ,
    TABLE_ID          VARCHAR(20)           ,
    TABLE_NA          VARCHAR(20)
);
+
+ +
+

Insert a record into a table.

+
INSERT INTO reason_t1(TABLE_SK, TABLE_ID, TABLE_NA) VALUES (1, 'S01', 'StudentA');
+
+ +
+

Insert a record into a table. This command is equivalent to the previous one.

+
INSERT INTO reason_t1 VALUES (1, 'S01', 'StudentA');
+
+ +
+

Insert records whose TABLE_SK is less than 1 into the table.

+
INSERT INTO reason_t1 SELECT * FROM reason_t1 WHERE TABLE_SK < 1;
+
+ +
+

Insert records into the table.

+
INSERT INTO reason_t1 VALUES (1, 'S01', 'StudentA'),(2, 'T01', 'TeacherA'),(3, 'T02', 'TeacherB');
SELECT * FROM reason_t1 ORDER BY 1;
 TABLE_SK | TABLE_ID | TABLE_NA
----------+----------+-----------
        1 |      S01 |  StudentA
        2 |      T01 |  TeacherA
        3 |      T02 |  TeacherB
(3 rows)
+
+ +
+

Clear existing data in the table and insert data to the table.

+
INSERT OVERWRITE INTO reason_t1 values (4, 'S02', 'StudentB');
SELECT * FROM reason_t1 ORDER BY 1;
 TABLE_SK | TABLE_ID | TABLE_NA
----------+----------+-----------
        4 |      S02 |  StudentB
(1 row)
+
+ +
+

Insert data back into the reason_t1 table.

+
INSERT INTO reason_t1 SELECT * FROM reason_t1;
+

Use the DEFAULT keyword to supply default values for individual columns.

+
INSERT INTO reason_t1 VALUES (5, 'S03', DEFAULT);
+

Insert some data in a table to another table: Use the WITH subquery to obtain a temporary table temp_t, and then insert all data in temp_t to another table reason_t1.

+
WITH temp_t AS (SELECT * FROM reason_t1) INSERT INTO reason_t1 SELECT * FROM temp_t ORDER BY 1;
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0237.html b/docs/dws/dev/dws_06_0237.html new file mode 100644 index 00000000..b329fb25 --- /dev/null +++ b/docs/dws/dev/dws_06_0237.html @@ -0,0 +1,282 @@ + + +

UPSERT

+

Function

UPSERT inserts rows into a table. When a row duplicates an existing primary key or unique key value, the row will be ignored or updated.

+

The UPSERT syntax is supported only in 8.1.1 and later.

+
+
+

Syntax

For details, see Syntax of INSERT. The following table describes the syntax of UPSERT.

+ +
Table 1 UPSERT syntax

Syntax 1: No index is specified.
  Update data upon conflict:  INSERT INTO ON DUPLICATE KEY UPDATE
  Ignore data upon conflict:  INSERT IGNORE
                              INSERT INTO ON CONFLICT DO NOTHING

Syntax 2: The unique key constraint can be inferred from the specified column name or constraint name.
  Update data upon conflict:  INSERT INTO ON CONFLICT(...) DO UPDATE SET
                              INSERT INTO ON CONFLICT ON CONSTRAINT con_name DO UPDATE SET
  Ignore data upon conflict:  INSERT INTO ON CONFLICT(...) DO NOTHING
                              INSERT INTO ON CONFLICT ON CONSTRAINT con_name DO NOTHING
+
+
+

In syntax 1, no index is specified. The system checks for conflicts on all primary keys or unique indexes. If a conflict exists, the system ignores or updates the corresponding data.

+

In syntax 2, a specified index is used for conflict check. The primary key or unique index is inferred from the column name, the expression that contains column names, or the constraint name specified in the ON CONFLICT clause.

+ +
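A sketch of syntax 2 with an explicit constraint name, reusing table t1 from the example below; the constraint name t1_pkey is illustrative and depends on how the primary key constraint was named:

INSERT INTO t1 VALUES(1, 5, 6) ON CONFLICT ON CONSTRAINT t1_pkey DO NOTHING;  -- ignored if id 1 already exists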

The UPDATE clause can use VALUES(colname) or EXCLUDED.colname to reference inserted data. EXCLUDED indicates the rows that should be excluded due to conflicts. An example is as follows:

+
CREATE TABLE t1(id int PRIMARY KEY, a int, b int);
INSERT INTO t1 VALUES(1,1,1);
-- Upon a conflicting row, change the value in column a to the value in column a of the target table plus 1, which, in this example, is (1,2,1).
INSERT INTO t1 VALUES(1,10,20) ON CONFLICT(id) DO UPDATE SET a = a + 1;
-- EXCLUDED.a is used to reference the value of column a that is originally proposed for insertion. In this example, the value is 10.
-- Upon a conflicting row, change the value of column a to that of the referenced column plus 1. In this example, the value is updated to (1,11,1).
INSERT INTO t1 VALUES(1,10,20) ON CONFLICT(id) DO UPDATE SET a = EXCLUDED.a + 1;
+
+ +
+ +
Note the following when using the syntax: +
+

Precautions

+
+

Examples

Create table reason_t2 and insert data into it.

+
CREATE TABLE reason_t2
(
  a    int primary key,
  b    int,
  c    int
);

INSERT INTO reason_t2 VALUES (1, 2, 3);
SELECT * FROM reason_t2 ORDER BY 1;
 a | b | c
---+---+---
 1 | 2 | 3
(1 row)
+
+ +
+

Insert two data records into the table reason_t2. One data record conflicts and the other does not. Conflicting data is ignored, and non-conflicting data is inserted.

+
INSERT INTO reason_t2 VALUES (1, 4, 5),(2, 6, 7) ON CONFLICT(a) DO NOTHING;
SELECT * FROM reason_t2 ORDER BY 1;
 a | b | c
---+---+---
 1 | 2 | 3
 2 | 6 | 7
(2 rows)
+
+ +
+

Insert two data records into the table reason_t2. One data record conflicts and the other does not. Conflicting data is updated, and non-conflicting data is inserted.

+
INSERT INTO reason_t2 VALUES (1, 4, 5),(3, 8, 9) ON CONFLICT(a) DO UPDATE SET b = EXCLUDED.b, c = EXCLUDED.c;
SELECT * FROM reason_t2 ORDER BY 1;
 a | b | c
---+---+---
 1 | 4 | 5
 2 | 6 | 7
 3 | 8 | 9
(3 rows)
+
+ +
+

Filter the updated rows.

+
INSERT INTO reason_t2 VALUES (2, 7, 8) ON CONFLICT (a) DO UPDATE SET b = excluded.b, c = excluded.c  WHERE reason_t2.c = 7;
SELECT * FROM reason_t2 ORDER BY 1;
 a | b | c
---+---+---
 1 | 4 | 5
 2 | 7 | 8
 3 | 8 | 9
(3 rows)
+
+ +
+

Insert data into the table reason_t2. On conflict, update with the column mapping swapped: column b takes the value proposed for column c, and column c takes the value proposed for column b.

+
INSERT INTO reason_t2 VALUES (1, 2, 3) ON CONFLICT (a) DO UPDATE SET b = excluded.c, c = excluded.b;
SELECT * FROM reason_t2 ORDER BY 1;
 a | b | c
---+---+---
 1 | 3 | 2
 2 | 7 | 8
 3 | 8 | 9
(3 rows)
+
+ +
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0238.html b/docs/dws/dev/dws_06_0238.html new file mode 100644 index 00000000..56b2c333 --- /dev/null +++ b/docs/dws/dev/dws_06_0238.html @@ -0,0 +1,597 @@ + + +

SELECT

+

Function

SELECT retrieves data from a table or view.

+

Serving as a filter over database tables, SELECT uses SQL keywords to retrieve only the required data from them.

+
+

Precautions

+
+ +

Syntax

+
[ WITH [ RECURSIVE ] with_query [, ...] ]
SELECT [/*+ plan_hint */] [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
{ * | {expression [ [ AS ] output_name ]} [, ...] }
[ FROM from_item [, ...] ]
[ WHERE condition ]
[ GROUP BY grouping_element [, ...] ]
[ HAVING condition [, ...] ]
[ WINDOW {window_name AS ( window_definition )} [, ...] ]
[ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ]
[ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ]
[ { [ LIMIT { count | ALL } ] [ OFFSET start [ ROW | ROWS ] ] } | { LIMIT start, { count | ALL } } ]
[ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
[ {FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ]} [...] ];
+
+ +
+

In condition and expression, you can use the aliases of expressions in targetlist in compliance with the following rules:

+
  • An alias can be referenced only at the same query level.
  • Only aliases defined in the targetlist can be referenced.
  • A subsequent expression can reference an alias defined in a prior expression (see the sketch after this list).
  • Volatile functions cannot be used in such references.
  • Window functions cannot be used in such references.
  • Do not reference an alias in the join ON condition.
  • An error is reported if the referenced alias appears multiple times in the targetlist.
+
+ +
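A minimal sketch of a legal alias reference under these rules: the alias sk is defined in the targetlist and referenced by a subsequent expression at the same level.

SELECT r_reason_sk AS sk, sk + 1 AS sk_next FROM tpcds.reason;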
+

Parameter Description

+
+

Examples

Obtain the temp_t temporary table by a subquery and query all records in this table.

+
WITH temp_t(name,isdba) AS (SELECT usename,usesuper FROM pg_user) SELECT * FROM temp_t;
+
+ +
+

Query all the r_reason_sk records in the tpcds.reason table and de-duplicate them.

+
SELECT DISTINCT(r_reason_sk) FROM tpcds.reason;
+
+ +
+

Example of a LIMIT clause: Obtain a record from the table.

+
SELECT * FROM tpcds.reason LIMIT 1;
+
+ +
+

Example of a LIMIT clause: Obtain the third record from the table.

+
SELECT * FROM tpcds.reason LIMIT 1 OFFSET 2;
+
+ +
+

Example of a LIMIT clause: Obtain the first two records from a table.

+
SELECT * FROM tpcds.reason LIMIT 2;
+
+ +
+

Query all records and sort them in alphabetic order.

+
SELECT r_reason_desc FROM tpcds.reason ORDER BY r_reason_desc;
+
+ +
+

Use table aliases to obtain data from the pg_user and pg_user_status tables.

+
SELECT a.usename,b.locktime FROM pg_user a,pg_user_status b WHERE a.usesysid=b.roloid;
+
+ +
+

Example of the FULL JOIN clause: Join data in the pg_user and pg_user_status tables.

+
SELECT a.usename,b.locktime,a.usesuper FROM pg_user a FULL JOIN pg_user_status b on a.usesysid=b.roloid;
+
+ +
+

Example of the GROUP BY clause: Filter data based on query conditions, and group the results.

+
SELECT r_reason_id, AVG(r_reason_sk) FROM tpcds.reason GROUP BY r_reason_id HAVING AVG(r_reason_sk) > 25;
+
+ +
+

Example of the GROUP BY clause: Group the results by alias.

+
SELECT r_reason_id AS id FROM tpcds.reason GROUP BY id;
+
+ +
+

Example of the GROUP BY CUBE clause: Filter data based on query conditions, and group the results.

+
SELECT r_reason_id,AVG(r_reason_sk) FROM tpcds.reason GROUP BY CUBE(r_reason_id,r_reason_sk);
+
+ +
+

Example of the GROUP BY GROUPING SETS clause: Filter data based on query conditions, and group the results.

+
SELECT r_reason_id,AVG(r_reason_sk) FROM tpcds.reason GROUP BY GROUPING SETS((r_reason_id,r_reason_sk),r_reason_sk);
+
+ +
+

Example of the UNION clause: Merge the names started with W and N in the r_reason_desc column in the tpcds.reason table.

+
SELECT r_reason_sk, tpcds.reason.r_reason_desc
    FROM tpcds.reason
    WHERE tpcds.reason.r_reason_desc LIKE 'W%'
UNION
SELECT r_reason_sk, tpcds.reason.r_reason_desc
    FROM tpcds.reason
    WHERE tpcds.reason.r_reason_desc LIKE 'N%';
+
+ +
+
Sort by Chinese Pinyin:

SELECT * FROM stu_pinyin_info ORDER BY NLSSORT (name, 'NLS_SORT = SCHINESE_PINYIN_M' );
+
+ +
+

Case-insensitive order:

+
CREATE TABLE stu_icase_info (id bigint, name text) DISTRIBUTE BY REPLICATION;
INSERT INTO stu_icase_info VALUES (1, 'aaaa'),(2, 'AAAA');
SELECT * FROM stu_icase_info ORDER BY NLSSORT (name, 'NLS_SORT = generic_m_ci');
 id | name
----+------
  1 | aaaa
  2 | AAAA
(2 rows)
+
+ +
+

Create the table tpcds.reason_p.

+
CREATE TABLE tpcds.reason_p
(
  r_reason_sk integer,
  r_reason_id character(16),
  r_reason_desc character(100)
)
PARTITION BY RANGE (r_reason_sk)
(
  partition P_05_BEFORE values less than (05),
  partition P_15 values less than (15),
  partition P_25 values less than (25),
  partition P_35 values less than (35),
  partition P_45_AFTER values less than (MAXVALUE)
);
+
+ +
+

Insert data.

+
INSERT INTO tpcds.reason_p values(3,'AAAAAAAABAAAAAAA','reason 1'),(10,'AAAAAAAABAAAAAAA','reason 2'),(4,'AAAAAAAABAAAAAAA','reason 3'),(10,'AAAAAAAABAAAAAAA','reason 4'),(10,'AAAAAAAABAAAAAAA','reason 5'),(20,'AAAAAAAACAAAAAAA','reason 6'),(30,'AAAAAAAACAAAAAAA','reason 7');
+
+ +
+

Example of the PARTITION clause: Obtain data from the P_05_BEFORE partition in the tpcds.reason_p table.

+
SELECT * FROM tpcds.reason_p PARTITION (P_05_BEFORE);
 r_reason_sk |   r_reason_id    |   r_reason_desc
-------------+------------------+------------------------------------
           4 | AAAAAAAABAAAAAAA | reason 3
           3 | AAAAAAAABAAAAAAA | reason 1
(2 rows)
+
+ +
+

Example of the GROUP BY clause: Group records in the tpcds.reason_p table by r_reason_id, and count the number of records in each group.

+
SELECT COUNT(*),r_reason_id FROM tpcds.reason_p GROUP BY r_reason_id;
 count |   r_reason_id
-------+------------------
     2 | AAAAAAAACAAAAAAA
     5 | AAAAAAAABAAAAAAA
(2 rows)
+
+ +
+

Example of the GROUP BY CUBE clause: Filter data based on query conditions, and group the results.

SELECT * FROM tpcds.reason GROUP BY CUBE (r_reason_id,r_reason_sk,r_reason_desc);

Example of the GROUP BY GROUPING SETS clause: Filter data based on query conditions, and group the results.

SELECT * FROM tpcds.reason GROUP BY GROUPING SETS ((r_reason_id,r_reason_sk),r_reason_desc);

Example of the HAVING clause: Group records in the tpcds.reason_p table by r_reason_id, count the records in each group, and return only the groups whose count is greater than 2.

SELECT COUNT(*) c,r_reason_id FROM tpcds.reason_p GROUP BY r_reason_id HAVING c>2;
 c |   r_reason_id
---+------------------
 5 | AAAAAAAABAAAAAAA
(1 row)

Example of the IN clause: Group records in the tpcds.reason_p table by r_reason_id, count the records in each group, and return only the counts of the groups whose r_reason_id is AAAAAAAABAAAAAAA or AAAAAAAADAAAAAAA.

SELECT COUNT(*),r_reason_id FROM tpcds.reason_p GROUP BY r_reason_id HAVING r_reason_id IN('AAAAAAAABAAAAAAA','AAAAAAAADAAAAAAA');
 count |   r_reason_id
-------+------------------
     5 | AAAAAAAABAAAAAAA
(1 row)

Example of the INTERSECT clause: Query records whose r_reason_id is AAAAAAAABAAAAAAA and whose r_reason_sk is smaller than 5.

SELECT * FROM tpcds.reason_p WHERE r_reason_id='AAAAAAAABAAAAAAA' INTERSECT SELECT * FROM tpcds.reason_p WHERE r_reason_sk<5;
 r_reason_sk |   r_reason_id    | r_reason_desc
-------------+------------------+---------------
           4 | AAAAAAAABAAAAAAA | reason 3
           3 | AAAAAAAABAAAAAAA | reason 1
(2 rows)

Example of the EXCEPT clause: Query records whose r_reason_id is AAAAAAAABAAAAAAA and whose r_reason_sk is greater than or equal to 4.

SELECT * FROM tpcds.reason_p WHERE r_reason_id='AAAAAAAABAAAAAAA' EXCEPT SELECT * FROM tpcds.reason_p WHERE r_reason_sk<4;
 r_reason_sk |   r_reason_id    | r_reason_desc
-------------+------------------+---------------
          10 | AAAAAAAABAAAAAAA | reason 2
          10 | AAAAAAAABAAAAAAA | reason 5
          10 | AAAAAAAABAAAAAAA | reason 4
           4 | AAAAAAAABAAAAAAA | reason 3
(4 rows)

Specify the operator (+) in the WHERE clause to indicate a left join.

select t1.sr_item_sk ,t2.c_customer_id from store_returns t1, customer t2 where t1.sr_customer_sk  = t2.c_customer_sk(+)
order by 1 desc limit 1;
 sr_item_sk | c_customer_id
------------+---------------
      18000 |
(1 row)

Specify the operator (+) in the WHERE clause to indicate a right join.

select t1.sr_item_sk ,t2.c_customer_id from store_returns t1, customer t2 where t1.sr_customer_sk(+)  = t2.c_customer_sk
order by 1 desc limit 1;
 sr_item_sk |  c_customer_id
------------+------------------
            | AAAAAAAAJNGEBAAA
(1 row)

Specify the operator (+) in the WHERE clause to indicate a left join and add a join condition.

select t1.sr_item_sk ,t2.c_customer_id from store_returns t1, customer t2 where t1.sr_customer_sk  = t2.c_customer_sk(+) and t2.c_customer_sk(+) < 1 order by 1 limit 1;
 sr_item_sk | c_customer_id
------------+---------------
          1 |
(1 row)

If the operator (+) is specified in the WHERE clause, expressions connected through AND/OR cannot be nested inside another expression (NOT in the following example); otherwise, an error is reported.

select t1.sr_item_sk ,t2.c_customer_id from store_returns t1, customer t2 where not(t1.sr_customer_sk  = t2.c_customer_sk(+) and t2.c_customer_sk(+) < 1);
ERROR:  Operator "(+)" can not be used in nesting expression.
LINE 1: ...tomer_id from store_returns t1, customer t2 where not(t1.sr_...
                                                             ^

The operator (+) can only be used in a common expression in the WHERE clause. If it appears in any other kind of expression (a type cast in the following example), an error is reported.

select t1.sr_item_sk ,t2.c_customer_id from store_returns t1, customer t2 where (t1.sr_customer_sk  = t2.c_customer_sk(+))::bool;
ERROR:  Operator "(+)" can only be used in common expression.

If the operator (+) is specified on both sides of an expression in the WHERE clause, an error will be reported.

select t1.sr_item_sk ,t2.c_customer_id from store_returns t1, customer t2 where t1.sr_customer_sk(+)  = t2.c_customer_sk(+);
ERROR:  Operator "(+)" can't be specified on more than one relation in one join condition
HINT:  "t1", "t2"...are specified Operator "(+)" in one condition.
+ diff --git a/docs/dws/dev/dws_06_0239.html b/docs/dws/dev/dws_06_0239.html new file mode 100644 index 00000000..7c8a36db --- /dev/null +++ b/docs/dws/dev/dws_06_0239.html @@ -0,0 +1,67 @@ + + +

SELECT INTO

+

Function

SELECT INTO defines a new table based on a query result and inserts the data obtained by the query into the new table.

+

Unlike SELECT, the data found by SELECT INTO is not returned to the client. The columns of the new table have the same names and data types as the output columns of the SELECT.

+
+

Precautions

CREATE TABLE AS provides functionality similar to that of SELECT INTO and is a superset of it. You are advised to use CREATE TABLE AS, because SELECT INTO cannot be used in a stored procedure.

+
+

Syntax

[ WITH [ RECURSIVE ] with_query [, ...] ]
SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
    { * | {expression [ [ AS ] output_name ]} [, ...] }
    INTO [ UNLOGGED ] [ TABLE ] new_table
    [ FROM from_item [, ...] ]
    [ WHERE condition ]
    [ GROUP BY expression [, ...] ]
    [ HAVING condition [, ...] ]
    [ WINDOW {window_name AS ( window_definition )} [, ...] ]
    [ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ]
    [ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ]
    [ { [ LIMIT { count | ALL } ] [ OFFSET start [ ROW | ROWS ] ] } | { LIMIT start, { count | ALL } } ]
    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
    [ {FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ]} [...] ];

Parameter Description

INTO [ UNLOGGED ] [ TABLE ] new_table

+

UNLOGGED indicates that the table is created as an unlogged table. Data written to unlogged tables is not written to the write-ahead log, which makes them considerably faster than ordinary tables. However, they are not crash-safe: an unlogged table is automatically truncated after a crash or unclean shutdown. The contents of an unlogged table are also not replicated to standby servers. Any indexes created on an unlogged table are automatically unlogged as well.

+

new_table specifies the name of a new table, which can be schema-qualified.

+

For details about other SELECT INTO parameters, see Parameter Description in SELECT.

+
+
+

Example

Copy the records whose r_reason_sk is less than 5 in the tpcds.reason table to the new table tpcds.reason_t1.

SELECT * INTO tpcds.reason_t1 FROM tpcds.reason WHERE r_reason_sk < 5;
INSERT 0 6

Delete the tpcds.reason_t1 table.

DROP TABLE tpcds.reason_t1;

Helpful Links

SELECT

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0240.html b/docs/dws/dev/dws_06_0240.html new file mode 100644 index 00000000..27ac302d --- /dev/null +++ b/docs/dws/dev/dws_06_0240.html @@ -0,0 +1,106 @@ + + +

UPDATE

+

Function

UPDATE updates data in a table. UPDATE changes the values of the specified columns in all rows that satisfy the condition. The WHERE clause specifies the condition. Only the columns to be modified need to be mentioned in the SET clause; columns that are not explicitly modified retain their previous values.

+
+

Precautions

+
+

Syntax

UPDATE [ ONLY ] table_name [ * ] [ [ AS ] alias ]
SET {column_name = { expression | DEFAULT }
    |( column_name [, ...] ) = {( { expression | DEFAULT } [, ...] ) |sub_query }}[, ...]
    [ FROM from_list] [ WHERE condition ]
    [ RETURNING {*
                | {output_expression [ [ AS ] output_name ]} [, ...] }];

where sub_query can be:
SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
{ * | {expression [ [ AS ] output_name ]} [, ...] }
[ FROM from_item [, ...] ]
[ WHERE condition ]
[ GROUP BY grouping_element [, ...] ]
[ HAVING condition [, ...] ]

Parameter Description

+
+

Examples

Update the r_reason_sk value of all records.

UPDATE reason SET r_reason_sk = r_reason_sk * 2;

If the WHERE clause is not included, all r_reason_sk values are updated.

UPDATE reason SET r_reason_sk = r_reason_sk + 100;

Set r_reason_sk to 5 for records whose r_reason_desc is reason2 in the reason table.

UPDATE reason SET r_reason_sk = 5 WHERE r_reason_desc = 'reason2';

Add 100 to r_reason_sk for records whose current r_reason_sk is 2 in the reason table.

UPDATE reason SET r_reason_sk = r_reason_sk + 100 WHERE r_reason_sk = 2;

Set r_reason_sk to 201 for records whose r_reason_sk is greater than 2 in the reason table.

UPDATE reason SET r_reason_sk = 201 WHERE r_reason_sk > 2;

You can run an UPDATE statement to update multiple columns by specifying multiple values in the SET clause. For example:

UPDATE reason SET r_reason_sk = 5, r_reason_desc = 'reason5' WHERE r_reason_id = 'fourth';
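
A subquery can also supply the values for several columns at once, following the sub_query branch shown in Syntax above. A minimal sketch (the filter values 5 and 201 are illustrative assumptions):

UPDATE reason
SET (r_reason_id, r_reason_desc) =
    (SELECT r_reason_id, r_reason_desc FROM reason WHERE r_reason_sk = 5)
WHERE r_reason_sk = 201;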
+ +
+ diff --git a/docs/dws/dev/dws_06_0241.html b/docs/dws/dev/dws_06_0241.html new file mode 100644 index 00000000..ac7a477b --- /dev/null +++ b/docs/dws/dev/dws_06_0241.html @@ -0,0 +1,107 @@ + + +

VALUES

+

Function

VALUES computes a row or a set of rows based on given values. It is most commonly used to generate a constant table within a large command.

+
+
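For instance, VALUES can be run on its own to generate a constant table (a minimal sketch):

VALUES (1, 'one'), (2, 'two'), (3, 'three') ORDER BY 1;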

Precautions

+
+

Syntax

VALUES {( expression [, ...] )} [, ...]
    [ ORDER BY { sort_expression [ ASC | DESC | USING operator ] } [, ...] ]
    [ { [ LIMIT { count | ALL } ] [ OFFSET start [ ROW | ROWS ] ] } | { LIMIT start, { count | ALL } } ]
    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ];

Parameter Description

+
+

Examples

Create the reason_t1 table.

CREATE TABLE reason_t1
(
    TABLE_SK          INTEGER               ,
    TABLE_ID          VARCHAR(20)           ,
    TABLE_NA          VARCHAR(20)
);

Insert a record into a table.

INSERT INTO reason_t1(TABLE_SK, TABLE_ID, TABLE_NA) VALUES (1, 'S01', 'StudentA');

Insert a record into the table. This command is equivalent to the previous one.

INSERT INTO reason_t1 VALUES (1, 'S01', 'StudentA');

Insert records whose TABLE_SK is less than 1 into the table.

INSERT INTO reason_t1 SELECT * FROM reason_t1 WHERE TABLE_SK < 1;

Insert records into the table.

INSERT INTO reason_t1 VALUES (1, 'S01', 'StudentA'),(2, 'T01', 'TeacherA'),(3, 'T02', 'TeacherB');
SELECT * FROM reason_t1 ORDER BY 1;
 TABLE_SK | TABLE_ID | TABLE_NA
----------+----------+-----------
        1 |      S01 |  StudentA
        2 |      T01 |  TeacherA
        3 |      T02 |  TeacherB
(3 rows)

Clear existing data in the table and insert data to the table.

INSERT OVERWRITE INTO reason_t1 VALUES (4, 'S02', 'StudentB');
SELECT * FROM reason_t1 ORDER BY 1;
 TABLE_SK | TABLE_ID | TABLE_NA
----------+----------+-----------
        4 |      S02 |  StudentB
(1 row)

Insert data back into the reason_t1 table.

INSERT INTO reason_t1 SELECT * FROM reason_t1;

Use the DEFAULT keyword to supply the default value for a column.

INSERT INTO reason_t1 VALUES (5, 'S03', DEFAULT);

Insert data from one table into another: use a WITH subquery to obtain the temporary table temp_t, and then insert all data in temp_t into the reason_t1 table.

WITH temp_t AS (SELECT * FROM reason_t1) INSERT INTO reason_t1 SELECT * FROM temp_t ORDER BY 1;
+ +
+ diff --git a/docs/dws/dev/dws_06_0242.html b/docs/dws/dev/dws_06_0242.html new file mode 100644 index 00000000..7fd1ba5d --- /dev/null +++ b/docs/dws/dev/dws_06_0242.html @@ -0,0 +1,39 @@ + + + +

DCL Syntax

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0243.html b/docs/dws/dev/dws_06_0243.html new file mode 100644 index 00000000..2b56f3d7 --- /dev/null +++ b/docs/dws/dev/dws_06_0243.html @@ -0,0 +1,17 @@ + + +

DCL Syntax Overview

+

Data control language (DCL) is used to set or modify the rights of database users or roles.

+

Authorization

GaussDB(DWS) provides a statement for granting rights to data objects and roles. For details, see GRANT.

+
+

Revoking Rights

GaussDB(DWS) provides a statement for revoking rights. For details, see REVOKE.

+
+

Setting Default Rights

GaussDB(DWS) allows users to set rights for objects that will be created. For details, see ALTER DEFAULT PRIVILEGES.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0244.html b/docs/dws/dev/dws_06_0244.html new file mode 100644 index 00000000..d6fd99c6 --- /dev/null +++ b/docs/dws/dev/dws_06_0244.html @@ -0,0 +1,205 @@ + + +

ALTER DEFAULT PRIVILEGES

+

Function

ALTER DEFAULT PRIVILEGES allows you to set the permissions that will be used for objects to be created. It does not affect permissions assigned to existing objects.

+

To isolate permissions, the WITH GRANT OPTION syntax is disabled in the current GaussDB(DWS) version.

+

A user can modify only the default permissions of the objects created by the user or the role to which the user belongs. These permissions can be set globally (that is, all objects created in the database) or for objects in a specified schema.

+

To view information about the default permissions of database users, query the PG_DEFAULT_ACL system catalog.

+
+

Precautions

Only the permissions for tables (including views), sequences, functions, and types (including domains) can be altered.

+
+

Syntax

ALTER DEFAULT PRIVILEGES
    [ FOR { ROLE | USER } target_role [, ...] ]
    [ IN SCHEMA schema_name [, ...] ]
    abbreviated_grant_or_revoke;

Parameter Description

+

If you want to delete a role that has been assigned default permissions, you must revoke the changes to the default permissions or use DROP OWNED BY to get rid of the default permission entry for the role.

+
+
+

Examples

+
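A minimal sketch of typical usage (the user joe and schema tpcds are sample names used elsewhere in this guide; the GRANT and REVOKE forms must be ones allowed by abbreviated_grant_or_revoke):

ALTER DEFAULT PRIVILEGES IN SCHEMA tpcds GRANT SELECT ON TABLES TO joe;
ALTER DEFAULT PRIVILEGES IN SCHEMA tpcds REVOKE SELECT ON TABLES FROM joe;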
+

Helpful Links

GRANT, REVOKE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0245.html b/docs/dws/dev/dws_06_0245.html new file mode 100644 index 00000000..24788312 --- /dev/null +++ b/docs/dws/dev/dws_06_0245.html @@ -0,0 +1,107 @@ + + +

ANALYZE | ANALYSE

+

Function

ANALYZE collects statistics about ordinary tables in a database and stores the results in the PG_STATISTIC system catalog. The execution plan generator uses these statistics to determine the most efficient execution plan.

+

If no parameters are specified, ANALYZE analyzes each table and partitioned table in the current database. You can also specify table_name, column, and partition_name to limit the analysis to a specified table, column, or partitioned table.

+

Users who can execute ANALYZE on a specific table include the owner of the table, the owner of the database where the table resides, users who are granted the ANALYZE permission on the table through GRANT, and users who have the SYSADMIN attribute.

+

To collect statistics using percentage sampling, you must have the ANALYZE and SELECT permissions.

+

ANALYZE | ANALYSE VERIFY is used to check whether the data files of common tables (row-store and column-store tables) in a database are damaged. Currently, this function does not support HDFS tables.

+
+

Precautions

+
+

Syntax

+ +
+ +
{ANALYZE | ANALYSE} VERIFY {FAST|COMPLETE} table_name PARTITION {(partition_name)} [CASCADE];
  • You can detect a single partition of a table, but cannot perform the CASCADE operation on index tables.
  • HDFS tables (internal and foreign tables), temporary tables, and unlogged tables are not supported.
+
+

Parameter Description

+
+

Examples

+
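A minimal sketch of typical usage (tpcds.reason is the sample table used throughout this guide; the VERIFY statement assumes the form without a PARTITION clause is available):

ANALYZE tpcds.reason;
ANALYZE VERIFY FAST tpcds.reason;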
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0246.html b/docs/dws/dev/dws_06_0246.html new file mode 100644 index 00000000..8e021134 --- /dev/null +++ b/docs/dws/dev/dws_06_0246.html @@ -0,0 +1,26 @@ + + +

DEALLOCATE

+

Function

DEALLOCATE deallocates a previously prepared statement. If you do not explicitly deallocate a prepared statement, it is deallocated when the session ends.

+

The PREPARE keyword in this command is always ignored.

+
+
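For instance, a statement prepared earlier in the session can be explicitly released (a minimal sketch; stmt1 is a placeholder name):

PREPARE stmt1 AS SELECT 1;
EXECUTE stmt1;
DEALLOCATE stmt1;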

Precautions

None

+
+

Syntax

DEALLOCATE [ PREPARE ] { name | ALL };

Parameter Description

+
+

Examples

None

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0247.html b/docs/dws/dev/dws_06_0247.html new file mode 100644 index 00000000..3440217a --- /dev/null +++ b/docs/dws/dev/dws_06_0247.html @@ -0,0 +1,44 @@ + + +

DO

+

Function

DO executes an anonymous code block.

+

A code block is treated as though it were the body of a function with no parameters that returns void. It is parsed and executed a single time.

+
+

Precautions

+
+

Syntax

DO [ LANGUAGE lang_name ] code;

Parameter Description

+
+

Examples

Grant user webuser all the operation permissions on tables and views in the tpcds schema.

DO $$DECLARE r record;
BEGIN
    -- relkind 'r' covers ordinary tables and 'v' covers views.
    FOR r IN SELECT c.relname,n.nspname FROM pg_class c,pg_namespace n
             WHERE c.relnamespace = n.oid AND n.nspname = 'tpcds' AND relkind IN ('r','v')
    LOOP
        -- The record r exposes the selected columns nspname and relname.
        EXECUTE 'GRANT ALL ON ' || quote_ident(r.nspname) || '.' || quote_ident(r.relname) || ' TO webuser';
    END LOOP;
END$$;
+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0248.html b/docs/dws/dev/dws_06_0248.html new file mode 100644 index 00000000..6153fa3c --- /dev/null +++ b/docs/dws/dev/dws_06_0248.html @@ -0,0 +1,33 @@ + + +

EXECUTE

+

Function

EXECUTE executes a prepared statement. Because a prepared statement exists only for the duration of its session, only prepared statements created using PREPARE earlier in the same session can be executed.

+
+

Precautions

If the PREPARE statement that created the prepared statement declared parameters, a compatible set of parameters must be passed to the EXECUTE statement; otherwise, an error occurs.

+
+

Syntax

EXECUTE name [ ( parameter [, ...] ) ];

Parameter Description

+
+

Examples

Create and run a prepared statement for the INSERT statement.
PREPARE insert_reason(integer,character(16),character(100)) AS INSERT INTO tpcds.reason_t1 VALUES($1,$2,$3);
EXECUTE insert_reason(52, 'AAAAAAAADDAAAAAA', 'reason 52');
+
+

Helpful Links

PREPARE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0249.html b/docs/dws/dev/dws_06_0249.html new file mode 100644 index 00000000..4fbba8d1 --- /dev/null +++ b/docs/dws/dev/dws_06_0249.html @@ -0,0 +1,38 @@ + + +

EXECUTE DIRECT

+

Function

EXECUTE DIRECT executes an SQL statement on a specified node. Generally, the cluster automatically allocates an SQL statement to proper nodes. EXECUTE DIRECT is mainly used for database maintenance and testing.

+
+

Precautions

+
+

Syntax

EXECUTE DIRECT ON ( nodename [, ... ] ) query;
+

Parameter Description

+
+

Examples

Query records in table tpcds.customer_address on the dn_6001_6002 node.

EXECUTE DIRECT ON(dn_6001_6002) 'select count(*) from tpcds.customer_address';
 count
-------
 16922
(1 row)
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0250.html b/docs/dws/dev/dws_06_0250.html new file mode 100644 index 00000000..04e2d312 --- /dev/null +++ b/docs/dws/dev/dws_06_0250.html @@ -0,0 +1,291 @@ + + +

GRANT

+

Function

GRANT grants permissions to roles and users.

+

GRANT is used in the following scenarios:

+ +
+

Precautions

To isolate permissions, GaussDB(DWS) disables WITH GRANT OPTION and TO PUBLIC.

+
+

Syntax

+
+

Parameter Description

GRANT grants the following permissions:

+ +

GRANT parameters are as follows:

+ +
+

Examples

+
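A minimal sketch of typical usage (joe, tpcds, and tpcds.reason are the sample user, schema, and table used in the REVOKE examples later in this guide):

GRANT USAGE,CREATE ON SCHEMA tpcds TO joe;
GRANT SELECT ON tpcds.reason TO joe;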
+

Helpful Links

REVOKE, ALTER DEFAULT PRIVILEGES

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0251.html b/docs/dws/dev/dws_06_0251.html new file mode 100644 index 00000000..29333225 --- /dev/null +++ b/docs/dws/dev/dws_06_0251.html @@ -0,0 +1,35 @@ + + +

PREPARE

+

Function

PREPARE creates a prepared statement.

+

A prepared statement is a server-side object that can be used to optimize performance. When the PREPARE statement is executed, the specified query is parsed, analyzed, and rewritten. When the EXECUTE statement is executed, the prepared statement is planned and executed. This avoids repetitive parsing and analysis. After a prepared statement is created, it exists throughout the database session. Once it is created (even inside a transaction block), it is not deleted when a transaction is rolled back. It can only be deleted by explicitly invoking DEALLOCATE, or automatically when the session ends.

+
+

Precautions

None

+
+

Syntax

PREPARE name [ ( data_type [, ...] ) ] AS statement;

Parameter Description

+
+

Examples

Create and run a prepared statement for the INSERT statement.
PREPARE insert_reason(integer,character(16),character(100)) AS INSERT INTO tpcds.reason_t1 VALUES($1,$2,$3);
EXECUTE insert_reason(52, 'AAAAAAAADDAAAAAA', 'reason 52');
+
+

Helpful Links

DEALLOCATE, EXECUTE

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0252.html b/docs/dws/dev/dws_06_0252.html new file mode 100644 index 00000000..e14d57bb --- /dev/null +++ b/docs/dws/dev/dws_06_0252.html @@ -0,0 +1,30 @@ + + +

REASSIGN OWNED

+

Function

REASSIGN OWNED changes the ownership of database objects.

+

REASSIGN OWNED instructs the system to change the owner of all database objects owned by any of the old_roles to new_role.

+
+

Precautions

+
+

Syntax

REASSIGN OWNED BY old_role [, ...] TO new_role;
+

Parameter Description

+
+

Examples

Reassign all database objects owned by the joe and jack roles to admin.

REASSIGN OWNED BY joe, jack TO admin;
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0253.html b/docs/dws/dev/dws_06_0253.html new file mode 100644 index 00000000..ad6699a0 --- /dev/null +++ b/docs/dws/dev/dws_06_0253.html @@ -0,0 +1,186 @@ + + +

REVOKE

+

Function

REVOKE revokes rights from one or more roles.

+
+

Precautions

If a non-owner user of an object attempts to REVOKE rights on the object, the command is executed based on the following rules:

+ +
+

Syntax

+
+

Parameter Description

The keyword PUBLIC indicates an implicitly defined group that contains all roles.

+

See Parameter Description of the GRANT command for the meaning of the privileges and related parameters.

+

Permissions of a role include the permissions directly granted to the role, permissions inherited from the parent role, and permissions granted to PUBLIC. Therefore, revoking the SELECT permission on an object from PUBLIC does not necessarily mean that such permission has been revoked from all roles, because the SELECT permission directly granted to roles or inherited from parent roles remains. Similarly, if the SELECT permission is revoked from a user but is not revoked from PUBLIC, the user can still run the SELECT statement.

+

If GRANT OPTION FOR is specified, only the grant option for the right is revoked, not the right itself.

+

If user A holds the UPDATE right on a table with WITH GRANT OPTION and has granted it to user B, the right that user B holds is called a dependent right. If the right or the grant option held by user A is revoked, the dependent right still exists; the dependent right is also revoked if CASCADE is specified.

+

A user can only revoke rights that were granted directly by that user. If, for example, user A has granted a right with the grant option to user B, and user B has in turn granted it to user C, user A cannot revoke the right directly from C. However, user A can revoke the grant option held by user B and use CASCADE; in this manner, the right held by user C is automatically revoked. For another example, if both user A and user B have granted the same right to C, A can revoke his own grant but not B's grant, so C still effectively holds the right.

+

If the role executing REVOKE holds rights indirectly via more than one role membership path, it is unspecified which containing role will be used to execute the command. In such cases, it is best practice to use SET ROLE to become the specific role you want to do the REVOKE as, and then execute REVOKE. Failure to do so may lead to revoking rights you did not intend to revoke, or not revoking any rights at all.

+
+

Examples

Revoke all permissions of user joe.
REVOKE ALL PRIVILEGES FROM joe;

Revoke the permissions granted in a specified schema.
REVOKE USAGE,CREATE ON SCHEMA tpcds FROM tpcds_manager;

Revoke the CONNECT privilege from user joe.

REVOKE CONNECT FROM joe;

Revoke the membership of role admins from user joe.

REVOKE admins FROM joe;

Revoke all the privileges of user joe for the myView view.

REVOKE ALL PRIVILEGES ON myView FROM joe;

Revoke the public insert permission on the customer_t1 table.

REVOKE INSERT ON customer_t1 FROM PUBLIC;

Revoke user joe's permission for the tpcds schema.

REVOKE USAGE ON SCHEMA tpcds FROM joe;

Revoke the query permissions for r_reason_sk and r_reason_id in the tpcds.reason table from user joe.

REVOKE select (r_reason_sk, r_reason_id) ON tpcds.reason FROM joe;
+

Helpful Links

GRANT

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0254.html b/docs/dws/dev/dws_06_0254.html new file mode 100644 index 00000000..39836edb --- /dev/null +++ b/docs/dws/dev/dws_06_0254.html @@ -0,0 +1,42 @@ + + +

TCL Syntax

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0255.html b/docs/dws/dev/dws_06_0255.html new file mode 100644 index 00000000..dfcfc0e4 --- /dev/null +++ b/docs/dws/dev/dws_06_0255.html @@ -0,0 +1,17 @@ + + +

TCL Syntax Overview

+

Transaction Control Language (TCL) controls the time and effect of database transactions and monitors the database.

+

Commit

GaussDB(DWS) uses the COMMIT or END statement to commit transactions. For details, see COMMIT | END.

+
+

Setting a Savepoint

GaussDB(DWS) creates a new savepoint in the current transaction. For details, see SAVEPOINT.

+
+

Rollback

GaussDB(DWS) rolls back the current transaction to the last committed state. For details, see ROLLBACK.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0256.html b/docs/dws/dev/dws_06_0256.html new file mode 100644 index 00000000..fd6f9ea1 --- /dev/null +++ b/docs/dws/dev/dws_06_0256.html @@ -0,0 +1,31 @@ + + +

ABORT

+

Function

ABORT rolls back the current transaction and cancels the changes in the transaction.

+

This command is equivalent to ROLLBACK and is present only for historical reasons; ROLLBACK is recommended instead.

+
+

Precautions

ABORT has no impact outside a transaction, but will provoke a warning.

+
+

Syntax

ABORT [ WORK | TRANSACTION ];
+

Parameter Description

WORK | TRANSACTION

+

Optional keywords that have no effect except to improve readability.

+
+

Examples

Abort a transaction. Performed update operations will be undone.

ABORT;
+

Helpful Links

SET TRANSACTION, COMMIT | END, ROLLBACK

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0257.html b/docs/dws/dev/dws_06_0257.html new file mode 100644 index 00000000..74506a1a --- /dev/null +++ b/docs/dws/dev/dws_06_0257.html @@ -0,0 +1,70 @@ + + +

BEGIN

+

Function

BEGIN may be used to initiate an anonymous block or a single transaction. This section describes the syntax of BEGIN used to initiate an anonymous block. For details about the BEGIN syntax that initiates transactions, see START TRANSACTION.

+

An anonymous block is a structure that can dynamically create and execute stored procedure code instead of permanently storing code as a database object in the database.

+
+

Precautions

None

+
+

Syntax

+
+

Parameter Description

+
+

Examples

+
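A minimal sketch of an anonymous block (tpcds.table1 is the sample table used in the RELEASE SAVEPOINT example; the trailing slash is assumed to be the gsql terminator that submits the block):

BEGIN
    INSERT INTO tpcds.table1 VALUES (1);
END;
/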
+

Helpful Links

START TRANSACTION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0258.html b/docs/dws/dev/dws_06_0258.html new file mode 100644 index 00000000..c2131e6f --- /dev/null +++ b/docs/dws/dev/dws_06_0258.html @@ -0,0 +1,28 @@ + + +

CHECKPOINT

+

Function

A checkpoint is a point in the transaction log sequence at which all data files have been updated to reflect the information in the log. All data files will be flushed to disk.

+

CHECKPOINT forces a transaction log checkpoint. By default, checkpoints occur periodically as the write-ahead log (WAL) advances. You can use gs_guc to set the run-time parameters checkpoint_segments and checkpoint_timeout to adjust the checkpoint interval.

+
+

Precautions

+
+

Syntax

CHECKPOINT;
+

Parameter Description

None

+
+

Examples

Set a checkpoint.

CHECKPOINT;
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0259.html b/docs/dws/dev/dws_06_0259.html new file mode 100644 index 00000000..e77418de --- /dev/null +++ b/docs/dws/dev/dws_06_0259.html @@ -0,0 +1,31 @@ + + +

COMMIT | END

+

Function

COMMIT or END commits all operations of a transaction.

+
+

Precautions

Only the transaction creator or a system administrator can run the COMMIT command. The creation and commit operations cannot be performed in different sessions.

+
+

Syntax

{ COMMIT | END } [ WORK | TRANSACTION ];
+

Parameter Description

+
+

Examples

Commit the transaction to make all changes permanent.

COMMIT;
+

Helpful Links

ROLLBACK

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0260.html b/docs/dws/dev/dws_06_0260.html new file mode 100644 index 00000000..9a41fee5 --- /dev/null +++ b/docs/dws/dev/dws_06_0260.html @@ -0,0 +1,28 @@ + + +

COMMIT PREPARED

+

Function

COMMIT PREPARED commits a prepared two-phase transaction.

+
+

Precautions

+
+

Syntax

COMMIT PREPARED transaction_id;
COMMIT PREPARED transaction_id WITH CSN;
+

Parameter Description

+ +
+
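A minimal sketch (assuming a transaction was previously prepared with the identifier 'trans_test'):

COMMIT PREPARED 'trans_test';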

Helpful Links

PREPARE TRANSACTION, ROLLBACK PREPARED

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0262.html b/docs/dws/dev/dws_06_0262.html new file mode 100644 index 00000000..349a5df2 --- /dev/null +++ b/docs/dws/dev/dws_06_0262.html @@ -0,0 +1,26 @@ + + +

PREPARE TRANSACTION

+

Function

PREPARE TRANSACTION prepares the current transaction for two-phase commit.

+

After this command, the transaction is no longer associated with the current session; instead, its state is fully stored on disk, and there is a high probability that it can be committed successfully, even if a database crash occurs before the commit is requested.

+

Once prepared, a transaction can later be committed or rolled back with COMMIT PREPARED or ROLLBACK PREPARED, respectively. Those commands can be issued from any session, not only the one that executed the original transaction.

+

From the point of view of the issuing session, PREPARE TRANSACTION is not unlike a ROLLBACK command: after executing it, there is no active current transaction, and the effects of the prepared transaction are no longer visible. (The effects will become visible again if the transaction is committed.)

+

If the PREPARE TRANSACTION command fails for any reason, it becomes a ROLLBACK and the current transaction is canceled.

+
+

Precautions

+
+

Syntax

PREPARE TRANSACTION transaction_id;
+
+

Parameter Description

transaction_id

+

An arbitrary identifier that later identifies this transaction for COMMIT PREPARED or ROLLBACK PREPARED. The identifier must be different from those for current prepared transactions.

+

Value range: The identifier must be written as a string literal, and must be less than 200 bytes long.

+
+
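A minimal sketch of preparing and then committing a two-phase transaction (the identifier 'trans_test' and the updated row are illustrative assumptions):

START TRANSACTION;
UPDATE tpcds.reason SET r_reason_desc = 'reason 2' WHERE r_reason_sk = 2;
PREPARE TRANSACTION 'trans_test';
COMMIT PREPARED 'trans_test';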

Helpful Links

COMMIT PREPARED, ROLLBACK PREPARED

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0263.html b/docs/dws/dev/dws_06_0263.html new file mode 100644 index 00000000..e426a0fa --- /dev/null +++ b/docs/dws/dev/dws_06_0263.html @@ -0,0 +1,56 @@ + + +

SAVEPOINT

+

Function

SAVEPOINT establishes a new savepoint within the current transaction.

+

A savepoint is a special mark inside a transaction. Together with ROLLBACK TO SAVEPOINT, it allows all commands executed after the savepoint was established to be rolled back, restoring the transaction state to what it was at the time of the savepoint.

+
+

Precautions

+
+

Syntax

SAVEPOINT savepoint_name;
+
+

Parameter Description

savepoint_name

+

Specifies the name of a new savepoint.

+
+

Examples

+
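A minimal sketch, mirroring the RELEASE SAVEPOINT example later in this guide (tpcds.table1 is the sample table used there):

BEGIN;
    INSERT INTO tpcds.table1 VALUES (3);
    SAVEPOINT my_savepoint;
    INSERT INTO tpcds.table1 VALUES (4);
    ROLLBACK TO SAVEPOINT my_savepoint;
COMMIT;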
+

Helpful Links

RELEASE SAVEPOINT, ROLLBACK TO SAVEPOINT

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0264.html b/docs/dws/dev/dws_06_0264.html new file mode 100644 index 00000000..8d7faf90 --- /dev/null +++ b/docs/dws/dev/dws_06_0264.html @@ -0,0 +1,45 @@ + + +

SET TRANSACTION

+

Function

SET TRANSACTION sets the characteristics of the current transaction. It has no effect on any subsequent transactions. Available transaction characteristics include the transaction isolation level and transaction access mode (read/write or read only).

+
+

Precautions

None

+
+

Syntax

Set the isolation level and access mode of the transaction.
{ SET [ LOCAL ] TRANSACTION | SET SESSION CHARACTERISTICS AS TRANSACTION }
  { ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED | SERIALIZABLE | REPEATABLE READ }
  | { READ WRITE | READ ONLY } } [, ...]
+
+

Parameter Description

+
+

Examples

Set the isolation level of the current transaction to READ COMMITTED and the access mode to READ ONLY.

START TRANSACTION;
SET LOCAL TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY;
COMMIT;
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0265.html b/docs/dws/dev/dws_06_0265.html new file mode 100644 index 00000000..1c464275 --- /dev/null +++ b/docs/dws/dev/dws_06_0265.html @@ -0,0 +1,78 @@ + + +

START TRANSACTION

+

Function

START TRANSACTION starts a transaction. If the isolation level, read/write mode, or deferrable mode is specified, a new transaction will have those characteristics. You can also specify them using SET TRANSACTION.

+
+

Precautions

None

+
+

Syntax

Format 1: START TRANSACTION

START TRANSACTION
  [
    {
      ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED | SERIALIZABLE | REPEATABLE READ }
      | { READ WRITE | READ ONLY }
    } [, ...]
  ];

Format 2: BEGIN

BEGIN [ WORK | TRANSACTION ]
  [
    {
      ISOLATION LEVEL { READ COMMITTED | READ UNCOMMITTED | SERIALIZABLE | REPEATABLE READ }
      | { READ WRITE | READ ONLY }
    } [, ...]
  ];
+

Parameter Description

+
+

Examples

+
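A minimal sketch of starting a transaction with explicit characteristics (the UPDATE statement is an illustrative assumption):

START TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE;
UPDATE tpcds.reason SET r_reason_desc = 'reason 1' WHERE r_reason_sk = 1;
COMMIT;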
+

Helpful Links

COMMIT | END, ROLLBACK, SET TRANSACTION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0266.html b/docs/dws/dev/dws_06_0266.html new file mode 100644 index 00000000..d856d621 --- /dev/null +++ b/docs/dws/dev/dws_06_0266.html @@ -0,0 +1,31 @@ + + +

ROLLBACK

+

Function

Rolls back the current transaction and backs out all updates in the transaction.

+

ROLLBACK backs out of all changes that a transaction makes to a database if the transaction fails to be executed due to a fault.

+
+

Precautions

If a ROLLBACK statement is executed outside a transaction, no error occurs, but a warning is displayed.

+
+

Syntax

ROLLBACK [ WORK | TRANSACTION ];
+

Parameter Description

WORK | TRANSACTION

+

Optional keywords that have no effect except to improve readability.

+
+

Examples

Undo all changes in the current transaction.

ROLLBACK;
+

Helpful Links

COMMIT | END

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0267.html b/docs/dws/dev/dws_06_0267.html new file mode 100644 index 00000000..e812b82b --- /dev/null +++ b/docs/dws/dev/dws_06_0267.html @@ -0,0 +1,42 @@ + + +

RELEASE SAVEPOINT

+

Function

RELEASE SAVEPOINT destroys a savepoint previously defined in the current transaction.

+

Destroying a savepoint makes it unavailable as a rollback point, but it has no other user-visible behavior. It does not undo the effects of commands executed after the savepoint was established. To do that, use ROLLBACK TO SAVEPOINT. Destroying a savepoint when it is no longer needed allows the system to reclaim some resources earlier than transaction end.

+

RELEASE SAVEPOINT also destroys all savepoints that were established after the named savepoint was established.

+
+

Precautions

+
+

Syntax

RELEASE [ SAVEPOINT ] savepoint_name;
+

Parameter Description

savepoint_name

+

Specifies the name of the savepoint you want to destroy.

+
+

Examples

Create and then destroy a savepoint.

BEGIN;
    INSERT INTO tpcds.table1 VALUES (3);
    SAVEPOINT my_savepoint;
    INSERT INTO tpcds.table1 VALUES (4);
    RELEASE SAVEPOINT my_savepoint;
COMMIT;
+

Helpful Links

SAVEPOINT, ROLLBACK TO SAVEPOINT

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0268.html b/docs/dws/dev/dws_06_0268.html new file mode 100644 index 00000000..8fafbb5b --- /dev/null +++ b/docs/dws/dev/dws_06_0268.html @@ -0,0 +1,24 @@ + + +

ROLLBACK PREPARED

+

Function

ROLLBACK PREPARED rolls back a transaction that has been prepared for two-phase commit.

+
+

Precautions

+
+

Syntax

ROLLBACK PREPARED transaction_id;
+

Parameter Description

transaction_id

+

Specifies the identifier of the prepared transaction to be rolled back.

+
+
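A minimal sketch (assuming a transaction was previously prepared with the identifier 'trans_test'):

ROLLBACK PREPARED 'trans_test';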

Helpful Links

COMMIT PREPARED, PREPARE TRANSACTION

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0269.html b/docs/dws/dev/dws_06_0269.html new file mode 100644 index 00000000..a7936adf --- /dev/null +++ b/docs/dws/dev/dws_06_0269.html @@ -0,0 +1,60 @@ + + +

ROLLBACK TO SAVEPOINT

+

Function

ROLLBACK TO SAVEPOINT rolls back to a savepoint. It implicitly destroys all savepoints that were established after the named savepoint.

+

Rolls back all commands that were executed after the savepoint was established. The savepoint remains valid and can be rolled back to again later, if needed.

+
+

Precautions

+
+

Syntax

ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name;
+

Parameter Description

savepoint_name

+

Specifies the name of the savepoint to roll back to.

+
+

Examples

Undo the effects of the commands executed after my_savepoint was established.

ROLLBACK TO SAVEPOINT my_savepoint;

Cursor positions are not affected by savepoint rollback.

+
BEGIN;
DECLARE foo CURSOR FOR SELECT 1 UNION SELECT 2;
SAVEPOINT foo;
FETCH 1 FROM foo;
 ?column?
----------
        1
ROLLBACK TO SAVEPOINT foo;
FETCH 1 FROM foo;
 ?column?
----------
        2
COMMIT;
+

Helpful Links

SAVEPOINT, RELEASE SAVEPOINT

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0270.html b/docs/dws/dev/dws_06_0270.html new file mode 100644 index 00000000..e8ea7deb --- /dev/null +++ b/docs/dws/dev/dws_06_0270.html @@ -0,0 +1,22 @@ + + +

GIN Indexes

+

+
+
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0271.html b/docs/dws/dev/dws_06_0271.html new file mode 100644 index 00000000..8aa18a3e --- /dev/null +++ b/docs/dws/dev/dws_06_0271.html @@ -0,0 +1,14 @@ + + +

Introduction

+

Generalized Inverted Index (GIN) is designed for handling cases where the items to be indexed are composite values, and the queries to be handled by the index need to search for element values in the composite items. For example, the items could be documents, and the queries could be searches for documents containing specific words.

+

We use the word "item" to refer to a composite value that is to be indexed, and the word "key" to refer to an element value. GIN stores and searches for keys, not item values.

+

A GIN index stores a set of (key, posting list) key-value pairs, where a posting list is a set of row IDs in which the key occurs. The same row ID can appear in multiple posting lists, since an item can contain more than one key. Each key value is stored only once, so a GIN index is very compact for cases where the same key appears many times.

+

GIN is generalized in the sense that the GIN access method code does not need to know the specific operations that it accelerates. Instead, it uses custom strategies defined for particular data types. The strategy defines how keys are extracted from indexed items and query conditions, and how to determine whether a row that contains some of the key values in a query actually satisfies the query.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0272.html b/docs/dws/dev/dws_06_0272.html new file mode 100644 index 00000000..436b738d --- /dev/null +++ b/docs/dws/dev/dws_06_0272.html @@ -0,0 +1,31 @@ + + +

Scalability

+

The GIN interface has a high level of abstraction, requiring the access method implementer only to implement the semantics of the data type being accessed. The GIN layer itself takes care of concurrency, logging and searching the tree structure.

+

All it takes to get a GIN access method working is to implement multiple user-defined methods, which define the behavior of keys in the tree and the relationships between keys, indexed items, and indexable queries. In short, GIN combines extensibility with generality, code reuse, and a clean interface.

+

There are four methods that an operator class for GIN must provide:

  • compare: compares two keys and returns an integer less than zero, zero, or greater than zero, indicating whether the first key is less than, equal to, or greater than the second.
  • extractValue: returns an array of keys extracted from an item to be indexed.
  • extractQuery: returns an array of keys extracted from a query condition, together with information controlling how the keys are matched.
  • consistent: determines whether an indexed item satisfies the query, given the set of keys that matched.

Optionally, an operator class for GIN can supply the following method:

  • comparePartial: compares a partial-match query key to an index key and reports whether the index key matches, does not match but scanning should continue, or is past the range that could match.

To support "partial match" queries, an operator class must provide the comparePartial method, and its extractQuery method must set the pmatch parameter when a partial-match query is encountered. For details, see Partial Match Algorithm.

+

The actual data types of the various Datum values mentioned in this section vary depending on the operator class. The item values passed to extractValue are always of the operator class's input type, and all key values must be of the class's STORAGE type. The type of the query argument passed to extractQuery, consistent and triConsistent is whatever is specified as the right-hand input type of the class member operator identified by the strategy number. This need not be the same as the item type, so long as key values of the correct type can be extracted from it.

+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0273.html b/docs/dws/dev/dws_06_0273.html new file mode 100644 index 00000000..ac292b5f --- /dev/null +++ b/docs/dws/dev/dws_06_0273.html @@ -0,0 +1,18 @@ + + +

Implementation

+

Internally, a GIN index contains a B-tree index constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and where each tuple in a leaf page contains either a pointer to a B-tree of heap pointers (a "posting tree"), or a simple list of heap pointers (a "posting list") when the list is small enough to fit into a single index tuple along with the key value.

+

Multi-column GIN indexes are implemented by building a single B-tree over composite values (column number, key value). The key values for different columns can be of different types.

+

GIN Fast Update Technique

Updating a GIN index tends to be slow because of the intrinsic nature of inverted indexes: inserting or updating one heap row can cause many inserts into the index. GIN alleviates this by postponing much of the work: new entries are first added to a temporary, unsorted list of pending entries. After the table is vacuumed, or if the pending list becomes larger than work_mem, the entries are moved to the main GIN data structure using the same bulk insert techniques used during initial index creation. This greatly increases the GIN index update speed, even counting the additional vacuum overhead. Moreover, the overhead work can be done by a background process instead of in foreground query processing.

+

The main disadvantage of this approach is that searches must scan the list of pending entries in addition to searching the regular index, and so a large list of pending entries will slow searches significantly. Another disadvantage is that, while most updates are fast, an update that causes the pending list to become "too large" will incur an immediate cleanup cycle and be much slower than other updates. Proper use of autovacuum can minimize both of these problems.

+

If consistent response time is more important than update speed, use of pending entries can be disabled by turning off the fastupdate storage parameter for a GIN index. For details, see CREATE INDEX.

+
+

Partial Match Algorithm

GIN can support "partial match" queries, in which the query does not determine an exact match for one or more keys, but the possible matches fall within a narrow range of key values (within the key sorting order determined by the compare support method). The extractQuery method, instead of returning a key value to be matched exactly, returns a key value that is the lower bound of the range to be searched, and sets the pmatch flag true. The key range is then scanned using the comparePartial method. comparePartial must return zero for a matching index key, less than zero for a non-match that is still within the range to be searched, or greater than zero if the index key is past the range that could match.

+
+
+
+ +
+ diff --git a/docs/dws/dev/dws_06_0274.html b/docs/dws/dev/dws_06_0274.html new file mode 100644 index 00000000..aab31388 --- /dev/null +++ b/docs/dws/dev/dws_06_0274.html @@ -0,0 +1,17 @@ + + +

GIN Tips and Tricks

+

Create vs. Insert

+

Insertion into a GIN index can be slow due to the likelihood of many keys being inserted for each item. So, for bulk insertions into a table, it is advisable to drop the GIN index and recreate it after finishing the bulk insertions. GUC parameters related to GIN index creation and query performance are as follows:

  • maintenance_work_mem: the build time of a GIN index is very sensitive to this setting.
  • work_mem: during a series of insertions into a GIN index that has fastupdate enabled, the pending-entry list is flushed into the main index structure whenever it grows larger than work_mem.
  • gin_fuzzy_search_limit: a soft upper limit on the number of rows returned by a GIN index scan, used to avoid extremely slow broad queries.
+
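
For instance, a bulk load could be wrapped as follows (a minimal sketch; the docs table, its body column, and the index name are hypothetical):

DROP INDEX IF EXISTS docs_body_gin;
-- Bulk-load rows into docs here, for example with INSERT or COPY.
CREATE INDEX docs_body_gin ON docs USING GIN (to_tsvector('english', body));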
+ +
+ diff --git a/docs/dws/dev/dws_06_0275.html b/docs/dws/dev/dws_06_0275.html new file mode 100644 index 00000000..9a744e88 --- /dev/null +++ b/docs/dws/dev/dws_06_0275.html @@ -0,0 +1,21 @@ + + + +

INSERT and UPSERT

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0276.html b/docs/dws/dev/dws_06_0276.html new file mode 100644 index 00000000..83a3c873 --- /dev/null +++ b/docs/dws/dev/dws_06_0276.html @@ -0,0 +1,23 @@ + + + +

DQL Syntax

+ +

+
+ +
+ + + +
+ diff --git a/docs/dws/dev/dws_06_0277.html b/docs/dws/dev/dws_06_0277.html new file mode 100644 index 00000000..23b98f9e --- /dev/null +++ b/docs/dws/dev/dws_06_0277.html @@ -0,0 +1,15 @@ + + +

DQL Syntax Overview

+

Data Query Language (DQL) can obtain data from tables or views.

+

Query

GaussDB(DWS) provides statements for obtaining data from tables or views. For details, see SELECT.

+
+

Defining a New Table Based on Query Results

GaussDB(DWS) provides a statement for creating a table based on query results and inserting the queried data into the table. For details, see SELECT INTO.

+
+
+
+ +
+ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655278.png b/docs/dws/dev/figure/en-us_image_0000001098655278.png new file mode 100644 index 00000000..f7de9ca1 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655278.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655292.png b/docs/dws/dev/figure/en-us_image_0000001098655292.png new file mode 100644 index 00000000..e72b1660 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655292.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655300.png b/docs/dws/dev/figure/en-us_image_0000001098655300.png new file mode 100644 index 00000000..6b9d3c19 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655300.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655330.png b/docs/dws/dev/figure/en-us_image_0000001098655330.png new file mode 100644 index 00000000..86309d5b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655330.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655346.png b/docs/dws/dev/figure/en-us_image_0000001098655346.png new file mode 100644 index 00000000..a1093781 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655346.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655364.png b/docs/dws/dev/figure/en-us_image_0000001098655364.png new file mode 100644 index 00000000..8282aee1 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655364.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655366.png b/docs/dws/dev/figure/en-us_image_0000001098655366.png new file mode 100644 index 00000000..1e5d4fab Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655366.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655372.png b/docs/dws/dev/figure/en-us_image_0000001098655372.png new file mode 100644 index 00000000..bb9b05c5 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655372.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655374.png b/docs/dws/dev/figure/en-us_image_0000001098655374.png new file mode 100644 index 00000000..6e8a0d6a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655374.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655390.png b/docs/dws/dev/figure/en-us_image_0000001098655390.png new file mode 100644 index 00000000..976f9dac Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655390.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655392.png b/docs/dws/dev/figure/en-us_image_0000001098655392.png new file mode 100644 index 00000000..c39ef851 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655392.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655398.jpg b/docs/dws/dev/figure/en-us_image_0000001098655398.jpg new file mode 100644 index 00000000..960c3e33 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655398.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655400.jpg b/docs/dws/dev/figure/en-us_image_0000001098655400.jpg new file mode 100644 index 00000000..e4927681 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655400.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655422.jpg b/docs/dws/dev/figure/en-us_image_0000001098655422.jpg new file mode 100644 index 00000000..e0c994c4 Binary files /dev/null and 
b/docs/dws/dev/figure/en-us_image_0000001098655422.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098655436.png b/docs/dws/dev/figure/en-us_image_0000001098655436.png new file mode 100644 index 00000000..95fd0c9f Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098655436.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098671216.png b/docs/dws/dev/figure/en-us_image_0000001098671216.png new file mode 100644 index 00000000..b42a2f1f Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098671216.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098671232.png b/docs/dws/dev/figure/en-us_image_0000001098671232.png new file mode 100644 index 00000000..af96d21e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098671232.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098671234.png b/docs/dws/dev/figure/en-us_image_0000001098671234.png new file mode 100644 index 00000000..aae9b91f Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098671234.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098671236.png b/docs/dws/dev/figure/en-us_image_0000001098671236.png new file mode 100644 index 00000000..6e36e524 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098671236.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098671238.png b/docs/dws/dev/figure/en-us_image_0000001098671238.png new file mode 100644 index 00000000..a9c566b9 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098671238.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815108.png b/docs/dws/dev/figure/en-us_image_0000001098815108.png new file mode 100644 index 00000000..1b90ff98 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815108.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815118.png b/docs/dws/dev/figure/en-us_image_0000001098815118.png new file mode 100644 index 00000000..3d43371e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815118.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815122.png b/docs/dws/dev/figure/en-us_image_0000001098815122.png new file mode 100644 index 00000000..f0663af2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815122.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815148.png b/docs/dws/dev/figure/en-us_image_0000001098815148.png new file mode 100644 index 00000000..f5722834 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815148.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815162.png b/docs/dws/dev/figure/en-us_image_0000001098815162.png new file mode 100644 index 00000000..c7b22fe6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815162.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815180.png b/docs/dws/dev/figure/en-us_image_0000001098815180.png new file mode 100644 index 00000000..2e34d689 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815180.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815188.png b/docs/dws/dev/figure/en-us_image_0000001098815188.png new file mode 100644 index 00000000..e0b7ff19 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815188.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815194.png b/docs/dws/dev/figure/en-us_image_0000001098815194.png new file mode 100644 index 
00000000..b180446c Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815194.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815204.png b/docs/dws/dev/figure/en-us_image_0000001098815204.png new file mode 100644 index 00000000..3381656f Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815204.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815208.png b/docs/dws/dev/figure/en-us_image_0000001098815208.png new file mode 100644 index 00000000..c56db1c2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815208.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815218.png b/docs/dws/dev/figure/en-us_image_0000001098815218.png new file mode 100644 index 00000000..af044b0e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815218.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815222.png b/docs/dws/dev/figure/en-us_image_0000001098815222.png new file mode 100644 index 00000000..ad71ba68 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815222.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815232.png b/docs/dws/dev/figure/en-us_image_0000001098815232.png new file mode 100644 index 00000000..cc331395 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815232.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815236.png b/docs/dws/dev/figure/en-us_image_0000001098815236.png new file mode 100644 index 00000000..a9549c18 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815236.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098815256.png b/docs/dws/dev/figure/en-us_image_0000001098815256.png new file mode 100644 index 00000000..3210b21a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098815256.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098831044.png b/docs/dws/dev/figure/en-us_image_0000001098831044.png new file mode 100644 index 00000000..7b0e7436 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098831044.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098831058.png b/docs/dws/dev/figure/en-us_image_0000001098831058.png new file mode 100644 index 00000000..2ba0afb4 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098831058.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098831060.png b/docs/dws/dev/figure/en-us_image_0000001098831060.png new file mode 100644 index 00000000..c85623ae Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098831060.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098831064.png b/docs/dws/dev/figure/en-us_image_0000001098831064.png new file mode 100644 index 00000000..e98f6430 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098831064.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975108.png b/docs/dws/dev/figure/en-us_image_0000001098975108.png new file mode 100644 index 00000000..4a702472 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975108.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975112.png b/docs/dws/dev/figure/en-us_image_0000001098975112.png new file mode 100644 index 00000000..2c28d3c5 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975112.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975116.png 
b/docs/dws/dev/figure/en-us_image_0000001098975116.png new file mode 100644 index 00000000..5c8b53c1 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975116.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975124.png b/docs/dws/dev/figure/en-us_image_0000001098975124.png new file mode 100644 index 00000000..22e43576 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975124.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975130.png b/docs/dws/dev/figure/en-us_image_0000001098975130.png new file mode 100644 index 00000000..ea133acf Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975130.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975134.png b/docs/dws/dev/figure/en-us_image_0000001098975134.png new file mode 100644 index 00000000..d4d894a3 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975134.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975146.png b/docs/dws/dev/figure/en-us_image_0000001098975146.png new file mode 100644 index 00000000..2becb076 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975146.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975154.png b/docs/dws/dev/figure/en-us_image_0000001098975154.png new file mode 100644 index 00000000..7cd19389 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975154.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975158.png b/docs/dws/dev/figure/en-us_image_0000001098975158.png new file mode 100644 index 00000000..7cd19389 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975158.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975164.jpg b/docs/dws/dev/figure/en-us_image_0000001098975164.jpg new file mode 100644 index 00000000..8f9cf0e0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975164.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975174.jpg b/docs/dws/dev/figure/en-us_image_0000001098975174.jpg new file mode 100644 index 00000000..41ee83fd Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975174.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975180.jpg b/docs/dws/dev/figure/en-us_image_0000001098975180.jpg new file mode 100644 index 00000000..5150409e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975180.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975190.png b/docs/dws/dev/figure/en-us_image_0000001098975190.png new file mode 100644 index 00000000..cb1725c5 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975190.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975206.png b/docs/dws/dev/figure/en-us_image_0000001098975206.png new file mode 100644 index 00000000..7bd32e4b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975206.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975208.png b/docs/dws/dev/figure/en-us_image_0000001098975208.png new file mode 100644 index 00000000..606e68c7 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975208.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975214.png b/docs/dws/dev/figure/en-us_image_0000001098975214.png new file mode 100644 index 00000000..69f157ff Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975214.png differ diff --git 
a/docs/dws/dev/figure/en-us_image_0000001098975228.png b/docs/dws/dev/figure/en-us_image_0000001098975228.png new file mode 100644 index 00000000..8ce079a0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975228.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098975234.png b/docs/dws/dev/figure/en-us_image_0000001098975234.png new file mode 100644 index 00000000..a465ede2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098975234.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098991040.png b/docs/dws/dev/figure/en-us_image_0000001098991040.png new file mode 100644 index 00000000..666e005e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098991040.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001098991054.png b/docs/dws/dev/figure/en-us_image_0000001098991054.png new file mode 100644 index 00000000..170de5f1 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001098991054.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135088.jpg b/docs/dws/dev/figure/en-us_image_0000001099135088.jpg new file mode 100644 index 00000000..c3c77252 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135088.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135094.png b/docs/dws/dev/figure/en-us_image_0000001099135094.png new file mode 100644 index 00000000..5d81d64f Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135094.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135098.png b/docs/dws/dev/figure/en-us_image_0000001099135098.png new file mode 100644 index 00000000..7558f6aa Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135098.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135104.png b/docs/dws/dev/figure/en-us_image_0000001099135104.png new file mode 100644 index 00000000..f20d827b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135104.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135110.png b/docs/dws/dev/figure/en-us_image_0000001099135110.png new file mode 100644 index 00000000..ac9b0e13 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135110.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135134.jpg b/docs/dws/dev/figure/en-us_image_0000001099135134.jpg new file mode 100644 index 00000000..67022632 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135134.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135152.png b/docs/dws/dev/figure/en-us_image_0000001099135152.png new file mode 100644 index 00000000..18fc66d6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135152.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135168.jpg b/docs/dws/dev/figure/en-us_image_0000001099135168.jpg new file mode 100644 index 00000000..f8b55369 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135168.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135172.png b/docs/dws/dev/figure/en-us_image_0000001099135172.png new file mode 100644 index 00000000..91ffaae6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135172.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135180.png b/docs/dws/dev/figure/en-us_image_0000001099135180.png new file mode 100644 index 00000000..d6cd8239 Binary files /dev/null and 
b/docs/dws/dev/figure/en-us_image_0000001099135180.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135194.png b/docs/dws/dev/figure/en-us_image_0000001099135194.png new file mode 100644 index 00000000..2d8585b6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135194.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135196.jpg b/docs/dws/dev/figure/en-us_image_0000001099135196.jpg new file mode 100644 index 00000000..3f2db9b0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135196.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135202.png b/docs/dws/dev/figure/en-us_image_0000001099135202.png new file mode 100644 index 00000000..058a79dd Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135202.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135208.png b/docs/dws/dev/figure/en-us_image_0000001099135208.png new file mode 100644 index 00000000..54cd3eff Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135208.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099135218.png b/docs/dws/dev/figure/en-us_image_0000001099135218.png new file mode 100644 index 00000000..339ec5a4 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099135218.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099151022.png b/docs/dws/dev/figure/en-us_image_0000001099151022.png new file mode 100644 index 00000000..c8650cdc Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099151022.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099151052.png b/docs/dws/dev/figure/en-us_image_0000001099151052.png new file mode 100644 index 00000000..d5fc7383 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099151052.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099282726.png b/docs/dws/dev/figure/en-us_image_0000001099282726.png new file mode 100644 index 00000000..50275a8c Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099282726.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099480652.jpg b/docs/dws/dev/figure/en-us_image_0000001099480652.jpg new file mode 100644 index 00000000..c0a206bb Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099480652.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001099602632.png b/docs/dws/dev/figure/en-us_image_0000001099602632.png new file mode 100644 index 00000000..53cdb2b6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001099602632.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001100340752.png b/docs/dws/dev/figure/en-us_image_0000001100340752.png new file mode 100644 index 00000000..ef064bbe Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001100340752.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495111.png b/docs/dws/dev/figure/en-us_image_0000001145495111.png new file mode 100644 index 00000000..5cd7644e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495111.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495125.png b/docs/dws/dev/figure/en-us_image_0000001145495125.png new file mode 100644 index 00000000..5af6c44f Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495125.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495133.png b/docs/dws/dev/figure/en-us_image_0000001145495133.png new file mode 100644 index 
00000000..eb891ee2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495133.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495159.jpg b/docs/dws/dev/figure/en-us_image_0000001145495159.jpg new file mode 100644 index 00000000..9bbeac00 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495159.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495161.png b/docs/dws/dev/figure/en-us_image_0000001145495161.png new file mode 100644 index 00000000..03a1b7fc Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495161.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495175.png b/docs/dws/dev/figure/en-us_image_0000001145495175.png new file mode 100644 index 00000000..f4adee6a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495175.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495193.png b/docs/dws/dev/figure/en-us_image_0000001145495193.png new file mode 100644 index 00000000..ff82ca8d Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495193.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495195.png b/docs/dws/dev/figure/en-us_image_0000001145495195.png new file mode 100644 index 00000000..73e92391 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495195.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495203.png b/docs/dws/dev/figure/en-us_image_0000001145495203.png new file mode 100644 index 00000000..baabc915 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495203.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495209.png b/docs/dws/dev/figure/en-us_image_0000001145495209.png new file mode 100644 index 00000000..fb06b2b0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495209.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495219.png b/docs/dws/dev/figure/en-us_image_0000001145495219.png new file mode 100644 index 00000000..0c08e7d2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495219.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495221.png b/docs/dws/dev/figure/en-us_image_0000001145495221.png new file mode 100644 index 00000000..328410f2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495221.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495227.jpg b/docs/dws/dev/figure/en-us_image_0000001145495227.jpg new file mode 100644 index 00000000..7a73ee10 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495227.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495247.png b/docs/dws/dev/figure/en-us_image_0000001145495247.png new file mode 100644 index 00000000..a0f89c77 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495247.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495253.png b/docs/dws/dev/figure/en-us_image_0000001145495253.png new file mode 100644 index 00000000..12a9342a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495253.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145495261.png b/docs/dws/dev/figure/en-us_image_0000001145495261.png new file mode 100644 index 00000000..ffb95b0a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145495261.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145511041.png 
b/docs/dws/dev/figure/en-us_image_0000001145511041.png new file mode 100644 index 00000000..681174f6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145511041.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145511043.png b/docs/dws/dev/figure/en-us_image_0000001145511043.png new file mode 100644 index 00000000..423bb1e6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145511043.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145511057.png b/docs/dws/dev/figure/en-us_image_0000001145511057.png new file mode 100644 index 00000000..70bb339a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145511057.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695045.png b/docs/dws/dev/figure/en-us_image_0000001145695045.png new file mode 100644 index 00000000..5722c56a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695045.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695049.png b/docs/dws/dev/figure/en-us_image_0000001145695049.png new file mode 100644 index 00000000..c278db49 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695049.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695053.png b/docs/dws/dev/figure/en-us_image_0000001145695053.png new file mode 100644 index 00000000..fbf7b2f8 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695053.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695065.png b/docs/dws/dev/figure/en-us_image_0000001145695065.png new file mode 100644 index 00000000..e03d843e Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695065.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695071.png b/docs/dws/dev/figure/en-us_image_0000001145695071.png new file mode 100644 index 00000000..7c8ff4fe Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695071.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695083.png b/docs/dws/dev/figure/en-us_image_0000001145695083.png new file mode 100644 index 00000000..428ac046 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695083.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695089.png b/docs/dws/dev/figure/en-us_image_0000001145695089.png new file mode 100644 index 00000000..8ce079a0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695089.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695097.png b/docs/dws/dev/figure/en-us_image_0000001145695097.png new file mode 100644 index 00000000..db2ca81d Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695097.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695103.jpg b/docs/dws/dev/figure/en-us_image_0000001145695103.jpg new file mode 100644 index 00000000..e22cd6e7 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695103.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695131.png b/docs/dws/dev/figure/en-us_image_0000001145695131.png new file mode 100644 index 00000000..fa0f3c1b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695131.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695139.png b/docs/dws/dev/figure/en-us_image_0000001145695139.png new file mode 100644 index 00000000..e8d47a24 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695139.png differ diff --git 
a/docs/dws/dev/figure/en-us_image_0000001145695143.png b/docs/dws/dev/figure/en-us_image_0000001145695143.png new file mode 100644 index 00000000..e8cae3b9 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695143.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695149.png b/docs/dws/dev/figure/en-us_image_0000001145695149.png new file mode 100644 index 00000000..fad0651b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695149.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695169.jpg b/docs/dws/dev/figure/en-us_image_0000001145695169.jpg new file mode 100644 index 00000000..bd44606b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695169.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695171.jpg b/docs/dws/dev/figure/en-us_image_0000001145695171.jpg new file mode 100644 index 00000000..a2f6cbe0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695171.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695177.png b/docs/dws/dev/figure/en-us_image_0000001145695177.png new file mode 100644 index 00000000..a9cc81be Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695177.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695187.png b/docs/dws/dev/figure/en-us_image_0000001145695187.png new file mode 100644 index 00000000..f66ce422 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695187.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145695193.png b/docs/dws/dev/figure/en-us_image_0000001145695193.png new file mode 100644 index 00000000..1242c468 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145695193.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145710975.png b/docs/dws/dev/figure/en-us_image_0000001145710975.png new file mode 100644 index 00000000..bb6cd8c0 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145710975.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145710989.png b/docs/dws/dev/figure/en-us_image_0000001145710989.png new file mode 100644 index 00000000..fe10410a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145710989.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145710991.png b/docs/dws/dev/figure/en-us_image_0000001145710991.png new file mode 100644 index 00000000..c7b50274 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145710991.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145710993.png b/docs/dws/dev/figure/en-us_image_0000001145710993.png new file mode 100644 index 00000000..4a98fa8d Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145710993.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145710995.png b/docs/dws/dev/figure/en-us_image_0000001145710995.png new file mode 100644 index 00000000..e94b8279 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145710995.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145814971.png b/docs/dws/dev/figure/en-us_image_0000001145814971.png new file mode 100644 index 00000000..ba98e58b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145814971.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145814977.png b/docs/dws/dev/figure/en-us_image_0000001145814977.png new file mode 100644 index 00000000..a8f844c3 Binary files /dev/null and 
b/docs/dws/dev/figure/en-us_image_0000001145814977.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145814981.png b/docs/dws/dev/figure/en-us_image_0000001145814981.png new file mode 100644 index 00000000..ea40852c Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145814981.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145814985.png b/docs/dws/dev/figure/en-us_image_0000001145814985.png new file mode 100644 index 00000000..61ad33f4 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145814985.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145814991.png b/docs/dws/dev/figure/en-us_image_0000001145814991.png new file mode 100644 index 00000000..13c05e08 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145814991.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145814997.png b/docs/dws/dev/figure/en-us_image_0000001145814997.png new file mode 100644 index 00000000..acfed7a3 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145814997.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815011.png b/docs/dws/dev/figure/en-us_image_0000001145815011.png new file mode 100644 index 00000000..41a7ab13 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815011.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815017.png b/docs/dws/dev/figure/en-us_image_0000001145815017.png new file mode 100644 index 00000000..a922995d Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815017.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815021.png b/docs/dws/dev/figure/en-us_image_0000001145815021.png new file mode 100644 index 00000000..d5a16a93 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815021.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815025.png b/docs/dws/dev/figure/en-us_image_0000001145815025.png new file mode 100644 index 00000000..4067af0a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815025.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815033.png b/docs/dws/dev/figure/en-us_image_0000001145815033.png new file mode 100644 index 00000000..b7eeb9a5 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815033.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815039.png b/docs/dws/dev/figure/en-us_image_0000001145815039.png new file mode 100644 index 00000000..4d69926a Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815039.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815045.png b/docs/dws/dev/figure/en-us_image_0000001145815045.png new file mode 100644 index 00000000..f7d670d9 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815045.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815055.png b/docs/dws/dev/figure/en-us_image_0000001145815055.png new file mode 100644 index 00000000..28c82f99 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815055.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815059.png b/docs/dws/dev/figure/en-us_image_0000001145815059.png new file mode 100644 index 00000000..f9083877 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815059.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815069.png b/docs/dws/dev/figure/en-us_image_0000001145815069.png new file mode 100644 index 
00000000..0ad17d3b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815069.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815079.png b/docs/dws/dev/figure/en-us_image_0000001145815079.png new file mode 100644 index 00000000..e2410d43 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815079.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815093.png b/docs/dws/dev/figure/en-us_image_0000001145815093.png new file mode 100644 index 00000000..ba98e58b Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815093.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815101.png b/docs/dws/dev/figure/en-us_image_0000001145815101.png new file mode 100644 index 00000000..6b28ab5c Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815101.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145815117.png b/docs/dws/dev/figure/en-us_image_0000001145815117.png new file mode 100644 index 00000000..03521028 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145815117.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145830887.png b/docs/dws/dev/figure/en-us_image_0000001145830887.png new file mode 100644 index 00000000..745a3fde Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145830887.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145830893.jpg b/docs/dws/dev/figure/en-us_image_0000001145830893.jpg new file mode 100644 index 00000000..a1c19617 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145830893.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145830907.png b/docs/dws/dev/figure/en-us_image_0000001145830907.png new file mode 100644 index 00000000..169cfb55 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145830907.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145830909.png b/docs/dws/dev/figure/en-us_image_0000001145830909.png new file mode 100644 index 00000000..ca510bf7 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145830909.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895087.png b/docs/dws/dev/figure/en-us_image_0000001145895087.png new file mode 100644 index 00000000..5f836458 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895087.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895093.png b/docs/dws/dev/figure/en-us_image_0000001145895093.png new file mode 100644 index 00000000..5da1e715 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895093.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895095.png b/docs/dws/dev/figure/en-us_image_0000001145895095.png new file mode 100644 index 00000000..85411be4 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895095.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895101.png b/docs/dws/dev/figure/en-us_image_0000001145895101.png new file mode 100644 index 00000000..05071567 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895101.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895125.png b/docs/dws/dev/figure/en-us_image_0000001145895125.png new file mode 100644 index 00000000..38415868 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895125.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895131.png 
b/docs/dws/dev/figure/en-us_image_0000001145895131.png new file mode 100644 index 00000000..8ba92c4d Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895131.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895139.png b/docs/dws/dev/figure/en-us_image_0000001145895139.png new file mode 100644 index 00000000..a31f6408 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895139.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895141.png b/docs/dws/dev/figure/en-us_image_0000001145895141.png new file mode 100644 index 00000000..66506a24 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895141.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895151.png b/docs/dws/dev/figure/en-us_image_0000001145895151.png new file mode 100644 index 00000000..737757f5 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895151.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895161.png b/docs/dws/dev/figure/en-us_image_0000001145895161.png new file mode 100644 index 00000000..52eed836 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895161.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895173.png b/docs/dws/dev/figure/en-us_image_0000001145895173.png new file mode 100644 index 00000000..26e8c434 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895173.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895175.png b/docs/dws/dev/figure/en-us_image_0000001145895175.png new file mode 100644 index 00000000..c8b8167d Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895175.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895187.png b/docs/dws/dev/figure/en-us_image_0000001145895187.png new file mode 100644 index 00000000..1eaa46d2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895187.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895199.png b/docs/dws/dev/figure/en-us_image_0000001145895199.png new file mode 100644 index 00000000..d80b9878 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895199.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145895219.png b/docs/dws/dev/figure/en-us_image_0000001145895219.png new file mode 100644 index 00000000..31630a87 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145895219.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145911019.jpg b/docs/dws/dev/figure/en-us_image_0000001145911019.jpg new file mode 100644 index 00000000..9c378ec2 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145911019.jpg differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145911035.png b/docs/dws/dev/figure/en-us_image_0000001145911035.png new file mode 100644 index 00000000..164bf964 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145911035.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145911037.png b/docs/dws/dev/figure/en-us_image_0000001145911037.png new file mode 100644 index 00000000..f66bad86 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145911037.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001145911043.png b/docs/dws/dev/figure/en-us_image_0000001145911043.png new file mode 100644 index 00000000..14447947 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001145911043.png differ diff --git 
a/docs/dws/dev/figure/en-us_image_0000001146480609.png b/docs/dws/dev/figure/en-us_image_0000001146480609.png new file mode 100644 index 00000000..99194057 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001146480609.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001146820617.png b/docs/dws/dev/figure/en-us_image_0000001146820617.png new file mode 100644 index 00000000..e1ec27c6 Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001146820617.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001147735331.png b/docs/dws/dev/figure/en-us_image_0000001147735331.png new file mode 100644 index 00000000..e879d5bb Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001147735331.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001217970670.png b/docs/dws/dev/figure/en-us_image_0000001217970670.png new file mode 100644 index 00000000..bd9c9aff Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001217970670.png differ diff --git a/docs/dws/dev/figure/en-us_image_0000001248721592.png b/docs/dws/dev/figure/en-us_image_0000001248721592.png new file mode 100644 index 00000000..7d0e227c Binary files /dev/null and b/docs/dws/dev/figure/en-us_image_0000001248721592.png differ diff --git a/docs/dws/dev/public_sys-resources/ExpandCollapse.js b/docs/dws/dev/public_sys-resources/ExpandCollapse.js new file mode 100644 index 00000000..116ddaab --- /dev/null +++ b/docs/dws/dev/public_sys-resources/ExpandCollapse.js @@ -0,0 +1 @@ +var expandClassName="dropdownexpand";var collapseClassName="dropdowncollapse";var collapseTableClassName="dropdowncollapsetable";function ExpandorCollapseNode(a){a=a.parentNode;if(a.className==expandClassName){a.className=collapseClassName}else{a.className=expandClassName}}function ExpandorCollapseTableNode(a){a=a.parentNode;if(a.className==expandClassName){a.className=collapseTableClassName}else{a.className=expandClassName}}function ExpandorCollapseAllNodes(g,h,c){var a=g.getAttribute("title");var b=g.parentNode;if(a=="collapse"){g.setAttribute("title","expand");g.className="dropdownAllButtonexpand";g.innerHTML=h}else{g.setAttribute("title","collapse");g.className="dropdownAllButtoncollapse";g.innerHTML=c}var f=b.getElementsByTagName("*");for(var d=0;d-1){ExpandForHref(a.substring(a.lastIndexOf("#")+1))}}catch(c){}}; \ No newline at end of file diff --git a/docs/dws/dev/public_sys-resources/caution_3.0-en-us.png b/docs/dws/dev/public_sys-resources/caution_3.0-en-us.png new file mode 100644 index 00000000..60f60762 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/caution_3.0-en-us.png differ diff --git a/docs/dws/dev/public_sys-resources/commonltr.css b/docs/dws/dev/public_sys-resources/commonltr.css new file mode 100644 index 00000000..c5480b0a --- /dev/null +++ b/docs/dws/dev/public_sys-resources/commonltr.css @@ -0,0 +1 @@ +body{font-size:10pt;font-family:Arial;margin:1.5em;border-top:2pt;padding-top:1em;padding-bottom:2em}.msgph{font-family:Courier New}.rowlinecopyright{color:red;margin-top:10pt}.unresolved{background-color:skyblue}.noTemplate{background-color:yellow}.base{background-color:#fff}.nested0{margin-top:1em}.p{margin-top:.6em;margin-bottom:.6em}p{margin-top:.5em;margin-bottom:.5em}.note p{margin-top:.5em;margin-bottom:.5em}.tip p{margin-top:.5em;margin-bottom:.5em}.danger p{margin-top:.5em;margin-bottom:.5em}.notice p{margin-top:.5em;margin-bottom:.5em}.warning p{margin-top:.5em;margin-bottom:.5em}.caution p{margin-top:.5em;margin-bottom:.5em}.attention 
p{margin-top:.5em;margin-bottom:.5em}table p{margin-top:.2em;margin-bottom:.2em}table .p{margin-top:.4em;margin-bottom:.2em}.figcap{font-size:10pt}img{margin-top:.3em}.figdesc{font-style:normal}.figborder{border-style:solid;padding-left:3px;border-width:2px;padding-right:3px;margin-top:1em;border-color:Silver}.figsides{border-left:2px solid;padding-left:3px;border-right:2px solid;padding-right:3px;margin-top:1em;border-color:Silver}.figtop{border-top:2px solid;margin-top:1em;border-color:Silver}.figbottom{border-bottom:2px solid;border-color:Silver}.figtopbot{border-top:2px solid;border-bottom:2px solid;margin-top:1em;border-color:Silver}.fignone{font-size:10pt;margin-top:8pt;margin-bottom:8pt}.familylinks{margin-top:1.5em;margin-bottom:1em}.ullinks{list-style-type:none}.linklist{margin-bottom:1em}.linklistwithchild{margin-left:1.5em;margin-bottom:1em}.sublinklist{margin-left:1.5em;margin-bottom:1em}.relconcepts{margin-top:.6em;margin-bottom:.6em}.reltasks{margin-top:.6em;margin-bottom:.6em}.relref{margin-top:.6em;margin-bottom:.6em}.relinfo{margin-top:.6em;margin-bottom:.6em}.breadcrumb{font-size:smaller;margin-bottom:.6em}.prereq{margin-left:20px}.parentlink{margin-top:.6em;margin-bottom:.6em}.nextlink{margin-top:.6em;margin-bottom:.6em}.previouslink{margin-top:.6em;margin-bottom:.6em}.topictitle1{margin-top:0;margin-bottom:1em;font-size:14pt;color:#007af4}.topictitle2{margin-top:1pc;margin-bottom:.45em;font-size:1.17em;color:#007af4}.topictitle3{margin-top:1pc;margin-bottom:.17em;font-size:1.17em;font-weight:bold;color:#007af4}.topictitle4{margin-top:.83em;font-size:1.17em;font-weight:bold}.topictitle5{font-size:1.17em;font-weight:bold}.topictitle6{font-size:1.17em;font-style:italic}.sectiontitle{margin-top:1em;margin-bottom:1em;color:black;font-size:10.5pt;font-weight:bold;color:#007af4;overflow:auto}.section{margin-top:1em;margin-bottom:1em}.example{margin-top:1em;margin-bottom:1em}.sectiontitle2contents:link{color:#007af4}.sectiontitle2contents:visited{color:#800080}.note{margin-top:1em;margin-bottom:1em;background-color:#ffc}.notetitle{font-weight:bold}.notelisttitle{font-weight:bold}.tip{margin-top:1em;margin-bottom:1em;background-color:#ffc}.tiptitle{font-weight:bold}.fastpath{margin-top:1em;margin-bottom:1em;background-color:#ffc}.fastpathtitle{font-weight:bold}.important{margin-top:1em;margin-bottom:1em;background-color:#ffc}.importanttitle{font-weight:bold}.remember{margin-top:1em;margin-bottom:1em;background-color:#ffc}.remembertitle{font-weight:bold}.restriction{margin-top:1em;margin-bottom:1em;background-color:#ffc}.restrictiontitle{font-weight:bold}.attention{margin-top:1em;margin-bottom:1em;background-color:#ffc}.attentiontitle{font-weight:bold}.dangertitle{font-weight:bold}.danger{margin-top:1em;margin-bottom:1em;background-color:#ffc}.noticetitle{font-weight:bold}.notice{margin-top:1em;margin-bottom:1em;background-color:#ffc}.warningtitle{font-weight:bold}.warning{margin-top:1em;margin-bottom:1em;background-color:#ffc}.cautiontitle{font-weight:bold}.caution{margin-top:1em;margin-bottom:1em;background-color:#ffc}ul.simple{list-style-type:none}li ul{margin-top:.6em}li{margin-top:.6em;margin-bottom:.6em}.note li{margin-top:.2em;margin-bottom:.2em}.tip li{margin-top:.2em;margin-bottom:.2em}.danger li{margin-top:.2em;margin-bottom:.2em}.warning li{margin-top:.2em;margin-bottom:.2em}.notice li{margin-top:.2em;margin-bottom:.2em}.caution li{margin-top:.2em;margin-bottom:.2em}.attention li{margin-top:.2em;margin-bottom:.2em}table 
li{margin-top:.2em;margin-bottom:.2em}ol{margin-top:1em;margin-bottom:1em;margin-left:2.4em;padding-left:0}ul{margin-top:1em;margin-bottom:1em;margin-left:2.0em;padding-left:0}ol ul{list-style:disc}ul ul{list-style:square}ol ul ul{list-style:square}ol ul{list-style-type:disc}table ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}table ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul ul{margin-top:.4em;margin-bottom:.4em;list-style:square}table ol ol{margin-top:.4em;margin-bottom:.4em;list-style:lower-alpha}table ol ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}.substepthirdol{list-style-type:lower-roman}.firstcol{font-weight:bold}th{background-color:#cfcfcf}table{margin-top:8pt;margin-bottom:12pt;width:100%}table caption{margin-top:8pt;text-align:left}.bold{font-weight:bold}.boldItalic{font-weight:bold;font-style:italic}.italic{font-style:italic}.underlined{text-decoration:underline}.var{font-style:italic}.shortcut{text-decoration:underline}.dlterm{font-weight:bold}dd{margin-top:.5em;margin-bottom:.5em}.dltermexpand{font-weight:bold;margin-top:1em}*[compact="yes"]>li{margin-top:0}*[compact="no"]>li{margin-top:.53em}.liexpand{margin-top:1em;margin-bottom:1em}.sliexpand{margin-top:1em;margin-bottom:1em}.dlexpand{margin-top:1em;margin-bottom:1em}.ddexpand{margin-top:1em;margin-bottom:1em}.stepexpand{margin-top:.3em;margin-bottom:.3em}.substepexpand{margin-top:.3em;margin-bottom:.3em}div.imageleft{text-align:left}div.imagecenter{text-align:center}div.imageright{text-align:right}div.imagejustify{text-align:justify}div.noblankline{text-align:center}div.noblankline img{margin-top:0}pre.screen{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;background-color:#ddd;white-space:pre}pre.codeblock{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;background-color:#ddd;white-space:pre}.hrcopyright{color:#3f4e5d;margin-top:18pt}.hwcopyright{text-align:center}.comment{margin:2px 2px 2px 2px;font-family:Arial;font-size:10pt;background-color:#bfb;color:#000}.dropdownAllButtonexpand{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}.dropdownAllButtoncollapse{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;text-decoration:underline;color:#007af4}.dropdowntitle{background-repeat:no-repeat;background-position:0 4px;padding-left:15px;cursor:pointer;text-decoration:underline;color:#007af4}.dropdownexpand .dropdowntitle{background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.dropdowncollapse .dropdowncontext{display:none}.dropdowncollapse .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}.dropdowncollapsetable{border:0}.dropdowncollapsetable .dropdowncontext{display:none}.dropdowncollapsetable .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}pre{font-size:10pt;font-weight:normal;margin-left:9;margin-top:2;margin-bottom:2}.termcolor{color:blue;cursor:pointer}#dhtmlgoodies_tooltip{background-color:#f0f0d2;border:1px solid #000;position:absolute;display:none;z-index:20000;padding:2px;font-size:.9em;-moz-border-radius:6px;font-family:"Trebuchet MS","Lucida Sans 
Unicode",Arial,sans-serif}#dhtmlgoodies_tooltipShadow{position:absolute;background-color:#555;display:none;z-index:10000;opacity:.7;filter:alpha(opacity=70);-khtml-opacity:.7;-moz-opacity:.7;-moz-border-radius:6px}.freeze{position:fixed;_position:absolute;_top:expression(eval(document.documentElement.scrollTop));left:10;top:0} \ No newline at end of file diff --git a/docs/dws/dev/public_sys-resources/commonltr_print.css b/docs/dws/dev/public_sys-resources/commonltr_print.css new file mode 100644 index 00000000..a5982314 --- /dev/null +++ b/docs/dws/dev/public_sys-resources/commonltr_print.css @@ -0,0 +1 @@ +body{font-size:12.0pt;margin:1.5em;margin-left:1.6cm}.msgph{font-family:Courier New}.rowlinecopyright{color:red;margin-top:10pt}.unresolved{background-color:skyblue}.noTemplate{background-color:yellow}.base{background-color:#fff}.nested0{margin-top:1em}.p{margin-top:1em}p{margin-top:.5em;margin-bottom:.5em}.note p{margin-top:.5em;margin-bottom:.5em}.tip p{margin-top:.5em;margin-bottom:.5em}.danger p{margin-top:.5em;margin-bottom:.5em}.warning p{margin-top:.5em;margin-bottom:.5em}.notice p{margin-top:.5em;margin-bottom:.5em}.caution p{margin-top:.5em;margin-bottom:.5em}.attention p{margin-top:.5em;margin-bottom:.5em}table p{margin-top:.2em;margin-bottom:.2em}table .p{margin-top:.4em;margin-bottom:.2em}.covertable{border:0;width:100% cellpadding:8pt;cellspacing:8pt}.cover_productname{font-size:15.0pt;font-family:"Arial"}.cover_manualtitle{font-size:24.0pt;font-weight:bold;font-family:"Arial"}.cover_manualsubtitle{font-size:18.0pt;font-weight:bold;font-family:"Arial"}.cover_heading{font-size:12.0pt;font-weight:bold;font-family:"Arial"}.cover_text{font-size:9.0pt;font-family:"Arial"}.tocheading,.heading1,.topictitle1{margin-top:40.0pt;margin-right:0;margin-bottom:20.0pt;margin-left:-1cm;text-align:left;border:0;border-bottom:solid windowtext .5pt;font-size:22.0pt;font-family:"Arial";font-weight:bold}.topictitlenumber1{font-size:72.0pt;font-family:"Book Antiqua";font-weight:bold}.topictitle2{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:-1cm;text-indent:0;font-size:18.0pt;font-family:"Arial";font-weight:bold}.topictitle3{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:0;text-indent:0;font-size:16.0pt;font-family:"Book Antiqua";font-weight:bold}.topictitle4{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:0;text-indent:0;font-size:14.0pt;font-family:"Book Antiqua";font-weight:bold}.topictitle5{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:0;text-indent:0;font-size:13.0pt;font-family:"Book Antiqua";font-weight:bold}.blocklabel,.topictitle6{margin-top:15.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;text-indent:0;font-size:13.0pt;font-family:"Book Antiqua";font-weight:bold}.sectiontitle{margin-top:15.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:-1cm;text-indent:0;font-size:13.0pt;font-family:"Arial";font-weight:bold}.tocentry1{margin-top:8.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:12.0pt;font-family:"Book Antiqua";font-weight:bold}.tocentry2{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tocentry3{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tocentry4{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New 
Roman"}.tocentry5{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tofentry1{margin-top:8.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman";font-weight:normal}.totentry1{margin-top:8.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman";font-weight:normal}.indexheading{margin-top:15.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;text-indent:0;font-size:13.0pt;font-family:"Book Antiqua";font-weight:bold}.indexentry1{margin-top:4pt;margin-right:0;margin-bottom:0;margin-left:0;line-height:12.0pt;font-size:12.0pt;font-family:"Times New Roman"}.indexentry2{margin-top:0;margin-right:0;margin-bottom:0;margin-left:24.0pt;line-height:12.0pt;font-size:12.0pt}.indexentry3{margin-top:0;margin-right:0;margin-bottom:0;margin-left:48pt;line-height:12.0pt;font-size:12.0pt}.figurenumber{font-weight:bold}.tablenumber{font-weight:bold}.familylinks{margin-top:1.5em;margin-bottom:1em}.figcap{font-size:11.0pt}.tablecap{font-size:11.0pt}.figdesc{font-style:normal}.fignone{margin-top:8.0pt}.figborder{border-style:solid;padding-left:3px;border-width:2px;padding-right:3px;margin-top:1em;border-color:Silver}.figsides{border-left:2px solid;padding-left:3px;border-right:2px solid;padding-right:3px;margin-top:1em;border-color:Silver}.figtop{border-top:2px solid;margin-top:1em;border-color:Silver}.figbottom{border-bottom:2px solid;border-color:Silver}.figtopbot{border-top:2px solid;border-bottom:2px solid;margin-top:1em;border-color:Silver}.ullinks{margin-left:0;list-style-type:none}.ulchildlink{margin-top:1em;margin-bottom:1em}.olchildlink{margin-top:1em;margin-bottom:1em;margin-left:1em}.linklist{margin-bottom:1em}.linklistwithchild{margin-left:1.5em;margin-bottom:1em}.sublinklist{margin-left:1.5em;margin-bottom:1em}.relconcepts{margin-left:1cm;margin-top:1em;margin-bottom:1em}.reltasks{margin-left:1cm;margin-top:1em;margin-bottom:1em}.relref{margin-left:1cm;margin-top:1em;margin-bottom:1em}.relinfo{margin-top:1em;margin-bottom:1em}.breadcrumb{font-size:smaller;margin-bottom:1em}.prereq{margin-left:0}.parentlink{margin-top:.6em;margin-bottom:.6em}.nextlink{margin-top:.6em;margin-bottom:.6em}.previouslink{margin-top:.6em;margin-bottom:.6em}.section{margin-top:1em;margin-bottom:1em}.example{margin-top:1em;margin-bottom:1em}table .note{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.note{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.notetitle{font-weight:bold;font-size:11.0pt}.notelisttitle{font-weight:bold}table .tip{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.tip{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.tiptitle{font-weight:bold;font-size:11.0pt}table .fastpath{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.fastpath{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.fastpathtitle{font-weight:bold;font-size:11.0pt}table .important{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman";font-style:italic}.important{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid 
.5pt}.importanttitle{font-weight:bold;font-size:11.0pt}table .remember{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman";font-style:italic}.remember{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.remembertitle{font-weight:bold;font-size:11.0pt}table .restriction{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman";font-style:italic}.restriction{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.restrictiontitle{font-weight:bold;font-size:11.0pt}table .attention{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.attention{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}.attentiontitle{font-weight:bold}table .danger{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.dangertitle{font-weight:bold}.danger{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}table .notice{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.noticetitle{font-weight:bold}.notice{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}table .warning{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.warningtitle{font-weight:bold}.warning{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}table .caution{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}table caption{margin-top:8pt;text-align:left;font-weight:bold}.tablenoborder{margin-top:8pt}.cautiontitle{font-weight:bold}.caution{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}ul.simple{list-style-type:none}li ul{margin-top:.6em}li{margin-top:.6em;margin-bottom:.6em}.note li{margin-top:.2em;margin-bottom:.2em}.tip li{margin-top:.2em;margin-bottom:.2em}.danger li{margin-top:.2em;margin-bottom:.2em}.warning li{margin-top:.2em;margin-bottom:.2em}.notice li{margin-top:.2em;margin-bottom:.2em}.caution li{margin-top:.2em;margin-bottom:.2em}.attention li{margin-top:.2em;margin-bottom:.2em}table li{margin-top:.2em;margin-bottom:.2em}.firstcol{font-weight:bold}th{background-color:#cfcfcf}.bold{font-weight:bold}.boldItalic{font-weight:bold;font-style:italic}.italic{font-style:italic}.underlined{text-decoration:underline}.var{font-style:italic}.shortcut{text-decoration:underline}.dlterm{font-weight:bold}dd{margin-top:.5em;margin-bottom:.5em}.dltermexpand{font-weight:bold;margin-top:1em}*[compact="yes"]>li{margin-top:0}*[compact="no"]>li{margin-top:.53em}.liexpand{margin-top:1em;margin-bottom:1em}.sliexpand{margin-top:1em;margin-bottom:1em}.dlexpand{margin-top:1em;margin-bottom:1em}.ddexpand{margin-top:1em;margin-bottom:1em}.stepexpand{margin-top:1em;margin-bottom:1em}.substepexpand{margin-top:1em;margin-bottom:1em}table{margin-top:8pt;margin-bottom:10.0pt;width:100%}thead{font-size:10.0pt;font-family:"Book Antiqua";font-weight:bold}tbody{font-size:11.0pt}ol{margin-top:1em;margin-bottom:1em;margin-left:1.7em;-webkit-padding-start:0}ul{margin-top:1em;margin-bottom:1em;margin-left:1.2em;-webkit-padding-start:0}ol ul{list-style:disc}ul ul{list-style:square}ol ol{list-style-type:lower-alpha}table ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}table ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table 
ul ul{margin-top:.4em;margin-bottom:.4em;list-style:square}table ol ol{margin-top:.4em;margin-bottom:.4em;list-style:lower-alpha}table ol ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}.substepthirdol{list-style-type:lower-roman}div.imageleft{text-align:left}div.imagecenter{text-align:center}div.imageright{text-align:right}div.imagejustify{text-align:justify}div.noblankline{text-align:center}div.noblankline img{margin-top:0}pre{font-size:10.0pt;border-width:2px;padding:2px;margin-top:5px;margin-bottom:5px;white-space:pre-wrap;white-space:-moz-pre-wrap;white-space:-pre-wrap;white-space:-o-pre-wrap;word-wrap:break-word}pre.screen{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;white-space:pre}pre.codeblock{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;white-space:pre}.dropdownAllButtonexpand{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}.dropdownAllButtoncollapse{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;text-decoration:underline;color:#007af4}.dropdowntitle{background-repeat:no-repeat;background-position:0 4px;padding-left:15px;cursor:pointer;text-decoration:underline;color:#007af4}.dropdownexpand .dropdowntitle{background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.dropdowncollapse .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.dropdowncollapsetable .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.prefacesectiontitle1{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:-1cm;text-indent:0;font-size:18.0pt;font-family:"Book Antiqua";font-weight:bold;overflow:auto}.termcolor{color:blue;cursor:pointer}#dhtmlgoodies_tooltip{background-color:#f0f0d2;border:1px solid #000;position:absolute;display:none;z-index:20000;padding:2px;font-size:.9em;-moz-border-radius:6px;font-family:"Trebuchet MS","Lucida Sans Unicode",Arial,sans-serif}#dhtmlgoodies_tooltipShadow{position:absolute;background-color:#555;display:none;z-index:10000;opacity:.7;filter:alpha(opacity=70);-khtml-opacity:.7;-moz-opacity:.7;-moz-border-radius:6px}.freeze{position:fixed;_position:absolute;_top:expression(eval(document.documentElement.scrollTop));left:10;top:0}.hrcopyright{color:#3f4e5d;margin-top:18pt;margin-left:-1cm}.hwcopyright{text-align:center;font-family:Arial;margin-left:-1cm} \ No newline at end of file diff --git a/docs/dws/dev/public_sys-resources/commonrtl.css b/docs/dws/dev/public_sys-resources/commonrtl.css new file mode 100644 index 00000000..f261da75 --- /dev/null +++ b/docs/dws/dev/public_sys-resources/commonrtl.css @@ -0,0 +1,2 @@ +/*! Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved. */.msgph{font-family:Courier New}.unresolved{background-color:#87ceeb}.noTemplate{background-color:#ff0}.base{background-color:#fff}/*! Add space for top level topics */.nested0,.p{margin-top:1em}/*! div with class=p is used for paragraphs that contain blocks, to keep the XHTML valid *//*! Default of italics to set apart figure captions */.figcap,.italic,.var{font-style:italic}.figdesc{font-style:normal}/*! 
Use @frame to create frames on figures */.figborder{padding-left:3px;padding-right:3px;margin-top:1em;border:2px solid Silver}.figsides{margin-top:1em;padding-left:3px;padding-right:3px;border-left:2px solid Silver;border-right:2px solid Silver}.figtop{border-top:2px solid Silver;margin-top:1em}.figbottom{border-bottom:2px solid Silver}.figtopbot{border-top:2px solid Silver;border-bottom:2px solid Silver;margin-top:1em}/*! Most link groups are created with
<div>. Ensure they have space before and after. */.ullinks,ul.simple{list-style-type:none}.attention,.danger,.ddexpand,.dlexpand,.example,.fastpath,.important,.liexpand,.linklist,.note,.notice,.olchildlink,.relconcepts,.relinfo,.relref,.reltasks,.remember,.restriction,.section,.sliexpand,.stepexpand,.substepexpand,.tip,.ulchildlink,.warning{margin-top:1em;margin-bottom:1em}.linklistwithchild,.sublinklist{margin-top:1em;margin-right:1.5em;margin-bottom:1em}.breadcrumb{font-size:smaller;margin-bottom:1em}.prereq{margin-right:20px}/*! Set heading sizes, getting smaller for deeper nesting */.topictitle1{font-size:1.34em;margin-top:0;margin-bottom:.1em}.topictitle2,.topictitle3,.topictitle4,.topictitle5,.topictitle6,.sectiontitle{font-size:1.17em}.topictitle2{margin-top:1pc;margin-bottom:.45em}.topictitle3{margin-top:1pc;margin-bottom:.17em;font-weight:700}.topictitle4{margin-top:.83em;font-weight:700}.topictitle5{font-weight:700}.topictitle6{font-style:italic}.sectiontitle{margin-top:1em;margin-bottom:0;color:#000;font-weight:700}/*! All note formats have the same default presentation */.attentiontitle,.bold,.cautiontitle,.dangertitle,.dlterm,.fastpathtitle,.firstcol,.importanttitle,.notelisttitle,.notetitle,.noticetitle,.parmname,.remembertitle,.restrictiontitle,.tiptitle,.uicontrol,.warningtitle{font-weight:700}.caution{font-weight:700;margin-bottom:1em}/*! Simple lists do not get a bullet *//*! Used on the first column of a table, when rowheader="firstcol" is used *//*! Various basic phrase styles */.boldItalic{font-weight:700;font-style:italic}.shortcut,.underlined{text-decoration:underline}/*! 2008-10-27 keyword follows the style of its surrounding context
+*//*! Default of bold for definition list terms *//*! Use CSS to expand lists with @compact="no" */.dltermexpand{font-weight:700;margin-top:1em}[compact="yes"]>li{margin-top:0}[compact="no"]>li{margin-top:.53em}/*!
Align images based on @align on topic/image */div.imageleft,.text-align-left{text-align:left}div.imagecenter,.text-align-center{text-align:center}div.imageright,.text-align-right{text-align:right}div.imagejustify,.text-align-justify{text-align:justify}.cellrowborder{border-right:0;border-top:0;border-left:1px solid;border-bottom:1px solid}.row-nocellborder{border-left:hidden;border-right:0;border-top:0;border-bottom:1px solid}.cell-norowborder{border-top:0;border-bottom:hidden;border-right:0;border-left:1px solid}.nocellnorowborder{border:0;border-left:hidden;border-bottom:hidden}pre.codeblock,pre.screen{padding:5px;border:outset;background-color:#ccc;margin-top:2px;margin-bottom:2px;white-space:pre} \ No newline at end of file diff --git a/docs/dws/dev/public_sys-resources/danger_3.0-en-us.png b/docs/dws/dev/public_sys-resources/danger_3.0-en-us.png new file mode 100644 index 00000000..47a9c723 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/danger_3.0-en-us.png differ diff --git a/docs/dws/dev/public_sys-resources/delta.gif b/docs/dws/dev/public_sys-resources/delta.gif new file mode 100644 index 00000000..0d1b1f67 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/delta.gif differ diff --git a/docs/dws/dev/public_sys-resources/deltaend.gif b/docs/dws/dev/public_sys-resources/deltaend.gif new file mode 100644 index 00000000..cc7da0fc Binary files /dev/null and b/docs/dws/dev/public_sys-resources/deltaend.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-arrowdn.gif b/docs/dws/dev/public_sys-resources/icon-arrowdn.gif new file mode 100644 index 00000000..37942803 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-arrowdn.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-arrowrt.gif b/docs/dws/dev/public_sys-resources/icon-arrowrt.gif new file mode 100644 index 00000000..6aaaa11c Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-arrowrt.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-caution.gif b/docs/dws/dev/public_sys-resources/icon-caution.gif new file mode 100644 index 00000000..079c79b2 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-caution.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-danger.gif b/docs/dws/dev/public_sys-resources/icon-danger.gif new file mode 100644 index 00000000..079c79b2 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-danger.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-huawei.gif b/docs/dws/dev/public_sys-resources/icon-huawei.gif new file mode 100644 index 00000000..a31d60f8 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-huawei.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-note.gif b/docs/dws/dev/public_sys-resources/icon-note.gif new file mode 100644 index 00000000..31be2b03 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-note.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-notice.gif b/docs/dws/dev/public_sys-resources/icon-notice.gif new file mode 100644 index 00000000..40907065 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-notice.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-tip.gif b/docs/dws/dev/public_sys-resources/icon-tip.gif new file mode 100644 index 00000000..c47bae05 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-tip.gif differ diff --git a/docs/dws/dev/public_sys-resources/icon-warning.gif b/docs/dws/dev/public_sys-resources/icon-warning.gif 
new file mode 100644 index 00000000..079c79b2 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/icon-warning.gif differ diff --git a/docs/dws/dev/public_sys-resources/note_3.0-en-us.png b/docs/dws/dev/public_sys-resources/note_3.0-en-us.png new file mode 100644 index 00000000..57a0e1f5 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/note_3.0-en-us.png differ diff --git a/docs/dws/dev/public_sys-resources/notice_3.0-en-us.png b/docs/dws/dev/public_sys-resources/notice_3.0-en-us.png new file mode 100644 index 00000000..fa4b6499 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/notice_3.0-en-us.png differ diff --git a/docs/dws/dev/public_sys-resources/popup.js b/docs/dws/dev/public_sys-resources/popup.js new file mode 100644 index 00000000..a550862e --- /dev/null +++ b/docs/dws/dev/public_sys-resources/popup.js @@ -0,0 +1 @@ +var i=0;var dhtmlgoodies_tooltipFlag=false;var dhtmlgoodies_tooltip="";var dhtmlgoodies_tooltipShadow="";var dhtmlgoodies_shadowSize=3;var dhtmlgoodies_tooltipMaxWidth=500;var dhtmlgoodies_tooltipMinWidth=100;var dhtmlgoodies_iframe=false;var timeId;var clickFlag=false;var tooltip_is_msie=(navigator.userAgent.indexOf("MSIE")>=0&&navigator.userAgent.indexOf("opera")==-1&&document.all)?true:false;var xPos;var yPos;window.document.onmousemove=function(a){a=a||window.event;if(a.pageX){xPos=a.pageX;yPos=a.pageY}else{if(document.body!==null&&typeof document.body!=="undefined"){xPos=a.clientX+document.body.scrollLeft-document.body.clientLeft;yPos=a.clientY+document.body.scrollTop-document.body.clientTop}}};function showTooltip(e){if(document.body===null||typeof document.body==="undefined"){return}if(i==0){return}clickFlag=true;var f=Json.parse("jsonData."+e);var a=Math.max(document.body.clientWidth,document.documentElement.clientWidth)-20;if(!dhtmlgoodies_tooltipFlag){dhtmlgoodies_tooltip=document.createElement("DIV");dhtmlgoodies_tooltip.id="dhtmlgoodies_tooltip";dhtmlgoodies_tooltipShadow=document.createElement("DIV");dhtmlgoodies_tooltipShadow.id="dhtmlgoodies_tooltipShadow";document.body.appendChild(dhtmlgoodies_tooltip);document.body.appendChild(dhtmlgoodies_tooltipShadow);if(tooltip_is_msie){dhtmlgoodies_iframe=document.createElement("IFRAME");dhtmlgoodies_iframe.frameborder="5";dhtmlgoodies_iframe.style.backgroundColor="#FFFFFF";dhtmlgoodies_iframe.src="#";dhtmlgoodies_iframe.style.zIndex=100;dhtmlgoodies_iframe.style.position="absolute";document.body.appendChild(dhtmlgoodies_iframe)}}dhtmlgoodies_tooltip.style.display="block";dhtmlgoodies_tooltipShadow.style.display="block";if(tooltip_is_msie){dhtmlgoodies_iframe.style.display="block"}var b=Math.max(document.body.scrollTop,document.documentElement.scrollTop);if(navigator.userAgent.toLowerCase().indexOf("safari")>=0){b=0}var c=xPos+10;dhtmlgoodies_tooltip.style.width=null;dhtmlgoodies_tooltip.innerHTML=f;dhtmlgoodies_tooltip.style.left=c+"px";if(tooltip_is_msie){dhtmlgoodies_tooltip.style.top=yPos+20+b+"px"}else{dhtmlgoodies_tooltip.style.top=yPos+20+"px"}dhtmlgoodies_tooltipShadow.style.left=c+dhtmlgoodies_shadowSize+"px";if(tooltip_is_msie){dhtmlgoodies_tooltipShadow.style.top=yPos+20+b+dhtmlgoodies_shadowSize+"px"}else{dhtmlgoodies_tooltipShadow.style.top=yPos+20+dhtmlgoodies_shadowSize+"px"}if(dhtmlgoodies_tooltip.offsetWidth>dhtmlgoodies_tooltipMaxWidth){dhtmlgoodies_tooltip.style.width=dhtmlgoodies_tooltipMaxWidth+"px"}var 
d=dhtmlgoodies_tooltip.offsetWidth;if(d<dhtmlgoodies_tooltipMinWidth)d=dhtmlgoodies_tooltipMinWidth;if((c+d)>a){dhtmlgoodies_tooltip.style.left=(dhtmlgoodies_tooltipShadow.style.left.replace("px","")-((c+d)-a))+"px";dhtmlgoodies_tooltipShadow.style.left=(dhtmlgoodies_tooltipShadow.style.left.replace("px","")-((c+d)-a)+dhtmlgoodies_shadowSize)+"px"}if(tooltip_is_msie){dhtmlgoodies_iframe.style.left=dhtmlgoodies_tooltip.style.left;dhtmlgoodies_iframe.style.top=dhtmlgoodies_tooltip.style.top;dhtmlgoodies_iframe.style.width=dhtmlgoodies_tooltip.offsetWidth+"px";dhtmlgoodies_iframe.style.height=dhtmlgoodies_tooltip.offsetHeight+"px"}}function hideTooltip(){i=0;clickFlag=false;if((dhtmlgoodies_tooltip!==null&&typeof dhtmlgoodies_tooltip!=="undefined")&&(dhtmlgoodies_tooltip.style!==null&&typeof dhtmlgoodies_tooltip.style!=="undefined")){dhtmlgoodies_tooltip.style.display="none";dhtmlgoodies_tooltipShadow.style.display="none";if(tooltip_is_msie){dhtmlgoodies_iframe.style.display="none"}}if(timeId!==null&&typeof timeId!=="undefined"&&timeId!=""){clearTimeout(timeId)}}function showText(a){i=1;timeId=setTimeout(function(){showTooltip(a)},500)}function showText2(a){if(!clickFlag){i=1;showTooltip(a);i=0;if(timeId!==null&&typeof timeId!=="undefined"&&timeId!=""){clearTimeout(timeId)}}}function anchorScroll(b){var d=document.getElementsByName(b);if(d!=null&&d.length>0){var c=d[0];var a=c.getBoundingClientRect().left+(document.body.scrollLeft||(document.documentElement&&document.documentElement.scrollLeft));var e=c.getBoundingClientRect().top+(document.body.scrollTop||(document.documentElement&&document.documentElement.scrollTop));window.scrollTo(a,e-30)}}; \ No newline at end of file diff --git a/docs/dws/dev/public_sys-resources/pygments.css b/docs/dws/dev/public_sys-resources/pygments.css new file mode 100644 index 00000000..53c53657 --- /dev/null +++ b/docs/dws/dev/public_sys-resources/pygments.css @@ -0,0 +1 @@ +.hll{background-color:#ffc}.c{color:#069;font-style:italic}.err{color:black}.k{color:#069;font-weight:bold}.ch{color:#069;font-style:italic}.cm{color:#008200}.cp{color:black}.cpf{color:#069;font-style:italic}.c1{color:#008200}.cs{color:#069;font-weight:bold}.gd{color:#000;background-color:#fdd}.ge{font-style:italic}.gr{color:#a00}.gh{color:#999}.gi{color:#000;background-color:#dfd}.go{color:#888}.gp{color:#555}.gs{font-weight:bold}.gu{color:#aaa}.gt{color:#a00}.kc{color:#000080;font-weight:bold}.kd{color:#069;font-weight:bold}.kn{color:#000080;font-weight:bold}.kp{color:#069;font-weight:bold}.kr{color:#000080;font-weight:bold}.kt{color:#069;font-weight:bold}.m{color:blue}.s{color:blue}.na{color:gray}.nt{color:#069;font-weight:bold}.ow{font-weight:bold}.w{color:#bbb}.mb{color:blue}.mf{color:blue}.mh{color:blue}.mi{color:black}.mo{color:blue}.sa{color:blue}.sb{color:blue}.sc{color:#800080}.dl{color:blue}.sd{color:blue}.s2{color:blue}.se{color:blue}.sh{color:blue}.si{color:blue}.sx{color:blue}.sr{color:blue}.s1{color:blue}.ss{color:blue}.il{color:blue}.linenos{width:15px}.linenos{border-right:3px solid #6ce26c;color:#afafaf;text-align:right;padding:0 .5em 0 1em}.codecoloring{line-height:1.1em}.code,.highlight pre{font-family:"Consolas","Bitstream Vera Sans Mono","Courier New",Courier,monospace}.code,.highlight pre span{font-size:1em}.nb{color:#ff1493}.nv{color:#a70}.nx,.o,.n,.nf{color:black}.vc,.vi{color:#a70}.code,.highlight pre{padding:0 1em}.highlighttable pre{box-sizing:border-box;margin:0} \ No newline at end of file diff --git a/docs/dws/dev/public_sys-resources/warning_3.0-en-us.png b/docs/dws/dev/public_sys-resources/warning_3.0-en-us.png new file mode 
100644 index 00000000..def5c356 Binary files /dev/null and b/docs/dws/dev/public_sys-resources/warning_3.0-en-us.png differ