diff --git a/docs/mrs/component-operation-guide-lts/ALL_META.TXT.json b/docs/mrs/component-operation-guide-lts/ALL_META.TXT.json index 5e5a9d80..359d8513 100644 --- a/docs/mrs/component-operation-guide-lts/ALL_META.TXT.json +++ b/docs/mrs/component-operation-guide-lts/ALL_META.TXT.json @@ -1,9081 +1,18318 @@ [ + { + "dockw":"Component Operation Guide (LTS)" + }, { "uri":"mrs_01_1400.html", + "node_id":"mrs_01_1400.xml", "product_code":"mrs", "code":"1", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using CarbonData", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using CarbonData", "githuburl":"" }, { "uri":"mrs_01_1401.html", + "node_id":"mrs_01_1401.xml", "product_code":"mrs", "code":"2", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Overview", - "title":"Overview", + "kw":"Spark CarbonData Overview", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Spark CarbonData Overview", "githuburl":"" }, { "uri":"mrs_01_1402.html", + "node_id":"mrs_01_1402.xml", "product_code":"mrs", "code":"3", "des":"CarbonData is a new Apache Hadoop native data-store format. CarbonData allows faster interactive queries over PetaBytes of data using advanced columnar storage, index, co", "doc_type":"usermanual", - "kw":"CarbonData Overview,Overview,Component Operation Guide (LTS)", + "kw":"CarbonData Overview,Spark CarbonData Overview,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Overview", "githuburl":"" }, { "uri":"mrs_01_1403.html", + "node_id":"mrs_01_1403.xml", "product_code":"mrs", "code":"4", "des":"The memory required for data loading depends on the following factors:Number of columnsColumn valuesConcurrency (configured using carbon.number.of.cores.while.loading)Sor", "doc_type":"usermanual", - "kw":"Main Specifications of CarbonData,Overview,Component Operation Guide (LTS)", + "kw":"Main Specifications of CarbonData,Spark CarbonData Overview,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Main Specifications of CarbonData", "githuburl":"" }, { "uri":"mrs_01_1404.html", + "node_id":"mrs_01_1404.xml", "product_code":"mrs", "code":"5", "des":"This section provides the details of all the configurations required for the CarbonData System.Configure the following parameters in the spark-defaults.conf file on the S", "doc_type":"usermanual", "kw":"limit,limit,Configuration Reference,Using CarbonData,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuration Reference", "githuburl":"" }, { "uri":"mrs_01_1405.html", + "node_id":"mrs_01_1405.xml", "product_code":"mrs", "code":"6", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData Operation Guide", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Operation Guide", "githuburl":"" }, { "uri":"mrs_01_1406.html", + "node_id":"mrs_01_1406.xml", "product_code":"mrs", "code":"7", "des":"This section describes how to create CarbonData tables, load data, and query data. This quick start provides operations based on the Spark Beeline client. If you want to ", "doc_type":"usermanual", "kw":"CarbonData Quick Start,CarbonData Operation Guide,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Quick Start", "githuburl":"" }, { "uri":"mrs_01_1407.html", + "node_id":"mrs_01_1407.xml", "product_code":"mrs", "code":"8", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData Table Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Table Management", "githuburl":"" }, { "uri":"mrs_01_1408.html", + "node_id":"mrs_01_1408.xml", "product_code":"mrs", "code":"9", "des":"In CarbonData, data is stored in entities called tables. CarbonData tables are similar to RDBMS tables. RDBMS data is stored in a table consisting of rows and columns. Ca", "doc_type":"usermanual", "kw":"About CarbonData Table,CarbonData Table Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"About CarbonData Table", "githuburl":"" }, { "uri":"mrs_01_1409.html", + "node_id":"mrs_01_1409.xml", "product_code":"mrs", "code":"10", "des":"A CarbonData table must be created to load and query data. You can run the Create Table command to create a table. This command is used to create a table using custom col", "doc_type":"usermanual", "kw":"Creating a CarbonData Table,CarbonData Table Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a CarbonData Table", "githuburl":"" }, { "uri":"mrs_01_1410.html", + "node_id":"mrs_01_1410.xml", "product_code":"mrs", "code":"11", "des":"You can run the DROP TABLE command to delete a table. 
After a CarbonData table is deleted, its metadata and loaded data are deleted together.Run the following command to ", "doc_type":"usermanual", "kw":"Deleting a CarbonData Table,CarbonData Table Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Deleting a CarbonData Table", "githuburl":"" }, { "uri":"mrs_01_1411.html", + "node_id":"mrs_01_1411.xml", "product_code":"mrs", "code":"12", "des":"When the SET command is executed, the new properties overwrite the existing ones.SORT SCOPEThe following is an example of the SET SORT SCOPE command:ALTER TABLE tablename", "doc_type":"usermanual", "kw":"Modify the CarbonData Table,CarbonData Table Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Modify the CarbonData Table", "githuburl":"" }, { "uri":"mrs_01_1412.html", + "node_id":"mrs_01_1412.xml", "product_code":"mrs", "code":"13", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData Table Data Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Table Data Management", "githuburl":"" }, { "uri":"mrs_01_1413.html", + "node_id":"mrs_01_1413.xml", "product_code":"mrs", "code":"14", "des":"After a CarbonData table is created, you can run the LOAD DATA command to load data to the table for query. Once data loading is triggered, data is encoded in CarbonData ", "doc_type":"usermanual", "kw":"Loading Data,CarbonData Table Data Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Loading Data", "githuburl":"" }, { "uri":"mrs_01_1414.html", + "node_id":"mrs_01_1414.xml", "product_code":"mrs", "code":"15", "des":"If you want to modify and reload the data because you have loaded wrong data into a table, or there are too many bad records, you can delete specific segments by segment ", "doc_type":"usermanual", "kw":"Deleting Segments,CarbonData Table Data Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Deleting Segments", "githuburl":"" }, { "uri":"mrs_01_1415.html", + "node_id":"mrs_01_1415.xml", "product_code":"mrs", "code":"16", "des":"Frequent data access results in a large number of fragmented CarbonData files in the storage directory. In each data loading, data is sorted and indexing is performed. Th", "doc_type":"usermanual", "kw":"Combining Segments,CarbonData Table Data Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Combining Segments", "githuburl":"" }, { "uri":"mrs_01_1416.html", + "node_id":"mrs_01_1416.xml", "product_code":"mrs", "code":"17", "des":"If you want to rapidly migrate CarbonData data from a cluster to another one, you can use the CarbonData backup and restoration commands. 
This method does not require dat", "doc_type":"usermanual", "kw":"CarbonData Data Migration,CarbonData Operation Guide,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Data Migration", "githuburl":"" }, { "uri":"mrs_01_2301.html", + "node_id":"mrs_01_2301.xml", "product_code":"mrs", "code":"18", "des":"This migration guides you to migrate the CarbonData table data of Spark 1.5 to that of Spark2x.Before performing this operation, you need to stop the data import service ", "doc_type":"usermanual", "kw":"Migrating Data on CarbonData from Spark1.5 to Spark2x,CarbonData Operation Guide,Component Operation", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Migrating Data on CarbonData from Spark1.5 to Spark2x", "githuburl":"" }, { "uri":"mrs_01_1417.html", + "node_id":"mrs_01_1417.xml", "product_code":"mrs", "code":"19", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1418.html", + "node_id":"mrs_01_1418.xml", "product_code":"mrs", "code":"20", "des":"There are various parameters that can be tuned to improve the query performance in CarbonData. Most of the parameters focus on increasing the parallelism in processing an", "doc_type":"usermanual", "kw":"Tuning Guidelines,CarbonData Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Tuning Guidelines", "githuburl":"" }, { "uri":"mrs_01_1419.html", + "node_id":"mrs_01_1419.xml", "product_code":"mrs", "code":"21", "des":"This section provides suggestions based on more than 50 test cases to help you create CarbonData tables with higher query performance.If the to-be-created table contains ", "doc_type":"usermanual", "kw":"Suggestions for Creating CarbonData Tables,CarbonData Performance Tuning,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Suggestions for Creating CarbonData Tables", "githuburl":"" }, { "uri":"mrs_01_1421.html", + "node_id":"mrs_01_1421.xml", "product_code":"mrs", "code":"22", "des":"This section describes the configurations that can improve CarbonData performance.Table 1 and Table 2 describe the configurations about query of CarbonData.Table 3, Table", "doc_type":"usermanual", "kw":"Configurations for Performance Tuning,CarbonData Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configurations for Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1422.html", + "node_id":"mrs_01_1422.xml", "product_code":"mrs", "code":"23", "des":"The following table provides details about Hive ACL permissions required for performing operations on CarbonData tables.Parameters listed in Table 5 or Table 6 have been ", "doc_type":"usermanual", "kw":"CarbonData Access Control,Using CarbonData,Component Operation Guide (LTS)", + 
"search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Access Control", "githuburl":"" }, { "uri":"mrs_01_1423.html", + "node_id":"mrs_01_1423.xml", "product_code":"mrs", "code":"24", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData Syntax Reference", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Syntax Reference", "githuburl":"" }, { "uri":"mrs_01_1424.html", + "node_id":"mrs_01_1424.xml", "product_code":"mrs", "code":"25", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"DDL", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DDL", "githuburl":"" }, { "uri":"mrs_01_1425.html", + "node_id":"mrs_01_1425.xml", "product_code":"mrs", "code":"26", "des":"This command is used to create a CarbonData table by specifying the list of fields along with the table properties.CREATE TABLE [IF NOT EXISTS] [db_name.]table_name[(col_", "doc_type":"usermanual", "kw":"CREATE TABLE,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CREATE TABLE", "githuburl":"" }, { "uri":"mrs_01_1426.html", + "node_id":"mrs_01_1426.xml", "product_code":"mrs", "code":"27", "des":"This command is used to create a CarbonData table by specifying the list of fields along with the table properties.CREATE TABLE[IF NOT EXISTS] [db_name.]table_name STORED", "doc_type":"usermanual", "kw":"CREATE TABLE As SELECT,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CREATE TABLE As SELECT", "githuburl":"" }, { "uri":"mrs_01_1427.html", + "node_id":"mrs_01_1427.xml", "product_code":"mrs", "code":"28", "des":"This command is used to delete an existing table.DROP TABLE [IF EXISTS] [db_name.]table_name;In this command, IF EXISTS and db_name are optional.DROP TABLE IF EXISTS prod", "doc_type":"usermanual", "kw":"DROP TABLE,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DROP TABLE", "githuburl":"" }, { "uri":"mrs_01_1428.html", + "node_id":"mrs_01_1428.xml", "product_code":"mrs", "code":"29", "des":"SHOW TABLES command is used to list all tables in the current or a specific database.SHOW TABLES [IN db_name];IN db_Name is optional.SHOW TABLES IN ProductDatabase;All ta", "doc_type":"usermanual", "kw":"SHOW TABLES,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SHOW TABLES", "githuburl":"" }, { "uri":"mrs_01_1429.html", + "node_id":"mrs_01_1429.xml", "product_code":"mrs", "code":"30", "des":"The ALTER TABLE COMPACTION command is used to merge a specified number of segments into a single segment. 
This improves the query performance of a table.ALTER TABLE[db_na", "doc_type":"usermanual", "kw":"ALTER TABLE COMPACTION,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ALTER TABLE COMPACTION", "githuburl":"" }, { "uri":"mrs_01_1430.html", + "node_id":"mrs_01_1430.xml", "product_code":"mrs", "code":"31", "des":"This command is used to rename an existing table.ALTER TABLE [db_name.]table_name RENAME TO new_table_name;Parallel queries (using table names to obtain paths for reading", "doc_type":"usermanual", "kw":"TABLE RENAME,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"TABLE RENAME", "githuburl":"" }, { "uri":"mrs_01_1431.html", + "node_id":"mrs_01_1431.xml", "product_code":"mrs", "code":"32", "des":"This command is used to add a column to an existing table.ALTER TABLE [db_name.]table_name ADD COLUMNS (col_name data_type,...) TBLPROPERTIES(''COLUMNPROPERTIES.columnNam", "doc_type":"usermanual", "kw":"ADD COLUMNS,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ADD COLUMNS", "githuburl":"" }, { "uri":"mrs_01_1432.html", + "node_id":"mrs_01_1432.xml", "product_code":"mrs", "code":"33", "des":"This command is used to delete one or more columns from a table.ALTER TABLE [db_name.]table_name DROP COLUMNS (col_name, ...);After a column is deleted, at least one key ", "doc_type":"usermanual", "kw":"DROP COLUMNS,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DROP COLUMNS", "githuburl":"" }, { "uri":"mrs_01_1433.html", + "node_id":"mrs_01_1433.xml", "product_code":"mrs", "code":"34", "des":"This command is used to change the data type from INT to BIGINT or decimal precision from lower to higher.ALTER TABLE [db_name.]table_name CHANGE col_name col_name change", "doc_type":"usermanual", "kw":"CHANGE DATA TYPE,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CHANGE DATA TYPE", "githuburl":"" }, { "uri":"mrs_01_1434.html", + "node_id":"mrs_01_1434.xml", "product_code":"mrs", "code":"35", "des":"This command is used to register Carbon table to Hive meta store catalogue from exisiting Carbon table data.REFRESH TABLE db_name.table_name;The new database name and the", "doc_type":"usermanual", "kw":"REFRESH TABLE,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"REFRESH TABLE", "githuburl":"" }, { "uri":"mrs_01_1435.html", + "node_id":"mrs_01_1435.xml", "product_code":"mrs", "code":"36", "des":"This command is used to register an index table with the primary table.REGISTER INDEX TABLE indextable_name ON db_name.maintable_name;Before running this command, run REF", "doc_type":"usermanual", "kw":"REGISTER INDEX TABLE,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"REGISTER INDEX TABLE", "githuburl":"" }, { "uri":"mrs_01_1436.html", + "node_id":"mrs_01_1436.xml", "product_code":"mrs", "code":"37", "des":"This command is used to merge all segments for data files in the secondary index table.REFRESH INDEX indextable_name ON TABLE 
maintable_nameThis command is used to merge ", "doc_type":"usermanual", "kw":"REFRESH INDEX,DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"REFRESH INDEX", "githuburl":"" }, { "uri":"mrs_01_1437.html", + "node_id":"mrs_01_1437.xml", "product_code":"mrs", "code":"38", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"DML", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DML", "githuburl":"" }, { "uri":"mrs_01_1438.html", + "node_id":"mrs_01_1438.xml", "product_code":"mrs", "code":"39", "des":"This command is used to load user data of a particular type, so that CarbonData can provide good query performance.Only the raw data on HDFS can be loaded.LOAD DATA INPAT", "doc_type":"usermanual", "kw":"LOAD DATA,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"LOAD DATA", "githuburl":"" }, { "uri":"mrs_01_1439.html", + "node_id":"mrs_01_1439.xml", "product_code":"mrs", "code":"40", "des":"This command is used to update the CarbonData table based on the column expression and optional filtering conditions.Syntax 1:UPDATE SET (column_name1, col", "doc_type":"usermanual", "kw":"UPDATE CARBON TABLE,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"UPDATE CARBON TABLE", "githuburl":"" }, { "uri":"mrs_01_1440.html", + "node_id":"mrs_01_1440.xml", "product_code":"mrs", "code":"41", "des":"This command is used to delete records from a CarbonData table.DELETE FROM CARBON_TABLE [WHERE expression];If a segment is deleted, all secondary indexes associated with ", "doc_type":"usermanual", "kw":"DELETE RECORDS from CARBON TABLE,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DELETE RECORDS from CARBON TABLE", "githuburl":"" }, { "uri":"mrs_01_1441.html", + "node_id":"mrs_01_1441.xml", "product_code":"mrs", "code":"42", "des":"This command is used to add the output of the SELECT command to a Carbon table.INSERT INTO [CARBON TABLE] [select query];A table has been created.You must belong to the d", "doc_type":"usermanual", "kw":"INSERT INTO CARBON TABLE,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"INSERT INTO CARBON TABLE", "githuburl":"" }, { "uri":"mrs_01_1442.html", + "node_id":"mrs_01_1442.xml", "product_code":"mrs", "code":"43", "des":"This command is used to delete segments by the ID.DELETE FROM TABLE db_name.table_name WHERE SEGMENT.ID IN (segment_id1,segment_id2);Segments cannot be deleted from the s", "doc_type":"usermanual", "kw":"DELETE SEGMENT by ID,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DELETE SEGMENT by ID", "githuburl":"" }, { "uri":"mrs_01_1443.html", + "node_id":"mrs_01_1443.xml", "product_code":"mrs", "code":"44", "des":"This command is used to delete segments by loading date. 
Segments created before a specific date will be deleted.DELETE FROM TABLE db_name.table_name WHERE SEGMENT.STARTT", "doc_type":"usermanual", "kw":"DELETE SEGMENT by DATE,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DELETE SEGMENT by DATE", "githuburl":"" }, { "uri":"mrs_01_1444.html", + "node_id":"mrs_01_1444.xml", "product_code":"mrs", "code":"45", "des":"This command is used to list the segments of a CarbonData table.SHOW SEGMENTS FOR TABLE [db_name.]table_name LIMIT number_of_loads;NoneSHOW SEGMENTS FOR TABLE CarbonDatab", "doc_type":"usermanual", "kw":"SHOW SEGMENTS,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SHOW SEGMENTS", "githuburl":"" }, { "uri":"mrs_01_1445.html", + "node_id":"mrs_01_1445.xml", "product_code":"mrs", "code":"46", "des":"This command is used to create secondary indexes in the CarbonData tables.CREATE INDEX index_nameON TABLE [db_name.]table_name (col_name1, col_name2)AS 'carbondata'PROPER", "doc_type":"usermanual", "kw":"CREATE SECONDARY INDEX,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CREATE SECONDARY INDEX", "githuburl":"" }, { "uri":"mrs_01_1446.html", + "node_id":"mrs_01_1446.xml", "product_code":"mrs", "code":"47", "des":"This command is used to list all secondary index tables in the CarbonData table.SHOW INDEXES ON db_name.table_name;db_name is optional.SHOW INDEXES ON productsales.produc", "doc_type":"usermanual", "kw":"SHOW SECONDARY INDEXES,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SHOW SECONDARY INDEXES", "githuburl":"" }, { "uri":"mrs_01_1447.html", + "node_id":"mrs_01_1447.xml", "product_code":"mrs", "code":"48", "des":"This command is used to delete the existing secondary index table in a specific table.DROP INDEX [IF EXISTS] index_nameON [db_name.]table_name;In this command, IF EXISTS ", "doc_type":"usermanual", "kw":"DROP SECONDARY INDEX,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DROP SECONDARY INDEX", "githuburl":"" }, { "uri":"mrs_01_1448.html", + "node_id":"mrs_01_1448.xml", "product_code":"mrs", "code":"49", "des":"After the DELETE SEGMENT command is executed, the deleted segments are marked as the delete state. 
After the segments are merged, the status of the original segments chan", "doc_type":"usermanual", "kw":"CLEAN FILES,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CLEAN FILES", "githuburl":"" }, { "uri":"mrs_01_1449.html", + "node_id":"mrs_01_1449.xml", "product_code":"mrs", "code":"50", "des":"This command is used to dynamically add, update, display, or reset the CarbonData properties without restarting the driver.Add or Update parameter value:SET parameter_nam", "doc_type":"usermanual", "kw":"SET/RESET,DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SET/RESET", "githuburl":"" }, { "uri":"mrs_01_24046.html", + "node_id":"mrs_01_24046.xml", "product_code":"mrs", "code":"51", "des":"Before performing DDL and DML operations, you need to obtain the corresponding locks. See Table 1 for details about the locks that need to be obtained for each operation.", "doc_type":"usermanual", "kw":"Operation Concurrent Execution,CarbonData Syntax Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Operation Concurrent Execution", "githuburl":"" }, { "uri":"mrs_01_1450.html", + "node_id":"mrs_01_1450.xml", "product_code":"mrs", "code":"52", "des":"This section describes the APIs and usage methods of Segment. All methods are in the org.apache.spark.util.CarbonSegmentUtil class.The following methods have been abandon", "doc_type":"usermanual", "kw":"API,CarbonData Syntax Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"API", "githuburl":"" }, { "uri":"mrs_01_1451.html", + "node_id":"mrs_01_1451.xml", "product_code":"mrs", "code":"53", "des":"Spatial data includes multidimensional points, lines, rectangles, cubes, polygons, and other geometric objects. A spatial data object occupies a certain region of space, ", "doc_type":"usermanual", "kw":"Spatial Indexes,CarbonData Syntax Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spatial Indexes", "githuburl":"" }, { "uri":"mrs_01_1454.html", + "node_id":"mrs_01_1454.xml", "product_code":"mrs", "code":"54", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData Troubleshooting", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData Troubleshooting", "githuburl":"" }, { "uri":"mrs_01_1455.html", + "node_id":"mrs_01_1455.xml", "product_code":"mrs", "code":"55", "des":"When double data type values with higher precision are used in filters, incorrect values are returned by filtering results.When double data type values with higher precis", "doc_type":"usermanual", "kw":"Filter Result Is not Consistent with Hive when a Big Double Type Value Is Used in Filter,CarbonData ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Filter Result Is not Consistent with Hive when a Big Double Type Value Is Used in Filter", "githuburl":"" }, { "uri":"mrs_01_1456.html", + "node_id":"mrs_01_1456.xml", "product_code":"mrs", "code":"56", "des":"The query performance fluctuates when the query is executed in different query periods.During data loading, the memory configured for each executor program instance may b", "doc_type":"usermanual", "kw":"Query Performance Deterioration,CarbonData Troubleshooting,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Query Performance Deterioration", "githuburl":"" }, { "uri":"mrs_01_1457.html", + "node_id":"mrs_01_1457.xml", "product_code":"mrs", "code":"57", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"CarbonData FAQ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CarbonData FAQ", "githuburl":"" }, { "uri":"mrs_01_1458.html", + "node_id":"mrs_01_1458.xml", "product_code":"mrs", "code":"58", "des":"Why is incorrect output displayed when I perform query with filter on decimal data type values?For example:select * from carbon_table where num = 1234567890123456.22;Outp", "doc_type":"usermanual", "kw":"Why Is Incorrect Output Displayed When I Perform Query with Filter on Decimal Data Type Values?,Carb", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is Incorrect Output Displayed When I Perform Query with Filter on Decimal Data Type Values?", "githuburl":"" }, { "uri":"mrs_01_1459.html", + "node_id":"mrs_01_1459.xml", "product_code":"mrs", "code":"59", "des":"How to avoid minor compaction for historical data?If you want to load historical data first and then the incremental data, perform following steps to avoid minor compacti", "doc_type":"usermanual", "kw":"How to Avoid Minor Compaction for Historical Data?,CarbonData FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Avoid Minor Compaction for Historical Data?", "githuburl":"" }, { "uri":"mrs_01_1460.html", + "node_id":"mrs_01_1460.xml", "product_code":"mrs", "code":"60", "des":"How to change the default group name for CarbonData data loading?By default, the group name for CarbonData data loading is ficommon. 
You can perform the following operati", "doc_type":"usermanual", "kw":"How to Change the Default Group Name for CarbonData Data Loading?,CarbonData FAQ,Component Operation", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Change the Default Group Name for CarbonData Data Loading?", "githuburl":"" }, { "uri":"mrs_01_1461.html", + "node_id":"mrs_01_1461.xml", "product_code":"mrs", "code":"61", "des":"Why does the INSERT INTO CARBON TABLE command fail and the following error message is displayed?The INSERT INTO CARBON TABLE command fails in the following scenarios:If t", "doc_type":"usermanual", "kw":"Why Does INSERT INTO CARBON TABLE Command Fail?,CarbonData FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does INSERT INTO CARBON TABLE Command Fail?", "githuburl":"" }, { "uri":"mrs_01_1462.html", + "node_id":"mrs_01_1462.xml", "product_code":"mrs", "code":"62", "des":"Why is the data logged in bad records different from the original input data with escaped characters?An escape character is a backslash (\\) followed by one or more charac", "doc_type":"usermanual", "kw":"Why Is the Data Logged in Bad Records Different from the Original Input Data with Escape Characters?", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is the Data Logged in Bad Records Different from the Original Input Data with Escape Characters?", "githuburl":"" }, { "uri":"mrs_01_1463.html", + "node_id":"mrs_01_1463.xml", "product_code":"mrs", "code":"63", "des":"Why data load performance decreases due to bad records?If bad records are present in the data and BAD_RECORDS_LOGGER_ENABLE is true or BAD_RECORDS_ACTION is redirect then", "doc_type":"usermanual", "kw":"Why Data Load Performance Decreases due to Bad Records?,CarbonData FAQ,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Data Load Performance Decreases due to Bad Records?", "githuburl":"" }, { "uri":"mrs_01_1464.html", + "node_id":"mrs_01_1464.xml", "product_code":"mrs", "code":"64", "des":"Why INSERT INTO or LOAD DATA task distribution is incorrect, and the openedtasks are less than the available executors when the number of initial executors is zero?In ca", "doc_type":"usermanual", "kw":"Why INSERT INTO/LOAD DATA Task Distribution Is Incorrect and the Opened Tasks Are Less Than the Avai", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why INSERT INTO/LOAD DATA Task Distribution Is Incorrect and the Opened Tasks Are Less Than the Available Executors when the Number of Initial Executors Is Zero?", "githuburl":"" }, { "uri":"mrs_01_1465.html", + "node_id":"mrs_01_1465.xml", "product_code":"mrs", "code":"65", "des":"Why does CarbonData require additional executors even though the parallelism is greater than the number of blocks to be processed?CarbonData block distribution optimizes ", "doc_type":"usermanual", "kw":"Why Does CarbonData Require Additional Executors Even Though the Parallelism Is Greater Than the Num", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does CarbonData Require Additional Executors Even Though the Parallelism Is Greater Than the Number of Blocks to Be Processed?", "githuburl":"" }, { 
"uri":"mrs_01_1466.html", + "node_id":"mrs_01_1466.xml", "product_code":"mrs", "code":"66", "des":"Why Data Loading fails during off heap?YARN Resource Manager will consider (Java heap memory + spark.yarn.am.memoryOverhead) as memory limit, so during the off heap, the ", "doc_type":"usermanual", "kw":"Why Data loading Fails During off heap?,CarbonData FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Data loading Fails During off heap?", "githuburl":"" }, { "uri":"mrs_01_1467.html", + "node_id":"mrs_01_1467.xml", "product_code":"mrs", "code":"67", "des":"Why do I fail to create a hive table?Creating a Hive table fails, when source table or sub query has more number of partitions. The implementation of the query requires a", "doc_type":"usermanual", "kw":"Why Do I Fail to Create a Hive Table?,CarbonData FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do I Fail to Create a Hive Table?", "githuburl":"" }, { "uri":"mrs_01_1468.html", + "node_id":"mrs_01_1468.xml", "product_code":"mrs", "code":"68", "des":"Why CarbonData tables created in V100R002C50RC1 not reflecting the privileges provided in Hive Privileges for non-owner?The Hive ACL is implemented after the version V100", "doc_type":"usermanual", "kw":"Why CarbonData tables created in V100R002C50RC1 not reflecting the privileges provided in Hive Privi", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why CarbonData tables created in V100R002C50RC1 not reflecting the privileges provided in Hive Privileges for non-owner?", "githuburl":"" }, { "uri":"mrs_01_1469.html", + "node_id":"mrs_01_1469.xml", "product_code":"mrs", "code":"69", "des":"How do I logically split data across different namespaces?Configuration:To logically split data across different namespaces, you must update the following configuration i", "doc_type":"usermanual", "kw":"How Do I Logically Split Data Across Different Namespaces?,CarbonData FAQ,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Logically Split Data Across Different Namespaces?", "githuburl":"" }, { "uri":"mrs_01_1470.html", + "node_id":"mrs_01_1470.xml", "product_code":"mrs", "code":"70", "des":"Why drop database cascade is throwing the following exception?This error is thrown when the owner of the database performs drop database cascade which con", "doc_type":"usermanual", "kw":"Why Missing Privileges Exception is Reported When I Perform Drop Operation on Databases?,CarbonData ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Missing Privileges Exception is Reported When I Perform Drop Operation on Databases?", "githuburl":"" }, { "uri":"mrs_01_1471.html", + "node_id":"mrs_01_1471.xml", "product_code":"mrs", "code":"71", "des":"Why the UPDATE command cannot be executed in Spark Shell?The syntax and examples provided in this document are about Beeline commands instead of Spark Shell commands.To r", "doc_type":"usermanual", "kw":"Why the UPDATE Command Cannot Be Executed in Spark Shell?,CarbonData FAQ,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why the UPDATE Command Cannot Be Executed in Spark Shell?", 
"githuburl":"" }, { "uri":"mrs_01_1472.html", + "node_id":"mrs_01_1472.xml", "product_code":"mrs", "code":"72", "des":"How do I configure unsafe memory in CarbonData?In the Spark configuration, the value of spark.yarn.executor.memoryOverhead must be greater than the sum of (sort.inmemory.", "doc_type":"usermanual", "kw":"How Do I Configure Unsafe Memory in CarbonData?,CarbonData FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Configure Unsafe Memory in CarbonData?", "githuburl":"" }, { "uri":"mrs_01_1473.html", + "node_id":"mrs_01_1473.xml", "product_code":"mrs", "code":"73", "des":"Why exception occurs in CarbonData when Disk Space Quota is set for the storage directory in HDFS?The data will be written to HDFS when you during create table, load tabl", "doc_type":"usermanual", "kw":"Why Exception Occurs in CarbonData When Disk Space Quota is Set for Storage Directory in HDFS?,Carbo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Exception Occurs in CarbonData When Disk Space Quota is Set for Storage Directory in HDFS?", "githuburl":"" }, { "uri":"mrs_01_1474.html", + "node_id":"mrs_01_1474.xml", "product_code":"mrs", "code":"74", "des":"Why does data query or loading fail and \"org.apache.carbondata.core.memory.MemoryException: Not enough memory\" is displayed?This exception is thrown when the out-of-heap ", "doc_type":"usermanual", "kw":"Why Does Data Query or Loading Fail and \"org.apache.carbondata.core.memory.MemoryException: Not enou", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does Data Query or Loading Fail and \"org.apache.carbondata.core.memory.MemoryException: Not enough memory\" Is Displayed?", "githuburl":"" }, { - "uri":"mrs_01_2344.html", + "uri":"mrs_01_24123.html", + "node_id":"mrs_01_24123.xml", "product_code":"mrs", "code":"75", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"cmpntguide", + "kw":"Using CDL", + "search_title":"", + "metedata":[ + { + "IsBot":"No", + "documenttype":"productdesc", + "prodname":"mrs" + } + ], + "title":"Using CDL", + "githuburl":"" + }, + { + "uri":"mrs_01_24124.html", + "node_id":"mrs_01_24124.xml", + "product_code":"mrs", + "code":"76", + "des":"CDL is a simple and efficient real-time data integration service. It captures data change events from various OLTP databases and pushes them to Kafka. The Sink Connector ", + "doc_type":"cmpntguide", + "kw":"CDL Usage Instructions,Using CDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"CDL Usage Instructions", + "githuburl":"" + }, + { + "uri":"mrs_01_24232.html", + "node_id":"mrs_01_24232.xml", + "product_code":"", + "code":"77", + "des":"CDL supports data synchronization or comparison tasks in multiple scenarios. 
This section describes how to import data from PgSQL to Kafka on the CDLService WebUI of a cl", + "doc_type":"", + "kw":"Using CDL from Scratch,Using CDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Using CDL from Scratch", + "githuburl":"" + }, + { + "uri":"mrs_01_24234.html", + "node_id":"mrs_01_24234.xml", + "product_code":"", + "code":"78", + "des":"Before using the CDL service, a cluster administrator needs to create a user and grant operation permissions to the user to meet service requirements.CDL users are classi", + "doc_type":"", + "kw":"Creating a CDL User,Using CDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Creating a CDL User", + "githuburl":"" + }, + { + "uri":"mrs_01_24235.html", + "node_id":"mrs_01_24235.xml", + "product_code":"mrs", + "code":"79", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"cmpntguide", + "kw":"Preparing for Creating a CDL Job", + "search_title":"", + "metedata":[ + { + "IsBot":"No", + "documenttype":"productdesc", + "prodname":"mrs" + } + ], + "title":"Preparing for Creating a CDL Job", + "githuburl":"" + }, + { + "uri":"mrs_01_24236.html", + "node_id":"mrs_01_24236.xml", + "product_code":"mrs", + "code":"80", + "des":"After CDL is installed in an MRS cluster, you can manage data connections and visualized jobs using the CDL web UI.This section describes how to access the CDL web UI in ", + "doc_type":"cmpntguide", + "kw":"Logging In to the CDLService WebUI,Preparing for Creating a CDL Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Logging In to the CDLService WebUI", + "githuburl":"" + }, + { + "uri":"mrs_01_24237.html", + "node_id":"mrs_01_24237.xml", + "product_code":"", + "code":"81", + "des":"CDL is a simple and efficient real-time data integration service. It captures events from various OLTP databases and pushes them to Kafka. 
When creating a database connec", + "doc_type":"", + "kw":"Uploading a Driver File,Preparing for Creating a CDL Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Uploading a Driver File", + "githuburl":"" + }, + { + "uri":"mrs_01_24238.html", + "node_id":"mrs_01_24238.xml", + "product_code":"", + "code":"82", + "des":"Create a database link on the CDLService web UI.You have obtained the driver JAR package of the data to be connected.A user with the CDL management permission has been cr", + "doc_type":"", + "kw":"Creating a Database Link,Preparing for Creating a CDL Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Creating a Database Link", + "githuburl":"" + }, + { + "uri":"mrs_01_24255.html", + "node_id":"mrs_01_24255.xml", + "product_code":"", + "code":"83", + "des":"To capture data to or from Hudi, create and manage Hudi environment variables by performing the operations in this section.A user with the CDL management permission has b", + "doc_type":"", + "kw":"Managing ENV,Preparing for Creating a CDL Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Managing ENV", + "githuburl":"" + }, + { + "uri":"mrs_01_24811.html", + "node_id":"mrs_01_24811.xml", + "product_code":"", + "code":"84", + "des":"The heartbeat and data consistency check function is used to collect full-link information about CDL synchronization tasks, including the time required for sending data f", + "doc_type":"", + "kw":"Configuring Heartbeat and Data Consistency Check for a Synchronization Task,Preparing for Creating a", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Heartbeat and Data Consistency Check for a Synchronization Task", + "githuburl":"" + }, + { + "uri":"mrs_01_24774.html", + "node_id":"mrs_01_24774.xml", + "product_code":"", + "code":"85", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"Creating a CDL Job", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Creating a CDL Job", + "githuburl":"" + }, + { + "uri":"mrs_01_24239.html", + "node_id":"mrs_01_24239.xml", + "product_code":"", + "code":"86", + "des":"The CDLService web UI provides a visualized page for users to quickly create CDL jobs and import real-time data into the data lake.A user with the CDL management permissi", + "doc_type":"", + "kw":"Creating a CDL Data Synchronization Job,Creating a CDL Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Creating a CDL Data Synchronization Job", + "githuburl":"" + }, + { + "uri":"mrs_01_24775.html", + "node_id":"mrs_01_24775.xml", + "product_code":"", + "code":"87", + "des":"Data comparison checks the consistency between data in the source database and that in the target Hive. 
If the data is inconsistent, CDL can attempt to repair the inconsi", + "doc_type":"", + "kw":"Creating a CDL Data Comparison Job,Creating a CDL Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Creating a CDL Data Comparison Job", + "githuburl":"" + }, + { + "uri":"mrs_01_24240.html", + "node_id":"mrs_01_24240.xml", + "product_code":"", + "code":"88", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"Common CDL Jobs", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Common CDL Jobs", + "githuburl":"" + }, + { + "uri":"mrs_01_24254.html", + "node_id":"mrs_01_24254.xml", + "product_code":"", + "code":"89", + "des":"This section describes how to import data from PgSQL to Kafka by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Kafka services ", + "doc_type":"", + "kw":"Synchronizing Data from PgSQL to Kafka,Common CDL Jobs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Data from PgSQL to Kafka", + "githuburl":"" + }, + { + "uri":"mrs_01_24751.html", + "node_id":"mrs_01_24751.xml", + "product_code":"", + "code":"90", + "des":"This section describes how to import data from MySQL to Hudi by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi services ha", + "doc_type":"", + "kw":"Synchronizing Data from MySQL to Hudi,Common CDL Jobs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Data from MySQL to Hudi", + "githuburl":"" + }, + { + "uri":"mrs_01_24752.html", + "node_id":"mrs_01_24752.xml", + "product_code":"", + "code":"91", + "des":"This section describes how to import data from PgSQL to Hudi by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi services ha", + "doc_type":"", + "kw":"Synchronizing Data from PgSQL to Hudi,Common CDL Jobs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Data from PgSQL to Hudi", + "githuburl":"" + }, + { + "uri":"mrs_01_24763.html", + "node_id":"mrs_01_24763.xml", + "product_code":"", + "code":"92", + "des":"This section describes how to import data from ThirdKafka to Hudi by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi servic", + "doc_type":"", + "kw":"Synchronizing Data from ThirdKafka to Hudi,Common CDL Jobs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Data from ThirdKafka to Hudi", + "githuburl":"" + }, + { + "uri":"mrs_01_24753.html", + "node_id":"mrs_01_24753.xml", + "product_code":"", + "code":"93", + "des":"This section describes how to import data from Hudi to DWS by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi services have", + "doc_type":"", + "kw":"Synchronizing Data from Hudi to DWS,Common CDL Jobs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Data from Hudi to DWS", + "githuburl":"" + }, + { + "uri":"mrs_01_24754.html", + "node_id":"mrs_01_24754.xml", + "product_code":"", + "code":"94", + "des":"This section 
describes how to import data from Hudi to ClickHouse by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL, Hudi, and Clic", + "doc_type":"", + "kw":"Synchronizing Data from Hudi to ClickHouse,Common CDL Jobs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Data from Hudi to ClickHouse", + "githuburl":"" + }, + { + "uri":"mrs_01_24129.html", + "node_id":"mrs_01_24129.xml", + "product_code":"mrs", + "code":"95", + "des":"Log path: The default log storage path of CDL is /var/log/Bigdata/cdl/Role name abbreviation.CDLService: /var/log/Bigdata/cdl/service (run logs) and /var/log/Bigdata/audi", + "doc_type":"cmpntguide", + "kw":"CDL Log Overview,Using CDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"CDL Log Overview", + "githuburl":"" + }, + { + "uri":"mrs_01_24288.html", + "node_id":"mrs_01_24288.xml", + "product_code":"mrs", + "code":"96", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"cmpntguide", + "kw":"CDL FAQs", + "search_title":"", + "metedata":[ + { + "IsBot":"No", + "documenttype":"productdesc", + "prodname":"mrs" + } + ], + "title":"CDL FAQs", + "githuburl":"" + }, + { + "uri":"mrs_01_24793.html", + "node_id":"mrs_01_24793.xml", + "product_code":"", + "code":"97", + "des":"After the CDL job for capturing data to Hudi is executed, related data exists in Kafka, but no record exists in Spark RDD, no related data exists in Hudi, and the error m", + "doc_type":"", + "kw":"Hudi Does Not Receive Data After a CDL Job Is Executed,CDL FAQs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Hudi Does Not Receive Data After a CDL Job Is Executed", + "githuburl":"" + }, + { + "uri":"mrs_01_24794.html", + "node_id":"mrs_01_24794.xml", + "product_code":"", + "code":"98", + "des":"After an CDL job runs for a period of time, the YARN job fails and the status code 104 or 143 is returned.A large amount of data is captured to Hudi. 
As a result, the mem", + "doc_type":"", + "kw":"Error 104 or 143 Is Reported After a CDL Job Runs for a Period of Time,CDL FAQs,Component Operation ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Error 104 or 143 Is Reported After a CDL Job Runs for a Period of Time", + "githuburl":"" + }, + { + "uri":"mrs_01_24795.html", + "node_id":"mrs_01_24795.xml", + "product_code":"", + "code":"99", + "des":"The error message \"Record key is empty\" is displayed when the job of capturing data from PgSQL to Hudi is started.The primary key parameter table.primarykey.mapping of th", + "doc_type":"", + "kw":"Error Is Reported When the Job of Capturing Data From PgSQL to Hudi Is Started,CDL FAQs,Component Op", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Error Is Reported When the Job of Capturing Data From PgSQL to Hudi Is Started", + "githuburl":"" + }, + { + "uri":"mrs_01_24796.html", + "node_id":"mrs_01_24796.xml", + "product_code":"", + "code":"100", + "des":"The error message \"parameter exception with code: 403\" is displayed when a CDL job is stopped on the CDLService web UI.The current user does not have the permission to st", + "doc_type":"", + "kw":"Error 403 Is Reported When a CDL Job Is Stopped,CDL FAQs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Error 403 Is Reported When a CDL Job Is Stopped", + "githuburl":"" + }, + { + "uri":"mrs_01_2344.html", + "node_id":"mrs_01_2344.xml", + "product_code":"mrs", + "code":"101", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using ClickHouse", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using ClickHouse", "githuburl":"" }, { "uri":"mrs_01_2345.html", + "node_id":"mrs_01_2345.xml", "product_code":"mrs", - "code":"76", + "code":"102", "des":"ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and provides good query performance. The aggregation analysis and ", "doc_type":"usermanual", "kw":"Using ClickHouse from Scratch,Using ClickHouse,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using ClickHouse from Scratch", "githuburl":"" }, + { + "uri":"mrs_01_24451.html", + "node_id":"mrs_01_24451.xml", + "product_code":"", + "code":"103", + "des":"This section applies only to MRS 3.2.0 or later.During data migration, one-click balancing, decommissioning and capacity reduction, ClickHouse allows you to set the only_", + "doc_type":"", + "kw":"Enabling the Read-Only Mode of the ClickHouse Table,Using ClickHouse,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Enabling the Read-Only Mode of the ClickHouse Table", + "githuburl":"" + }, { "uri":"mrs_01_24199.html", + "node_id":"mrs_01_24199.xml", "product_code":"mrs", - "code":"77", + "code":"104", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"cmpntguide", "kw":"Common ClickHouse SQL Syntax", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"Common ClickHouse SQL Syntax", "githuburl":"" }, { "uri":"mrs_01_24200.html", + "node_id":"mrs_01_24200.xml", "product_code":"mrs", - "code":"78", + "code":"105", "des":"This section describes the basic syntax and usage of the SQL statement for creating a ClickHouse database.CREATE DATABASE [IF NOT EXISTS] Database_name [ON CLUSTERClickHo", "doc_type":"cmpntguide", "kw":"CREATE DATABASE: Creating a Database,Common ClickHouse SQL Syntax,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"CREATE DATABASE: Creating a Database", "githuburl":"" }, { "uri":"mrs_01_24201.html", + "node_id":"mrs_01_24201.xml", "product_code":"mrs", - "code":"79", + "code":"106", "des":"This section describes the basic syntax and usage of the SQL statement for creating a ClickHouse table.Method 1: Creating a table named table_name in the specified databa", "doc_type":"cmpntguide", "kw":"CREATE TABLE: Creating a Table,Common ClickHouse SQL Syntax,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"CREATE TABLE: Creating a Table", "githuburl":"" }, { "uri":"mrs_01_24202.html", + "node_id":"mrs_01_24202.xml", "product_code":"mrs", - "code":"80", + "code":"107", "des":"This section describes the basic syntax and usage of the SQL statement for inserting data to a table in ClickHouse.Method 1: Inserting data in standard formatINSERT INTO ", "doc_type":"cmpntguide", "kw":"INSERT INTO: Inserting Data into a Table,Common ClickHouse SQL Syntax,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"INSERT INTO: Inserting Data into a Table", "githuburl":"" }, { "uri":"mrs_01_24203.html", + "node_id":"mrs_01_24203.xml", "product_code":"mrs", - "code":"81", + "code":"108", "des":"This section describes the basic syntax and usage of the SQL statement for querying table data in ClickHouse.SELECT [DISTINCT] expr_list[FROM[database_name.]table| (subqu", "doc_type":"cmpntguide", "kw":"SELECT: Querying Table Data,Common ClickHouse SQL Syntax,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"SELECT: Querying Table Data", "githuburl":"" }, { "uri":"mrs_01_24204.html", + "node_id":"mrs_01_24204.xml", "product_code":"mrs", - "code":"82", + "code":"109", "des":"This section describes the basic syntax and usage of the SQL statement for modifying a table structure in ClickHouse.ALTER TABLE [database_name].name[ON CLUSTER cluster] ", "doc_type":"cmpntguide", "kw":"ALTER TABLE: Modifying a Table Structure,Common ClickHouse SQL Syntax,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"ALTER TABLE: Modifying a Table Structure", "githuburl":"" }, { "uri":"mrs_01_24205.html", + "node_id":"mrs_01_24205.xml", "product_code":"mrs", - "code":"83", + "code":"110", "des":"This section describes the basic syntax and usage of the SQL statement for querying a table structure in 
ClickHouse.DESC|DESCRIBETABLE[database_name.]table[INTOOUTFILE fi", "doc_type":"cmpntguide", "kw":"DESC: Querying a Table Structure,Common ClickHouse SQL Syntax,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"DESC: Querying a Table Structure", "githuburl":"" }, { "uri":"mrs_01_24208.html", + "node_id":"mrs_01_24208.xml", "product_code":"mrs", - "code":"84", + "code":"111", "des":"This section describes the basic syntax and usage of the SQL statement for deleting a ClickHouse table.DROP[TEMPORARY] TABLE[IF EXISTS] [database_name.]name[ON CLUSTER cl", "doc_type":"cmpntguide", "kw":"DROP: Deleting a Table,Common ClickHouse SQL Syntax,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"DROP: Deleting a Table", "githuburl":"" }, { "uri":"mrs_01_24207.html", + "node_id":"mrs_01_24207.xml", "product_code":"mrs", - "code":"85", + "code":"112", "des":"This section describes the basic syntax and usage of the SQL statement for displaying information about databases and tables in ClickHouse.show databasesshow tables", "doc_type":"cmpntguide", "kw":"SHOW: Displaying Information About Databases and Tables,Common ClickHouse SQL Syntax,Component Opera", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"SHOW: Displaying Information About Databases and Tables", "githuburl":"" }, - { - "uri":"mrs_01_24206.html", - "product_code":"mrs", - "code":"86", - "des":"This section describes the basic syntax and usage of the SQL statement for importing and exporting file data in ClickHouse.Importing data in CSV formatclickhouse client -", - "doc_type":"cmpntguide", - "kw":"Importing and Exporting File Data,Common ClickHouse SQL Syntax,Component Operation Guide (LTS)", - "title":"Importing and Exporting File Data", - "githuburl":"" - }, { "uri":"mrs_01_24251.html", + "node_id":"mrs_01_24251.xml", "product_code":"mrs", - "code":"87", + "code":"113", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"cmpntguide", "kw":"User Management and Authentication", + "search_title":"", + "metedata":[ + { + "IsBot":"No", + "documenttype":"productdesc", + "prodname":"mrs" + } + ], "title":"User Management and Authentication", "githuburl":"" }, { "uri":"mrs_01_24057.html", + "node_id":"mrs_01_24057.xml", "product_code":"mrs", - "code":"88", + "code":"114", "des":"ClickHouse user permission management enables unified management of users, roles, and permissions on each ClickHouse instance in the cluster. You can use the permission m", "doc_type":"usermanual", "kw":"ClickHouse User and Permission Management,User Management and Authentication,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ClickHouse User and Permission Management", "githuburl":"" }, { "uri":"mrs_01_2395.html", + "node_id":"mrs_01_2395.xml", "product_code":"mrs", - "code":"89", + "code":"115", "des":"After a ClickHouse cluster is created, you can use the ClickHouse client to connect to the ClickHouse server. 
The default username is default.This section describes how t", "doc_type":"cmpntguide", - "kw":"Setting the ClickHouse Username and Password,User Management and Authentication,Component Operation ", - "title":"Setting the ClickHouse Username and Password", + "kw":"Configuring the Password of the Default Account of a ClickHouse Cluster(for MRS 3.1.2),User Manageme", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Configuring the Password of the Default Account of a ClickHouse Cluster(for MRS 3.1.2)", + "githuburl":"" + }, + { + "uri":"mrs_01_24575.html", + "node_id":"mrs_01_24575.xml", + "product_code":"", + "code":"116", + "des":"After a ClickHouse cluster is created, you can use the ClickHouse client to connect to the ClickHouse server.Configure the passwords of the default accounts default and c", + "doc_type":"", + "kw":"Configuring the Password of the Default Account of a ClickHouse Cluster,User Management and Authenti", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring the Password of the Default Account of a ClickHouse Cluster", + "githuburl":"" + }, + { + "uri":"mrs_01_24784.html", + "node_id":"mrs_01_24784.xml", + "product_code":"", + "code":"117", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"ClickHouse Multi-Tenancy", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"ClickHouse Multi-Tenancy", + "githuburl":"" + }, + { + "uri":"mrs_01_24790.html", + "node_id":"mrs_01_24790.xml", + "product_code":"", + "code":"118", + "des":"This section applies only to MRS 3.2.0 or later.The ClickHouse multi-tenancy feature enables you to manage cluster resources through the user > tenant role > resource pro", + "doc_type":"", + "kw":"ClickHouse Multi-Tenancy Overview,ClickHouse Multi-Tenancy,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"ClickHouse Multi-Tenancy Overview", + "githuburl":"" + }, + { + "uri":"mrs_01_24789.html", + "node_id":"mrs_01_24789.xml", + "product_code":"", + "code":"119", + "des":"This section applies only to MRS 3.2.0 or later.ClickHouse tenants support CPU priorities. This feature depends on CAP_SYS_NICE of the OS and takes effect only after bein", + "doc_type":"", + "kw":"Enabling the CPU Priority Feature,ClickHouse Multi-Tenancy,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Enabling the CPU Priority Feature", + "githuburl":"" + }, + { + "uri":"mrs_01_24791.html", + "node_id":"mrs_01_24791.xml", + "product_code":"", + "code":"120", + "des":"This section applies only to MRS 3.2.0 or later.On FusionInsight Manager, cluster administrators can create a ClickHouse tenant and associate it with a logical cluster. 
A", + "doc_type":"", + "kw":"Managing ClickHouse Tenants,ClickHouse Multi-Tenancy,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Managing ClickHouse Tenants", + "githuburl":"" + }, + { + "uri":"mrs_01_24786.html", + "node_id":"mrs_01_24786.xml", + "product_code":"", + "code":"121", + "des":"This section applies only to MRS 3.2.0 or later.Modify the maximum memory allowed for ClickHouse on a ClickHouseServer node to ensure the normal use of other service inst", + "doc_type":"", + "kw":"Modifying the Memory Limit of ClickHouse on a ClickHouseServer Node,ClickHouse Multi-Tenancy,Compone", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Modifying the Memory Limit of ClickHouse on a ClickHouseServer Node", "githuburl":"" }, { "uri":"mrs_01_24105.html", + "node_id":"mrs_01_24105.xml", "product_code":"mrs", - "code":"90", + "code":"122", "des":"Table engines play a key role in ClickHouse to determine:Where to write and read dataSupported query modesWhether concurrent data access is supportedWhether indexes can b", "doc_type":"usermanual", "kw":"ClickHouse Table Engine Overview,Using ClickHouse,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ClickHouse Table Engine Overview", "githuburl":"" }, { "uri":"mrs_01_2398.html", + "node_id":"mrs_01_2398.xml", "product_code":"mrs", - "code":"91", + "code":"123", "des":"ClickHouse implements the replicated table mechanism based on the ReplicatedMergeTree engine and ZooKeeper. When creating a table, you can specify an engine to determine ", "doc_type":"usermanual", "kw":"Creating a ClickHouse Table,Using ClickHouse,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a ClickHouse Table", "githuburl":"" }, { - "uri":"mrs_01_24053.html", + "uri":"mrs_01_24250.html", + "node_id":"mrs_01_24250.xml", "product_code":"mrs", - "code":"92", + "code":"124", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"cmpntguide", + "kw":"Migrating ClickHouse Data", + "search_title":"", + "metedata":[ + { + "IsBot":"No", + "documenttype":"productdesc", + "prodname":"mrs" + } + ], + "title":"Migrating ClickHouse Data", + "githuburl":"" + }, + { + "uri":"mrs_01_24206.html", + "node_id":"mrs_01_24206.xml", + "product_code":"mrs", + "code":"125", + "des":"Use the ClickHouse client to import and export data.Importing data in CSV formatclickhouse client --hostHost name or IP address of the ClickHouse instance--databaseDataba", + "doc_type":"cmpntguide", + "kw":"Using ClickHouse to Import and Export Data,Migrating ClickHouse Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Using ClickHouse to Import and Export Data", + "githuburl":"" + }, + { + "uri":"mrs_01_24377.html", + "node_id":"mrs_01_24377.xml", + "product_code":"", + "code":"126", + "des":"This section describes how to create a Kafka table to automatically synchronize Kafka data to the ClickHouse cluster.You have created a Kafka cluster. 
The Kafka client ha", + "doc_type":"", + "kw":"Synchronizing Kafka Data to ClickHouse,Migrating ClickHouse Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Synchronizing Kafka Data to ClickHouse", + "githuburl":"" + }, + { + "uri":"mrs_01_24053.html", + "node_id":"mrs_01_24053.xml", + "product_code":"mrs", + "code":"127", "des":"The ClickHouse data migration tool can migrate some partitions of one or more partitioned MergeTree tables on several ClickHouseServer nodes to the same tables on other C", "doc_type":"usermanual", - "kw":"Using the ClickHouse Data Migration Tool,Using ClickHouse,Component Operation Guide (LTS)", + "kw":"Using the ClickHouse Data Migration Tool,Migrating ClickHouse Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the ClickHouse Data Migration Tool", "githuburl":"" }, + { + "uri":"mrs_01_24508.html", + "node_id":"mrs_01_24508.xml", + "product_code":"", + "code":"128", + "des":"This section applies only to MRS 3.2.0 or later.Scenario 1: As the number of MRS ClickHouse services increases, the storage and compute resources of clusters cannot meet ", + "doc_type":"", + "kw":"Using the Migration Tool to Quickly Migrate ClickHouse Cluster Data,Migrating ClickHouse Data,Compon", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Using the Migration Tool to Quickly Migrate ClickHouse Cluster Data", + "githuburl":"" + }, { "uri":"mrs_01_24229.html", + "node_id":"mrs_01_24229.xml", "product_code":"mrs", - "code":"93", + "code":"129", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"cmpntguide", "kw":"Monitoring of Slow ClickHouse Query Statements and Replication Table Data Synchronization", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"Monitoring of Slow ClickHouse Query Statements and Replication Table Data Synchronization", "githuburl":"" }, { "uri":"mrs_01_24230.html", + "node_id":"mrs_01_24230.xml", "product_code":"mrs", - "code":"94", + "code":"130", "des":"The SQL statement query in ClickHouse is slow because the conditions such as partitions, where conditions, and indexes of SQL statements are set improperly. 
As a result, ", "doc_type":"cmpntguide", "kw":"Slow Query Statement Monitoring,Monitoring of Slow ClickHouse Query Statements and Replication Table", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"Slow Query Statement Monitoring", "githuburl":"" }, { "uri":"mrs_01_24231.html", + "node_id":"mrs_01_24231.xml", "product_code":"mrs", - "code":"95", + "code":"131", "des":"MRS monitors the synchronization between multiple copies of data in the same shard of a Replicated*MergeTree table.Currently, you can monitor and query only Replicated*Me", "doc_type":"cmpntguide", "kw":"Replication Table Data Synchronization Monitoring,Monitoring of Slow ClickHouse Query Statements and", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"Replication Table Data Synchronization Monitoring", "githuburl":"" }, { "uri":"mrs_01_24287.html", + "node_id":"mrs_01_24287.xml", "product_code":"", - "code":"96", + "code":"132", "des":"Materialized views (MVs) are used in ClickHouse to save the precomputed result of time-consuming operations. When querying data, you can query the materialized views rath", "doc_type":"", "kw":"Adaptive MV Usage in ClickHouse,Using ClickHouse,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Adaptive MV Usage in ClickHouse", "githuburl":"" }, { "uri":"mrs_01_2399.html", + "node_id":"mrs_01_2399.xml", "product_code":"mrs", - "code":"97", + "code":"133", "des":"Log path: The default storage path of ClickHouse log files is as follows: ${BIGDATA_LOG_HOME}/clickhouseLog archive rule: The automatic ClickHouse log compression functio", "doc_type":"usermanual", "kw":"ClickHouse Log Overview,Using ClickHouse,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ClickHouse Log Overview", "githuburl":"" }, + { + "uri":"mrs_01_24848.html", + "node_id":"mrs_01_24848.xml", + "product_code":"", + "code":"134", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"ClickHouse Performance Tuning", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"ClickHouse Performance Tuning", + "githuburl":"" + }, + { + "uri":"mrs_01_24849.html", + "node_id":"mrs_01_24849.xml", + "product_code":"", + "code":"135", + "des":"Log in to the ClickHouse client and check whether abnormal merge exists.select database, table, elapsed, progress, merge_type from system.merges;select database, table, e", + "doc_type":"", + "kw":"Solution to the \"Too many parts\" Error in Data Tables,ClickHouse Performance Tuning,Component Operat", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Solution to the \"Too many parts\" Error in Data Tables", + "githuburl":"" + }, + { + "uri":"mrs_01_24853.html", + "node_id":"mrs_01_24853.xml", + "product_code":"", + "code":"136", + "des":"To accelerate background tasks, adjust the ZooKeeper service configuration first. 
Otherwise, the ClickHouse service and background tasks will be abnormal due to insuffici", + "doc_type":"", + "kw":"Accelerating Merge Operations,ClickHouse Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Accelerating Merge Operations", + "githuburl":"" + }, + { + "uri":"mrs_01_24855.html", + "node_id":"mrs_01_24855.xml", + "product_code":"", + "code":"137", + "des":"When TTL is triggered in ClickHouse, a large amount of CPU and memory are consumed.Log in to FusionInsight Manager and choose Cluster > Services > ClickHouse. Click Confi", + "doc_type":"", + "kw":"Accelerating TTL Operations,ClickHouse Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Accelerating TTL Operations", + "githuburl":"" + }, + { + "uri":"mrs_01_24777.html", + "node_id":"mrs_01_24777.xml", + "product_code":"", + "code":"138", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"ClickHouse FAQ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"ClickHouse FAQ", + "githuburl":"" + }, + { + "uri":"mrs_01_24778.html", + "node_id":"mrs_01_24778.xml", + "product_code":"", + "code":"139", + "des":"How do I do if the disk status displayed in the System.disks table is fault or abnormal?This problem is caused by I/O errors on the disk. To rectify the fault, perform th", + "doc_type":"", + "kw":"How Do I Do If the Disk Status Displayed in the System.disks Table Is fault or abnormal?,ClickHouse ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"How Do I Do If the Disk Status Displayed in the System.disks Table Is fault or abnormal?", + "githuburl":"" + }, + { + "uri":"mrs_01_24831.html", + "node_id":"mrs_01_24831.xml", + "product_code":"", + "code":"140", + "des":"How do I migrate Hive/HDFS data to ClickHouse?You can export data from Hive as CSV files and import the CSV files to ClickHouse.Export data from Hive as CSV files.hive -e", + "doc_type":"", + "kw":"How Do I Migrate Data from Hive/HDFS to ClickHouse?,ClickHouse FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"How Do I Migrate Data from Hive/HDFS to ClickHouse?", + "githuburl":"" + }, + { + "uri":"mrs_01_24837.html", + "node_id":"mrs_01_24837.xml", + "product_code":"", + "code":"141", + "des":"An error is reported in logs when the auxiliary ZooKeeper or replica data is used to synchronize table data.The versions of replication table replicas are inconsistent, c", + "doc_type":"", + "kw":"An Error Is Reported in Logs When the Auxiliary ZooKeeper or Replica Data Is Used to Synchronize Tab", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"An Error Is Reported in Logs When the Auxiliary ZooKeeper or Replica Data Is Used to Synchronize Table Data", + "githuburl":"" + }, + { + "uri":"mrs_01_24846.html", + "node_id":"mrs_01_24846.xml", + "product_code":"", + "code":"142", + "des":"su - ommsource{Client installation directory}/bigdata_envkinitComponent user (You do not need to run the kinit command for normal clusters.)clickhouse client --hostIP ad", + "doc_type":"", + "kw":"How Do I Grant the Select Permission at the Database Level to ClickHouse Users?,ClickHouse FAQ,Compo", + "search_title":"", + "metedata":[ + { + + } + 
], + "title":"How Do I Grant the Select Permission at the Database Level to ClickHouse Users?", + "githuburl":"" + }, { "uri":"mrs_01_2356.html", + "node_id":"mrs_01_2356.xml", "product_code":"mrs", - "code":"98", + "code":"143", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using DBService", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using DBService", "githuburl":"" }, { "uri":"mrs_01_2346.html", + "node_id":"mrs_01_2346.xml", "product_code":"mrs", - "code":"99", + "code":"144", "des":"This section describes how to manually configure SSL for the HA module of DBService in the cluster where DBService is installed.After this operation is performed, if you ", "doc_type":"usermanual", "kw":"Configuring SSL for the HA Module,Using DBService,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring SSL for the HA Module", "githuburl":"" }, { "uri":"mrs_01_2347.html", + "node_id":"mrs_01_2347.xml", "product_code":"mrs", - "code":"100", + "code":"145", "des":"This section describes how to restore SSL for the HA module of DBService in the cluster where DBService is installed.SSL has been enabled for the HA module of DBService.C", "doc_type":"usermanual", "kw":"Restoring SSL for the HA Module,Using DBService,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Restoring SSL for the HA Module", "githuburl":"" }, { "uri":"mrs_01_24283.html", + "node_id":"mrs_01_24283.xml", "product_code":"", - "code":"101", + "code":"146", "des":"The default timeout interval of DBService backup tasks is 2 hours. When the data volume in DBService is too large, the backup task may fail to be executed because the tim", "doc_type":"", "kw":"Configuring the Timeout Interval of DBService Backup Tasks,Using DBService,Component Operation Guide", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Configuring the Timeout Interval of DBService Backup Tasks", "githuburl":"" }, { "uri":"mrs_01_0789.html", + "node_id":"mrs_01_0789.xml", "product_code":"mrs", - "code":"102", + "code":"147", "des":"Log path: The default storage path of DBService log files is /var/log/Bigdata/dbservice.GaussDB: /var/log/Bigdata/dbservice/DB (GaussDB run log directory), /var/log/Bigda", "doc_type":"usermanual", "kw":"DBService Log Overview,Using DBService,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DBService Log Overview", "githuburl":"" }, { "uri":"mrs_01_0591.html", + "node_id":"mrs_01_0591.xml", "product_code":"mrs", - "code":"103", + "code":"148", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Flink", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Flink", "githuburl":"" }, { "uri":"mrs_01_0473.html", + "node_id":"mrs_01_0473.xml", "product_code":"mrs", - "code":"104", + "code":"149", "des":"This section describes how to use Flink to run wordcount jobs.Flink has been installed in the MRS cluster and all components in the cluster are running properly.The clust", "doc_type":"usermanual", "kw":"Using Flink from Scratch,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Using Flink from Scratch", "githuburl":"" }, { "uri":"mrs_01_0784.html", + "node_id":"mrs_01_0784.xml", "product_code":"mrs", - "code":"105", + "code":"150", "des":"You can view Flink job information on the Yarn web UI.The Flink service has been installed in a cluster.Log in to FusionInsight Manager. For details, see Accessing Fusion", "doc_type":"usermanual", "kw":"Viewing Flink Job Information,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Flink Job Information", "githuburl":"" }, { "uri":"mrs_01_0592.html", + "node_id":"mrs_01_0592.xml", "product_code":"mrs", - "code":"106", + "code":"151", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Flink Configuration Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flink Configuration Management", "githuburl":"" }, { "uri":"mrs_01_1565.html", + "node_id":"mrs_01_1565.xml", "product_code":"mrs", - "code":"107", + "code":"152", "des":"All parameters of Flink must be set on a client. The path of a configuration file is as follows: Client installation path/Flink/flink/conf/flink-conf.yaml.You are advised", "doc_type":"usermanual", "kw":"Configuring Parameter Paths,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Parameter Paths", "githuburl":"" }, { "uri":"mrs_01_1566.html", + "node_id":"mrs_01_1566.xml", "product_code":"mrs", - "code":"108", + "code":"153", "des":"JobManager and TaskManager are main components of Flink. You can configure the parameters for different security and performance scenarios on the client.Main configuratio", "doc_type":"usermanual", "kw":"JobManager & TaskManager,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"JobManager & TaskManager", "githuburl":"" }, { "uri":"mrs_01_1567.html", + "node_id":"mrs_01_1567.xml", "product_code":"mrs", - "code":"109", + "code":"154", "des":"The Blob server on the JobManager node is used to receive JAR files uploaded by users on the client, send JAR files to TaskManager, and transfer log files. 
Flink provides", "doc_type":"usermanual", "kw":"Blob,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Blob", "githuburl":"" }, { "uri":"mrs_01_1568.html", + "node_id":"mrs_01_1568.xml", "product_code":"mrs", - "code":"110", + "code":"155", "des":"The Akka actor model is the basis of communications between the Flink client and JobManager, JobManager and TaskManager, as well as TaskManager and TaskManager. Flink ena", "doc_type":"usermanual", "kw":"Distributed Coordination (via Akka),Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Distributed Coordination (via Akka)", "githuburl":"" }, { "uri":"mrs_01_1569.html", + "node_id":"mrs_01_1569.xml", "product_code":"mrs", - "code":"111", + "code":"156", "des":"When the secure Flink cluster is required, SSL-related configuration items must be set.Configuration items include the SSL switch, certificate, password, and encryption a", "doc_type":"usermanual", "kw":"SSL,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SSL", "githuburl":"" }, { "uri":"mrs_01_1570.html", + "node_id":"mrs_01_1570.xml", "product_code":"mrs", - "code":"112", + "code":"157", "des":"When Flink runs a job, data transmission and reverse pressure detection between tasks depend on Netty. In certain environments, Netty parameters should be configured.For ", "doc_type":"usermanual", "kw":"Network communication (via Netty),Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Network communication (via Netty)", "githuburl":"" }, { "uri":"mrs_01_1571.html", + "node_id":"mrs_01_1571.xml", "product_code":"mrs", - "code":"113", + "code":"158", "des":"When JobManager is started, the web server in the same process is also started.You can access the web server to obtain information about the current Flink cluster, includ", "doc_type":"usermanual", "kw":"JobManager Web Frontend,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"JobManager Web Frontend", "githuburl":"" }, { "uri":"mrs_01_1572.html", + "node_id":"mrs_01_1572.xml", "product_code":"mrs", - "code":"114", + "code":"159", "des":"Result files are created when tasks are running. Flink enables you to configure parameters for file creation.Configuration items include overwriting policy and directory ", "doc_type":"usermanual", "kw":"File Systems,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"File Systems", "githuburl":"" }, { "uri":"mrs_01_1573.html", + "node_id":"mrs_01_1573.xml", "product_code":"mrs", - "code":"115", + "code":"160", "des":"Flink enables HA and job exception, as well as job pause and recovery during version upgrade. 
Flink depends on state backend to store job states and on the restart strate", "doc_type":"usermanual", "kw":"State Backend,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"State Backend", "githuburl":"" }, { "uri":"mrs_01_1574.html", + "node_id":"mrs_01_1574.xml", "product_code":"mrs", - "code":"116", + "code":"161", "des":"Flink Kerberos configuration items must be configured in security mode.The configuration items include keytab, principal, and cookie of Kerberos.", "doc_type":"usermanual", "kw":"Kerberos-based Security,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kerberos-based Security", "githuburl":"" }, { "uri":"mrs_01_1575.html", + "node_id":"mrs_01_1575.xml", "product_code":"mrs", - "code":"117", + "code":"162", "des":"The Flink HA mode depends on ZooKeeper. Therefore, ZooKeeper-related configuration items must be set.Configuration items include the ZooKeeper address, path, and security", "doc_type":"usermanual", "kw":"HA,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HA", "githuburl":"" }, { "uri":"mrs_01_1576.html", + "node_id":"mrs_01_1576.xml", "product_code":"mrs", - "code":"118", + "code":"163", "des":"In scenarios raising special requirements on JVM configuration, users can use configuration items to transfer JVM parameters to the client, JobManager, and TaskManager.Co", "doc_type":"usermanual", "kw":"Environment,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Environment", "githuburl":"" }, { "uri":"mrs_01_1577.html", + "node_id":"mrs_01_1577.xml", "product_code":"mrs", - "code":"119", + "code":"164", "des":"Flink runs on a Yarn cluster and JobManager runs on ApplicationMaster. Certain configuration parameters of JobManager depend on Yarn. By setting Yarn-related configuratio", "doc_type":"usermanual", "kw":"Yarn,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Yarn", "githuburl":"" }, { "uri":"mrs_01_1578.html", + "node_id":"mrs_01_1578.xml", "product_code":"mrs", - "code":"120", + "code":"165", "des":"The Netty connection is used among multiple jobs to reduce latency. In this case, NettySink is used on the server and NettySource is used on the client for data transmiss", "doc_type":"usermanual", "kw":"Pipeline,Flink Configuration Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Pipeline", "githuburl":"" }, { "uri":"mrs_01_0593.html", + "node_id":"mrs_01_0593.xml", "product_code":"mrs", - "code":"121", + "code":"166", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Security Configuration", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Security Configuration", "githuburl":"" }, { "uri":"mrs_01_1579.html", + "node_id":"mrs_01_1579.xml", "product_code":"mrs", - "code":"122", + "code":"167", "des":"All Flink cluster components support authentication.The Kerberos authentication is supported between Flink cluster components and external components, such as Yarn, HDFS,", "doc_type":"usermanual", "kw":"Security Features,Security Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Security Features", "githuburl":"" }, { "uri":"mrs_01_1580.html", + "node_id":"mrs_01_1580.xml", "product_code":"mrs", - "code":"123", + "code":"168", "des":"Sample project data of Flink is stored in Kafka. A user with Kafka permission can send data to Kafka and receive data from it.Run Linux command line to create a topic. Be", "doc_type":"usermanual", "kw":"Configuring Kafka,Security Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Kafka", "githuburl":"" }, { "uri":"mrs_01_1581.html", + "node_id":"mrs_01_1581.xml", "product_code":"mrs", - "code":"124", + "code":"169", "des":"File configurationnettyconnector.registerserver.topic.storage: (Mandatory) Configures the path (on a third-party server) to information about IP address, port numbers, an", "doc_type":"usermanual", "kw":"Configuring Pipeline,Security Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Pipeline", "githuburl":"" }, { "uri":"mrs_01_0594.html", + "node_id":"mrs_01_0594.xml", "product_code":"mrs", - "code":"125", + "code":"170", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Security Hardening", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Security Hardening", "githuburl":"" }, { "uri":"mrs_01_1583.html", + "node_id":"mrs_01_1583.xml", "product_code":"mrs", - "code":"126", + "code":"171", "des":"Flink uses the following three authentication modes:Kerberos authentication: It is used between the Flink Yarn client and Yarn ResourceManager, JobManager and ZooKeeper, ", "doc_type":"usermanual", "kw":"Authentication and Encryption,Security Hardening,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Authentication and Encryption", "githuburl":"" }, { "uri":"mrs_01_1584.html", + "node_id":"mrs_01_1584.xml", "product_code":"mrs", - "code":"127", + "code":"172", "des":"In HA mode of Flink, ZooKeeper can be used to manage clusters and discover services. Zookeeper supports SASL ACL control. 
Only users who have passed the SASL (Kerberos) a", "doc_type":"usermanual", "kw":"ACL Control,Security Hardening,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ACL Control", "githuburl":"" }, { "uri":"mrs_01_1585.html", + "node_id":"mrs_01_1585.xml", "product_code":"mrs", - "code":"128", + "code":"173", "des":"Note: The same coding mode is used on the web service client and server to prevent garbled characters and to enable input verification.Security hardening: apply UTF-8 to ", "doc_type":"usermanual", "kw":"Web Security,Security Hardening,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Web Security", "githuburl":"" }, { "uri":"mrs_01_1586.html", + "node_id":"mrs_01_1586.xml", "product_code":"mrs", - "code":"129", + "code":"174", "des":"All security functions of Flink are provided by the open source community or self-developed. Security features that need to be configured by users, such as authentication", "doc_type":"usermanual", "kw":"Security Statement,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Security Statement", "githuburl":"" }, { "uri":"mrs_01_24014.html", + "node_id":"mrs_01_24014.xml", "product_code":"mrs", - "code":"130", + "code":"175", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using the Flink Web UI", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24015.html", + "node_id":"mrs_01_24015.xml", "product_code":"mrs", - "code":"131", + "code":"176", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Overview", - "title":"Overview", + "kw":"Flink Web UI Overview", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Flink Web UI Overview", "githuburl":"" }, { "uri":"mrs_01_24016.html", + "node_id":"mrs_01_24016.xml", "product_code":"mrs", - "code":"132", + "code":"177", "des":"Flink web UI provides a web-based visual development platform. You only need to compile SQL statements to develop jobs, slashing the job development threshold. 
In additio", "doc_type":"usermanual", - "kw":"Introduction to Flink Web UI,Overview,Component Operation Guide (LTS)", + "kw":"Introduction to Flink Web UI,Flink Web UI Overview,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24017.html", + "node_id":"mrs_01_24017.xml", "product_code":"mrs", - "code":"133", + "code":"178", "des":"The Flink web UI application process is shown as follows:", "doc_type":"usermanual", - "kw":"Flink Web UI Application Process,Overview,Component Operation Guide (LTS)", + "kw":"Flink Web UI Application Process,Flink Web UI Overview,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flink Web UI Application Process", "githuburl":"" }, { "uri":"mrs_01_24047.html", + "node_id":"mrs_01_24047.xml", "product_code":"mrs", - "code":"134", + "code":"179", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"FlinkServer Permissions Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"FlinkServer Permissions Management", "githuburl":"" }, { "uri":"mrs_01_24048.html", + "node_id":"mrs_01_24048.xml", "product_code":"mrs", - "code":"135", + "code":"180", "des":"User admin of Manager does not have the FlinkServer service operation permission. To perform FlinkServer service operations, you need to grant related permission to the u", "doc_type":"usermanual", - "kw":"Overview,FlinkServer Permissions Management,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"FlinkServer Permissions Overview,FlinkServer Permissions Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"FlinkServer Permissions Overview", "githuburl":"" }, { "uri":"mrs_01_24049.html", + "node_id":"mrs_01_24049.xml", "product_code":"mrs", - "code":"136", + "code":"181", "des":"This section describes how to create and configure a FlinkServer role on Manager as the system administrator. 
A FlinkServer role can be configured with FlinkServer admini", "doc_type":"usermanual", "kw":"Authentication Based on Users and Roles,FlinkServer Permissions Management,Component Operation Guide", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Authentication Based on Users and Roles", "githuburl":"" }, { "uri":"mrs_01_24019.html", + "node_id":"mrs_01_24019.xml", "product_code":"mrs", - "code":"137", + "code":"182", "des":"After Flink is installed in an MRS cluster, you can connect to clusters and data as well as manage stream tables and jobs using the Flink web UI.This section describes ho", "doc_type":"usermanual", "kw":"Accessing the Flink Web UI,Using the Flink Web UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Accessing the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24020.html", + "node_id":"mrs_01_24020.xml", "product_code":"mrs", - "code":"138", + "code":"183", "des":"Applications can be used to isolate different upper-layer services.After the application is created, you can switch to the application to be operated in the upper left co", "doc_type":"usermanual", "kw":"Creating an Application on the Flink Web UI,Using the Flink Web UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating an Application on the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24021.html", + "node_id":"mrs_01_24021.xml", "product_code":"mrs", - "code":"139", + "code":"184", "des":"Different clusters can be accessed by configuring the cluster connection.To obtain the cluster client configuration files, perform the following steps:Log in to FusionIns", "doc_type":"usermanual", "kw":"Creating a Cluster Connection on the Flink Web UI,Using the Flink Web UI,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Cluster Connection on the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24022.html", + "node_id":"mrs_01_24022.xml", "product_code":"mrs", - "code":"140", + "code":"185", "des":"Different data services can be accessed through data connections. 
Currently, FlinkServer supports HDFS, Kafka data connections.", "doc_type":"usermanual", "kw":"Creating a Data Connection on the Flink Web UI,Using the Flink Web UI,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Data Connection on the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24023.html", + "node_id":"mrs_01_24023.xml", "product_code":"mrs", - "code":"141", + "code":"186", "des":"Data tables can be used to define basic attributes and parameters of source tables, dimension tables, and output tables.", "doc_type":"usermanual", "kw":"Managing Tables on the Flink Web UI,Using the Flink Web UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Tables on the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24024.html", + "node_id":"mrs_01_24024.xml", "product_code":"mrs", - "code":"142", + "code":"187", "des":"Define Flink jobs, including Flink SQL and Flink JAR jobs.Creating a Flink SQL jobDevelop the job on the job development page.Click Check Semantic to check the input cont", "doc_type":"usermanual", "kw":"Managing Jobs on the Flink Web UI,Using the Flink Web UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Jobs on the Flink Web UI", "githuburl":"" }, + { + "uri":"mrs_01_24481.html", + "node_id":"mrs_01_24481.xml", + "product_code":"", + "code":"188", + "des":"The FlinkServer web UI enables you only to import and export jobs, UDFs, andstream tables.Jobs, flow tables, and UDFs with the same name cannot be imported to the same cl", + "doc_type":"", + "kw":"Importing and Exporting Jobs,Using the Flink Web UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Importing and Exporting Jobs", + "githuburl":"" + }, { "uri":"mrs_01_24223.html", + "node_id":"mrs_01_24223.xml", "product_code":"mrs", - "code":"143", + "code":"189", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Managing UDFs on the Flink Web UI", - "title":"Managing UDFs on the Flink Web UI", + "kw":"Managing UDFs", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], + "title":"Managing UDFs", "githuburl":"" }, { "uri":"mrs_01_24211.html", + "node_id":"mrs_01_24211.xml", "product_code":"mrs", - "code":"144", + "code":"190", "des":"You can customize functions to extend SQL statements to meet personalized requirements. These functions are called user-defined functions (UDFs). 
You can upload and manag", "doc_type":"usermanual", - "kw":"Managing UDFs on the Flink Web UI,Managing UDFs on the Flink Web UI,Component Operation Guide (LTS)", + "kw":"Managing UDFs on the Flink Web UI,Managing UDFs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Managing UDFs on the Flink Web UI", "githuburl":"" }, { "uri":"mrs_01_24224.html", + "node_id":"mrs_01_24224.xml", "product_code":"mrs", - "code":"145", + "code":"191", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"UDF Java and SQL Examples,Managing UDFs on the Flink Web UI,Component Operation Guide (LTS)", + "kw":"UDF Java and SQL Examples,Managing UDFs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"UDF Java and SQL Examples", "githuburl":"" }, { "uri":"mrs_01_24225.html", + "node_id":"mrs_01_24225.xml", "product_code":"mrs", - "code":"146", + "code":"192", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"UDAF Java and SQL Examples,Managing UDFs on the Flink Web UI,Component Operation Guide (LTS)", + "kw":"UDAF Java and SQL Examples,Managing UDFs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"UDAF Java and SQL Examples", "githuburl":"" }, { "uri":"mrs_01_24227.html", + "node_id":"mrs_01_24227.xml", "product_code":"mrs", - "code":"147", + "code":"193", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"UDTF Java and SQL Examples,Managing UDFs on the Flink Web UI,Component Operation Guide (LTS)", + "kw":"UDTF Java and SQL Examples,Managing UDFs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"UDTF Java and SQL Examples", "githuburl":"" }, { "uri":"mrs_01_24226.html", + "node_id":"mrs_01_24226.xml", "product_code":"mrs", - "code":"148", + "code":"194", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Interconnecting FlinkServer with External Components", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Interconnecting FlinkServer with External Components", "githuburl":"" }, { "uri":"mrs_01_24148.html", + "node_id":"mrs_01_24148.xml", "product_code":"mrs", - "code":"149", - "des":"Flink interconnects with the ClickHouseBalancer instance of ClickHouse to read and write data, preventing ClickHouse traffic distribution problems.Services such as ClickH", + "code":"195", + "des":"Flink interconnects with the ClickHouseBalancer instance of ClickHouse to read and write data, preventing ClickHouse traffic distribution problems.When \"FlinkSQL\" is disp", "doc_type":"usermanual", "kw":"Interconnecting FlinkServer with ClickHouse,Interconnecting FlinkServer with External Components,Com", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Interconnecting FlinkServer with ClickHouse", "githuburl":"" }, { "uri":"mrs_01_24120.html", + "node_id":"mrs_01_24120.xml", "product_code":"mrs", - "code":"150", + "code":"196", "des":"FlinkServer can be interconnected with HBase. The details are as follows:It can be interconnected with dimension tables and sink tables.When HBase and Flink are in the sa", "doc_type":"usermanual", "kw":"Interconnecting FlinkServer with HBase,Interconnecting FlinkServer with External Components,Componen", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Interconnecting FlinkServer with HBase", "githuburl":"" }, { "uri":"mrs_01_24247.html", + "node_id":"mrs_01_24247.xml", "product_code":"mrs", - "code":"151", + "code":"197", "des":"This section describes the data definition language (DDL) of HDFS as a sink table, as well as the WITH parameters and example code for creating a sink table, and provides", "doc_type":"cmpntguide", "kw":"Interconnecting FlinkServer with HDFS,Interconnecting FlinkServer with External Components,Component", + "search_title":"", + "metedata":[ + { + "documenttype":"usermanual", + "prodname":"mrs" + } + ], "title":"Interconnecting FlinkServer with HDFS", "githuburl":"" }, { "uri":"mrs_01_24179.html", + "node_id":"mrs_01_24179.xml", "product_code":"mrs", - "code":"152", + "code":"198", "des":"Currently, FlinkServer interconnects with Hive MetaStore. Therefore, the MetaStore function must be enabled for Hive. 
Hive can be used as source, sink, and dimension tabl", "doc_type":"usermanual", "kw":"Interconnecting FlinkServer with Hive,Interconnecting FlinkServer with External Components,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Interconnecting FlinkServer with Hive", "githuburl":"" }, { "uri":"mrs_01_24180.html", + "node_id":"mrs_01_24180.xml", "product_code":"mrs", - "code":"153", + "code":"199", "des":"This section describes how to interconnect FlinkServer with Hudi through Flink SQL jobs.The HDFS, Yarn, Flink, and Hudi services have been installed in a cluster.The clie", "doc_type":"usermanual", "kw":"Interconnecting FlinkServer with Hudi,Interconnecting FlinkServer with External Components,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Interconnecting FlinkServer with Hudi", "githuburl":"" }, { "uri":"mrs_01_24248.html", + "node_id":"mrs_01_24248.xml", "product_code":"mrs", - "code":"154", + "code":"200", "des":"This section describes the data definition language (DDL) of Kafka as a source or sink table, as well as the WITH parameters and example code for creating a table, and pr", "doc_type":"cmpntguide", "kw":"Interconnecting FlinkServer with Kafka,Interconnecting FlinkServer with External Components,Componen", + "search_title":"", + "metedata":[ + { + "documenttype":"usermanual", + "prodname":"mrs" + } + ], "title":"Interconnecting FlinkServer with Kafka", "githuburl":"" }, { "uri":"mrs_01_24256.html", + "node_id":"mrs_01_24256.xml", "product_code":"mrs", - "code":"155", + "code":"201", "des":"If a Flink task stops unexpectedly, some directories may reside in the ZooKeeper and HDFS services. To delete the residual directories, set ClearUpEnabled to true.A Flink", "doc_type":"usermanual", "kw":"Deleting Residual Information About Flink Tasks,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Deleting Residual Information About Flink Tasks", "githuburl":"" }, { "uri":"mrs_01_0596.html", + "node_id":"mrs_01_0596.xml", "product_code":"mrs", - "code":"156", + "code":"202", "des":"Log path:Run logs of a Flink job: ${BIGDATA_DATA_HOME}/hadoop/data${i}/nm/containerlogs/application_${appid}/container_{$contid}The logs of executing tasks are stored in ", "doc_type":"usermanual", "kw":"Flink Log Overview,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flink Log Overview", "githuburl":"" }, { "uri":"mrs_01_0597.html", + "node_id":"mrs_01_0597.xml", "product_code":"mrs", - "code":"157", + "code":"203", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Flink Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flink Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1587.html", + "node_id":"mrs_01_1587.xml", "product_code":"mrs", - "code":"158", + "code":"204", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Optimization DataStream", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimization DataStream", "githuburl":"" }, { "uri":"mrs_01_1588.html", + "node_id":"mrs_01_1588.xml", "product_code":"mrs", - "code":"159", + "code":"205", "des":"The computing of Flink depends on memory. If the memory is insufficient, the performance of Flink will be greatly deteriorated. One solution is to monitor garbage collect", "doc_type":"usermanual", "kw":"Memory Configuration Optimization,Optimization DataStream,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Memory Configuration Optimization", "githuburl":"" }, { "uri":"mrs_01_1589.html", + "node_id":"mrs_01_1589.xml", "product_code":"mrs", - "code":"160", + "code":"206", "des":"The degree of parallelism (DOP) indicates the number of tasks to be executed concurrently. It determines the number of data blocks after the operation. Configuring the DO", "doc_type":"usermanual", "kw":"Configuring DOP,Optimization DataStream,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring DOP", "githuburl":"" }, { "uri":"mrs_01_1590.html", + "node_id":"mrs_01_1590.xml", "product_code":"mrs", - "code":"161", + "code":"207", "des":"In Flink on Yarn mode, there are JobManagers and TaskManagers. JobManagers and TaskManagers schedule and run tasks.Therefore, configuring parameters of JobManagers and Ta", "doc_type":"usermanual", - "kw":"Configuring Process Parameters,Optimization DataStream,Component Operation Guide (LTS)", - "title":"Configuring Process Parameters", + "kw":"Configuring Flink Process Parameters,Optimization DataStream,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Configuring Flink Process Parameters", "githuburl":"" }, { "uri":"mrs_01_1591.html", + "node_id":"mrs_01_1591.xml", "product_code":"mrs", - "code":"162", + "code":"208", "des":"The divide of tasks can be optimized by optimizing the partitioning method. If data skew occurs in a certain task, the whole execution process is delayed. Therefore, when", "doc_type":"usermanual", "kw":"Optimizing the Design of Partitioning Method,Optimization DataStream,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing the Design of Partitioning Method", "githuburl":"" }, { "uri":"mrs_01_1592.html", + "node_id":"mrs_01_1592.xml", "product_code":"mrs", - "code":"163", + "code":"209", "des":"The communication of Flink is based on Netty network. The network performance determines the data switching speed and task execution efficiency. 
Therefore, the performanc", "doc_type":"usermanual", "kw":"Configuring the Netty Network Communication,Optimization DataStream,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Netty Network Communication", "githuburl":"" }, { "uri":"mrs_01_1593.html", + "node_id":"mrs_01_1593.xml", "product_code":"mrs", - "code":"164", + "code":"210", "des":"If data skew occurs (certain data volume is large), the execution time of tasks is inconsistent even if no garbage collection is performed.Redefine keys. Use keys of smal", "doc_type":"usermanual", "kw":"Summarization,Optimization DataStream,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Summarization", "githuburl":"" }, { "uri":"mrs_01_0598.html", + "node_id":"mrs_01_0598.xml", "product_code":"mrs", - "code":"165", + "code":"211", "des":"Before running the Flink shell commands, perform the following steps:source /opt/client/bigdata_envkinit Service user", "doc_type":"usermanual", "kw":"Common Flink Shell Commands,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Flink Shell Commands", "githuburl":"" }, { "uri":"mrs_01_0620.html", + "node_id":"mrs_01_0620.xml", "product_code":"mrs", - "code":"166", + "code":"212", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Reference", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Reference", "githuburl":"" }, { "uri":"mrs_01_0621.html", + "node_id":"mrs_01_0621.xml", "product_code":"mrs", - "code":"167", + "code":"213", "des":"Generate the generate_keystore.sh script based on the sample code and save the script to the bin directory on the Flink client.Run the sh generate_keystore.sh c", "doc_type":"usermanual", "kw":"Example of Issuing a Certificate,Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Example of Issuing a Certificate", "githuburl":"" }, + { + "uri":"mrs_01_24779.html", + "node_id":"mrs_01_24779.xml", + "product_code":"", + "code":"214", + "des":"Flink supports different restart policies to control whether and how to restart a job when a fault occurs. If no restart policy is specified, the cluster uses the default", + "doc_type":"", + "kw":"Flink Restart Policy,Using Flink,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Flink Restart Policy", + "githuburl":"" + }, { "uri":"mrs_01_0390.html", + "node_id":"mrs_01_0390.xml", "product_code":"mrs", - "code":"168", + "code":"215", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Flume", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Flume", "githuburl":"" }, { "uri":"mrs_01_0397.html", + "node_id":"mrs_01_0397.xml", "product_code":"mrs", - "code":"169", + "code":"216", "des":"You can use Flume to import collected log information to Kafka.A streaming cluster with Kerberos authentication enabled has been created.The Flume client has been install", "doc_type":"usermanual", "kw":"Using Flume from Scratch,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Flume from Scratch", "githuburl":"" }, { "uri":"mrs_01_0391.html", + "node_id":"mrs_01_0391.xml", "product_code":"mrs", - "code":"170", + "code":"217", "des":"Flume is a distributed, reliable, and highly available system for aggregating massive logs, which can efficiently collect, aggregate, and move massive log data from diffe", "doc_type":"usermanual", - "kw":"Overview,Using Flume,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"Flume Overview,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Flume Overview", "githuburl":"" }, { "uri":"mrs_01_1595.html", + "node_id":"mrs_01_1595.xml", "product_code":"mrs", - "code":"171", + "code":"218", "des":"To use Flume to collect logs, you must install the Flume client on a log host.A cluster with the Flume component has been created.The log host is in the same VPC and subn", "doc_type":"usermanual", "kw":"Installing the Flume Client on Clusters,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Installing the Flume Client on Clusters", "githuburl":"" }, { "uri":"mrs_01_0393.html", + "node_id":"mrs_01_0393.xml", "product_code":"mrs", - "code":"172", + "code":"219", "des":"You can view logs to locate faults.The Flume client has been installed.ls -lR flume-client-*A log file is shown as follows:In the log file, FlumeClient.log is the run log", "doc_type":"usermanual", "kw":"Viewing Flume Client Logs,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Flume Client Logs", "githuburl":"" }, { "uri":"mrs_01_0394.html", + "node_id":"mrs_01_0394.xml", "product_code":"mrs", - "code":"173", + "code":"220", "des":"You can stop and start the Flume client or uninstall the Flume client when the Flume data ingestion channel is not required.Stop the Flume client of the Flume role.Assume", "doc_type":"usermanual", "kw":"Stopping or Uninstalling the Flume Client,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Stopping or Uninstalling the Flume Client", "githuburl":"" }, { "uri":"mrs_01_0395.html", + "node_id":"mrs_01_0395.xml", "product_code":"mrs", - "code":"174", + "code":"221", "des":"You can use the encryption tool provided by the Flume client to encrypt some parameter values in the configuration file.The Flume client has been installed.cd fusioninsig", "doc_type":"usermanual", "kw":"Using the Encryption Tool 
of the Flume Client,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Encryption Tool of the Flume Client", "githuburl":"" }, { "uri":"mrs_01_1057.html", + "node_id":"mrs_01_1057.xml", "product_code":"mrs", - "code":"175", + "code":"222", "des":"This configuration guide describes how to configure common Flume services. For non-common Source, Channel, and Sink configuration, see the user manual provided by the Flu", "doc_type":"usermanual", "kw":"Flume Service Configuration Guide,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flume Service Configuration Guide", "githuburl":"" }, { "uri":"mrs_01_0396.html", + "node_id":"mrs_01_0396.xml", "product_code":"mrs", - "code":"176", + "code":"223", "des":"Some parameters can be configured on Manager.This section describes how to configure the sources, channels, and sinks of Flume, and modify the configuration items of each", "doc_type":"usermanual", "kw":"Flume Configuration Parameter Description,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flume Configuration Parameter Description", "githuburl":"" }, { "uri":"mrs_01_1058.html", + "node_id":"mrs_01_1058.xml", "product_code":"mrs", - "code":"177", + "code":"224", "des":"This section describes how to use environment variables in the properties.properties configuration file.The Flume service is running properly and the Flume client has bee", "doc_type":"usermanual", "kw":"Using Environment Variables in the properties.properties File,Using Flume,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Environment Variables in the properties.properties File", "githuburl":"" }, { "uri":"mrs_01_1059.html", + "node_id":"mrs_01_1059.xml", "product_code":"mrs", - "code":"178", + "code":"225", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Non-Encrypted Transmission", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Non-Encrypted Transmission", "githuburl":"" }, { "uri":"mrs_01_1060.html", + "node_id":"mrs_01_1060.xml", "product_code":"mrs", - "code":"179", + "code":"226", "des":"This section describes how to configure Flume server and client parameters after the cluster and the Flume service are installed to ensure proper running of the service.B", "doc_type":"usermanual", "kw":"Configuring Non-encrypted Transmission,Non-Encrypted Transmission,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Non-encrypted Transmission", "githuburl":"" }, { "uri":"mrs_01_1061.html", + "node_id":"mrs_01_1061.xml", "product_code":"mrs", - "code":"180", + "code":"227", "des":"This section describes how to use Flume client to collect static logs from a local host and save them to the topic list (test1) of Kafka.By default, the cluster network e", "doc_type":"usermanual", "kw":"Typical Scenario: Collecting Local Static Logs and Uploading Them to Kafka,Non-Encrypted Transmissio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to Kafka", "githuburl":"" }, { "uri":"mrs_01_1063.html", + "node_id":"mrs_01_1063.xml", "product_code":"mrs", - "code":"181", + "code":"228", "des":"This section describes how to use Flume client to collect static logs from a local PC and save them to the /flume/test directory on HDFS.By default, the cluster network e", "doc_type":"usermanual", "kw":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS,Non-Encrypted Transmission", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS", "githuburl":"" }, { "uri":"mrs_01_1064.html", + "node_id":"mrs_01_1064.xml", "product_code":"mrs", - "code":"182", + "code":"229", "des":"This section describes how to use Flume client to collect dynamic logs from a local PC and save them to the /flume/test directory on HDFS.By default, the cluster network ", "doc_type":"usermanual", "kw":"Typical Scenario: Collecting Local Dynamic Logs and Uploading Them to HDFS,Non-Encrypted Transmissio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Collecting Local Dynamic Logs and Uploading Them to HDFS", "githuburl":"" }, { "uri":"mrs_01_1065.html", + "node_id":"mrs_01_1065.xml", "product_code":"mrs", - "code":"183", + "code":"230", "des":"This section describes how to use Flume client to collect logs from the Topic list (test1) of Kafka and save them to the /flume/test directory on HDFS.By default, the clu", "doc_type":"usermanual", "kw":"Typical Scenario: Collecting Logs from Kafka and Uploading Them to HDFS,Non-Encrypted Transmission,C", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Collecting Logs from Kafka and Uploading Them to HDFS", "githuburl":"" }, { "uri":"mrs_01_1066.html", + "node_id":"mrs_01_1066.xml", "product_code":"mrs", - 
"code":"184", + "code":"231", "des":"This section describes how to use Flume client to collect logs from the Topic list (test1) of Kafka client and save them to the /flume/test directory on HDFS.By default, ", "doc_type":"usermanual", "kw":"Typical Scenario: Collecting Logs from Kafka and Uploading Them to HDFS Through the Flume Client,Non", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Collecting Logs from Kafka and Uploading Them to HDFS Through the Flume Client", "githuburl":"" }, { "uri":"mrs_01_1067.html", + "node_id":"mrs_01_1067.xml", "product_code":"mrs", - "code":"185", + "code":"232", "des":"This section describes how to use Flume client to collect static logs from a local computer and upload them to the flume_test table of HBase.By default, the cluster netwo", "doc_type":"usermanual", "kw":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HBase,Non-Encrypted Transmissio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HBase", "githuburl":"" }, { "uri":"mrs_01_1068.html", + "node_id":"mrs_01_1068.xml", "product_code":"mrs", - "code":"186", + "code":"233", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Encrypted Transmission", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Encrypted Transmission", "githuburl":"" }, { "uri":"mrs_01_1069.html", + "node_id":"mrs_01_1069.xml", "product_code":"mrs", - "code":"187", + "code":"234", "des":"This section describes how to configure the server and client parameters of the Flume service (including the Flume and MonitorServer roles) after the cluster is installed", "doc_type":"usermanual", "kw":"Configuring the Encrypted Transmission,Encrypted Transmission,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Encrypted Transmission", "githuburl":"" }, { "uri":"mrs_01_1070.html", + "node_id":"mrs_01_1070.xml", "product_code":"mrs", - "code":"188", + "code":"235", "des":"This section describes how to use Flume client to collect static logs from a local PC and save them to the /flume/test directory on HDFS.The cluster, HDFS and Flume servi", "doc_type":"usermanual", - "kw":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS,Encrypted Transmission,Com", - "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS", + "kw":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS Encrypted Transmission,Enc", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS Encrypted Transmission", "githuburl":"" }, { "uri":"mrs_01_1596.html", + "node_id":"mrs_01_1596.xml", "product_code":"mrs", - "code":"189", + "code":"236", "des":"The Flume client outside the FusionInsight cluster is a part of the end-to-end data collection. 
Both the Flume client outside the cluster and the Flume server in the clus", "doc_type":"usermanual", "kw":"Viewing Flume Client Monitoring Information,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Flume Client Monitoring Information", "githuburl":"" }, { "uri":"mrs_01_1071.html", + "node_id":"mrs_01_1071.xml", "product_code":"mrs", - "code":"190", + "code":"237", "des":"This section describes how to connect to Kafka using the Flume client in security mode.Set keyTab and principal based on site requirements. The configured principal must ", "doc_type":"usermanual", "kw":"Connecting Flume to Kafka in Security Mode,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Connecting Flume to Kafka in Security Mode", "githuburl":"" }, { "uri":"mrs_01_1072.html", + "node_id":"mrs_01_1072.xml", "product_code":"mrs", - "code":"191", + "code":"238", "des":"This section describes how to use Flume to connect to Hive (version 3.1.0) in the cluster.Flume and Hive have been correctly installed in the cluster. The services are ru", "doc_type":"usermanual", "kw":"Connecting Flume with Hive in Security Mode,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Connecting Flume with Hive in Security Mode", "githuburl":"" }, { "uri":"mrs_01_1073.html", + "node_id":"mrs_01_1073.xml", "product_code":"mrs", - "code":"192", + "code":"239", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Configuring the Flume Service Model", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Flume Service Model", "githuburl":"" }, { "uri":"mrs_01_1074.html", + "node_id":"mrs_01_1074.xml", "product_code":"mrs", - "code":"193", + "code":"240", "des":"Guide a reasonable Flume service configuration by providing performance differences between Flume common modules, to avoid a nonstandard overall service performance cause", "doc_type":"usermanual", - "kw":"Overview,Configuring the Flume Service Model,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"Flume Service Model Overview,Configuring the Flume Service Model,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Flume Service Model Overview", "githuburl":"" }, { "uri":"mrs_01_1075.html", + "node_id":"mrs_01_1075.xml", "product_code":"mrs", - "code":"194", + "code":"241", "des":"During Flume service configuration and module selection, the ultimate throughput of a sink must be greater than the maximum throughput of a source. 
Otherwise, in extreme ", "doc_type":"usermanual", "kw":"Service Model Configuration Guide,Configuring the Flume Service Model,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Service Model Configuration Guide", "githuburl":"" }, { "uri":"mrs_01_1081.html", + "node_id":"mrs_01_1081.xml", "product_code":"mrs", - "code":"195", + "code":"242", "des":"Log path: The default path of Flume log files is /var/log/Bigdata/Role name.FlumeServer: /var/log/Bigdata/flume/flumeFlumeClient: /var/log/Bigdata/flume-client-n/flumeMon", "doc_type":"usermanual", "kw":"Introduction to Flume Logs,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to Flume Logs", "githuburl":"" }, { "uri":"mrs_01_1082.html", + "node_id":"mrs_01_1082.xml", "product_code":"mrs", - "code":"196", + "code":"243", "des":"This section describes how to join and log out of a cgroup, query the cgroup status, and change the cgroup CPU threshold.Join CgroupAssume that the Flume client installat", "doc_type":"usermanual", "kw":"Flume Client Cgroup Usage Guide,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Flume Client Cgroup Usage Guide", "githuburl":"" }, { "uri":"mrs_01_1083.html", + "node_id":"mrs_01_1083.xml", "product_code":"mrs", - "code":"197", + "code":"244", "des":"This section describes how to perform secondary development for third-party plug-ins.You have obtained the third-party JAR package.You have installed Flume server or clie", "doc_type":"usermanual", "kw":"Secondary Development Guide for Flume Third-Party Plug-ins,Using Flume,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Secondary Development Guide for Flume Third-Party Plug-ins", "githuburl":"" }, { "uri":"mrs_01_1598.html", + "node_id":"mrs_01_1598.xml", "product_code":"mrs", - "code":"198", + "code":"245", "des":"Flume logs are stored in /var/log/Bigdata/flume/flume/flumeServer.log. Most data transmission exceptions and data transmission failures are recorded in logs. You can run ", "doc_type":"usermanual", "kw":"Common Issues About Flume,Using Flume,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Flume", "githuburl":"" }, { "uri":"mrs_01_0500.html", + "node_id":"mrs_01_0500.xml", "product_code":"mrs", - "code":"199", + "code":"246", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using HBase", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HBase", "githuburl":"" }, { "uri":"mrs_01_0368.html", + "node_id":"mrs_01_0368.xml", "product_code":"mrs", - "code":"200", + "code":"247", "des":"HBase is a column-based distributed storage system that features high reliability, performance, and scalability. 
This section describes how to use HBase from scratch, inc", "doc_type":"usermanual", "kw":"Using HBase from Scratch,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HBase from Scratch", "githuburl":"" }, { "uri":"mrs_01_1608.html", + "node_id":"mrs_01_1608.xml", "product_code":"mrs", - "code":"201", + "code":"248", "des":"This section guides the system administrator to create and configure an HBase role on Manager. The HBase role can set HBase administrator permissions and read (R), write ", "doc_type":"usermanual", "kw":"Creating HBase Roles,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating HBase Roles", "githuburl":"" }, { "uri":"mrs_01_24041.html", + "node_id":"mrs_01_24041.xml", "product_code":"mrs", - "code":"202", + "code":"249", "des":"This section describes how to use the HBase client in an O&M scenario or a service scenario.The client has been installed. For example, the installation directory is /opt", "doc_type":"usermanual", "kw":"Using an HBase Client,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using an HBase Client", "githuburl":"" }, { "uri":"mrs_01_0501.html", + "node_id":"mrs_01_0501.xml", "product_code":"mrs", - "code":"203", + "code":"250", "des":"As a key feature to ensure high availability of the HBase cluster system, HBase cluster replication provides HBase with remote data replication in real time. It provides ", "doc_type":"usermanual", "kw":"Configuring HBase Replication,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HBase Replication", "githuburl":"" }, { "uri":"mrs_01_0502.html", + "node_id":"mrs_01_0502.xml", "product_code":"mrs", - "code":"204", + "code":"251", "des":"DistCp is used to copy the data stored on HDFS from a cluster to another cluster. DistCp depends on the cross-cluster copy function, which is disabled by default. 
This fu", "doc_type":"usermanual", "kw":"Enabling Cross-Cluster Copy,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Enabling Cross-Cluster Copy", "githuburl":"" }, { "uri":"mrs_01_0493.html", + "node_id":"mrs_01_0493.xml", "product_code":"mrs", - "code":"205", + "code":"252", "des":"You can create tables and indexes using createTable of org.apache.luna.client.LunaAdmin and specify table names, column family names, requests for creating indexes, as we", "doc_type":"usermanual", "kw":"Supporting Full-Text Index,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Supporting Full-Text Index", "githuburl":"" }, { "uri":"mrs_01_0510.html", + "node_id":"mrs_01_0510.xml", "product_code":"mrs", - "code":"206", + "code":"253", "des":"Active and standby clusters have been installed and started.Time is consistent between the active and standby clusters and the NTP service on the active and standby clust", "doc_type":"usermanual", "kw":"Using the ReplicationSyncUp Tool,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the ReplicationSyncUp Tool", "githuburl":"" }, + { + "uri":"mrs_01_24579.html", + "node_id":"mrs_01_24579.xml", + "product_code":"", + "code":"254", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"In-House Enhanced Phoenix", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"In-House Enhanced Phoenix", + "githuburl":"" + }, + { + "uri":"mrs_01_24580.html", + "node_id":"mrs_01_24580.xml", + "product_code":"", + "code":"255", + "des":"Phoenix provides CsvBulkloadTool, a batch data import tool. This tool supports import of user-defined delimiters. Specifically, users can use any visible characters withi", + "doc_type":"", + "kw":"CsvBulkloadTool Supports Parsing User-Defined Delimiters in Data Files,In-House Enhanced Phoenix,Com", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CsvBulkloadTool Supports Parsing User-Defined Delimiters in Data Files", + "githuburl":"" + }, { "uri":"mrs_01_1609.html", + "node_id":"mrs_01_1609.xml", "product_code":"mrs", - "code":"207", + "code":"256", "des":"HBase disaster recovery (DR), a key feature that is used to ensure high availability (HA) of the HBase cluster system, provides the real-time remote DR function for HBase", "doc_type":"usermanual", "kw":"Configuring HBase DR,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HBase DR", "githuburl":"" }, { "uri":"mrs_01_1610.html", + "node_id":"mrs_01_1610.xml", "product_code":"mrs", - "code":"208", + "code":"257", "des":"The system administrator can configure HBase cluster DR to improve system availability. 
If the active cluster in the DR environment is faulty and the connection to the HB", "doc_type":"usermanual", "kw":"Performing an HBase DR Service Switchover,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Performing an HBase DR Service Switchover", "githuburl":"" }, { - "uri":"en-us_topic_0000001295898904.html", + "uri":"mrs_01_24112.html", + "node_id":"mrs_01_24112.xml", "product_code":"mrs", - "code":"209", + "code":"258", "des":"HBase encodes data blocks in HFiles to reduce duplicate keys in KeyValues, reducing used space. Currently, the following data block encoding modes are supported: NONE, PR", "doc_type":"usermanual", "kw":"Configuring HBase Data Compression and Encoding,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HBase Data Compression and Encoding", "githuburl":"" }, { "uri":"mrs_01_1611.html", + "node_id":"mrs_01_1611.xml", "product_code":"mrs", - "code":"210", + "code":"259", "des":"The HBase cluster in the current environment is a DR cluster. Due to some reasons, the active and standby clusters need to be switched over. That is, the standby cluster ", "doc_type":"usermanual", "kw":"Performing an HBase DR Active/Standby Cluster Switchover,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Performing an HBase DR Active/Standby Cluster Switchover", "githuburl":"" }, { "uri":"mrs_01_1612.html", + "node_id":"mrs_01_1612.xml", "product_code":"mrs", - "code":"211", + "code":"260", "des":"The Apache HBase official website provides the function of importing data in batches. For details, see the description of the Import and ImportTsv tools at http://hbase.a", "doc_type":"usermanual", "kw":"Community BulkLoad Tool,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Community BulkLoad Tool", "githuburl":"" }, { "uri":"mrs_01_1631.html", + "node_id":"mrs_01_1631.xml", "product_code":"mrs", - "code":"212", + "code":"261", "des":"In the actual application scenario, data in various sizes needs to be stored, for example, image data and documents. 
Data whose size is smaller than 10 MB can be stored i", "doc_type":"usermanual", "kw":"Configuring the MOB,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the MOB", "githuburl":"" }, { "uri":"mrs_01_1009.html", + "node_id":"mrs_01_1009.xml", "product_code":"mrs", - "code":"213", + "code":"262", "des":"This topic provides the procedure to configure the secure HBase replication during cross-realm Kerberos setup in security mode.Mapping for all the FQDNs to their realms s", "doc_type":"usermanual", "kw":"Configuring Secure HBase Replication,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Secure HBase Replication", "githuburl":"" }, { "uri":"mrs_01_1010.html", + "node_id":"mrs_01_1010.xml", "product_code":"mrs", - "code":"214", + "code":"263", "des":"In a faulty environment, there are possibilities that a region may be stuck in transition for longer duration due to various reasons like slow region server response, uns", "doc_type":"usermanual", "kw":"Configuring Region In Transition Recovery Chore Service,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Region In Transition Recovery Chore Service", "githuburl":"" }, { "uri":"mrs_01_1635.html", + "node_id":"mrs_01_1635.xml", "product_code":"mrs", - "code":"215", + "code":"264", "des":"HIndex enables HBase indexing based on specific column values, making the retrieval of data highly efficient and fast.Column families are separated by semicolons (;).Colu", "doc_type":"usermanual", "kw":"Using a Secondary Index,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using a Secondary Index", "githuburl":"" }, { "uri":"mrs_01_1056.html", + "node_id":"mrs_01_1056.xml", "product_code":"mrs", - "code":"216", + "code":"265", "des":"Log path: The default storage path of HBase logs is /var/log/Bigdata/hbase/Role name.HMaster: /var/log/Bigdata/hbase/hm (run logs) and /var/log/Bigdata/audit/hbase/hm (au", "doc_type":"usermanual", "kw":"HBase Log Overview,Using HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HBase Log Overview", "githuburl":"" }, { "uri":"mrs_01_1013.html", + "node_id":"mrs_01_1013.xml", "product_code":"mrs", - "code":"217", + "code":"266", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HBase Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HBase Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1636.html", + "node_id":"mrs_01_1636.xml", "product_code":"mrs", - "code":"218", + "code":"267", "des":"BulkLoad uses MapReduce jobs to directly generate files that comply with the internal data format of HBase, and then loads the generated StoreFiles to a running cluster. 
", "doc_type":"usermanual", "kw":"Improving the BulkLoad Efficiency,HBase Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving the BulkLoad Efficiency", "githuburl":"" }, { "uri":"mrs_01_1637.html", + "node_id":"mrs_01_1637.xml", "product_code":"mrs", - "code":"219", + "code":"268", "des":"In the scenario where a large number of requests are continuously put, setting the following two parameters to false can greatly improve the Put performance.hbase.regions", "doc_type":"usermanual", "kw":"Improving Put Performance,HBase Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving Put Performance", "githuburl":"" }, { "uri":"mrs_01_1016.html", + "node_id":"mrs_01_1016.xml", "product_code":"mrs", - "code":"220", + "code":"269", "des":"HBase has many configuration parameters related to read and write performance. The configuration parameters need to be adjusted based on the read/write request loads. Thi", "doc_type":"usermanual", "kw":"Optimizing Put and Scan Performance,HBase Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Put and Scan Performance", "githuburl":"" }, { "uri":"mrs_01_1017.html", + "node_id":"mrs_01_1017.xml", "product_code":"mrs", - "code":"221", + "code":"270", "des":"Scenarios where data needs to be written to HBase in real time, or large-scale and consecutive put scenariosThe HBase put or delete interface can be used to save data to ", "doc_type":"usermanual", "kw":"Improving Real-time Data Write Performance,HBase Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving Real-time Data Write Performance", "githuburl":"" }, { "uri":"mrs_01_1018.html", + "node_id":"mrs_01_1018.xml", "product_code":"mrs", - "code":"222", + "code":"271", "des":"HBase data needs to be read.The get or scan interface of HBase has been invoked and data is read in real time from HBase.Data reading server tuningParameter portal:Go to ", "doc_type":"usermanual", "kw":"Improving Real-time Data Read Performance,HBase Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving Real-time Data Read Performance", "githuburl":"" }, { "uri":"mrs_01_1019.html", + "node_id":"mrs_01_1019.xml", "product_code":"mrs", - "code":"223", + "code":"272", "des":"When the number of clusters reaches a certain scale, the default settings of the Java virtual machine (JVM) cannot meet the cluster requirements. 
In this case, the cluste", "doc_type":"usermanual", - "kw":"Optimizing JVM Parameters,HBase Performance Tuning,Component Operation Guide (LTS)", - "title":"Optimizing JVM Parameters", + "kw":"Optimizing HBase JVM Parameters,HBase Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Optimizing HBase JVM Parameters", "githuburl":"" }, { "uri":"mrs_01_1638.html", + "node_id":"mrs_01_1638.xml", "product_code":"mrs", - "code":"224", + "code":"273", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About HBase", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About HBase", "githuburl":"" }, { "uri":"mrs_01_1639.html", + "node_id":"mrs_01_1639.xml", "product_code":"mrs", - "code":"225", + "code":"274", "des":"A HBase server is faulty and cannot provide services. In this case, when a table operation is performed on the HBase client, why is the operation suspended and no respons", "doc_type":"usermanual", "kw":"Why Does a Client Keep Failing to Connect to a Server for a Long Time?,Common Issues About HBase,Com", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does a Client Keep Failing to Connect to a Server for a Long Time?", "githuburl":"" }, { "uri":"mrs_01_1640.html", + "node_id":"mrs_01_1640.xml", "product_code":"mrs", - "code":"226", + "code":"275", "des":"Why submitted operations fail by stopping BulkLoad on the client during BulkLoad data importing?When BulkLoad is enabled on the client, a partitioner file is generated an", "doc_type":"usermanual", "kw":"Operation Failures Occur in Stopping BulkLoad On the Client,Common Issues About HBase,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Operation Failures Occur in Stopping BulkLoad On the Client", "githuburl":"" }, { "uri":"mrs_01_1641.html", + "node_id":"mrs_01_1641.xml", "product_code":"mrs", - "code":"227", + "code":"276", "des":"When HBase consecutively deletes and creates the same table, why may a table creation exception occur?Execution process: Disable Table > Drop Table > Create Table > Disab", "doc_type":"usermanual", "kw":"Why May a Table Creation Exception Occur When HBase Deletes or Creates the Same Table Consecutively?", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why May a Table Creation Exception Occur When HBase Deletes or Creates the Same Table Consecutively?", "githuburl":"" }, { "uri":"mrs_01_1642.html", + "node_id":"mrs_01_1642.xml", "product_code":"mrs", - "code":"228", + "code":"277", "des":"Why other services become unstable if HBase sets up a large number of connections over the network port?When the OS command lsof or netstat is run, it is found that many ", "doc_type":"usermanual", "kw":"Why Other Services Become Unstable If HBase Sets up A Large Number of Connections over the Network P", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Other Services Become Unstable If HBase Sets up A Large Number of Connections over 
the Network Port?", "githuburl":"" }, { "uri":"mrs_01_1643.html", + "node_id":"mrs_01_1643.xml", "product_code":"mrs", - "code":"229", + "code":"278", "des":"The HBase bulkLoad task (a single table contains 26 TB data) has 210,000 maps and 10,000 reduce tasks, and the task fails.ZooKeeper I/O bottleneck observation methods:On ", "doc_type":"usermanual", "kw":"Why Does the HBase BulkLoad Task (One Table Has 26 TB Data) Consisting of 210,000 Map Tasks and 10,0", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the HBase BulkLoad Task (One Table Has 26 TB Data) Consisting of 210,000 Map Tasks and 10,000 Reduce Tasks Fail?", "githuburl":"" }, { "uri":"mrs_01_1644.html", + "node_id":"mrs_01_1644.xml", "product_code":"mrs", - "code":"230", + "code":"279", "des":"How do I restore a region in the RIT state for a long time?Log in to the HMaster WebUI, choose Procedure & Locks in the navigation tree, and check whether any process ID ", "doc_type":"usermanual", "kw":"How Do I Restore a Region in the RIT State for a Long Time?,Common Issues About HBase,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Restore a Region in the RIT State for a Long Time?", "githuburl":"" }, { "uri":"mrs_01_1645.html", + "node_id":"mrs_01_1645.xml", "product_code":"mrs", - "code":"231", + "code":"280", "des":"Why does HMaster exit due to timeout when waiting for the namespace table to go online?During the HMaster active/standby switchover or startup, HMaster performs WAL split", "doc_type":"usermanual", "kw":"Why Does HMaster Exits Due to Timeout When Waiting for the Namespace Table to Go Online?,Common Issu", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does HMaster Exits Due to Timeout When Waiting for the Namespace Table to Go Online?", "githuburl":"" }, { "uri":"mrs_01_1646.html", + "node_id":"mrs_01_1646.xml", "product_code":"mrs", - "code":"232", + "code":"281", "des":"Why does the following exception occur on the client when I use the HBase client to operate table data?At the same time, the following log is displayed on RegionServer:Th", "doc_type":"usermanual", "kw":"Why Does SocketTimeoutException Occur When a Client Queries HBase?,Common Issues About HBase,Compone", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does SocketTimeoutException Occur When a Client Queries HBase?", "githuburl":"" }, { "uri":"mrs_01_1647.html", + "node_id":"mrs_01_1647.xml", "product_code":"mrs", - "code":"233", + "code":"282", "des":"Why modified and deleted data can still be queried by using the scan command?Because of the scalability of HBase, all values specific to the versions in the queried colum", "doc_type":"usermanual", "kw":"Why Modified and Deleted Data Can Still Be Queried by Using the Scan Command?,Common Issues About HB", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Modified and Deleted Data Can Still Be Queried by Using the Scan Command?", "githuburl":"" }, { "uri":"mrs_01_1648.html", + "node_id":"mrs_01_1648.xml", "product_code":"mrs", - "code":"234", + "code":"283", "des":"Why \"java.lang.UnsatisfiedLinkError: Permission denied\" exception thrown while starting HBase shell?During HBase shell execution JRuby create temporary files under java.i", "doc_type":"usermanual", 
"kw":"Why \"java.lang.UnsatisfiedLinkError: Permission denied\" exception thrown while starting HBase shell?", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why \"java.lang.UnsatisfiedLinkError: Permission denied\" exception thrown while starting HBase shell?", "githuburl":"" }, { "uri":"mrs_01_1649.html", + "node_id":"mrs_01_1649.xml", "product_code":"mrs", - "code":"235", + "code":"284", "des":"When does the RegionServers listed under \"Dead Region Servers\" on HMaster WebUI gets cleared?When an online RegionServer goes down abruptly, it is displayed under \"Dead R", "doc_type":"usermanual", "kw":"When does the RegionServers listed under \"Dead Region Servers\" on HMaster WebUI gets cleared?,Common", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"When does the RegionServers listed under \"Dead Region Servers\" on HMaster WebUI gets cleared?", "githuburl":"" }, { "uri":"mrs_01_1650.html", + "node_id":"mrs_01_1650.xml", "product_code":"mrs", - "code":"236", + "code":"285", "des":"If the data to be imported by HBase bulkload has identical rowkeys, the data import is successful but identical query criteria produce different query results.Data with a", "doc_type":"usermanual", "kw":"Why Are Different Query Results Returned After I Use Same Query Criteria to Query Data Successfully ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Are Different Query Results Returned After I Use Same Query Criteria to Query Data Successfully Imported by HBase bulkload?", "githuburl":"" }, { "uri":"mrs_01_1651.html", + "node_id":"mrs_01_1651.xml", "product_code":"mrs", - "code":"237", + "code":"286", "des":"What should I do if I fail to create tables due to the FAILED_OPEN state of Regions?If a network, HDFS, or Active HMaster fault occurs during the creation of tables, some", "doc_type":"usermanual", "kw":"What Should I Do If I Fail to Create Tables Due to the FAILED_OPEN State of Regions?,Common Issues A", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Should I Do If I Fail to Create Tables Due to the FAILED_OPEN State of Regions?", "githuburl":"" }, { "uri":"mrs_01_1652.html", + "node_id":"mrs_01_1652.xml", "product_code":"mrs", - "code":"238", + "code":"287", "des":"In security mode, names of tables that failed to be created are unnecessarily retained in the table-lock node (default directory is /hbase/table-lock) of ZooKeeper. 
How d", "doc_type":"usermanual", "kw":"How Do I Delete Residual Table Names in the /hbase/table-lock Directory of ZooKeeper?,Common Issues ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Delete Residual Table Names in the /hbase/table-lock Directory of ZooKeeper?", "githuburl":"" }, { "uri":"mrs_01_1653.html", + "node_id":"mrs_01_1653.xml", "product_code":"mrs", - "code":"239", + "code":"288", "des":"Why does HBase become faulty when I set quota for the directory used by HBase in HDFS?The flush operation of a table is to write memstore data to HDFS.If the HDFS directo", "doc_type":"usermanual", "kw":"Why Does HBase Become Faulty When I Set a Quota for the Directory Used by HBase in HDFS?,Common Issu", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does HBase Become Faulty When I Set a Quota for the Directory Used by HBase in HDFS?", "githuburl":"" }, { "uri":"mrs_01_1654.html", + "node_id":"mrs_01_1654.xml", "product_code":"mrs", - "code":"240", + "code":"289", "des":"Why HMaster times out while waiting for namespace table to be assigned after rebuilding meta using OfflineMetaRepair tool and startups failed?HMaster abort with following", "doc_type":"usermanual", "kw":"Why HMaster Times Out While Waiting for Namespace Table to be Assigned After Rebuilding Meta Using O", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why HMaster Times Out While Waiting for Namespace Table to be Assigned After Rebuilding Meta Using OfflineMetaRepair Tool and Startups Failed", "githuburl":"" }, { "uri":"mrs_01_1655.html", + "node_id":"mrs_01_1655.xml", "product_code":"mrs", - "code":"241", + "code":"290", "des":"Why messages containing FileNotFoundException and no lease are frequently displayed in the HMaster logs during the WAL splitting process?During the WAL splitting process,", "doc_type":"usermanual", "kw":"Why Messages Containing FileNotFoundException and no lease Are Frequently Displayed in the HMaster L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Messages Containing FileNotFoundException and no lease Are Frequently Displayed in the HMaster Logs During the WAL Splitting Process?", "githuburl":"" }, { "uri":"mrs_01_1657.html", + "node_id":"mrs_01_1657.xml", "product_code":"mrs", - "code":"242", + "code":"291", "des":"When a tenant accesses Phoenix, a message is displayed indicating that the tenant has insufficient rights.You need to associate the HBase service and Yarn queues when cre", "doc_type":"usermanual", "kw":"Insufficient Rights When a Tenant Accesses Phoenix,Common Issues About HBase,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Insufficient Rights When a Tenant Accesses Phoenix", "githuburl":"" }, { "uri":"mrs_01_1659.html", + "node_id":"mrs_01_1659.xml", "product_code":"mrs", - "code":"243", + "code":"292", "des":"The system automatically rolls back data after an HBase recovery task fails. If \"Rollback recovery failed\" is displayed, the rollback fails. 
After the rollback fails, dat", "doc_type":"usermanual", "kw":"What Can I Do When HBase Fails to Recover a Task and a Message Is Displayed Stating \"Rollback recove", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do When HBase Fails to Recover a Task and a Message Is Displayed Stating \"Rollback recovery failed\"?", "githuburl":"" }, { "uri":"mrs_01_1660.html", + "node_id":"mrs_01_1660.xml", "product_code":"mrs", - "code":"244", + "code":"293", "des":"When the HBaseFsck tool is used to check the region status, if the log contains ERROR: (regions region1 and region2) There is an overlap in the region chain or ERROR: (re", "doc_type":"usermanual", "kw":"How Do I Fix Region Overlapping?,Common Issues About HBase,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Fix Region Overlapping?", "githuburl":"" }, { "uri":"mrs_01_1661.html", + "node_id":"mrs_01_1661.xml", "product_code":"mrs", - "code":"245", + "code":"294", "des":"Check the hbase-omm-*.out log of the node where RegionServer fails to be started. It is found that the log contains An error report file with more information is saved as", "doc_type":"usermanual", "kw":"Why Does RegionServer Fail to Be Started When GC Parameters Xms and Xmx of HBase RegionServer Are Se", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does RegionServer Fail to Be Started When GC Parameters Xms and Xmx of HBase RegionServer Are Set to 31 GB?", "githuburl":"" }, { "uri":"mrs_01_0625.html", + "node_id":"mrs_01_0625.xml", "product_code":"mrs", - "code":"246", + "code":"295", "des":"Why does the LoadIncrementalHFiles tool fail to be executed and \"Permission denied\" is displayed when a Linux user is manually created in a normal cluster and DataNode in", "doc_type":"usermanual", "kw":"Why Does the LoadIncrementalHFiles Tool Fail to Be Executed and \"Permission denied\" Is Displayed Whe", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the LoadIncrementalHFiles Tool Fail to Be Executed and \"Permission denied\" Is Displayed When Nodes in a Cluster Are Used to Import Data in Batches?", "githuburl":"" }, { "uri":"mrs_01_2210.html", + "node_id":"mrs_01_2210.xml", "product_code":"mrs", - "code":"247", + "code":"296", "des":"When the sqlline script is used on the client, the error message \"import argparse\" is displayed.", "doc_type":"usermanual", "kw":"Why Is the Error Message \"import argparse\" Displayed When the Phoenix sqlline Script Is Used?,Common", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is the Error Message \"import argparse\" Displayed When the Phoenix sqlline Script Is Used?", "githuburl":"" }, { "uri":"mrs_01_2211.html", + "node_id":"mrs_01_2211.xml", "product_code":"mrs", - "code":"248", + "code":"297", "des":"When the indexed field data is updated, if a batch of data exists in the user table, the BulkLoad tool cannot update the global and partial mutable indexes.Problem Analys", "doc_type":"usermanual", "kw":"How Do I Deal with the Restrictions of the Phoenix BulkLoad Tool?,Common Issues About HBase,Componen", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Deal with the Restrictions of the Phoenix BulkLoad 
Tool?", "githuburl":"" }, { "uri":"mrs_01_2212.html", + "node_id":"mrs_01_2212.xml", "product_code":"mrs", - "code":"249", + "code":"298", "des":"When CTBase accesses the HBase service with the Ranger plug-ins enabled and you are creating a cluster table, a message is displayed indicating that the permission is ins", "doc_type":"usermanual", "kw":"Why a Message Is Displayed Indicating that the Permission is Insufficient When CTBase Connects to th", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why a Message Is Displayed Indicating that the Permission is Insufficient When CTBase Connects to the Ranger Plug-ins?", "githuburl":"" }, { "uri":"mrs_01_0790.html", + "node_id":"mrs_01_0790.xml", "product_code":"mrs", - "code":"250", + "code":"299", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using HDFS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HDFS", "githuburl":"" }, { "uri":"mrs_01_0791.html", + "node_id":"mrs_01_0791.xml", "product_code":"mrs", - "code":"251", + "code":"300", "des":"In HDFS, each file object needs to register corresponding information in the NameNode and occupies certain storage space. As the number of files increases, if the origina", "doc_type":"usermanual", "kw":"Configuring Memory Management,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Memory Management", "githuburl":"" }, { "uri":"mrs_01_1662.html", + "node_id":"mrs_01_1662.xml", "product_code":"mrs", - "code":"252", + "code":"301", "des":"This section describes how to create and configure an HDFS role on FusionInsight Manager. The HDFS role is granted the rights to read, write, and execute HDFS directories", "doc_type":"usermanual", "kw":"Creating an HDFS Role,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating an HDFS Role", "githuburl":"" }, { "uri":"mrs_01_1663.html", + "node_id":"mrs_01_1663.xml", "product_code":"mrs", - "code":"253", + "code":"302", "des":"This section describes how to use the HDFS client in an O&M scenario or service scenario.The client has been installed.For example, the installation directory is /opt/had", "doc_type":"usermanual", "kw":"Using the HDFS Client,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the HDFS Client", "githuburl":"" }, { "uri":"mrs_01_0794.html", + "node_id":"mrs_01_0794.xml", "product_code":"mrs", - "code":"254", + "code":"303", "des":"DistCp is a tool used to perform large-amount data replication between clusters or in a cluster. 
It uses MapReduce tasks to implement distributed copy of a large amount o", "doc_type":"usermanual", "kw":"Running the DistCp Command,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Running the DistCp Command", "githuburl":"" }, { "uri":"mrs_01_0795.html", + "node_id":"mrs_01_0795.xml", "product_code":"mrs", - "code":"255", + "code":"304", "des":"This section describes the directory structure in HDFS, as shown in the following table.", "doc_type":"usermanual", "kw":"Overview of HDFS File System Directories,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Overview of HDFS File System Directories", "githuburl":"" }, { "uri":"mrs_01_1664.html", + "node_id":"mrs_01_1664.xml", "product_code":"mrs", - "code":"256", + "code":"305", "des":"If the storage directory defined by the HDFS DataNode is incorrect or the HDFS storage plan changes, the system administrator needs to modify the DataNode storage directo", "doc_type":"usermanual", "kw":"Changing the DataNode Storage Directory,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Changing the DataNode Storage Directory", "githuburl":"" }, { "uri":"mrs_01_0797.html", + "node_id":"mrs_01_0797.xml", "product_code":"mrs", - "code":"257", + "code":"306", "des":"The permission for some HDFS directories is 777 or 750 by default, which brings potential security risks. You are advised to modify the permission for the HDFS directorie", "doc_type":"usermanual", "kw":"Configuring HDFS Directory Permission,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HDFS Directory Permission", "githuburl":"" }, { "uri":"mrs_01_1665.html", + "node_id":"mrs_01_1665.xml", "product_code":"mrs", - "code":"258", + "code":"307", "des":"Before deploying a cluster, you can deploy a Network File System (NFS) server based on requirements to store NameNode metadata to enhance data reliability.If the NFS serv", "doc_type":"usermanual", "kw":"Configuring NFS,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring NFS", "githuburl":"" }, { "uri":"mrs_01_0799.html", + "node_id":"mrs_01_0799.xml", "product_code":"mrs", - "code":"259", + "code":"308", "des":"In HDFS, DataNode stores user files and directories as blocks, and file objects are generated on the NameNode to map each file, directory, and block on the DataNode.The f", "doc_type":"usermanual", "kw":"Planning HDFS Capacity,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Planning HDFS Capacity", "githuburl":"" }, { "uri":"mrs_01_0801.html", + "node_id":"mrs_01_0801.xml", "product_code":"mrs", - "code":"260", + "code":"309", "des":"When you open an HDFS file, an error occurs due to the limit on the number of file handles. 
Information similar to the following is displayed.You can contact the system a", "doc_type":"usermanual", "kw":"Configuring ulimit for HBase and HDFS,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring ulimit for HBase and HDFS", "githuburl":"" }, { "uri":"mrs_01_1667.html", + "node_id":"mrs_01_1667.xml", "product_code":"mrs", - "code":"261", + "code":"310", "des":"In the HDFS cluster, unbalanced disk usage among DataNodes may occur, for example, when new DataNodes are added to the cluster. Unbalanced disk usage may result in multip", "doc_type":"usermanual", "kw":"Balancing DataNode Capacity,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Balancing DataNode Capacity", "githuburl":"" }, { "uri":"mrs_01_0804.html", + "node_id":"mrs_01_0804.xml", "product_code":"mrs", - "code":"262", + "code":"311", "des":"By default, NameNode randomly selects a DataNode to write files. If the disk capacity of some DataNodes in a cluster is inconsistent (the total disk capacity of some node", "doc_type":"usermanual", "kw":"Configuring Replica Replacement Policy for Heterogeneous Capacity Among DataNodes,Using HDFS,Compone", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Replica Replacement Policy for Heterogeneous Capacity Among DataNodes", "githuburl":"" }, { "uri":"mrs_01_0805.html", + "node_id":"mrs_01_0805.xml", "product_code":"mrs", - "code":"263", + "code":"312", "des":"Generally, multiple services are deployed in a cluster, and the storage of most services depends on the HDFS file system. Different components such as Spark and Yarn or c", "doc_type":"usermanual", "kw":"Configuring the Number of Files in a Single HDFS Directory,Using HDFS,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Number of Files in a Single HDFS Directory", "githuburl":"" }, { "uri":"mrs_01_0806.html", + "node_id":"mrs_01_0806.xml", "product_code":"mrs", - "code":"264", + "code":"313", "des":"On HDFS, deleted files are moved to the recycle bin (trash can) so that the data deleted by mistake can be restored.You can set the time threshold for storing files in th", "doc_type":"usermanual", "kw":"Configuring the Recycle Bin Mechanism,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Recycle Bin Mechanism", "githuburl":"" }, { "uri":"mrs_01_0807.html", + "node_id":"mrs_01_0807.xml", "product_code":"mrs", - "code":"265", + "code":"314", "des":"HDFS allows users to modify the default permissions of files and directories. The default mask provided by the HDFS for creating file and directory permissions is 022. 
If", "doc_type":"usermanual", "kw":"Setting Permissions on Files and Directories,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Setting Permissions on Files and Directories", "githuburl":"" }, { "uri":"mrs_01_0808.html", + "node_id":"mrs_01_0808.xml", "product_code":"mrs", - "code":"266", + "code":"315", "des":"In security mode, users can flexibly set the maximum token lifetime and token renewal interval in HDFS based on cluster requirements.Navigation path for setting parameter", "doc_type":"usermanual", "kw":"Setting the Maximum Lifetime and Renewal Interval of a Token,Using HDFS,Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Setting the Maximum Lifetime and Renewal Interval of a Token", "githuburl":"" }, { "uri":"mrs_01_1669.html", + "node_id":"mrs_01_1669.xml", "product_code":"mrs", - "code":"267", + "code":"316", "des":"In the open source version, if multiple data storage volumes are configured for a DataNode, the DataNode stops providing services by default if one of the volumes is dama", "doc_type":"usermanual", "kw":"Configuring the Damaged Disk Volume,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Damaged Disk Volume", "githuburl":"" }, { "uri":"mrs_01_0810.html", + "node_id":"mrs_01_0810.xml", "product_code":"mrs", - "code":"268", + "code":"317", "des":"Encrypted channel is an encryption protocol of remote procedure call (RPC) in HDFS. When a user invokes RPC, the user's login name will be transmitted to RPC through RPC ", "doc_type":"usermanual", "kw":"Configuring Encrypted Channels,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Encrypted Channels", "githuburl":"" }, { "uri":"mrs_01_0811.html", + "node_id":"mrs_01_0811.xml", "product_code":"mrs", - "code":"269", + "code":"318", "des":"Clients probably encounter running errors when the network is not stable. 
Users can adjust the following parameter values to improve the running efficiency.Go to the All ", "doc_type":"usermanual", "kw":"Reducing the Probability of Abnormal Client Application Operation When the Network Is Not Stable,Usi", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Reducing the Probability of Abnormal Client Application Operation When the Network Is Not Stable", "githuburl":"" }, { "uri":"mrs_01_1670.html", + "node_id":"mrs_01_1670.xml", "product_code":"mrs", - "code":"270", + "code":"319", "des":"In the existing default DFSclient failover proxy provider, if a NameNode in a process is faulty, all HDFS client instances in the same process attempt to connect to the N", "doc_type":"usermanual", "kw":"Configuring the NameNode Blacklist,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the NameNode Blacklist", "githuburl":"" }, { "uri":"mrs_01_1672.html", + "node_id":"mrs_01_1672.xml", "product_code":"mrs", - "code":"271", + "code":"320", "des":"Several finished Hadoop clusters are faulty because the NameNode is overloaded and unresponsive.Such problem is caused by the initial design of Hadoop: In Hadoop, the Nam", "doc_type":"usermanual", "kw":"Optimizing HDFS NameNode RPC QoS,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing HDFS NameNode RPC QoS", "githuburl":"" }, { "uri":"mrs_01_1673.html", + "node_id":"mrs_01_1673.xml", "product_code":"mrs", - "code":"272", + "code":"321", "des":"When the speed at which the client writes data to the HDFS is greater than the disk bandwidth of the DataNode, the disk bandwidth is fully occupied. As a result, the Data", "doc_type":"usermanual", "kw":"Optimizing HDFS DataNode RPC QoS,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing HDFS DataNode RPC QoS", "githuburl":"" }, { "uri":"mrs_01_1675.html", + "node_id":"mrs_01_1675.xml", "product_code":"mrs", - "code":"273", + "code":"322", "des":"When the Yarn local directory and DataNode directory are on the same disk, the disk with larger capacity can run more tasks. Therefore, more intermediate data is stored i", "doc_type":"usermanual", "kw":"Configuring Reserved Percentage of Disk Usage on DataNodes,Using HDFS,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Reserved Percentage of Disk Usage on DataNodes", "githuburl":"" }, { "uri":"mrs_01_1676.html", + "node_id":"mrs_01_1676.xml", "product_code":"mrs", - "code":"274", + "code":"323", "des":"You need to configure the nodes for storing HDFS file data blocks based on data features. You can configure a label expression to an HDFS directory or file and assign one", "doc_type":"usermanual", "kw":"Configuring HDFS NodeLabel,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HDFS NodeLabel", "githuburl":"" }, { "uri":"mrs_01_1678.html", + "node_id":"mrs_01_1678.xml", "product_code":"mrs", - "code":"275", + "code":"324", "des":"DiskBalancer is an online disk balancer that balances disk data on running DataNodes based on various indicators. 
It works in the similar way of the HDFS Balancer. The di", "doc_type":"usermanual", "kw":"Configuring HDFS DiskBalancer,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HDFS DiskBalancer", "githuburl":"" }, { "uri":"mrs_01_1684.html", + "node_id":"mrs_01_1684.xml", "product_code":"mrs", - "code":"276", + "code":"325", "des":"Performing this operation can concurrently modify file and directory permissions and access control tools in a cluster.Performing concurrent file modification operations ", "doc_type":"cmpntguide", "kw":"Performing Concurrent Operations on HDFS Files,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"Performing Concurrent Operations on HDFS Files", "githuburl":"" }, + { + "uri":"mrs_01_24485.html", + "node_id":"mrs_01_24485.xml", + "product_code":"", + "code":"326", + "des":"By default, an HDFS file can be closed only if all blocks are reported (in the COMPLETED state). Therefore, the write performance of HDFS is affected by waiting for DataN", + "doc_type":"", + "kw":"Closing HDFS Files,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Closing HDFS Files", + "githuburl":"" + }, { "uri":"mrs_01_0828.html", + "node_id":"mrs_01_0828.xml", "product_code":"mrs", - "code":"277", + "code":"327", "des":"Log path: The default path of HDFS logs is /var/log/Bigdata/hdfs/Role name.NameNode: /var/log/Bigdata/hdfs/nn (run logs) and /var/log/Bigdata/audit/hdfs/nn (audit logs)Da", "doc_type":"usermanual", "kw":"Introduction to HDFS Logs,Using HDFS,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to HDFS Logs", "githuburl":"" }, { "uri":"mrs_01_0829.html", + "node_id":"mrs_01_0829.xml", "product_code":"mrs", - "code":"278", + "code":"328", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HDFS Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HDFS Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1687.html", + "node_id":"mrs_01_1687.xml", "product_code":"mrs", - "code":"279", + "code":"329", "des":"Improve the HDFS write performance by modifying the HDFS attributes.Navigation path for setting parameters:On FusionInsight Manager, choose Cluster >Name of the desired c", "doc_type":"usermanual", "kw":"Improving Write Performance,HDFS Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving Write Performance", "githuburl":"" }, { "uri":"mrs_01_1688.html", + "node_id":"mrs_01_1688.xml", "product_code":"mrs", - "code":"280", + "code":"330", "des":"Improve the HDFS read performance by using the client to cache the metadata for block locations.This function is recommended only for reading files that are not modified ", "doc_type":"usermanual", "kw":"Improving Read Performance Using Client Metadata Cache,HDFS Performance Tuning,Component Operation G", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving Read Performance Using Client Metadata Cache", "githuburl":"" }, { "uri":"mrs_01_1689.html", + "node_id":"mrs_01_1689.xml", "product_code":"mrs", - "code":"281", + "code":"331", "des":"When HDFS is deployed in high availability (HA) mode with multiple NameNode instances, the HDFS client needs to connect to each NameNode in sequence to determine which is", "doc_type":"usermanual", "kw":"Improving the Connection Between the Client and NameNode Using Current Active Cache,HDFS Performance", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving the Connection Between the Client and NameNode Using Current Active Cache", "githuburl":"" }, { "uri":"mrs_01_1690.html", + "node_id":"mrs_01_1690.xml", "product_code":"mrs", - "code":"282", + "code":"332", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"FAQ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"FAQ", "githuburl":"" }, { "uri":"mrs_01_1691.html", + "node_id":"mrs_01_1691.xml", "product_code":"mrs", - "code":"283", + "code":"333", "des":"The NameNode startup is slow when it is restarted immediately after a large number of files (for example, 1 million files) are deleted.It takes time for the DataNode to d", "doc_type":"usermanual", "kw":"NameNode Startup Is Slow,FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"NameNode Startup Is Slow", "githuburl":"" }, { "uri":"mrs_01_1692.html", + "node_id":"mrs_01_1692.xml", "product_code":"mrs", - "code":"284", + "code":"334", "des":"Why MapReduce or Yarn tasks using the viewFS function fail to be executed in the environment with multiple NameServices?When viewFS is used, only directories mounted to v", "doc_type":"usermanual", "kw":"Why MapReduce Tasks Fails in the Environment with Multiple NameServices?,FAQ,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why MapReduce Tasks Fails in the Environment with Multiple NameServices?", "githuburl":"" }, { "uri":"mrs_01_1693.html", + "node_id":"mrs_01_1693.xml", "product_code":"mrs", - "code":"285", + "code":"335", "des":"The DataNode is normal, but cannot report data blocks. As a result, the existing data blocks cannot be used.This error may occur when the number of data blocks in a data ", "doc_type":"usermanual", "kw":"DataNode Is Normal but Cannot Report Data Blocks,FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"DataNode Is Normal but Cannot Report Data Blocks", "githuburl":"" }, { "uri":"mrs_01_1694.html", + "node_id":"mrs_01_1694.xml", "product_code":"mrs", - "code":"286", + "code":"336", "des":"When errors occur in the dfs.datanode.data.dir directory of DataNode due to the permission or disk damage, HDFS WebUI does not display information about damaged data.Afte", "doc_type":"usermanual", "kw":"HDFS WebUI Cannot Properly Update Information About Damaged Data,FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HDFS WebUI Cannot Properly Update Information About Damaged Data", "githuburl":"" }, { "uri":"mrs_01_1695.html", + "node_id":"mrs_01_1695.xml", "product_code":"mrs", - "code":"287", + "code":"337", "des":"Why distcp command fails in the secure cluster with the following error displayed?Client side exceptionServer side exceptionThe preceding error may occur if webhdfs:// is", "doc_type":"usermanual", "kw":"Why Does the Distcp Command Fail in the Secure Cluster, Causing an Exception?,FAQ,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the Distcp Command Fail in the Secure Cluster, Causing an Exception?", "githuburl":"" }, { "uri":"mrs_01_1696.html", + "node_id":"mrs_01_1696.xml", "product_code":"mrs", - "code":"288", + "code":"338", "des":"If the number of disks specified by dfs.datanode.data.dir is equal to the value of 
dfs.datanode.failed.volumes.tolerated, DataNode startup will fail.By default, the failu", "doc_type":"usermanual", "kw":"Why Does DataNode Fail to Start When the Number of Disks Specified by dfs.datanode.data.dir Equals d", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does DataNode Fail to Start When the Number of Disks Specified by dfs.datanode.data.dir Equals dfs.datanode.failed.volumes.tolerated?", "githuburl":"" }, { "uri":"mrs_01_1697.html", + "node_id":"mrs_01_1697.xml", "product_code":"mrs", - "code":"289", + "code":"339", "des":"DataNode capacity count incorrect if several data.dir configured in one disk partition.Currently calculation will be done based on the disk like df command in linux. Idea", "doc_type":"usermanual", "kw":"Why Does an Error Occur During DataNode Capacity Calculation When Multiple data.dir Are Configured i", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does an Error Occur During DataNode Capacity Calculation When Multiple data.dir Are Configured in a Partition?", "githuburl":"" }, { "uri":"mrs_01_1698.html", + "node_id":"mrs_01_1698.xml", "product_code":"mrs", - "code":"290", + "code":"340", "des":"When the standby NameNode is powered off during metadata (namespace) storage, it fails to be started and the following error information is displayed.When the standby Nam", "doc_type":"usermanual", "kw":"Standby NameNode Fails to Be Restarted When the System Is Powered off During Metadata (Namespace) St", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Standby NameNode Fails to Be Restarted When the System Is Powered off During Metadata (Namespace) Storage", "githuburl":"" }, { "uri":"mrs_01_1699.html", + "node_id":"mrs_01_1699.xml", "product_code":"mrs", - "code":"291", + "code":"341", "des":"Why data in the buffer is lost if a power outage occurs during storage of small files?Because of a power outage, the blocks in the buffer are not written to the disk imme", "doc_type":"usermanual", "kw":"Why Data in the Buffer Is Lost If a Power Outage Occurs During Storage of Small Files,FAQ,Component ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Data in the Buffer Is Lost If a Power Outage Occurs During Storage of Small Files", "githuburl":"" }, { "uri":"mrs_01_1700.html", + "node_id":"mrs_01_1700.xml", "product_code":"mrs", - "code":"292", + "code":"342", "des":"When HDFS calls the FileInputFormat getSplit method, the ArrayIndexOutOfBoundsException: 0 appears in the following log:The elements of each block correspondent frame are", "doc_type":"usermanual", "kw":"Why Does Array Border-crossing Occur During FileInputFormat Split?,FAQ,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does Array Border-crossing Occur During FileInputFormat Split?", "githuburl":"" }, { "uri":"mrs_01_1701.html", + "node_id":"mrs_01_1701.xml", "product_code":"mrs", - "code":"293", + "code":"343", "des":"When the storage policy of the file is set to LAZY_PERSIST, the storage type of the first replica should be RAM_DISK, and the storage type of other replicas should be DIS", "doc_type":"usermanual", "kw":"Why Is the Storage Type of File Copies DISK When the Tiered Storage Policy Is LAZY_PERSIST?,FAQ,Comp", + "search_title":"", + "metedata":[ + { + 
"prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is the Storage Type of File Copies DISK When the Tiered Storage Policy Is LAZY_PERSIST?", "githuburl":"" }, { "uri":"mrs_01_1702.html", + "node_id":"mrs_01_1702.xml", "product_code":"mrs", - "code":"294", + "code":"344", "des":"When the NameNode node is overloaded (100% of the CPU is occupied), the NameNode is unresponsive. The HDFS clients that are connected to the overloaded NameNode fail to r", "doc_type":"usermanual", "kw":"The HDFS Client Is Unresponsive When the NameNode Is Overloaded for a Long Time,FAQ,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"The HDFS Client Is Unresponsive When the NameNode Is Overloaded for a Long Time", "githuburl":"" }, { "uri":"mrs_01_1703.html", + "node_id":"mrs_01_1703.xml", "product_code":"mrs", - "code":"295", + "code":"345", "des":"In DataNode, the storage directory of data blocks is specified by dfs.datanode.data.dir.Can I modify dfs.datanode.data.dir tomodify the data storage directory?Can I modif", "doc_type":"usermanual", "kw":"Can I Delete or Modify the Data Storage Directory in DataNode?,FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Can I Delete or Modify the Data Storage Directory in DataNode?", "githuburl":"" }, { "uri":"mrs_01_1704.html", + "node_id":"mrs_01_1704.xml", "product_code":"mrs", - "code":"296", + "code":"346", "des":"Why are some blocks missing on the NameNode UI after the rollback is successful?This problem occurs because blocks with new IDs or genstamps may exist on the DataNode. Th", "doc_type":"usermanual", "kw":"Blocks Miss on the NameNode UI After the Successful Rollback,FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Blocks Miss on the NameNode UI After the Successful Rollback", "githuburl":"" }, { "uri":"mrs_01_1705.html", + "node_id":"mrs_01_1705.xml", "product_code":"mrs", - "code":"297", + "code":"347", "des":"Why is an \"java.net.SocketException: No buffer space available\" exception reported when data is written to HDFS?This problem occurs when files are written to the HDFS. Ch", "doc_type":"usermanual", "kw":"Why Is \"java.net.SocketException: No buffer space available\" Reported When Data Is Written to HDFS,F", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is \"java.net.SocketException: No buffer space available\" Reported When Data Is Written to HDFS", "githuburl":"" }, { "uri":"mrs_01_1706.html", + "node_id":"mrs_01_1706.xml", "product_code":"mrs", - "code":"298", + "code":"348", "des":"Why are there two standby NameNodes after the active NameNode is restarted?When this problem occurs, check the ZooKeeper and ZooKeeper FC logs. You can find that the sess", "doc_type":"usermanual", "kw":"Why are There Two Standby NameNodes After the active NameNode Is Restarted?,FAQ,Component Operation ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why are There Two Standby NameNodes After the active NameNode Is Restarted?", "githuburl":"" }, { "uri":"mrs_01_1707.html", + "node_id":"mrs_01_1707.xml", "product_code":"mrs", - "code":"299", + "code":"349", "des":"After I start a Balance process in HDFS, the process is shut down abnormally. 
If I attempt to execute the Balance process again, it fails again.After a Balance process is", "doc_type":"usermanual", "kw":"When Does a Balance Process in HDFS, Shut Down and Fail to be Executed Again?,FAQ,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"When Does a Balance Process in HDFS, Shut Down and Fail to be Executed Again?", "githuburl":"" }, { "uri":"mrs_01_1708.html", + "node_id":"mrs_01_1708.xml", "product_code":"mrs", - "code":"300", + "code":"350", "des":"Occasionally, Internet Explorer 9, Explorer 10, or Explorer 11 fails to access the native HDFS UI.Internet Explorer 9, Explorer 10, or Explorer 11 fails to access the nati", "doc_type":"usermanual", "kw":"\"This page can't be displayed\" Is Displayed When Internet Explorer Fails to Access the Native HDFS U", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"\"This page can't be displayed\" Is Displayed When Internet Explorer Fails to Access the Native HDFS UI", "githuburl":"" }, { "uri":"mrs_01_1709.html", + "node_id":"mrs_01_1709.xml", "product_code":"mrs", - "code":"301", + "code":"351", "des":"If a JournalNode server is powered off, the data directory disk is fully occupied, and the network is abnormal, the EditLog sequence number on the JournalNode is inconsec", "doc_type":"usermanual", "kw":"NameNode Fails to Be Restarted Due to EditLog Discontinuity,FAQ,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"NameNode Fails to Be Restarted Due to EditLog Discontinuity", "githuburl":"" }, { "uri":"mrs_01_1710.html", + "node_id":"mrs_01_1710.xml", "product_code":"mrs", - "code":"302", + "code":"352", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using HetuEngine", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HetuEngine", "githuburl":"" }, { "uri":"mrs_01_1711.html", + "node_id":"mrs_01_1711.xml", "product_code":"mrs", - "code":"303", + "code":"353", "des":"This section describes how to use HetuEngine to connect to the Hive data source and query database tables of the Hive data source of the cluster through HetuEngine.The He", "doc_type":"usermanual", "kw":"Using HetuEngine from Scratch,Using HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HetuEngine from Scratch", "githuburl":"" }, { "uri":"mrs_01_1721.html", + "node_id":"mrs_01_1721.xml", "product_code":"mrs", - "code":"304", + "code":"354", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HetuEngine Permission Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Permission Management", "githuburl":"" }, { "uri":"mrs_01_1722.html", + "node_id":"mrs_01_1722.xml", "product_code":"mrs", - "code":"305", + "code":"355", "des":"HetuEngine supports permission control for clusters in security mode. For clusters in non-security mode, permission control is not performed.In security mode, HetuEngine ", "doc_type":"usermanual", "kw":"HetuEngine Permission Management Overview,HetuEngine Permission Management,Component Operation Guide", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Permission Management Overview", "githuburl":"" }, { "uri":"mrs_01_1714.html", + "node_id":"mrs_01_1714.xml", "product_code":"mrs", - "code":"306", + "code":"356", "des":"Before using the HetuEngine service in a security cluster, a cluster administrator needs to create a user and grant operation permissions to the user to meet service requ", "doc_type":"cmpntguide", "kw":"Creating a HetuEngine User,HetuEngine Permission Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], "title":"Creating a HetuEngine User", "githuburl":"" }, { "uri":"mrs_01_1723.html", + "node_id":"mrs_01_1723.xml", "product_code":"mrs", - "code":"307", + "code":"357", "des":"Newly installed clusters use Ranger for authentication by default. System administrators can use Ranger to configure the permissions to manage databases, tables, and colu", "doc_type":"usermanual", "kw":"HetuEngine Ranger-based Permission Control,HetuEngine Permission Management,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Ranger-based Permission Control", "githuburl":"" }, { "uri":"mrs_01_1724.html", + "node_id":"mrs_01_1724.xml", "product_code":"mrs", - "code":"308", + "code":"358", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HetuEngine MetaStore-based Permission Control", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine MetaStore-based Permission Control", "githuburl":"" }, { "uri":"mrs_01_1725.html", + "node_id":"mrs_01_1725.xml", "product_code":"mrs", - "code":"309", + "code":"359", "des":"Constraints: This parameter applies only to the Hive data source.When multiple HetuEngine clusters are deployed for collaborative computing, the metadata is centrally man", "doc_type":"usermanual", - "kw":"Overview,HetuEngine MetaStore-based Permission Control,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"MetaStore Permission Overview,HetuEngine MetaStore-based Permission Control,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"MetaStore Permission Overview", "githuburl":"" }, { "uri":"mrs_01_2350.html", + "node_id":"mrs_01_2350.xml", "product_code":"mrs", - "code":"310", + "code":"360", "des":"The system administrator can create and set a HetuEngine role on FusionInsight Manager. The HetuEngine role can be configured with the HetuEngine administrator permission", "doc_type":"usermanual", "kw":"Creating a HetuEngine Role,HetuEngine MetaStore-based Permission Control,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a HetuEngine Role", "githuburl":"" }, { "uri":"mrs_01_2352.html", + "node_id":"mrs_01_2352.xml", "product_code":"mrs", - "code":"311", + "code":"361", "des":"If a user needs to access HetuEngine tables or databases created by other users, the user needs to be granted with related permissions. HetuEngine supports permission con", "doc_type":"usermanual", "kw":"Configuring Permissions for Tables, Columns, and Databases,HetuEngine MetaStore-based Permission Con", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Permissions for Tables, Columns, and Databases", "githuburl":"" }, { "uri":"mrs_01_1728.html", + "node_id":"mrs_01_1728.xml", "product_code":"mrs", - "code":"312", + "code":"362", "des":"Access data sources in the same cluster using HetuEngineIf Ranger authentication is enabled for HetuEngine, the PBAC permission policy of Ranger is used for authenticatio", "doc_type":"usermanual", "kw":"Permission Principles and Constraints,HetuEngine Permission Management,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Permission Principles and Constraints", "githuburl":"" }, { "uri":"mrs_01_1731.html", + "node_id":"mrs_01_1731.xml", "product_code":"mrs", - "code":"313", + "code":"363", "des":"This section describes how to create a HetuEngine compute instance. 
If you want to stop the cluster where compute instances are successfully created, you need to manually", "doc_type":"usermanual", "kw":"Creating HetuEngine Compute Instances,Using HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating HetuEngine Compute Instances", "githuburl":"" }, { "uri":"mrs_01_2314.html", + "node_id":"mrs_01_2314.xml", "product_code":"mrs", - "code":"314", + "code":"364", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Configuring Data Sources", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Data Sources", "githuburl":"" }, { "uri":"mrs_01_2315.html", + "node_id":"mrs_01_2315.xml", "product_code":"mrs", - "code":"315", + "code":"365", "des":"HetuEngine supports quick joint query of multiple data sources and GUI-based data source configuration and management. You can quickly add a data source on the HSConsole ", "doc_type":"usermanual", "kw":"Before You Start,Configuring Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Before You Start", "githuburl":"" }, { "uri":"mrs_01_24174.html", + "node_id":"mrs_01_24174.xml", "product_code":"mrs", - "code":"316", + "code":"366", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Configuring a Hive Data Source", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a Hive Data Source", "githuburl":"" }, { "uri":"mrs_01_24253.html", + "node_id":"mrs_01_24253.xml", "product_code":"mrs", - "code":"317", + "code":"367", "des":"This section describes how to add a Hive data source of the same Hadoop cluster as HetuEngine on HSConsole.Currently, HetuEngine supports data sources of the following tr", "doc_type":"cmpntguide", "kw":"Configuring a Co-deployed Hive Data Source,Configuring a Hive Data Source,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "documenttype":"usermanual", + "prodname":"mrs" + } + ], "title":"Configuring a Co-deployed Hive Data Source", "githuburl":"" }, { "uri":"mrs_01_2348.html", + "node_id":"mrs_01_2348.xml", "product_code":"mrs", - "code":"318", + "code":"368", "des":"This section describes how to add a Hive data source on HSConsole.Currently, HetuEngine supports data sources of the following traditional data formats: AVRO, TEXT, RCTEX", "doc_type":"usermanual", "kw":"Configuring a Traditional Data Source,Configuring a Hive Data Source,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a Traditional Data Source", "githuburl":"" }, { "uri":"mrs_01_2363.html", + "node_id":"mrs_01_2363.xml", "product_code":"mrs", - "code":"319", + "code":"369", "des":"HetuEngine can be connected to the Hudi data source of the cluster of MRS 3.1.1 or later.HetuEngine does not 
support the reading of Hudi bootstrap tables.You have created", "doc_type":"usermanual", "kw":"Configuring a Hudi Data Source,Configuring a Hive Data Source,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a Hudi Data Source", "githuburl":"" }, { "uri":"mrs_01_2349.html", + "node_id":"mrs_01_2349.xml", "product_code":"mrs", - "code":"320", + "code":"370", "des":"This section describes how to add an HBase data source on HSConsole.The domain name of the cluster where the data source is located must be different from the HetuEngine ", "doc_type":"usermanual", "kw":"Configuring an HBase Data Source,Configuring Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring an HBase Data Source", "githuburl":"" }, { "uri":"mrs_01_2351.html", + "node_id":"mrs_01_2351.xml", "product_code":"mrs", - "code":"321", + "code":"371", "des":"This section describes how to add a GaussDB JDBC data source on the HSConsole page.The domain name of the cluster where the data source is located must be different from ", "doc_type":"usermanual", "kw":"Configuring a GaussDB Data Source,Configuring Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a GaussDB Data Source", "githuburl":"" }, { "uri":"mrs_01_1719.html", + "node_id":"mrs_01_1719.xml", "product_code":"mrs", - "code":"322", + "code":"372", "des":"This section describes how to add another HetuEngine data source on the HSConsole page for a cluster in security mode.Currently, the following data sources are supported:", "doc_type":"usermanual", "kw":"Configuring a HetuEngine Data Source,Configuring Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a HetuEngine Data Source", "githuburl":"" }, { "uri":"mrs_01_24146.html", + "node_id":"mrs_01_24146.xml", "product_code":"mrs", - "code":"323", + "code":"373", "des":"Currently, HetuEngine supports the interconnection with the ClickHouse data source in the cluster of MRS 3.1.1 or later.The HetuEngine cluster in security mode supports t", "doc_type":"usermanual", "kw":"Configuring a ClickHouse Data Source,Configuring Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a ClickHouse Data Source", "githuburl":"" }, { - "uri":"mrs_01_1720.html", + "uri":"mrs_01_24743.html", + "node_id":"mrs_01_24743.xml", "product_code":"mrs", - "code":"324", + "code":"374", + "des":"This section applies to MRS 3.2.0 or later.Add an IoTDB JDBC data source on HSConsole of a cluster in security mode.The domain name of the cluster where the data source i", + "doc_type":"cmpntguide", + "kw":"Configuring an IoTDB Data Source,Configuring Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"usermanual", + "prodname":"mrs" + } + ], + "title":"Configuring an IoTDB Data Source", + "githuburl":"" + }, + { + "uri":"mrs_01_1720.html", + "node_id":"mrs_01_1720.xml", + "product_code":"mrs", + "code":"375", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Managing Data Sources", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Data Sources", "githuburl":"" }, { "uri":"mrs_01_24061.html", + "node_id":"mrs_01_24061.xml", "product_code":"mrs", - "code":"325", + "code":"376", "des":"On the HetuEngine web UI, you can view, edit, and delete an added data source.You have created a HetuEngine administrator for accessing the HetuEngine web UI. For details", "doc_type":"usermanual", "kw":"Managing an External Data Source,Managing Data Sources,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing an External Data Source", "githuburl":"" }, { "uri":"mrs_01_1729.html", + "node_id":"mrs_01_1729.xml", "product_code":"mrs", - "code":"326", + "code":"377", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Managing Compute Instances", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Compute Instances", "githuburl":"" }, { "uri":"mrs_01_1732.html", + "node_id":"mrs_01_1732.xml", "product_code":"mrs", - "code":"327", + "code":"378", "des":"The resource group mechanism controls the overall query load of the instance from the perspective of resource allocation and implements queuing policies for queries. Mult", "doc_type":"usermanual", "kw":"Configuring Resource Groups,Managing Compute Instances,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Resource Groups", "githuburl":"" }, { "uri":"mrs_01_2320.html", + "node_id":"mrs_01_2320.xml", "product_code":"mrs", - "code":"328", + "code":"379", "des":"On the HetuEngine web UI, you can adjust the number of worker nodes for a compute instance. 
In this way, resources can be expanded for the compute instance when resources", "doc_type":"usermanual", "kw":"Adjusting the Number of Worker Nodes,Managing Compute Instances,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adjusting the Number of Worker Nodes", "githuburl":"" }, { "uri":"mrs_01_1736.html", + "node_id":"mrs_01_1736.xml", "product_code":"mrs", - "code":"329", + "code":"380", "des":"On the HetuEngine web UI, you can start, stop, delete, and roll-restart a single compute instance or compute instances in batches.Restarting HetuEngineDuring the restart ", "doc_type":"usermanual", "kw":"Managing a HetuEngine Compute Instance,Managing Compute Instances,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing a HetuEngine Compute Instance", "githuburl":"" }, { "uri":"mrs_01_1733.html", + "node_id":"mrs_01_1733.xml", "product_code":"mrs", - "code":"330", + "code":"381", "des":"On the HetuEngine web UI, you can import or export the instance configuration file and download the instance configuration template.You have created a user for accessing ", "doc_type":"usermanual", "kw":"Importing and Exporting Compute Instance Configurations,Managing Compute Instances,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Importing and Exporting Compute Instance Configurations", "githuburl":"" }, { "uri":"mrs_01_1734.html", + "node_id":"mrs_01_1734.xml", "product_code":"mrs", - "code":"331", + "code":"382", "des":"On the HetuEngine web UI, you can view the detailed information about a specified service, including the execution status of each SQL statement. If the current cluster us", "doc_type":"usermanual", "kw":"Viewing the Instance Monitoring Page,Managing Compute Instances,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing the Instance Monitoring Page", "githuburl":"" }, { "uri":"mrs_01_1735.html", + "node_id":"mrs_01_1735.xml", "product_code":"mrs", - "code":"332", + "code":"383", "des":"On the HetuEngine web UI, you can view Coordinator and Worker logs on the Yarn web UI.You have created a user for accessing the HetuEngine web UI. For details, see Creati", "doc_type":"usermanual", "kw":"Viewing Coordinator and Worker Logs,Managing Compute Instances,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Coordinator and Worker Logs", "githuburl":"" }, { "uri":"mrs_01_24260.html", + "node_id":"mrs_01_24260.xml", "product_code":"mrs", - "code":"333", + "code":"384", "des":"By default, coordinator and worker nodes randomly start on Yarn NodeManager nodes, and you have to open all ports on all NodeManager nodes. 
Using resource labels of Yarn,", "doc_type":"usermanual", "kw":"Using Resource Labels to Specify on Which Node Coordinators Should Run,Managing Compute Instances,Co", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Using Resource Labels to Specify on Which Node Coordinators Should Run", "githuburl":"" }, { "uri":"mrs_01_1737.html", + "node_id":"mrs_01_1737.xml", "product_code":"mrs", - "code":"334", + "code":"385", "des":"If a compute instance is not created or started, you can log in to the HetuEngine client to create or start the compute instance. This section describes how to manage a c", "doc_type":"usermanual", "kw":"Using the HetuEngine Client,Using HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the HetuEngine Client", "githuburl":"" }, { "uri":"mrs_01_1738.html", + "node_id":"mrs_01_1738.xml", "product_code":"mrs", - "code":"335", + "code":"386", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using the HetuEngine Cross-Source Function", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the HetuEngine Cross-Source Function", "githuburl":"" }, { "uri":"mrs_01_1739.html", + "node_id":"mrs_01_1739.xml", "product_code":"mrs", - "code":"336", + "code":"387", "des":"Enterprises usually store massive data, such as from various databases and warehouses, for management and information collection. However, diversified data sources, hybri", "doc_type":"usermanual", "kw":"Introduction to HetuEngine Cross-Source Function,Using the HetuEngine Cross-Source Function,Componen", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to HetuEngine Cross-Source Function", "githuburl":"" }, { "uri":"mrs_01_2341.html", + "node_id":"mrs_01_2341.xml", "product_code":"mrs", - "code":"337", + "code":"388", "des":"The format of the statement for creating a mapping table is as follows:CREATE TABLE schemaName.tableName (\n rowId VARCHAR,\n qualifier1 TINYINT,\n qualifier2 SMALLINT,\n ", "doc_type":"usermanual", "kw":"Usage Guide of HetuEngine Cross-Source Function,Using the HetuEngine Cross-Source Function,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Usage Guide of HetuEngine Cross-Source Function", "githuburl":"" }, { "uri":"mrs_01_2342.html", + "node_id":"mrs_01_2342.xml", "product_code":"mrs", - "code":"338", + "code":"389", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using HetuEngine Cross-Domain Function", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HetuEngine Cross-Domain Function", "githuburl":"" }, { "uri":"mrs_01_2334.html", + "node_id":"mrs_01_2334.xml", "product_code":"mrs", - "code":"339", + "code":"390", "des":"HetuEngine provide unified standard SQL to implement efficient access to multiple data sources distributed in multiple regions (or data centers), shields data differences", "doc_type":"usermanual", - "kw":"Introduction to HetuEngine Cross-Source Function,Using HetuEngine Cross-Domain Function,Component Op", - "title":"Introduction to HetuEngine Cross-Source Function", + "kw":"Introduction to HetuEngine Cross-Domain Function,Using HetuEngine Cross-Domain Function,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Introduction to HetuEngine Cross-Domain Function", "githuburl":"" }, { "uri":"mrs_01_2335.html", + "node_id":"mrs_01_2335.xml", "product_code":"mrs", - "code":"340", + "code":"391", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HetuEngine Cross-Domain Function Usage,Using HetuEngine Cross-Domain Function,Component Operation Gu", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Cross-Domain Function Usage", "githuburl":"" }, { "uri":"mrs_01_24284.html", + "node_id":"mrs_01_24284.xml", "product_code":"mrs", - "code":"341", + "code":"392", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HetuEngine Cross-Domain Rate Limit Function,Using HetuEngine Cross-Domain Function,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Cross-Domain Rate Limit Function", "githuburl":"" }, + { + "uri":"mrs_01_24533.html", + "node_id":"mrs_01_24533.xml", + "product_code":"", + "code":"393", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"Using HetuEngine Materialized Views", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Using HetuEngine Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24541.html", + "node_id":"mrs_01_24541.xml", + "product_code":"", + "code":"394", + "des":"Materialized Views applies to MRS 3.2.0 or later.HetuEngine provides the materialized view capability. 
It enables you to pre-compute frequently accessed and time-consumin", + "doc_type":"", + "kw":"Overview of Materialized Views,Using HetuEngine Materialized Views,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Overview of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24545.html", + "node_id":"mrs_01_24545.xml", + "product_code":"", + "code":"395", + "des":"For details about the SQL statements for materialized views, see Table 1.The AS SELECT clause for creating materialized views cannot contain reserved keywords in Calcite ", + "doc_type":"", + "kw":"SQL Statement Example of Materialized Views,Using HetuEngine Materialized Views,Component Operation ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"SQL Statement Example of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24535.html", + "node_id":"mrs_01_24535.xml", + "product_code":"", + "code":"396", + "des":"A maintenance instance is a special compute instance that performs automatic tasks. Maintenance instances are used to automatically refresh, create, and delete materializ", + "doc_type":"", + "kw":"Configuring a HetuEngine Maintenance Instance,Using HetuEngine Materialized Views,Component Operatio", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring a HetuEngine Maintenance Instance", + "githuburl":"" + }, + { + "uri":"mrs_01_24543.html", + "node_id":"mrs_01_24543.xml", + "product_code":"", + "code":"397", + "des":"HetuEngine provides the materialized view rewriting capability at the system or session level.Enabling the materialized view rewriting capability at the session level:Run", + "doc_type":"", + "kw":"Configuring Rewriting of Materialized Views,Using HetuEngine Materialized Views,Component Operation ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Rewriting of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24776.html", + "node_id":"mrs_01_24776.xml", + "product_code":"", + "code":"398", + "des":"HetuEngine QAS module provides automatic detection, learning, and diagnosis of historical SQL execution records. After the materialized view recommendation function is en", + "doc_type":"", + "kw":"Configuring Recommendation of Materialized Views,Using HetuEngine Materialized Views,Component Opera", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Recommendation of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24544.html", + "node_id":"mrs_01_24544.xml", + "product_code":"", + "code":"399", + "des":"After a materialized view is created for an SQL statement, the SQL statement is rewritten to be queried through the materialized view when the SQL statement is executed. ", + "doc_type":"", + "kw":"Configuring Caching of Materialized Views,Using HetuEngine Materialized Views,Component Operation Gu", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Caching of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24546.html", + "node_id":"mrs_01_24546.xml", + "product_code":"", + "code":"400", + "des":"The mv_validity field for creating a materialized view indicates the validity period of the materialized view. 
HetuEngine allows you to rewrite the SQL statements using o", + "doc_type":"", + "kw":"Configuring the Validity Period and Data Update of Materialized Views,Using HetuEngine Materialized ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring the Validity Period and Data Update of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24798.html", + "node_id":"mrs_01_24798.xml", + "product_code":"", + "code":"401", + "des":"HetuEngine intelligent materialized views provide intelligent precalculation and cache acceleration. The HetuEngine QAS role can automatically extract historical SQL stat", + "doc_type":"", + "kw":"Configuring Intelligent Materialized Views,Using HetuEngine Materialized Views,Component Operation G", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Intelligent Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24505.html", + "node_id":"mrs_01_24505.xml", + "product_code":"", + "code":"402", + "des":"View the status and execution result of an automatic HetuEngine task on HSConsol. You can periodically view the task execution status and evaluate the cluster health stat", + "doc_type":"", + "kw":"Viewing Automatic Tasks of Materialized Views,Using HetuEngine Materialized Views,Component Operatio", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Viewing Automatic Tasks of Materialized Views", + "githuburl":"" + }, + { + "uri":"mrs_01_24838.html", + "node_id":"mrs_01_24838.xml", + "product_code":"", + "code":"403", + "des":"This section applies to MRS 3.2.0 or later.The HetuEngine QAS module provides automatic detection, learning, and diagnosis of historical SQL execution records for more ef", + "doc_type":"", + "kw":"Using HetuEngine SQL Diagnosis,Using HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Using HetuEngine SQL Diagnosis", + "githuburl":"" + }, { "uri":"mrs_01_2336.html", + "node_id":"mrs_01_2336.xml", "product_code":"mrs", - "code":"342", + "code":"404", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using a Third-Party Visualization Tool to Access HetuEngine", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using a Third-Party Visualization Tool to Access HetuEngine", "githuburl":"" }, { "uri":"mrs_01_24178.html", + "node_id":"mrs_01_24178.xml", "product_code":"mrs", - "code":"343", + "code":"405", "des":"To access the dual-plane environment, the cluster service plane must be able to communicate with the local Windows environment.", "doc_type":"usermanual", "kw":"Usage Instruction,Using a Third-Party Visualization Tool to Access HetuEngine,Component Operation Gu", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Usage Instruction", "githuburl":"" }, { "uri":"mrs_01_2337.html", + "node_id":"mrs_01_2337.xml", "product_code":"mrs", - "code":"344", - "des":"This section uses DBeaver 6.3.5 as an example to describe how to perform operations on HetuEngine.The DBeaver has been installed properly. Download the DBeaver software f", + "code":"406", + "des":"Use DBeaver 7.2.0 as an example to describe how to access HetuEngine.The DBeaver has been installed properly. 
Download the DBeaver software from https://dbeaver.io/files/", "doc_type":"usermanual", "kw":"Using DBeaver to Access HetuEngine,Using a Third-Party Visualization Tool to Access HetuEngine,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using DBeaver to Access HetuEngine", "githuburl":"" }, { "uri":"mrs_01_24010.html", + "node_id":"mrs_01_24010.xml", "product_code":"mrs", - "code":"345", + "code":"407", "des":"Tableau has been installed.The JDBC JAR file has been obtained. For details, see 1.A human-machine user has been created in the cluster. For details about how to create a", "doc_type":"usermanual", "kw":"Using Tableau to Access HetuEngine,Using a Third-Party Visualization Tool to Access HetuEngine,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Using Tableau to Access HetuEngine", "githuburl":"" }, - { - "uri":"mrs_01_24012.html", - "product_code":"mrs", - "code":"346", - "des":"PowerBI has been installed.The JDBC JAR file has been obtained. For details, see 1.A human-machine user has been created in the cluster. For details about how to create a", - "doc_type":"usermanual", - "kw":"Using PowerBI to Access HetuEngine,Using a Third-Party Visualization Tool to Access HetuEngine,Compo", - "title":"Using PowerBI to Access HetuEngine", - "githuburl":"" - }, { "uri":"mrs_01_24013.html", + "node_id":"mrs_01_24013.xml", "product_code":"mrs", - "code":"347", + "code":"408", "des":"Yonghong BI has been installed.The JDBC JAR file has been obtained. For details, see 1.A human-machine user has been created in the cluster. For details about how to crea", "doc_type":"usermanual", "kw":"Using Yonghong BI to Access HetuEngine,Using a Third-Party Visualization Tool to Access HetuEngine,C", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Using Yonghong BI to Access HetuEngine", "githuburl":"" }, { "uri":"mrs_01_2338.html", + "node_id":"mrs_01_2338.xml", "product_code":"mrs", - "code":"348", + "code":"409", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Function & UDF Development and Application", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Function & UDF Development and Application", "githuburl":"" }, { "uri":"mrs_01_2339.html", + "node_id":"mrs_01_2339.xml", "product_code":"mrs", - "code":"349", + "code":"410", "des":"You can customize functions to extend SQL statements to meet personalized requirements. These functions are called UDFs.This section describes how to develop and apply He", "doc_type":"usermanual", "kw":"HetuEngine Function Plugin Development and Application,Function & UDF Development and Application,Co", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Function Plugin Development and Application", "githuburl":"" }, { "uri":"mrs_01_1743.html", + "node_id":"mrs_01_1743.xml", "product_code":"mrs", - "code":"350", + "code":"411", "des":"You can customize functions to extend SQL statements to meet personalized requirements. 
These functions are called UDFs.This section describes how to develop and apply Hi", "doc_type":"usermanual", "kw":"Hive UDF Development and Application,Function & UDF Development and Application,Component Operation ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive UDF Development and Application", "githuburl":"" }, { "uri":"mrs_01_1744.html", + "node_id":"mrs_01_1744.xml", "product_code":"mrs", - "code":"351", + "code":"412", "des":"Log paths:The HetuEngine logs are stored in /var/log/Bigdata/hetuengine/ and /var/log/Bigdata/audit/hetuengine/.Log archiving rules:Log archiving rules use the FixedWindo", "doc_type":"usermanual", "kw":"Introduction to HetuEngine Logs,Using HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to HetuEngine Logs", "githuburl":"" }, { "uri":"mrs_01_1745.html", + "node_id":"mrs_01_1745.xml", "product_code":"mrs", - "code":"352", + "code":"413", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"HetuEngine Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HetuEngine Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1740.html", + "node_id":"mrs_01_1740.xml", "product_code":"mrs", - "code":"353", + "code":"414", "des":"HetuEngine depends on the resource allocation and control capabilities provided by Yarn. You need to adjust the Yarn service configuration based on the actual service and", "doc_type":"usermanual", "kw":"Adjusting the Yarn Service Configuration,HetuEngine Performance Tuning,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adjusting the Yarn Service Configuration", "githuburl":"" }, { "uri":"mrs_01_1741.html", + "node_id":"mrs_01_1741.xml", "product_code":"mrs", - "code":"354", + "code":"415", "des":"The default memory size and disk overflow path of HetuEngine are not the best. You need to adjust node resources in the cluster based on the actual service and server con", "doc_type":"usermanual", "kw":"Adjusting Cluster Node Resource Configurations,HetuEngine Performance Tuning,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adjusting Cluster Node Resource Configurations", "githuburl":"" }, { "uri":"mrs_01_1742.html", + "node_id":"mrs_01_1742.xml", "product_code":"mrs", - "code":"355", + "code":"416", "des":"HetuEngine provides the execution plan cache function. 
For the same query that needs to be executed for multiple times, this function reduces the time required for genera", "doc_type":"usermanual", "kw":"Adjusting Execution Plan Cache,HetuEngine Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adjusting Execution Plan Cache", "githuburl":"" }, { "uri":"mrs_01_1746.html", + "node_id":"mrs_01_1746.xml", "product_code":"mrs", - "code":"356", + "code":"417", "des":"When HetuEngine accesses the Hive data source, it needs to access the Hive metastore to obtain the metadata information. HetuEngine provides the metadata cache function. ", "doc_type":"usermanual", "kw":"Adjusting Metadata Cache,HetuEngine Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adjusting Metadata Cache", "githuburl":"" }, { "uri":"mrs_01_24181.html", + "node_id":"mrs_01_24181.xml", "product_code":"", - "code":"357", + "code":"418", "des":"If a table or common table expression (CTE) contained in a query appears multiple times and has the same projection and filter, you can enable the CTE reuse function to c", "doc_type":"", "kw":"Modifying the CTE Configuration,HetuEngine Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Modifying the CTE Configuration", "githuburl":"" }, { "uri":"mrs_01_1747.html", + "node_id":"mrs_01_1747.xml", "product_code":"mrs", - "code":"358", + "code":"419", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About HetuEngine", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About HetuEngine", "githuburl":"" }, { "uri":"mrs_01_2321.html", + "node_id":"mrs_01_2321.xml", "product_code":"mrs", - "code":"359", + "code":"420", "des":"After the domain name is changed, the installed client configuration and data source configuration become invalid, and the created cluster is unavailable. 
When data sourc", "doc_type":"usermanual", "kw":"How Do I Perform Operations After the Domain Name Is Changed?,Common Issues About HetuEngine,Compone", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Perform Operations After the Domain Name Is Changed?", "githuburl":"" }, { "uri":"mrs_01_2322.html", + "node_id":"mrs_01_2322.xml", "product_code":"mrs", - "code":"360", + "code":"421", "des":"If the cluster startup on the client takes a long time, the waiting times out and the waiting page exits.If the cluster startup times out, the waiting page automatically ", "doc_type":"usermanual", "kw":"What Do I Do If Starting a Cluster on the Client Times Out?,Common Issues About HetuEngine,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Do I Do If Starting a Cluster on the Client Times Out?", "githuburl":"" }, { "uri":"mrs_01_2323.html", + "node_id":"mrs_01_2323.xml", "product_code":"mrs", - "code":"361", + "code":"422", "des":"Why is the data source lost when I log in to the client to check the data source connected to the HSConsole page?The possible cause of data source loss is that the DBServ", "doc_type":"usermanual", "kw":"How Do I Handle Data Source Loss?,Common Issues About HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Handle Data Source Loss?", "githuburl":"" }, { "uri":"mrs_01_2329.html", + "node_id":"mrs_01_2329.xml", "product_code":"mrs", - "code":"362", + "code":"423", "des":"Log in to FusionInsight Manager and HetuEngine alarms are generated for the cluster.Log in to FusionInsight Manager, go to the O&M page, and view alarm details. You can c", "doc_type":"usermanual", "kw":"How Do I Handle HetuEngine Alarms?,Common Issues About HetuEngine,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Handle HetuEngine Alarms?", "githuburl":"" }, { "uri":"mrs_01_24050.html", + "node_id":"mrs_01_24050.xml", "product_code":"mrs", - "code":"363", + "code":"424", "des":"A new host is added to the cluster in security mode, the NodeManager instance is added, and the parameters of the HetuEngine compute instance are adjusted. After the Hetu", "doc_type":"usermanual", "kw":"How Do I Do If Coordinators and Workers Cannot Be Started on the New Node?,Common Issues About HetuE", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Do If Coordinators and Workers Cannot Be Started on the New Node?", "githuburl":"" }, { "uri":"mrs_01_0581.html", + "node_id":"mrs_01_0581.xml", "product_code":"mrs", - "code":"364", + "code":"425", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Hive", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hive", "githuburl":"" }, { "uri":"mrs_01_0442.html", + "node_id":"mrs_01_0442.xml", "product_code":"mrs", - "code":"365", + "code":"426", "des":"Hive is a data warehouse framework built on Hadoop. 
It maps structured data files to a database table and provides SQL-like functions to analyze and process data. It also", "doc_type":"usermanual", "kw":"Using Hive from Scratch,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hive from Scratch", "githuburl":"" }, { "uri":"mrs_01_0582.html", + "node_id":"mrs_01_0582.xml", "product_code":"mrs", - "code":"366", + "code":"427", "des":"Go to the Hive configurations page by referring to Modifying Cluster Service Configuration Parameters.", "doc_type":"usermanual", "kw":"Configuring Hive Parameters,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Hive Parameters", "githuburl":"" }, { "uri":"mrs_01_2330.html", + "node_id":"mrs_01_2330.xml", "product_code":"mrs", - "code":"367", + "code":"428", "des":"Hive SQL supports all features of Hive-3.1.0. For details, see https://cwiki.apache.org/confluence/display/hive/languagemanual.Table 1 describes the extended Hive stateme", "doc_type":"usermanual", "kw":"Hive SQL,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive SQL", "githuburl":"" }, { "uri":"mrs_01_0947.html", + "node_id":"mrs_01_0947.xml", "product_code":"mrs", - "code":"368", + "code":"429", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Permission Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Permission Management", "githuburl":"" }, { "uri":"mrs_01_0948.html", + "node_id":"mrs_01_0948.xml", "product_code":"mrs", - "code":"369", + "code":"430", "des":"Hive is a data warehouse framework built on Hadoop. It provides basic data analysis services using the Hive query language (HQL), a language like the structured query lan", "doc_type":"usermanual", "kw":"Hive Permission,Permission Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Permission", "githuburl":"" }, { "uri":"mrs_01_0949.html", + "node_id":"mrs_01_0949.xml", "product_code":"mrs", - "code":"370", + "code":"431", "des":"This section describes how to create and configure a Hive role on Manager as the system administrator. The Hive role can be granted the permissions of the Hive administra", "doc_type":"usermanual", "kw":"Creating a Hive Role,Permission Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Hive Role", "githuburl":"" }, { "uri":"mrs_01_0950.html", + "node_id":"mrs_01_0950.xml", "product_code":"mrs", - "code":"371", + "code":"432", "des":"You can configure related permissions if you need to access tables or databases created by other users. Hive supports column-based permission control. 
If a user needs to ", "doc_type":"usermanual", "kw":"Configuring Permissions for Hive Tables, Columns, or Databases,Permission Management,Component Opera", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Permissions for Hive Tables, Columns, or Databases", "githuburl":"" }, { "uri":"mrs_01_0951.html", + "node_id":"mrs_01_0951.xml", "product_code":"mrs", - "code":"372", + "code":"433", "des":"Hive may need to be associated with other components. For example, Yarn permissions are required in the scenario of using HQL statements to trigger MapReduce jobs, and HB", "doc_type":"usermanual", "kw":"Configuring Permissions to Use Other Components for Hive,Permission Management,Component Operation G", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Permissions to Use Other Components for Hive", "githuburl":"" }, { "uri":"mrs_01_0952.html", + "node_id":"mrs_01_0952.xml", "product_code":"mrs", - "code":"373", + "code":"434", "des":"This section guides users to use a Hive client in an O&M or service scenario.The client has been installed. For example, the client is installed in the /opt/hadoopclient ", "doc_type":"usermanual", "kw":"Using a Hive Client,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using a Hive Client", "githuburl":"" }, { "uri":"mrs_01_0953.html", + "node_id":"mrs_01_0953.xml", "product_code":"mrs", - "code":"374", + "code":"435", "des":"HDFS Colocation is the data location control function provided by HDFS. The HDFS Colocation API stores associated data or data on which associated operations are performe", "doc_type":"usermanual", "kw":"Using HDFS Colocation to Store Hive Tables,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HDFS Colocation to Store Hive Tables", "githuburl":"" }, { "uri":"mrs_01_0954.html", + "node_id":"mrs_01_0954.xml", "product_code":"mrs", - "code":"375", + "code":"436", "des":"Hive supports encryption of one or more columns in a table. When creating a Hive table, you can specify the columns to be encrypted and encryption algorithm. When data is", "doc_type":"usermanual", "kw":"Using the Hive Column Encryption Function,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Hive Column Encryption Function", "githuburl":"" }, { "uri":"mrs_01_0955.html", + "node_id":"mrs_01_0955.xml", "product_code":"mrs", - "code":"376", + "code":"437", "des":"In most cases, a carriage return character is used as the row delimiter in Hive tables stored in text files, that is, the carriage return character is used as the termina", "doc_type":"usermanual", "kw":"Customizing Row Separators,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Customizing Row Separators", "githuburl":"" }, { "uri":"mrs_01_0956.html", + "node_id":"mrs_01_0956.xml", "product_code":"mrs", - "code":"377", + "code":"438", "des":"Due to the limitations of underlying storage systems, Hive does not support the ability to delete a single piece of table data. 
In Hive on HBase, MRS Hive supports the ab", "doc_type":"usermanual", "kw":"Deleting Single-Row Records from Hive on HBase,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Deleting Single-Row Records from Hive on HBase", "githuburl":"" }, { "uri":"mrs_01_0957.html", + "node_id":"mrs_01_0957.xml", "product_code":"mrs", - "code":"378", + "code":"439", "des":"WebHCat provides external REST APIs for Hive. By default, the open-source community version uses the HTTP protocol.MRS Hive supports the HTTPS protocol that is more secur", "doc_type":"usermanual", "kw":"Configuring HTTPS/HTTP-based REST APIs,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring HTTPS/HTTP-based REST APIs", "githuburl":"" }, { "uri":"mrs_01_0958.html", + "node_id":"mrs_01_0958.xml", "product_code":"mrs", - "code":"379", + "code":"440", "des":"The Transform function is not allowed by Hive of the open source version.MRS Hive supports the configuration of the Transform function. The function is disabled by defaul", "doc_type":"usermanual", "kw":"Enabling or Disabling the Transform Function,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Enabling or Disabling the Transform Function", "githuburl":"" }, { "uri":"mrs_01_0959.html", + "node_id":"mrs_01_0959.xml", "product_code":"mrs", - "code":"380", + "code":"441", "des":"This section describes how to create a view on Hive when MRS is configured in security mode, authorize access permissions to different users, and specify that different u", "doc_type":"usermanual", "kw":"Access Control of a Dynamic Table View on Hive,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Access Control of a Dynamic Table View on Hive", "githuburl":"" }, { "uri":"mrs_01_0960.html", + "node_id":"mrs_01_0960.xml", "product_code":"mrs", - "code":"381", + "code":"442", "des":"You must have ADMIN permission when creating temporary functions on Hive of the open source community version.MRS Hive supports the configuration of the function for crea", "doc_type":"usermanual", "kw":"Specifying Whether the ADMIN Permissions Is Required for Creating Temporary Functions,Using Hive,Com", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Specifying Whether the ADMIN Permissions Is Required for Creating Temporary Functions", "githuburl":"" }, { "uri":"mrs_01_0961.html", + "node_id":"mrs_01_0961.xml", "product_code":"mrs", - "code":"382", + "code":"443", "des":"Hive allows users to create external tables to associate with other relational databases. 
External tables read data from associated relational databases and support Join ", "doc_type":"usermanual", "kw":"Using Hive to Read Data in a Relational Database,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hive to Read Data in a Relational Database", "githuburl":"" }, { "uri":"mrs_01_0962.html", + "node_id":"mrs_01_0962.xml", "product_code":"mrs", - "code":"383", + "code":"444", "des":"Hive supports the following types of traditional relational database syntax:GroupingEXCEPT and INTERSECTSyntax description:Grouping takes effect only when the Group by st", "doc_type":"usermanual", "kw":"Supporting Traditional Relational Database Syntax in Hive,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Supporting Traditional Relational Database Syntax in Hive", "githuburl":"" }, { "uri":"mrs_01_0963.html", + "node_id":"mrs_01_0963.xml", "product_code":"mrs", - "code":"384", + "code":"445", "des":"When built-in functions of Hive cannot meet requirements, you can compile user-defined functions (UDFs) and use them for query.According to implementation methods, UDFs a", "doc_type":"usermanual", "kw":"Creating User-Defined Hive Functions,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating User-Defined Hive Functions", "githuburl":"" }, { "uri":"mrs_01_0965.html", + "node_id":"mrs_01_0965.xml", "product_code":"mrs", - "code":"385", + "code":"446", "des":"When the beeline client is disconnected due to network exceptions during the execution of a batch processing task, tasks submitted before beeline is disconnected can be p", "doc_type":"usermanual", "kw":"Enhancing beeline Reliability,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Enhancing beeline Reliability", "githuburl":"" }, { "uri":"mrs_01_0966.html", + "node_id":"mrs_01_0966.xml", "product_code":"mrs", - "code":"386", + "code":"447", "des":"This function is applicable to Hive and Spark2x in.With this function enabled, if the select permission is granted to a user during Hive table creation, the user can run ", "doc_type":"usermanual", "kw":"Viewing Table Structures Using the show create Statement as Users with the select Permission,Using H", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Table Structures Using the show create Statement as Users with the select Permission", "githuburl":"" }, { "uri":"mrs_01_0967.html", + "node_id":"mrs_01_0967.xml", "product_code":"mrs", - "code":"387", + "code":"448", "des":"This function applies to Hive.After this function is enabled, run the following command to write a directory into Hive: insert overwrite directory \"/path1\".... 
After the ", "doc_type":"usermanual", "kw":"Writing a Directory into Hive with the Old Data Removed to the Recycle Bin,Using Hive,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Writing a Directory into Hive with the Old Data Removed to the Recycle Bin", "githuburl":"" }, { "uri":"mrs_01_0968.html", + "node_id":"mrs_01_0968.xml", "product_code":"mrs", - "code":"388", + "code":"449", "des":"This function applies to Hive.With this function enabled, run the insert overwrite directory/path1/path2/path3... command to write a subdirectory. The permission of the /", "doc_type":"usermanual", "kw":"Inserting Data to a Directory That Does Not Exist,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Inserting Data to a Directory That Does Not Exist", "githuburl":"" }, { "uri":"mrs_01_0969.html", + "node_id":"mrs_01_0969.xml", "product_code":"mrs", - "code":"389", + "code":"450", "des":"This function is applicable to Hive and Spark2x.After this function is enabled, only the Hive administrator can create databases and tables in the default database. Other", "doc_type":"usermanual", "kw":"Creating Databases and Creating Tables in the Default Database Only as the Hive Administrator,Using ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating Databases and Creating Tables in the Default Database Only as the Hive Administrator", "githuburl":"" }, { "uri":"mrs_01_0970.html", + "node_id":"mrs_01_0970.xml", "product_code":"mrs", - "code":"390", + "code":"451", "des":"This function is applicable to Hive and Spark2x.After this function is enabled, the location keyword cannot be specified when a Hive internal table is created. Specifical", "doc_type":"usermanual", "kw":"Disabling of Specifying the location Keyword When Creating an Internal Hive Table,Using Hive,Compone", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Disabling of Specifying the location Keyword When Creating an Internal Hive Table", "githuburl":"" }, { "uri":"mrs_01_0971.html", + "node_id":"mrs_01_0971.xml", "product_code":"mrs", - "code":"391", + "code":"452", "des":"This function is applicable to Hive and Spark2x.After this function is enabled, the user or user group that has the read and execute permissions on a directory can create", "doc_type":"usermanual", "kw":"Enabling the Function of Creating a Foreign Table in a Directory That Can Only Be Read,Using Hive,Co", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Enabling the Function of Creating a Foreign Table in a Directory That Can Only Be Read", "githuburl":"" }, { "uri":"mrs_01_0972.html", + "node_id":"mrs_01_0972.xml", "product_code":"mrs", - "code":"392", + "code":"453", "des":"This function applies to Hive.The number of OS user groups is limited, and the number of roles that can be created in Hive cannot exceed 32. 
After this function is enable", "doc_type":"usermanual", "kw":"Authorizing Over 32 Roles in Hive,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Authorizing Over 32 Roles in Hive", "githuburl":"" }, { "uri":"mrs_01_0973.html", + "node_id":"mrs_01_0973.xml", "product_code":"mrs", - "code":"393", + "code":"454", "des":"This function applies to Hive.This function is used to limit the maximum number of maps for Hive tasks on the server to avoid performance deterioration caused by overload", "doc_type":"usermanual", "kw":"Restricting the Maximum Number of Maps for Hive Tasks,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Restricting the Maximum Number of Maps for Hive Tasks", "githuburl":"" }, { "uri":"mrs_01_0974.html", + "node_id":"mrs_01_0974.xml", "product_code":"mrs", - "code":"394", + "code":"455", "des":"This function applies to Hive.This function can be enabled to specify specific users to access HiveServer services on specific nodes, achieving HiveServer resource isolat", "doc_type":"usermanual", "kw":"HiveServer Lease Isolation,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HiveServer Lease Isolation", "githuburl":"" }, + { + "uri":"mrs_01_24467.html", + "node_id":"mrs_01_24467.xml", + "product_code":"", + "code":"456", + "des":"This function restricts components in a cluster to connect to specified Hive Metastore instances. By default, components can connect to all Metastore instances. This func", + "doc_type":"", + "kw":"Hive Supports Isolation of Metastore instances Based on Components,Using Hive,Component Operation Gu", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Hive Supports Isolation of Metastore instances Based on Components", + "githuburl":"" + }, { "uri":"mrs_01_0975.html", + "node_id":"mrs_01_0975.xml", "product_code":"mrs", - "code":"395", + "code":"457", "des":"Hive supports transactions at the table and partition levels. When the transaction mode is enabled, transaction tables can be incrementally updated, deleted, and read, im", "doc_type":"usermanual", "kw":"Hive Supporting Transactions,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Supporting Transactions", "githuburl":"" }, { "uri":"mrs_01_1750.html", + "node_id":"mrs_01_1750.xml", "product_code":"mrs", - "code":"396", + "code":"458", "des":"Hive can use the Tez engine to process data computing tasks. Before executing a task, you can manually switch the execution engine to Tez.The TimelineServer role of the Y", "doc_type":"usermanual", "kw":"Switching the Hive Execution Engine to Tez,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Switching the Hive Execution Engine to Tez", "githuburl":"" }, { - "uri":"mrs_01_1751.html", + "uri":"mrs_01_17511.html", + "node_id":"mrs_01_17511.xml", "product_code":"mrs", - "code":"397", + "code":"459", "des":"RDS indicates the relational database in this section. 
This section describes how to connect Hive with the open-source MySQL and Postgres databases.After an external meta", "doc_type":"usermanual", "kw":"Connecting Hive with External RDS,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Connecting Hive with External RDS", "githuburl":"" }, { - "uri":"mrs_01_2302.html", + "uri":"mrs_01_1751.html", + "node_id":"mrs_01_1751.xml", "product_code":"mrs", - "code":"398", + "code":"460", + "des":"This section describes how to connect Hive with built-in relational databases open-source MySQL and Postgres.After an external metadata database is deployed in a cluster ", + "doc_type":"cmpntguide", + "kw":"Interconnecting Hive with External Self-Built Relational Databases,Using Hive,Component Operation Gu", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Interconnecting Hive with External Self-Built Relational Databases", + "githuburl":"" + }, + { + "uri":"mrs_01_2302.html", + "node_id":"mrs_01_2302.xml", + "product_code":"mrs", + "code":"461", "des":"The MetaStore service of Hive can cache the metadata of some tables in Redis.The Redis service has been installed in a cluster.If the cluster is installed in non-security", "doc_type":"usermanual", "kw":"Redis-based CacheStore of HiveMetaStore,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Redis-based CacheStore of HiveMetaStore", "githuburl":"" }, - { - "uri":"mrs_01_2311.html", - "product_code":"mrs", - "code":"399", - "des":"A Hive materialized view is a special table obtained based on the query results of Hive internal tables. A materialized view can be considered as an intermediate table th", - "doc_type":"usermanual", - "kw":"Hive Materialized View,Using Hive,Component Operation Guide (LTS)", - "title":"Hive Materialized View", - "githuburl":"" - }, { "uri":"mrs_01_24040.html", + "node_id":"mrs_01_24040.xml", "product_code":"mrs", - "code":"400", + "code":"462", "des":"A Hudi source table corresponds to a copy of HDFS data. The Hudi table data can be mapped to a Hive external table through the Spark component, Flink component, or Hudi c", "doc_type":"usermanual", "kw":"Hive Supporting Reading Hudi Tables,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Hive Supporting Reading Hudi Tables", "githuburl":"" }, { "uri":"mrs_01_24118.html", + "node_id":"mrs_01_24118.xml", "product_code":"mrs", - "code":"401", + "code":"463", "des":"The metadata that have not been used for a long time is moved to a backup table to reduce the pressure on metadata databases. This process is called partitioned data free", "doc_type":"usermanual", "kw":"Hive Supporting Cold and Hot Storage of Partitioned Metadata,Using Hive,Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Supporting Cold and Hot Storage of Partitioned Metadata", "githuburl":"" }, { "uri":"mrs_01_24121.html", + "node_id":"mrs_01_24121.xml", "product_code":"mrs", - "code":"402", + "code":"464", "des":"Zstandard (ZSTD) is an open-source lossless data compression algorithm. 
Its compression performance and compression ratio are better than those of other compression algor", "doc_type":"usermanual", "kw":"Hive Supporting ZSTD Compression Formats,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Supporting ZSTD Compression Formats", "githuburl":"" }, + { + "uri":"mrs_01_24480.html", + "node_id":"mrs_01_24480.xml", + "product_code":"", + "code":"465", + "des":"Data files stored in Hive are abnormal due to misoperations or disk damage, thereby causing task execution failures or incorrect data results.Common non-text data files c", + "doc_type":"", + "kw":"Locating Abnormal Hive Files,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Locating Abnormal Hive Files", + "githuburl":"" + }, + { + "uri":"mrs_01_24507.html", + "node_id":"mrs_01_24507.xml", + "product_code":"", + "code":"466", + "des":"ZSTD_JNI is a native implementation of the ZSTD compression algorithm. Compared with ZSTD, ZSTD_JNI has higher compression read/write efficiency and compression ratio, an", + "doc_type":"", + "kw":"Using the ZSTD_JNI Compression Algorithm to Compress Hive ORC Tables,Using Hive,Component Operation ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Using the ZSTD_JNI Compression Algorithm to Compress Hive ORC Tables", + "githuburl":"" + }, + { + "uri":"mrs_01_24738.html", + "node_id":"mrs_01_24738.xml", + "product_code":"", + "code":"467", + "des":"The client connection of Hive MetaStore supports load balancing. That is, heavy load of a single MetaStore node during heavy service traffic can be avoided by connecting ", + "doc_type":"", + "kw":"Load Balancing for Hive MetaStore Client Connection,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Load Balancing for Hive MetaStore Client Connection", + "githuburl":"" + }, + { + "uri":"mrs_01_24744.html", + "node_id":"mrs_01_24744.xml", + "product_code":"", + "code":"468", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"Data Import and Export in Hive", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Data Import and Export in Hive", + "githuburl":"" + }, + { + "uri":"mrs_01_24741.html", + "node_id":"mrs_01_24741.xml", + "product_code":"", + "code":"469", + "des":"In big data application scenarios, data tables in Hive usually need to be migrated to another cluster. You can run the Hive import and export commands to migrate data in ", + "doc_type":"", + "kw":"Importing and Exporting Table/Partition Data in Hive,Data Import and Export in Hive,Component Operat", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Importing and Exporting Table/Partition Data in Hive", + "githuburl":"" + }, + { + "uri":"mrs_01_24742.html", + "node_id":"mrs_01_24742.xml", + "product_code":"", + "code":"470", + "des":"In big data application scenarios, Hive databases and all tables in these databases are usually migrated to another cluster. 
You can run the Hive database export and impo", + "doc_type":"", + "kw":"Importing and Exporting Hive Databases,Data Import and Export in Hive,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Importing and Exporting Hive Databases", + "githuburl":"" + }, { "uri":"mrs_01_0976.html", + "node_id":"mrs_01_0976.xml", "product_code":"mrs", - "code":"403", + "code":"471", "des":"Log path: The default save path of Hive logs is /var/log/Bigdata/hive/role name, the default save path of Hive1 logs is /var/log/Bigdata/hive1/role name, and the others f", "doc_type":"usermanual", "kw":"Hive Log Overview,Using Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Log Overview", "githuburl":"" }, { "uri":"mrs_01_0977.html", + "node_id":"mrs_01_0977.xml", "product_code":"mrs", - "code":"404", + "code":"472", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Hive Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_0978.html", + "node_id":"mrs_01_0978.xml", "product_code":"mrs", - "code":"405", + "code":"473", "des":"During the Select query, Hive generally scans the entire table, which is time-consuming. To improve query efficiency, create table partitions based on service requirement", "doc_type":"usermanual", "kw":"Creating Table Partitions,Hive Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating Table Partitions", "githuburl":"" }, { "uri":"mrs_01_0979.html", + "node_id":"mrs_01_0979.xml", "product_code":"mrs", - "code":"406", + "code":"474", "des":"When the Join statement is used, the command execution speed and query speed may be slow in case of large data volume. 
To resolve this problem, you can optimize Join.Join", "doc_type":"usermanual", "kw":"Optimizing Join,Hive Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Join", "githuburl":"" }, { "uri":"mrs_01_0980.html", + "node_id":"mrs_01_0980.xml", "product_code":"mrs", - "code":"407", + "code":"475", "des":"Optimize the Group by statement to accelerate the command execution and query speed.During the Group by operation, Map performs grouping and distributes the groups to Red", "doc_type":"usermanual", "kw":"Optimizing Group By,Hive Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Group By", "githuburl":"" }, { "uri":"mrs_01_0981.html", + "node_id":"mrs_01_0981.xml", "product_code":"mrs", - "code":"408", + "code":"476", "des":"ORC is an efficient column storage format and has higher compression ratio and reading efficiency than other file formats.You are advised to use ORC as the default Hive t", "doc_type":"usermanual", "kw":"Optimizing Data Storage,Hive Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Data Storage", "githuburl":"" }, { "uri":"mrs_01_0982.html", + "node_id":"mrs_01_0982.xml", "product_code":"mrs", - "code":"409", + "code":"477", "des":"When SQL statements are executed on Hive, if the (a&b) or (a&c) logic exists in the statements, you are advised to change the logic to a & (b or c).If condition a is p_pa", "doc_type":"usermanual", "kw":"Optimizing SQL Statements,Hive Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing SQL Statements", "githuburl":"" }, { "uri":"mrs_01_0983.html", + "node_id":"mrs_01_0983.xml", "product_code":"mrs", - "code":"410", + "code":"478", "des":"When joining multiple tables in Hive, Hive supports Cost-Based Optimization (CBO). The system automatically selects the optimal plan based on the table statistics, such a", "doc_type":"usermanual", "kw":"Optimizing the Query Function Using Hive CBO,Hive Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing the Query Function Using Hive CBO", "githuburl":"" }, { "uri":"mrs_01_1752.html", + "node_id":"mrs_01_1752.xml", "product_code":"mrs", - "code":"411", + "code":"479", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Hive", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Hive", "githuburl":"" }, { "uri":"mrs_01_1753.html", + "node_id":"mrs_01_1753.xml", "product_code":"mrs", - "code":"412", + "code":"480", "des":"How can I delete permanent user-defined functions (UDFs) on multiple HiveServers at the same time?Multiple HiveServers share one MetaStore database. 
Therefore, there is a", "doc_type":"usermanual", "kw":"How Do I Delete UDFs on Multiple HiveServers at the Same Time?,Common Issues About Hive,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Delete UDFs on Multiple HiveServers at the Same Time?", "githuburl":"" }, { "uri":"mrs_01_1754.html", + "node_id":"mrs_01_1754.xml", "product_code":"mrs", - "code":"413", + "code":"481", "des":"Why cannot the DROP operation be performed for a backed up Hive table?Snapshots have been created for an HDFS directory mapping to the backed up Hive table, so the HDFS d", "doc_type":"usermanual", "kw":"Why Cannot the DROP operation Be Performed on a Backed-up Hive Table?,Common Issues About Hive,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot the DROP operation Be Performed on a Backed-up Hive Table?", "githuburl":"" }, { "uri":"mrs_01_1755.html", + "node_id":"mrs_01_1755.xml", "product_code":"mrs", - "code":"414", + "code":"482", "des":"How to perform operations on local files (such as reading the content of a file) with Hive user-defined functions?By default, you can perform operations on local files wi", "doc_type":"usermanual", "kw":"How to Perform Operations on Local Files with Hive User-Defined Functions,Common Issues About Hive,C", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Perform Operations on Local Files with Hive User-Defined Functions", "githuburl":"" }, { "uri":"mrs_01_1756.html", + "node_id":"mrs_01_1756.xml", "product_code":"mrs", - "code":"415", + "code":"483", "des":"How do I stop a MapReduce task manually if the task is suspended for a long time?", "doc_type":"usermanual", "kw":"How Do I Forcibly Stop MapReduce Jobs Executed by Hive?,Common Issues About Hive,Component Operation", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Forcibly Stop MapReduce Jobs Executed by Hive?", "githuburl":"" }, { "uri":"mrs_01_1758.html", + "node_id":"mrs_01_1758.xml", "product_code":"mrs", - "code":"416", + "code":"484", "des":"How do I monitor the Hive table size?The HDFS refined monitoring function allows you to monitor the size of a specified table directory.The Hive and HDFS components are r", "doc_type":"usermanual", "kw":"How Do I Monitor the Hive Table Size?,Common Issues About Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Monitor the Hive Table Size?", "githuburl":"" }, { "uri":"mrs_01_1759.html", + "node_id":"mrs_01_1759.xml", "product_code":"mrs", - "code":"417", + "code":"485", "des":"How do I prevent key directories from data loss caused by misoperations of the insert overwrite statement?During monitoring of key Hive databases, tables, or directories,", "doc_type":"usermanual", "kw":"How Do I Prevent Key Directories from Data Loss Caused by Misoperations of the insert overwrite Stat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Prevent Key Directories from Data Loss Caused by Misoperations of the insert overwrite Statement?", "githuburl":"" }, { "uri":"mrs_01_1760.html", + "node_id":"mrs_01_1760.xml", "product_code":"mrs", - "code":"418", + "code":"486", "des":"This function applies to Hive.Perform the following 
operations to configure parameters. When Hive on Spark tasks are executed in the environment where the HBase is not in", "doc_type":"usermanual", "kw":"Why Is Hive on Spark Task Freezing When HBase Is Not Installed?,Common Issues About Hive,Component O", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is Hive on Spark Task Freezing When HBase Is Not Installed?", "githuburl":"" }, { "uri":"mrs_01_1761.html", + "node_id":"mrs_01_1761.xml", "product_code":"mrs", - "code":"419", + "code":"487", "des":"When a table with more than 32,000 partitions is created in Hive, an exception occurs during the query with the WHERE partition. In addition, the exception information pr", "doc_type":"usermanual", "kw":"Error Reported When the WHERE Condition Is Used to Query Tables with Excessive Partitions in FusionI", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Error Reported When the WHERE Condition Is Used to Query Tables with Excessive Partitions in FusionInsight Hive", "githuburl":"" }, { "uri":"mrs_01_1762.html", + "node_id":"mrs_01_1762.xml", "product_code":"mrs", - "code":"420", + "code":"488", "des":"When users check the JDK version used by the client, if the JDK version is IBM JDK, the Beeline client needs to be reconstructed. Otherwise, the client will fail to conne", "doc_type":"usermanual", "kw":"Why Cannot I Connect to HiveServer When I Use IBM JDK to Access the Beeline Client?,Common Issues Ab", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot I Connect to HiveServer When I Use IBM JDK to Access the Beeline Client?", "githuburl":"" }, { "uri":"mrs_01_1763.html", + "node_id":"mrs_01_1763.xml", "product_code":"mrs", - "code":"421", + "code":"489", "des":"Does a Hive Table Can Be Stored Either in OBS or HDFS?The location of a common Hive table stored on OBS can be set to an HDFS path.In the same Hive service, you can creat", "doc_type":"usermanual", "kw":"Description of Hive Table Location (Either Be an OBS or HDFS Path),Common Issues About Hive,Componen", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Description of Hive Table Location (Either Be an OBS or HDFS Path)", "githuburl":"" }, { "uri":"mrs_01_2309.html", + "node_id":"mrs_01_2309.xml", "product_code":"mrs", - "code":"422", + "code":"490", "des":"Hive uses the Tez engine to execute union-related statements to write data. 
After Hive is switched to the MapReduce engine for query, no data is found.When Hive uses the ", "doc_type":"usermanual", "kw":"Why Cannot Data Be Queried After the MapReduce Engine Is Switched After the Tez Engine Is Used to Ex", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot Data Be Queried After the MapReduce Engine Is Switched After the Tez Engine Is Used to Execute Union-related Statements?", "githuburl":"" }, { "uri":"mrs_01_2310.html", + "node_id":"mrs_01_2310.xml", "product_code":"mrs", - "code":"423", + "code":"491", "des":"Why Does Data Inconsistency Occur When Data Is Concurrently Written to a Hive Table Through an API?Hive does not support concurrent data insertion for the same table or p", "doc_type":"usermanual", "kw":"Why Does Hive Not Support Concurrent Data Writing to the Same Table or Partition?,Common Issues Abou", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does Hive Not Support Concurrent Data Writing to the Same Table or Partition?", "githuburl":"" }, { "uri":"mrs_01_2325.html", + "node_id":"mrs_01_2325.xml", "product_code":"mrs", - "code":"424", + "code":"492", "des":"When the vectorized parameterhive.vectorized.execution.enabled is set to true, why do some null pointers or type conversion exceptions occur occasionally when Hive on Tez", "doc_type":"usermanual", "kw":"Why Does Hive Not Support Vectorized Query?,Common Issues About Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does Hive Not Support Vectorized Query?", "githuburl":"" }, { "uri":"mrs_01_24117.html", + "node_id":"mrs_01_24117.xml", "product_code":"mrs", - "code":"425", + "code":"493", "des":"The error message \"java.lang.OutOfMemoryError: Java heap space.\" is displayed during Hive SQL execution.Solution:For MapReduce tasks, increase the values of the following", "doc_type":"usermanual", "kw":"Hive Configuration Problems,Common Issues About Hive,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Configuration Problems", "githuburl":"" }, { "uri":"mrs_01_24025.html", + "node_id":"mrs_01_24025.xml", "product_code":"mrs", - "code":"426", + "code":"494", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Hudi", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hudi", "githuburl":"" }, { "uri":"mrs_01_24033.html", + "node_id":"mrs_01_24033.xml", "product_code":"mrs", - "code":"427", + "code":"495", "des":"This section describes capabilities of Hudi using spark-shell. 
Using the Spark data source, this section describes how to insert and update a Hudi dataset of the default ", "doc_type":"usermanual", "kw":"Quick Start,Using Hudi,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Quick Start", "githuburl":"" }, { "uri":"mrs_01_24062.html", + "node_id":"mrs_01_24062.xml", "product_code":"mrs", - "code":"428", + "code":"496", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Basic Operations", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Basic Operations", "githuburl":"" }, { "uri":"mrs_01_24103.html", + "node_id":"mrs_01_24103.xml", "product_code":"mrs", - "code":"429", + "code":"497", "des":"When writing data, Hudi generates a Hudi table based on attributes such as the storage path, table name, and partition structure.Hudi table data files can be stored in th", "doc_type":"usermanual", "kw":"Hudi Table Schema,Basic Operations,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hudi Table Schema", "githuburl":"" }, { "uri":"mrs_01_24034.html", + "node_id":"mrs_01_24034.xml", "product_code":"mrs", - "code":"430", + "code":"498", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Write", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Write", "githuburl":"" }, { "uri":"mrs_01_24035.html", + "node_id":"mrs_01_24035.xml", "product_code":"mrs", - "code":"431", + "code":"499", "des":"Hudi provides multiple write modes. For details, see the configuration item hoodie.datasource.write.operation. This section describes upsert, insert, and bulk_insert.inse", "doc_type":"usermanual", "kw":"Batch Write,Write,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Batch Write", "githuburl":"" }, { "uri":"mrs_01_24036.html", + "node_id":"mrs_01_24036.xml", "product_code":"mrs", - "code":"432", + "code":"500", "des":"The HoodieDeltaStreamer tool provided by Hudi supports stream write. You can also use SparkStreaming to write data in microbatch mode. 
HoodieDeltaStreamer provides the fo", "doc_type":"usermanual", "kw":"Stream Write,Write,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Stream Write", "githuburl":"" }, { "uri":"mrs_01_24069.html", + "node_id":"mrs_01_24069.xml", "product_code":"mrs", - "code":"433", + "code":"501", "des":"The bootstrapping function provided by Hudi converts historical tables into Hudi tables without any change by generating Hoodie management files based on historical Parqu", "doc_type":"usermanual", "kw":"Bootstrapping,Write,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Bootstrapping", "githuburl":"" }, { "uri":"mrs_01_24064.html", + "node_id":"mrs_01_24064.xml", "product_code":"mrs", - "code":"434", + "code":"502", "des":"You can run run_hive_sync_tool.sh to synchronize data in the Hudi table to Hive.For example, run the following command to synchronize the Hudi table in the hdfs://haclust", "doc_type":"usermanual", "kw":"Synchronizing Hudi Table Data to Hive,Write,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Synchronizing Hudi Table Data to Hive", "githuburl":"" }, { "uri":"mrs_01_24037.html", + "node_id":"mrs_01_24037.xml", "product_code":"mrs", - "code":"435", + "code":"503", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Read", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Read", "githuburl":"" }, { "uri":"mrs_01_24098.html", + "node_id":"mrs_01_24098.xml", "product_code":"mrs", - "code":"436", + "code":"504", "des":"Reading the real-time view (using Hive and SparkSQL as an example): Directly read the Hudi table stored in Hive.select count(*) from test;Reading the real-time view (usin", "doc_type":"usermanual", "kw":"Reading COW Table Views,Read,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Reading COW Table Views", "githuburl":"" }, { "uri":"mrs_01_24099.html", + "node_id":"mrs_01_24099.xml", "product_code":"mrs", - "code":"437", + "code":"505", "des":"After the MOR table is synchronized to Hive, the following two tables are synchronized to Hive: Table name_rt and Table name_ro. The table suffixed with rt indicates the ", "doc_type":"usermanual", "kw":"Reading MOR Table Views,Read,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Reading MOR Table Views", "githuburl":"" }, { "uri":"mrs_01_24038.html", + "node_id":"mrs_01_24038.xml", "product_code":"mrs", - "code":"438", + "code":"506", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Data Management and Maintenance", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Data Management and Maintenance", "githuburl":"" }, { "uri":"mrs_01_24164.html", + "node_id":"mrs_01_24164.xml", "product_code":"mrs", - "code":"439", + "code":"507", "des":"IntroductionA metadata table is a special Hudi metadata table, which is hidden from users. The table stores metadata of a common Hudi table.The metadata table is included", "doc_type":"usermanual", "kw":"Metadata Table,Data Management and Maintenance,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Metadata Table", "githuburl":"" }, { "uri":"mrs_01_24088.html", + "node_id":"mrs_01_24088.xml", "product_code":"mrs", - "code":"440", + "code":"508", "des":"Clustering reorganizes data layout to improve query performance without affecting the ingestion speed.Hudi provides different operations, such as insert, upsert, and bulk", "doc_type":"usermanual", "kw":"Clustering,Data Management and Maintenance,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Clustering", "githuburl":"" }, { "uri":"mrs_01_24089.html", + "node_id":"mrs_01_24089.xml", "product_code":"mrs", - "code":"441", + "code":"509", "des":"Cleaning is used to delete data of versions that are no longer required.Hudi uses the cleaner working in the background to continuously delete unnecessary data of old ver", "doc_type":"usermanual", "kw":"Cleaning,Data Management and Maintenance,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Cleaning", "githuburl":"" }, { "uri":"mrs_01_24090.html", + "node_id":"mrs_01_24090.xml", "product_code":"mrs", - "code":"442", + "code":"510", "des":"A compaction merges base and log files of MOR tables.For MOR tables, data is stored in columnar Parquet files and row-based Avro files, updates are recorded in incrementa", "doc_type":"usermanual", "kw":"Compaction,Data Management and Maintenance,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Compaction", "githuburl":"" }, { "uri":"mrs_01_24091.html", + "node_id":"mrs_01_24091.xml", "product_code":"mrs", - "code":"443", + "code":"511", "des":"Savepoints are used to save and restore data of the customized version.Savepoints provided by Hudi can save different commits so that the cleaner program does not delete ", "doc_type":"usermanual", "kw":"Savepoint,Data Management and Maintenance,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Savepoint", "githuburl":"" }, { "uri":"mrs_01_24165.html", + "node_id":"mrs_01_24165.xml", "product_code":"mrs", - "code":"444", + "code":"512", "des":"Uses an external service (ZooKeeper or Hive MetaStore) as the distributed mutex lock service.Files can be concurrently written, but commits cannot be concurrent. 
The comm", "doc_type":"usermanual", "kw":"Single-Table Concurrent Write,Data Management and Maintenance,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Single-Table Concurrent Write", "githuburl":"" }, { "uri":"mrs_01_24100.html", + "node_id":"mrs_01_24100.xml", "product_code":"mrs", - "code":"445", + "code":"513", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using the Hudi Client", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Hudi Client", "githuburl":"" }, { "uri":"mrs_01_24063.html", + "node_id":"mrs_01_24063.xml", "product_code":"mrs", - "code":"446", + "code":"514", "des":"You have created a user and added the user to user groups hadoop and hive on Manager.The Hudi client has been downloaded and installed.Log in to the client node as user r", "doc_type":"usermanual", "kw":"Operating a Hudi Table Using hudi-cli.sh,Using the Hudi Client,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Operating a Hudi Table Using hudi-cli.sh", "githuburl":"" }, { "uri":"mrs_01_24032.html", + "node_id":"mrs_01_24032.xml", "product_code":"mrs", - "code":"447", + "code":"515", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Configuration Reference", - "title":"Configuration Reference", + "kw":"Hudi Configuration Reference", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Hudi Configuration Reference", "githuburl":"" }, { "uri":"mrs_01_24093.html", + "node_id":"mrs_01_24093.xml", "product_code":"mrs", - "code":"448", + "code":"516", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Write Configuration,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Write Configuration,Hudi Configuration Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Write Configuration", "githuburl":"" }, { "uri":"mrs_01_24094.html", + "node_id":"mrs_01_24094.xml", "product_code":"mrs", - "code":"449", + "code":"517", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Configuration of Hive Table Synchronization,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Configuration of Hive Table Synchronization,Hudi Configuration Reference,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuration of Hive Table Synchronization", "githuburl":"" }, { "uri":"mrs_01_24095.html", + "node_id":"mrs_01_24095.xml", "product_code":"mrs", - "code":"450", + "code":"518", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Index Configuration,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Index Configuration,Hudi Configuration Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Index Configuration", "githuburl":"" }, { "uri":"mrs_01_24096.html", + "node_id":"mrs_01_24096.xml", "product_code":"mrs", - "code":"451", + "code":"519", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Storage Configuration,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Storage Configuration,Hudi Configuration Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Storage Configuration", "githuburl":"" }, { "uri":"mrs_01_24097.html", + "node_id":"mrs_01_24097.xml", "product_code":"mrs", - "code":"452", + "code":"520", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Compaction and Cleaning Configurations,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Compaction and Cleaning Configurations,Hudi Configuration Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Compaction and Cleaning Configurations", "githuburl":"" }, { "uri":"mrs_01_24166.html", + "node_id":"mrs_01_24166.xml", "product_code":"mrs", - "code":"453", + "code":"521", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Metadata Table Configuration,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Metadata Table Configuration,Hudi Configuration Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Metadata Table Configuration", "githuburl":"" }, { "uri":"mrs_01_24167.html", + "node_id":"mrs_01_24167.xml", "product_code":"mrs", - "code":"454", + "code":"522", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", - "kw":"Single-Table Concurrent Write Configuration,Configuration Reference,Component Operation Guide (LTS)", + "kw":"Single-Table Concurrent Write Configuration,Hudi Configuration Reference,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Single-Table Concurrent Write Configuration", "githuburl":"" }, + { + "uri":"mrs_01_24804.html", + "node_id":"mrs_01_24804.xml", + "product_code":"", + "code":"523", + "des":"This section applies only to MRS 3.2.0 or later.Clustering has two strategies: hoodie.clustering.plan.strategy.class and hoodie.clustering.execution.strategy.class. Typic", + "doc_type":"", + "kw":"Clustering Configuration,Hudi Configuration Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Clustering Configuration", + "githuburl":"" + }, { "uri":"mrs_01_24039.html", + "node_id":"mrs_01_24039.xml", "product_code":"mrs", - "code":"455", + "code":"524", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Hudi Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hudi Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_24101.html", + "node_id":"mrs_01_24101.xml", "product_code":"mrs", - "code":"456", + "code":"525", "des":"In the current version, Spark is recommended for Hudi write operations. Therefore, the tuning methods of Hudi are similar to those of Spark. 
For details, see Spark2x Perf", "doc_type":"usermanual", "kw":"Performance Tuning Methods,Hudi Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Performance Tuning Methods", "githuburl":"" }, { "uri":"mrs_01_24102.html", + "node_id":"mrs_01_24102.xml", "product_code":"mrs", - "code":"457", + "code":"526", "des":"For MOR tables:The essence of MOR tables is to write incremental files, so the tuning is based on the data size (dataSize) of Hudi.If dataSize is only several GBs, you ar", "doc_type":"usermanual", "kw":"Recommended Resource Configuration,Hudi Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Recommended Resource Configuration", "githuburl":"" }, { "uri":"mrs_01_24261.html", + "node_id":"mrs_01_24261.xml", "product_code":"", - "code":"458", + "code":"527", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"", "kw":"Hudi SQL Syntax Reference", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Hudi SQL Syntax Reference", "githuburl":"" }, { "uri":"mrs_01_24262.html", + "node_id":"mrs_01_24262.xml", "product_code":"", - "code":"459", + "code":"528", "des":"Hudi 0.9.0 adds Spark SQL DDL and DML statements for using Hudi, making it easier for all users (including non-engineers or analysts) to access and operate Hudi.You can u", "doc_type":"", "kw":"Constraints,Hudi SQL Syntax Reference,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Constraints", "githuburl":"" }, { "uri":"mrs_01_24263.html", + "node_id":"mrs_01_24263.xml", "product_code":"", - "code":"460", + "code":"529", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"", - "kw":"DDL", - "title":"DDL", + "kw":"Hudi DDL", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Hudi DDL", "githuburl":"" }, { "uri":"mrs_01_24264.html", + "node_id":"mrs_01_24264.xml", "product_code":"", - "code":"461", + "code":"530", "des":"This command is used to create a Hudi table by specifying the list of fields along with the table options.CREATE TABLE [ IF NOT EXISTS] [database_name.]table_name[ (colu", "doc_type":"", - "kw":"CREATE TABLE,DDL,Component Operation Guide (LTS)", - "title":"CREATE TABLE", + "kw":"CREATE Hudi TABLE,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CREATE Hudi TABLE", "githuburl":"" }, { "uri":"mrs_01_24265.html", + "node_id":"mrs_01_24265.xml", "product_code":"", - "code":"462", + "code":"531", "des":"This command is used to create a Hudi table by specifying the list of fields along with the table options.CREATE TABLE [ IF NOT EXISTS] [database_name.]table_nameUSING h", "doc_type":"", - "kw":"CREATE TABLE AS SELECT,DDL,Component Operation Guide (LTS)", - "title":"CREATE TABLE AS SELECT", + "kw":"CREATE Hudi TABLE AS SELECT,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CREATE Hudi TABLE AS SELECT", "githuburl":"" }, { "uri":"mrs_01_24266.html", + "node_id":"mrs_01_24266.xml", "product_code":"", - "code":"463", + "code":"532", "des":"This command is used to delete an existing table.DROP TABLE [IF EXISTS] [db_name.]table_name;In this command, IF EXISTS and db_name are optional.DROP TABLE IF EXISTS hudi", "doc_type":"", - "kw":"DROP TABLE,DDL,Component Operation Guide (LTS)", - "title":"DROP TABLE", + "kw":"DROP Hudi TABLE,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"DROP Hudi TABLE", "githuburl":"" }, { "uri":"mrs_01_24267.html", + "node_id":"mrs_01_24267.xml", "product_code":"", - "code":"464", + "code":"533", "des":"This command is used to display all tables in current database or all tables in a specific database.SHOW TABLES [IN db_name];IN db_Name is optional. It is required only w", "doc_type":"", - "kw":"SHOW TABLE,DDL,Component Operation Guide (LTS)", + "kw":"SHOW TABLE,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"SHOW TABLE", "githuburl":"" }, { "uri":"mrs_01_24268.html", + "node_id":"mrs_01_24268.xml", "product_code":"", - "code":"465", + "code":"534", "des":"This command is used to rename an existing table.ALTERTABLEoldTableName RENAMETO newTableNameThe table name is changed. You can run the SHOW TABLES command to display the", "doc_type":"", - "kw":"ALTER RENAME TABLE,DDL,Component Operation Guide (LTS)", + "kw":"ALTER RENAME TABLE,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"ALTER RENAME TABLE", "githuburl":"" }, { "uri":"mrs_01_24269.html", + "node_id":"mrs_01_24269.xml", "product_code":"", - "code":"466", + "code":"535", "des":"This command is used to add columns to an existing table.ALTER TABLEtableIdentifierADD COLUMNS(colAndType (,colAndType)*)The columns are added to the table. 
You can run t", "doc_type":"", - "kw":"ALTER ADD COLUMNS,DDL,Component Operation Guide (LTS)", + "kw":"ALTER ADD COLUMNS,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"ALTER ADD COLUMNS", "githuburl":"" }, { "uri":"mrs_01_24271.html", + "node_id":"mrs_01_24271.xml", "product_code":"", - "code":"467", + "code":"536", "des":"This command is used to clear all data in a specific table.TRUNCATE TABLEtableIdentifierData in the table is cleared. You can run the QUERY statement to check whether dat", "doc_type":"", - "kw":"TRUNCATE TABLE,DDL,Component Operation Guide (LTS)", - "title":"TRUNCATE TABLE", + "kw":"TRUNCATE Hudi TABLE,Hudi DDL,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"TRUNCATE Hudi TABLE", "githuburl":"" }, { "uri":"mrs_01_24272.html", + "node_id":"mrs_01_24272.xml", "product_code":"", - "code":"468", + "code":"537", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"", - "kw":"DML", - "title":"DML", + "kw":"Hudi DML", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Hudi DML", "githuburl":"" }, { "uri":"mrs_01_24273.html", + "node_id":"mrs_01_24273.xml", "product_code":"", - "code":"469", + "code":"538", "des":"This command is used to insert the output of the SELECT statement to a Hudi table.INSERT INTOtableIndentifier select query;Insert mode: Hudi supports three insert modes f", "doc_type":"", - "kw":"INSERT INTO,DML,Component Operation Guide (LTS)", + "kw":"INSERT INTO,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"INSERT INTO", "githuburl":"" }, { "uri":"mrs_01_24274.html", + "node_id":"mrs_01_24274.xml", "product_code":"", - "code":"470", + "code":"539", "des":"This command is used to query another table based on the join condition of a table or subquery. 
If UPDATE or DELETE is executed for the table matching the join condition,", "doc_type":"", - "kw":"MERGE INTO,DML,Component Operation Guide (LTS)", + "kw":"MERGE INTO,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"MERGE INTO", "githuburl":"" }, { "uri":"mrs_01_24275.html", + "node_id":"mrs_01_24275.xml", "product_code":"", - "code":"471", + "code":"540", "des":"This command is used to update the Hudi table based on the column expression and optional filtering conditions.UPDATE tableIdentifier SET column = EXPRESSION(,column = EX", "doc_type":"", - "kw":"UPDATE,DML,Component Operation Guide (LTS)", - "title":"UPDATE", + "kw":"UPDATE Hudi Data,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"UPDATE Hudi Data", "githuburl":"" }, { "uri":"mrs_01_24276.html", + "node_id":"mrs_01_24276.xml", "product_code":"", - "code":"472", + "code":"541", "des":"This command is used to delete records from a Hudi table.DELETE from tableIdentifier [ WHERE boolExpression]Example 1:delete from h0 where column1 = 'country';Example 2:d", "doc_type":"", - "kw":"DELETE,DML,Component Operation Guide (LTS)", - "title":"DELETE", + "kw":"DELETE Hudi Data,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"DELETE Hudi Data", "githuburl":"" }, { "uri":"mrs_01_24277.html", + "node_id":"mrs_01_24277.xml", "product_code":"", - "code":"473", + "code":"542", "des":"This command is used to convert row-based log files in MOR tables into column-based data files in parquet tables to accelerate record search.SCHEDULE COMPACTION on tableI", "doc_type":"", - "kw":"COMPACTION,DML,Component Operation Guide (LTS)", - "title":"COMPACTION", + "kw":"COMPACTION Hudi Data,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"COMPACTION Hudi Data", "githuburl":"" }, { "uri":"mrs_01_24278.html", + "node_id":"mrs_01_24278.xml", "product_code":"", - "code":"474", + "code":"543", "des":"This command is used to dynamically add, update, display, or reset Hudi parameters without restarting the driver.Add or update a parameter value:SET parameter_name=parame", "doc_type":"", - "kw":"SET/RESET,DML,Component Operation Guide (LTS)", - "title":"SET/RESET", + "kw":"SET/RESET Hudi Data,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"SET/RESET Hudi Data", + "githuburl":"" + }, + { + "uri":"mrs_01_24783.html", + "node_id":"mrs_01_24783.xml", + "product_code":"", + "code":"544", + "des":"This section applies only to MRS 3.2.0 or later.Archives instants on the Timeline based on configurations and deletes archived instants from the Timeline to reduce the op", + "doc_type":"", + "kw":"ARCHIVELOG,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"ARCHIVELOG", + "githuburl":"" + }, + { + "uri":"mrs_01_24801.html", + "node_id":"mrs_01_24801.xml", + "product_code":"", + "code":"545", + "des":"This section applies only to MRS 3.2.0 or later.Cleans instants on the Timeline based on configurations and deletes historical version files to reduce the data storage an", + "doc_type":"", + "kw":"CLEAN,Hudi DML,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CLEAN", + "githuburl":"" + }, + { + "uri":"mrs_01_24739.html", + "node_id":"mrs_01_24739.xml", + "product_code":"", + "code":"546", + "des":"HUAWEI 
CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"CALL COMMAND", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CALL COMMAND", + "githuburl":"" + }, + { + "uri":"mrs_01_24740.html", + "node_id":"mrs_01_24740.xml", + "product_code":"", + "code":"547", + "des":"The CHANGE_TABLE command can be used to modify the type and index of a table. Key parameters such as the type and index of Hudi tables cannot be modified. Therefore, this", + "doc_type":"", + "kw":"CHANGE_TABLE,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CHANGE_TABLE", + "githuburl":"" + }, + { + "uri":"mrs_01_24781.html", + "node_id":"mrs_01_24781.xml", + "product_code":"", + "code":"548", + "des":"Cleans invalid data files from the Hudi table directory.call clean_file(table => '[table_name]', mode=>'[op_type]', backup_path=>'[backup_path]', start_instant_time=>'[st", + "doc_type":"", + "kw":"CLEAN_FILE,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"CLEAN_FILE", + "githuburl":"" + }, + { + "uri":"mrs_01_24782.html", + "node_id":"mrs_01_24782.xml", + "product_code":"", + "code":"549", + "des":"Displays the effective or archived Hudi timelines and details of a specified instant time.Viewing the list of effective timelines of a table:call show_active_instant_list", + "doc_type":"", + "kw":"SHOW_TIME_LINE,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"SHOW_TIME_LINE", + "githuburl":"" + }, + { + "uri":"mrs_01_24799.html", + "node_id":"mrs_01_24799.xml", + "product_code":"", + "code":"550", + "des":"Displays the configuration in the hoodie.properties file of a specified Hudi table.call show_hoodie_properties(table => '[table_name]');You can view query results on the ", + "doc_type":"", + "kw":"SHOW_HOODIE_PROPERTIES,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"SHOW_HOODIE_PROPERTIES", + "githuburl":"" + }, + { + "uri":"mrs_01_24800.html", + "node_id":"mrs_01_24800.xml", + "product_code":"", + "code":"551", + "des":"Manages savepoints of Hudi tables.Creating a savepoint:call create_savepoints('[table_name]', '[commit_Time]', '[user]', '[comments]');call create_savepoints('[table_name", + "doc_type":"", + "kw":"SAVE_POINT,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"SAVE_POINT", + "githuburl":"" + }, + { + "uri":"mrs_01_24803.html", + "node_id":"mrs_01_24803.xml", + "product_code":"", + "code":"552", + "des":"Rolls back a specified commit.call rollback_to_instant(table => '[table_name]', instant_time => '[instant]');Only the latest commit timestamps can be rolled back in seque", + "doc_type":"", + "kw":"ROLL_BACK,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"ROLL_BACK", + "githuburl":"" + }, + { + "uri":"mrs_01_24802.html", + "node_id":"mrs_01_24802.xml", + "product_code":"", + "code":"553", + "des":"Performs the clustering operation on Hudi tables. 
For details, see Clustering.Creating a savepoint:call run_clustering(table=>'[table]', path=>'[path]', predicate=>'[pred", + "doc_type":"", + "kw":"Hudi CLUSTERING,CALL COMMAND,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Hudi CLUSTERING", + "githuburl":"" + }, + { + "uri":"mrs_01_24492.html", + "node_id":"mrs_01_24492.xml", + "product_code":"", + "code":"554", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"Hudi Schema Evolution", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Hudi Schema Evolution", + "githuburl":"" + }, + { + "uri":"mrs_01_24493.html", + "node_id":"mrs_01_24493.xml", + "product_code":"", + "code":"555", + "des":"Schema evolution allows users to easily change the current schema of a Hudi table to adapt to the data that is changing over time.This section applies only to MRS 3.1.3 o", + "doc_type":"", + "kw":"Evolution Introduction,Hudi Schema Evolution,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Evolution Introduction", + "githuburl":"" + }, + { + "uri":"mrs_01_24494.html", + "node_id":"mrs_01_24494.xml", + "product_code":"", + "code":"556", + "des":"Schema evolution scenariosColumns (including nested columns) can be added, deleted, modified, and moved.Partition columns cannot be evolved.You cannot add, delete, or per", + "doc_type":"", + "kw":"Schema Evolution Scenarios,Hudi Schema Evolution,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Schema Evolution Scenarios", + "githuburl":"" + }, + { + "uri":"mrs_01_24495.html", + "node_id":"mrs_01_24495.xml", + "product_code":"", + "code":"557", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"SparkSQL Schema Evolution and Syntax Description", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"SparkSQL Schema Evolution and Syntax Description", + "githuburl":"" + }, + { + "uri":"mrs_01_24496.html", + "node_id":"mrs_01_24496.xml", + "product_code":"", + "code":"558", + "des":"Schema evolution cannot be disabled once being enabled.To use spark-beeline, log in to FusionInsight Manager, choose Cluster > Services > Spark2x, and click the Configura", + "doc_type":"", + "kw":"Enabling Schema Evolution,SparkSQL Schema Evolution and Syntax Description,Component Operation Guide", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Enabling Schema Evolution", + "githuburl":"" + }, + { + "uri":"mrs_01_24498.html", + "node_id":"mrs_01_24498.xml", + "product_code":"", + "code":"559", + "des":"The ADD COLUMNS command is used to add a column to an existing table.ALTER TABLETable nameADD COLUMNS(col_spec[, col_spec ...])You can run the DESCRIBE command to view th", + "doc_type":"", + "kw":"Adding a Column,SparkSQL Schema Evolution and Syntax Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Adding a Column", + "githuburl":"" + }, + { + "uri":"mrs_01_24499.html", + "node_id":"mrs_01_24499.xml", + "product_code":"", + "code":"560", + "des":"The ALTER TABLE ... ALTER COLUMN command is used to change the attributes of a column, such as the column type, position, and comment.ALTER TABLETable name ALTER[COLUMN]c", + "doc_type":"", + "kw":"Altering a Column,SparkSQL Schema Evolution and Syntax Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Altering a Column", + "githuburl":"" + }, + { + "uri":"mrs_01_24500.html", + "node_id":"mrs_01_24500.xml", + "product_code":"", + "code":"561", + "des":"The ALTER TABLE ... DROP COLUMN command is used to delete a column.ALTER TABLEtableName DROP COLUMN|COLUMNScolsa.b.c indicates the full path of a nested column. For detai", + "doc_type":"", + "kw":"Deleting a Column,SparkSQL Schema Evolution and Syntax Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Deleting a Column", + "githuburl":"" + }, + { + "uri":"mrs_01_24501.html", + "node_id":"mrs_01_24501.xml", + "product_code":"", + "code":"562", + "des":"The ALTER TABLE ... RENAME command is used to change the table name.ALTER TABLEtableNameRENAME TOnewTableNameYou can run the SHOW TABLES command to view the new table nam", + "doc_type":"", + "kw":"Changing a Table Name,SparkSQL Schema Evolution and Syntax Description,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Changing a Table Name", + "githuburl":"" + }, + { + "uri":"mrs_01_24502.html", + "node_id":"mrs_01_24502.xml", + "product_code":"", + "code":"563", + "des":"The ALTER TABLE ... 
SET|UNSET command is used to modify table properties.ALTER TABLETable nameSET|UNSET tblpropertiesYou can run the DESCRIBE command to view new table pr", + "doc_type":"", + "kw":"Modifying Table Properties,SparkSQL Schema Evolution and Syntax Description,Component Operation Guid", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Modifying Table Properties", + "githuburl":"" + }, + { + "uri":"mrs_01_24503.html", + "node_id":"mrs_01_24503.xml", + "product_code":"", + "code":"564", + "des":"The ALTER TABLE ... RENAME COLUMN command is used to change the column name.ALTER TABLEtableNameRENAME COLUMNold_columnNameTOnew_columnNamea.b.c indicates the full path o", + "doc_type":"", + "kw":"Changing the Column Name,SparkSQL Schema Evolution and Syntax Description,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Changing the Column Name", + "githuburl":"" + }, + { + "uri":"mrs_01_24550.html", + "node_id":"mrs_01_24550.xml", + "product_code":"", + "code":"565", + "des":"When creating a table, you need to set hoodie.cleaner.policy.failed.writes to LAZY. Otherwise, rollback will be triggered when concurrent submission operations are perfor", + "doc_type":"", + "kw":"Concurrency for Schema Evolution,Hudi Schema Evolution,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Concurrency for Schema Evolution", "githuburl":"" }, { "uri":"mrs_01_24065.html", + "node_id":"mrs_01_24065.xml", "product_code":"mrs", - "code":"475", + "code":"566", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Hudi", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Hudi", "githuburl":"" }, { "uri":"mrs_01_24070.html", + "node_id":"mrs_01_24070.xml", "product_code":"mrs", - "code":"476", + "code":"567", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Data Write", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Data Write", "githuburl":"" }, { "uri":"mrs_01_24071.html", + "node_id":"mrs_01_24071.xml", "product_code":"mrs", - "code":"477", + "code":"568", "des":"The following error is reported when data is written:You are advised to evolve schemas in backward compatible mode while using Hudi. This error usually occurs when you de", "doc_type":"usermanual", "kw":"Parquet/Avro schema Is Reported When Updated Data Is Written,Data Write,Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Parquet/Avro schema Is Reported When Updated Data Is Written", "githuburl":"" }, { "uri":"mrs_01_24072.html", + "node_id":"mrs_01_24072.xml", "product_code":"mrs", - "code":"478", + "code":"569", "des":"The following error is reported when data is written:This error will occur again because schema evolutions are in non-backwards compatible mode. 
Basically, there is some ", "doc_type":"usermanual", "kw":"UnsupportedOperationException Is Reported When Updated Data Is Written,Data Write,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"UnsupportedOperationException Is Reported When Updated Data Is Written", "githuburl":"" }, { "uri":"mrs_01_24073.html", + "node_id":"mrs_01_24073.xml", "product_code":"mrs", - "code":"479", + "code":"570", "des":"The following error is reported when data is written:This error may occur if a schema contains some non-nullable field whose value is not present or is null.You are advis", "doc_type":"usermanual", "kw":"SchemaCompatabilityException Is Reported When Updated Data Is Written,Data Write,Component Operation", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SchemaCompatabilityException Is Reported When Updated Data Is Written", "githuburl":"" }, { "uri":"mrs_01_24074.html", + "node_id":"mrs_01_24074.xml", "product_code":"mrs", - "code":"480", + "code":"571", "des":"Hudi consumes much space in a temporary folder during upsert.Hudi will spill part of input data to disk if the maximum memory for merge is reached when much input data is", "doc_type":"usermanual", "kw":"What Should I Do If Hudi Consumes Much Space in a Temporary Folder During Upsert?,Data Write,Compone", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Should I Do If Hudi Consumes Much Space in a Temporary Folder During Upsert?", "githuburl":"" }, { "uri":"mrs_01_24075.html", + "node_id":"mrs_01_24075.xml", "product_code":"mrs", - "code":"481", + "code":"572", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Data Collection", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Data Collection", "githuburl":"" }, { "uri":"mrs_01_24077.html", + "node_id":"mrs_01_24077.xml", "product_code":"mrs", - "code":"482", + "code":"573", "des":"The error \"org.apache.kafka.common.KafkaException: Failed to construct kafka consumer\" is reported in the main thread, and the following error is reported.This error may ", "doc_type":"usermanual", "kw":"IllegalArgumentException Is Reported When Kafka Is Used to Collect Data,Data Collection,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"IllegalArgumentException Is Reported When Kafka Is Used to Collect Data", "githuburl":"" }, { "uri":"mrs_01_24078.html", + "node_id":"mrs_01_24078.xml", "product_code":"mrs", - "code":"483", + "code":"574", "des":"The following error is reported when data is collected:This error usually occurs when a field marked as recordKey or partitionKey is not present in the input record. 
Cros", "doc_type":"usermanual", "kw":"HoodieException Is Reported When Data Is Collected,Data Collection,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HoodieException Is Reported When Data Is Collected", "githuburl":"" }, { "uri":"mrs_01_24079.html", + "node_id":"mrs_01_24079.xml", "product_code":"mrs", - "code":"484", + "code":"575", "des":"Is it possible to use a nullable field that contains null records as a primary key when creating a Hudi table?No. HoodieKeyException will be thrown.", "doc_type":"usermanual", "kw":"HoodieKeyException Is Reported When Data Is Collected,Data Collection,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HoodieKeyException Is Reported When Data Is Collected", "githuburl":"" }, { "uri":"mrs_01_24080.html", + "node_id":"mrs_01_24080.xml", "product_code":"mrs", - "code":"485", + "code":"576", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Hive Synchronization", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Synchronization", "githuburl":"" }, { "uri":"mrs_01_24081.html", + "node_id":"mrs_01_24081.xml", "product_code":"mrs", - "code":"486", + "code":"577", "des":"The following error is reported during Hive data synchronization:This error usually occurs when you try to add a new column to an existing Hive table using the HiveSyncTo", "doc_type":"usermanual", "kw":"SQLException Is Reported During Hive Data Synchronization,Hive Synchronization,Component Operation G", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SQLException Is Reported During Hive Data Synchronization", "githuburl":"" }, { "uri":"mrs_01_24082.html", + "node_id":"mrs_01_24082.xml", "product_code":"mrs", - "code":"487", + "code":"578", "des":"The following error is reported during Hive data synchronization:This error occurs because HiveSyncTool currently supports only few compatible data type conversions. 
The ", "doc_type":"usermanual", "kw":"HoodieHiveSyncException Is Reported During Hive Data Synchronization,Hive Synchronization,Component ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HoodieHiveSyncException Is Reported During Hive Data Synchronization", "githuburl":"" }, { "uri":"mrs_01_24083.html", + "node_id":"mrs_01_24083.xml", "product_code":"mrs", - "code":"488", + "code":"579", "des":"The following error is reported during Hive data synchronization:This error usually occurs when Hive synchronization is performed on the Hudi dataset but the configured h", "doc_type":"usermanual", "kw":"SemanticException Is Reported During Hive Data Synchronization,Hive Synchronization,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SemanticException Is Reported During Hive Data Synchronization", "githuburl":"" }, { "uri":"mrs_01_0130.html", + "node_id":"mrs_01_0130.xml", "product_code":"mrs", - "code":"489", + "code":"580", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Hue", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hue", "githuburl":"" }, { "uri":"mrs_01_0131.html", + "node_id":"mrs_01_0131.xml", "product_code":"mrs", - "code":"490", + "code":"581", "des":"Hue aggregates interfaces which interact with most Apache Hadoop components and enables you to use Hadoop components with ease on a web UI. You can operate components suc", "doc_type":"usermanual", "kw":"Using Hue from Scratch,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hue from Scratch", "githuburl":"" }, { "uri":"mrs_01_0132.html", + "node_id":"mrs_01_0132.xml", "product_code":"mrs", - "code":"491", + "code":"582", "des":"After Hue is installed in an MRS cluster, users can use Hadoop-related components on the Hue web UI.This section describes how to open the Hue web UI on the MRS cluster.T", "doc_type":"usermanual", "kw":"Accessing the Hue Web UI,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Accessing the Hue Web UI", "githuburl":"" }, { "uri":"mrs_01_0133.html", + "node_id":"mrs_01_0133.xml", "product_code":"mrs", - "code":"492", + "code":"583", "des":"Go to the All Configurations page of the Hue service by referring to Modifying Cluster Service Configuration Parameters.For details about Hue common parameters, see Table", "doc_type":"usermanual", "kw":"Hue Common Parameters,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hue Common Parameters", "githuburl":"" }, { "uri":"mrs_01_0134.html", + "node_id":"mrs_01_0134.xml", "product_code":"mrs", - "code":"493", + "code":"584", "des":"Users can use the Hue web UI to execute HiveQL statements in an MRS cluster.Hive supports the following functions:Executes and manages HiveQL statements.Views the HiveQL ", "doc_type":"usermanual", "kw":"Using HiveQL Editor on the Hue Web UI,Using Hue,Component Operation Guide (LTS)", + 
"search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HiveQL Editor on the Hue Web UI", "githuburl":"" }, { "uri":"mrs_01_0135.html", + "node_id":"mrs_01_0135.xml", "product_code":"mrs", - "code":"494", + "code":"585", "des":"Users can use the Hue web UI to manage Hive metadata in an MRS cluster.Access the Hue web UI. For details, see Accessing the Hue Web UI.Viewing metadata of Hive tablesCli", "doc_type":"usermanual", "kw":"Using the Metadata Browser on the Hue Web UI,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Metadata Browser on the Hue Web UI", "githuburl":"" }, { "uri":"mrs_01_0136.html", + "node_id":"mrs_01_0136.xml", "product_code":"mrs", - "code":"495", + "code":"586", "des":"Users can use the Hue web UI to manage files in HDFS.The Hue page is used to view and analyze data such as files and tables. Do not perform high-risk management operation", "doc_type":"usermanual", "kw":"Using File Browser on the Hue Web UI,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using File Browser on the Hue Web UI", "githuburl":"" }, { "uri":"mrs_01_0137.html", + "node_id":"mrs_01_0137.xml", "product_code":"mrs", - "code":"496", + "code":"587", "des":"Users can use the Hue web UI to query all jobs in an MRS cluster.View the jobs in the current cluster.The number on Job Browser indicates the total number of jobs in the ", "doc_type":"usermanual", "kw":"Using Job Browser on the Hue Web UI,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Job Browser on the Hue Web UI", "githuburl":"" }, { "uri":"mrs_01_2371.html", + "node_id":"mrs_01_2371.xml", "product_code":"mrs", - "code":"497", + "code":"588", "des":"You can use Hue to create or query HBase tables in a cluster and run tasks on the Hue web UI.", "doc_type":"usermanual", "kw":"Using HBase on the Hue Web UI,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using HBase on the Hue Web UI", "githuburl":"" }, { "uri":"mrs_01_0138.html", + "node_id":"mrs_01_0138.xml", "product_code":"mrs", - "code":"498", + "code":"589", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Typical Scenarios", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenarios", "githuburl":"" }, { "uri":"mrs_01_0139.html", + "node_id":"mrs_01_0139.xml", "product_code":"mrs", - "code":"499", + "code":"590", "des":"Hue provides the file browser function for users to use HDFS in GUI mode.The Hue page is used to view and analyze data such as files and tables. 
Do not perform high-risk ", "doc_type":"usermanual", "kw":"HDFS on Hue,Typical Scenarios,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HDFS on Hue", "githuburl":"" }, { "uri":"mrs_01_0141.html", + "node_id":"mrs_01_0141.xml", "product_code":"mrs", - "code":"500", + "code":"591", "des":"Hue provides the Hive GUI management function so that users can query Hive data in GUI mode.Access the Hue web UI. For details, see Accessing the Hue Web UI.In the naviga", "doc_type":"usermanual", "kw":"Hive on Hue,Typical Scenarios,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive on Hue", "githuburl":"" }, { "uri":"mrs_01_0144.html", + "node_id":"mrs_01_0144.xml", "product_code":"mrs", - "code":"501", + "code":"592", "des":"Hue provides the Oozie job manager function, in this case, you can use Oozie in GUI mode.The Hue page is used to view and analyze data such as files and tables. Do not pe", "doc_type":"usermanual", "kw":"Oozie on Hue,Typical Scenarios,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Oozie on Hue", "githuburl":"" }, { "uri":"mrs_01_0147.html", + "node_id":"mrs_01_0147.xml", "product_code":"mrs", - "code":"502", + "code":"593", "des":"Log paths: The default paths of Hue logs are /var/log/Bigdata/hue (for storing run logs) and /var/log/Bigdata/audit/hue (for storing audit logs).Log archive rules: The au", "doc_type":"usermanual", "kw":"Hue Log Overview,Using Hue,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hue Log Overview", "githuburl":"" }, { "uri":"mrs_01_1764.html", + "node_id":"mrs_01_1764.xml", "product_code":"mrs", - "code":"503", + "code":"594", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Hue", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Hue", "githuburl":"" }, { "uri":"mrs_01_1765.html", + "node_id":"mrs_01_1765.xml", "product_code":"mrs", - "code":"504", + "code":"595", "des":"What do I do if all HQL statements fail to be executed when I use Internet Explorer to access Hive Editor in Hue and the message \"There was an error with your query\" is d", "doc_type":"usermanual", "kw":"How Do I Solve the Problem that HQL Fails to Be Executed in Hue Using Internet Explorer?,Common Issu", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Solve the Problem that HQL Fails to Be Executed in Hue Using Internet Explorer?", "githuburl":"" }, { "uri":"mrs_01_1766.html", + "node_id":"mrs_01_1766.xml", "product_code":"mrs", - "code":"505", + "code":"596", "des":"When Hive is used, the use database statement is entered in the text box to switch the database, and other statements are also entered, why does the database fail to be s", "doc_type":"usermanual", "kw":"Why Does the use database Statement Become Invalid When Hive Is Used?,Common Issues About Hue,Compon", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the use database Statement Become Invalid When Hive Is Used?", "githuburl":"" }, { "uri":"mrs_01_0156.html", + "node_id":"mrs_01_0156.xml", "product_code":"mrs", - "code":"506", + "code":"597", "des":"What can I do if an error message shown in the following figure is displayed, indicating that the HDFS file cannot be accessed when I use Hue web UI to access the HDFS fi", "doc_type":"usermanual", "kw":"What Can I Do If HDFS Files Fail to Be Accessed Using Hue WebUI?,Common Issues About Hue,Component O", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do If HDFS Files Fail to Be Accessed Using Hue WebUI?", "githuburl":"" }, { "uri":"mrs_01_2368.html", + "node_id":"mrs_01_2368.xml", "product_code":"mrs", - "code":"507", + "code":"598", "des":"If the Hive service is not installed in the cluster, the native Hue service page is blank.In the current version, Hue depends on the Hive component. If this occurs, check", "doc_type":"usermanual", "kw":"Hue Page Cannot Be Displayed When the Hive Service Is Not Installed in a Cluster,Common Issues About", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hue Page Cannot Be Displayed When the Hive Service Is Not Installed in a Cluster", "githuburl":"" }, { - "uri":"mrs_01_0375.html", + "uri":"mrs_01_24144.html", + "node_id":"mrs_01_24144.xml", "product_code":"mrs", - "code":"508", + "code":"599", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"cmpntguide", + "kw":"Using IoTDB", + "search_title":"", + "metedata":[ + { + "IsBot":"No", + "documenttype":"productdesc", + "prodname":"mrs" + } + ], + "title":"Using IoTDB", + "githuburl":"" + }, + { + "uri":"mrs_01_24157.html", + "node_id":"mrs_01_24157.xml", + "product_code":"mrs", + "code":"600", + "des":"IoTDB is a data management engine that integrates collection, storage, and analysis of time series data. It features lightweight, high performance, and ease of use. It pe", + "doc_type":"cmpntguide", + "kw":"Using IoTDB from Scratch,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Using IoTDB from Scratch", + "githuburl":"" + }, + { + "uri":"mrs_01_24158.html", + "node_id":"mrs_01_24158.xml", + "product_code":"mrs", + "code":"601", + "des":"This section describes how to use the IoTDB client in the O&M or service scenario.The client has been installed. For example, the installation directory is /opt/client. T", + "doc_type":"cmpntguide", + "kw":"Using the IoTDB Client,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Using the IoTDB Client", + "githuburl":"" + }, + { + "uri":"mrs_01_24159.html", + "node_id":"mrs_01_24159.xml", + "product_code":"mrs", + "code":"602", + "des":"IoTDB uses the multi-replica deployment architecture to implement cluster high availability. Each region (DataRegion and SchemaRegion) has three replicas by default. You ", + "doc_type":"cmpntguide", + "kw":"Configuring IoTDB Parameters,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Configuring IoTDB Parameters", + "githuburl":"" + }, + { + "uri":"mrs_01_24764.html", + "node_id":"mrs_01_24764.xml", + "product_code":"", + "code":"603", + "des":"IoTDB supports the following data types and encodings. For details, see Table 1.", + "doc_type":"", + "kw":"Data Types and Encodings Supported by IoTDB,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Data Types and Encodings Supported by IoTDB", + "githuburl":"" + }, + { + "uri":"mrs_01_24140.html", + "node_id":"mrs_01_24140.xml", + "product_code":"mrs", + "code":"604", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"cmpntguide", + "kw":"IoTDB Permission Management", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"IoTDB Permission Management", + "githuburl":"" + }, + { + "uri":"mrs_01_24141.html", + "node_id":"mrs_01_24141.xml", + "product_code":"mrs", + "code":"605", + "des":"MRS supports users, user groups, and roles. Permissions must be assigned to roles and then roles are bound to users or user groups. 
Users can obtain permissions only by b", + "doc_type":"cmpntguide", + "kw":"IoTDB Permissions,IoTDB Permission Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"IoTDB Permissions", + "githuburl":"" + }, + { + "uri":"mrs_01_24142.html", + "node_id":"mrs_01_24142.xml", + "product_code":"mrs", + "code":"606", + "des":"Create and configure an IoTDB role on Manager as an MRS cluster administrator. An IoTDB role can be configured with IoTDB administrator permissions or a common user's per", + "doc_type":"cmpntguide", + "kw":"Creating an IoTDB Role,IoTDB Permission Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Creating an IoTDB Role", + "githuburl":"" + }, + { + "uri":"mrs_01_24161.html", + "node_id":"mrs_01_24161.xml", + "product_code":"mrs", + "code":"607", + "des":"DescriptionLog paths: The default paths of IoTDB logs are /var/log/Bigdata/iotdb/iotdbserver (for storing run logs) and /var/log/Bigdata/audit/iotdb/iotdbserver (for stor", + "doc_type":"cmpntguide", + "kw":"IoTDB Log Overview,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"IoTDB Log Overview", + "githuburl":"" + }, + { + "uri":"mrs_01_24512.html", + "node_id":"mrs_01_24512.xml", + "product_code":"", + "code":"608", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"UDFs", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"UDFs", + "githuburl":"" + }, + { + "uri":"mrs_01_24513.html", + "node_id":"mrs_01_24513.xml", + "product_code":"", + "code":"609", + "des":"IoTDB provides multiple built-in functions and user-defined functions (UDFs) to meet users' computing requirements.Table 1 lists the UDF types supported by IoTDB.To write", + "doc_type":"", + "kw":"UDF Overview,UDFs,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"UDF Overview", + "githuburl":"" + }, + { + "uri":"mrs_01_24509.html", + "node_id":"mrs_01_24509.xml", + "product_code":"", + "code":"610", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"", + "kw":"IoTDB Data Import and Export", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"IoTDB Data Import and Export", + "githuburl":"" + }, + { + "uri":"mrs_01_24510.html", + "node_id":"mrs_01_24510.xml", + "product_code":"", + "code":"611", + "des":"This section describes how to use import-csv.sh to import data in CSV format to IoTDB.The client has been installed. For details, see . 
For example, the installation dire", + "doc_type":"", + "kw":"Importing IoTDB Data,IoTDB Data Import and Export,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Importing IoTDB Data", + "githuburl":"" + }, + { + "uri":"mrs_01_24511.html", + "node_id":"mrs_01_24511.xml", + "product_code":"", + "code":"612", + "des":"This section describes how to use export-csv.sh to export data from IoTDB to a CSV file.Exporting data to CSV files may cause injection risks. Exercise caution when perfo", + "doc_type":"", + "kw":"Exporting IoTDB Data,IoTDB Data Import and Export,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Exporting IoTDB Data", + "githuburl":"" + }, + { + "uri":"mrs_01_24765.html", + "node_id":"mrs_01_24765.xml", + "product_code":"", + "code":"613", + "des":"IoTDB has the multi-replica mechanism. By default, both schema regions and data regions have three replicas. The ConfigNode stores the mapping between regions and the IoT", + "doc_type":"", + "kw":"Planning IoTDB Capacity,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Planning IoTDB Capacity", + "githuburl":"" + }, + { + "uri":"mrs_01_24162.html", + "node_id":"mrs_01_24162.xml", + "product_code":"mrs", + "code":"614", + "des":"You can increase IoTDB memory to improve IoTDB performance because read and write operations are performed in HBase memory.Log in to Manager, choose Cluster > Services > ", + "doc_type":"cmpntguide", + "kw":"IoTDB Performance Tuning,Using IoTDB,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"IoTDB Performance Tuning", + "githuburl":"" + }, + { + "uri":"mrs_01_0375.html", + "node_id":"mrs_01_0375.xml", + "product_code":"mrs", + "code":"615", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Kafka", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Kafka", "githuburl":"" }, { "uri":"mrs_01_1031.html", + "node_id":"mrs_01_1031.xml", "product_code":"mrs", - "code":"509", + "code":"616", "des":"You can create, query, and delete topics on a cluster client.The client has been installed. For example, the client is installed in the /opt/hadoopclient directory. The c", "doc_type":"usermanual", "kw":"Using Kafka from Scratch,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Kafka from Scratch", "githuburl":"" }, { "uri":"mrs_01_0376.html", + "node_id":"mrs_01_0376.xml", "product_code":"mrs", - "code":"510", + "code":"617", "des":"You can manage Kafka topics on a cluster client based on service requirements. 
Management permission is required for clusters with Kerberos authentication enabled.You hav", "doc_type":"usermanual", "kw":"Managing Kafka Topics,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Kafka Topics", "githuburl":"" }, { "uri":"mrs_01_0377.html", + "node_id":"mrs_01_0377.xml", "product_code":"mrs", - "code":"511", + "code":"618", "des":"You can query existing Kafka topics on MRS.Log in to FusionInsight Manager. For details, see Accessing FusionInsight Manager. Choose Cluster > Name of the desired cluster", "doc_type":"usermanual", "kw":"Querying Kafka Topics,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Querying Kafka Topics", "githuburl":"" }, { "uri":"mrs_01_0378.html", + "node_id":"mrs_01_0378.xml", "product_code":"mrs", - "code":"512", + "code":"619", "des":"For clusters with Kerberos authentication enabled, using Kafka requires relevant permissions. MRS clusters can grant the use permission of Kafka to different users.Table ", "doc_type":"usermanual", "kw":"Managing Kafka User Permissions,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Kafka User Permissions", "githuburl":"" }, { "uri":"mrs_01_0379.html", + "node_id":"mrs_01_0379.xml", "product_code":"mrs", - "code":"513", + "code":"620", "des":"You can produce or consume messages in Kafka topics using the MRS cluster client. For clusters with Kerberos authentication enabled, you must have the permission to perfo", "doc_type":"usermanual", "kw":"Managing Messages in Kafka Topics,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Messages in Kafka Topics", "githuburl":"" }, { "uri":"mrs_01_1032.html", + "node_id":"mrs_01_1032.xml", "product_code":"mrs", - "code":"514", + "code":"621", "des":"This section describes how to create and configure a Kafka role.Users can create Kafka roles only in security mode.If the current component uses Ranger for permission con", "doc_type":"usermanual", "kw":"Creating a Kafka Role,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Kafka Role", "githuburl":"" }, { "uri":"mrs_01_1033.html", + "node_id":"mrs_01_1033.xml", "product_code":"mrs", - "code":"515", + "code":"622", "des":"For details about how to set parameters, see Modifying Cluster Service Configuration Parameters.", "doc_type":"usermanual", "kw":"Kafka Common Parameters,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka Common Parameters", "githuburl":"" }, { "uri":"mrs_01_1035.html", + "node_id":"mrs_01_1035.xml", "product_code":"mrs", - "code":"516", + "code":"623", "des":"Producer APIIndicates the API defined in org.apache.kafka.clients.producer.KafkaProducer. 
When kafka-console-producer.sh is used, the API is used by default.Indicates the", "doc_type":"usermanual", "kw":"Safety Instructions on Using Kafka,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Safety Instructions on Using Kafka", "githuburl":"" }, { "uri":"mrs_01_1036.html", + "node_id":"mrs_01_1036.xml", "product_code":"mrs", - "code":"517", + "code":"624", "des":"The maximum number of topics depends on the number of file handles (mainly used by data and index files on site) opened in the process.Run the ulimit -n command to view t", "doc_type":"usermanual", "kw":"Kafka Specifications,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka Specifications", "githuburl":"" }, { "uri":"mrs_01_1767.html", + "node_id":"mrs_01_1767.xml", "product_code":"mrs", - "code":"518", + "code":"625", "des":"This section guides users to use a Kafka client in an O&M or service scenario.The client has been installed. For example, the installation directory is /opt/client.Servic", "doc_type":"usermanual", "kw":"Using the Kafka Client,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Kafka Client", "githuburl":"" }, { "uri":"mrs_01_1037.html", + "node_id":"mrs_01_1037.xml", "product_code":"mrs", - "code":"519", + "code":"626", "des":"For the Kafka message transmission assurance mechanism, different parameters are available for meeting different performance and reliability requirements. This section de", "doc_type":"usermanual", "kw":"Configuring Kafka HA and High Reliability Parameters,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Kafka HA and High Reliability Parameters", "githuburl":"" }, { "uri":"mrs_01_1038.html", + "node_id":"mrs_01_1038.xml", "product_code":"mrs", - "code":"520", + "code":"627", "des":"When a broker storage directory is added, the system administrator needs to change the broker storage directory on FusionInsight Manager, to ensure that the Kafka can wor", "doc_type":"usermanual", "kw":"Changing the Broker Storage Directory,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Changing the Broker Storage Directory", "githuburl":"" }, { "uri":"mrs_01_1039.html", + "node_id":"mrs_01_1039.xml", "product_code":"mrs", - "code":"521", + "code":"628", "des":"This section describes how to view the current expenditure on the client based on service requirements.The system administrator has understood service requirements and pr", "doc_type":"usermanual", "kw":"Checking the Consumption Status of Consumer Group,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Checking the Consumption Status of Consumer Group", "githuburl":"" }, { "uri":"mrs_01_1040.html", + "node_id":"mrs_01_1040.xml", "product_code":"mrs", - "code":"522", + "code":"629", "des":"This section describes how to use the Kafka balancing tool on a client to balance the load of the Kafka cluster based on service requirements in scenarios such as node de", "doc_type":"usermanual", "kw":"Kafka 
Balancing Tool Instructions,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka Balancing Tool Instructions", "githuburl":"" }, { "uri":"mrs_01_1041.html", + "node_id":"mrs_01_1041.xml", "product_code":"mrs", - "code":"523", + "code":"630", "des":"Operations need to be performed on tokens when the token authentication mechanism is used.The system administrator has understood service requirements and prepared a syst", "doc_type":"usermanual", "kw":"Kafka Token Authentication Mechanism Tool Usage,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka Token Authentication Mechanism Tool Usage", "githuburl":"" }, { "uri":"mrs_01_2312.html", + "node_id":"mrs_01_2312.xml", "product_code":"mrs", - "code":"524", + "code":"631", "des":"Feature description: The function of creating idempotent producers is introduced in Kafka 0.11.0.0. After this function is enabled, producers are automatically upgraded t", "doc_type":"usermanual", "kw":"Kafka Feature Description,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka Feature Description", "githuburl":"" }, { "uri":"mrs_01_24130.html", + "node_id":"mrs_01_24130.xml", "product_code":"mrs", - "code":"525", + "code":"632", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Kafka UI", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Kafka UI", "githuburl":"" }, { "uri":"mrs_01_24134.html", + "node_id":"mrs_01_24134.xml", "product_code":"mrs", - "code":"526", + "code":"633", "des":"After the Kafka component is installed in an MRS cluster, you can use Kafka UI to query cluster information, node status, topic partitions, and data production and consum", "doc_type":"usermanual", "kw":"Accessing Kafka UI,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Accessing Kafka UI", "githuburl":"" }, { "uri":"mrs_01_24135.html", + "node_id":"mrs_01_24135.xml", "product_code":"mrs", - "code":"527", + "code":"634", "des":"After logging in to Kafka UI, you can view the basic information about the existing topics, brokers, and consumer groups in the current cluster on the home page. You can ", "doc_type":"usermanual", "kw":"Kafka UI Overview,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka UI Overview", "githuburl":"" }, { "uri":"mrs_01_24136.html", + "node_id":"mrs_01_24136.xml", "product_code":"mrs", - "code":"528", + "code":"635", "des":"Create a topic on Kafka UI.You can click Advanced Options to set advanced topic parameters based on service requirements. 
Generally, retain the default values.In a cluste", "doc_type":"usermanual", "kw":"Creating a Topic on Kafka UI,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Topic on Kafka UI", "githuburl":"" }, { "uri":"mrs_01_24137.html", + "node_id":"mrs_01_24137.xml", "product_code":"mrs", - "code":"529", + "code":"636", "des":"Migrate a partition on Kafka UI.In security mode, the user who migrates a partition must belong to the kafkaadmin user group. Otherwise, the operation fails due to authen", "doc_type":"usermanual", "kw":"Migrating a Partition on Kafka UI,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Migrating a Partition on Kafka UI", "githuburl":"" }, { "uri":"mrs_01_24138.html", + "node_id":"mrs_01_24138.xml", "product_code":"mrs", - "code":"530", + "code":"637", "des":"On Kafka UI, you can view topic details, modify topic configurations, add topic partitions, delete topics, and view the number of data records produced in different time ", "doc_type":"usermanual", "kw":"Managing Topics on Kafka UI,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Managing Topics on Kafka UI", "githuburl":"" }, { "uri":"mrs_01_24139.html", + "node_id":"mrs_01_24139.xml", "product_code":"mrs", - "code":"531", + "code":"638", "des":"On Kafka UI, you can view broker details and JMX metrics of the broker node data traffic.", "doc_type":"usermanual", "kw":"Viewing Brokers on Kafka UI,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Brokers on Kafka UI", "githuburl":"" }, { "uri":"mrs_01_24133.html", + "node_id":"mrs_01_24133.xml", "product_code":"mrs", - "code":"532", + "code":"639", "des":"On Kafka UI, you can view the basic information about a consumer group and the consumption status of topics in the group.MRS clusters do not support redirection by clicki", "doc_type":"usermanual", "kw":"Viewing a Consumer Group on Kafka UI,Using Kafka UI,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing a Consumer Group on Kafka UI", "githuburl":"" }, { "uri":"mrs_01_1042.html", + "node_id":"mrs_01_1042.xml", "product_code":"mrs", - "code":"533", + "code":"640", "des":"Log paths: The default storage path of Kafka logs is /var/log/Bigdata/kafka. The default storage path of audit logs is /var/log/Bigdata/audit/kafka.Broker: /var/log/Bigda", "doc_type":"usermanual", "kw":"Introduction to Kafka Logs,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to Kafka Logs", "githuburl":"" }, { "uri":"mrs_01_1043.html", + "node_id":"mrs_01_1043.xml", "product_code":"mrs", - "code":"534", + "code":"641", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1044.html", + "node_id":"mrs_01_1044.xml", "product_code":"mrs", - "code":"535", + "code":"642", "des":"You can modify Kafka server parameters to improve Kafka processing capabilities in specific service scenarios.Modify the service configuration parameters. For details, se", "doc_type":"usermanual", "kw":"Kafka Performance Tuning,Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Kafka Performance Tuning", "githuburl":"" }, + { + "uri":"mrs_01_24534.html", + "node_id":"mrs_01_24534.xml", + "product_code":"", + "code":"643", + "des":"This section applies to MRS 3.2.0 or later.This section describes how to use Kafka client commands to migrate partition data between disks on a node without stopping the ", + "doc_type":"", + "kw":"Migrating Data Between Kafka Nodes,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Migrating Data Between Kafka Nodes", + "githuburl":"" + }, + { + "uri":"mrs_01_24576.html", + "node_id":"mrs_01_24576.xml", + "product_code":"", + "code":"644", + "des":"This section applies to MRS 3.2.0 or later.To access Kafka Broker deployed on a private network from the Kafka client via the Internet, enable the Kafka private and publi", + "doc_type":"", + "kw":"Configuring Intranet and Extranet Access for Kafka,Using Kafka,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Intranet and Extranet Access for Kafka", + "githuburl":"" + }, { "uri":"mrs_01_1768.html", + "node_id":"mrs_01_1768.xml", "product_code":"mrs", - "code":"536", + "code":"645", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Kafka", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Kafka", "githuburl":"" }, { "uri":"mrs_01_1769.html", + "node_id":"mrs_01_1769.xml", "product_code":"mrs", - "code":"537", + "code":"646", "des":"How do I delete a Kafka topic if it fails to be deleted?Possible cause 1: The delete.topic.enable configuration item is not set to true. The deletion can be performed onl", "doc_type":"usermanual", "kw":"How Do I Solve the Problem that Kafka Topics Cannot Be Deleted?,Common Issues About Kafka,Component ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Solve the Problem that Kafka Topics Cannot Be Deleted?", "githuburl":"" }, { "uri":"mrs_01_0400.html", + "node_id":"mrs_01_0400.xml", "product_code":"mrs", - "code":"538", + "code":"647", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Loader", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Loader", "githuburl":"" }, { "uri":"mrs_01_1784.html", + "node_id":"mrs_01_1784.xml", "product_code":"mrs", - "code":"539", + "code":"648", "des":"For details about the how to set parameters, see Modifying Cluster Service Configuration Parameters.Because it needs time to calculate the fault tolerance rate, you are r", "doc_type":"usermanual", "kw":"Common Loader Parameters,Using Loader,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Loader Parameters", "githuburl":"" }, { "uri":"mrs_01_1085.html", + "node_id":"mrs_01_1085.xml", "product_code":"mrs", - "code":"540", + "code":"649", "des":"This section describes how to create and configure a Loader role on FusionInsight Manager. The Loader role can set Loader administrator permissions, job connections, job ", "doc_type":"usermanual", "kw":"Creating a Loader Role,Using Loader,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Loader Role", "githuburl":"" }, { "uri":"mrs_01_1086.html", + "node_id":"mrs_01_1086.xml", "product_code":"mrs", - "code":"541", + "code":"650", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Importing Data", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Importing Data", "githuburl":"" }, { "uri":"mrs_01_1087.html", + "node_id":"mrs_01_1087.xml", "product_code":"mrs", - "code":"542", + "code":"651", "des":"Loader is an ETL tool that enables MRS to exchange data and files with external data sources, such as relational databases, SFTP servers, and FTP servers. It allows data ", "doc_type":"usermanual", - "kw":"Overview,Importing Data,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"Loader Importing Data Overview,Importing Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Loader Importing Data Overview", "githuburl":"" }, { "uri":"mrs_01_1088.html", + "node_id":"mrs_01_1088.xml", "product_code":"mrs", - "code":"543", + "code":"652", "des":"This section describes how to import data from external data sources to MRS.Generally, you can manually manage data import and export jobs on the Loader UI. 
To use shell ", "doc_type":"usermanual", "kw":"Importing Data Using Loader,Importing Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Importing Data Using Loader", "githuburl":"" }, { "uri":"mrs_01_1089.html", + "node_id":"mrs_01_1089.xml", "product_code":"mrs", - "code":"544", + "code":"653", "des":"Use Loader to import data from an SFTP server to HDFS or OBS.You have obtained the service username and password for creating a Loader job.You have had the permission to ", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from an SFTP Server to HDFS or OBS,Importing Data,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from an SFTP Server to HDFS or OBS", "githuburl":"" }, { "uri":"mrs_01_1090.html", + "node_id":"mrs_01_1090.xml", "product_code":"mrs", - "code":"545", + "code":"654", "des":"Use Loader to import data from an SFTP server to HBase.You have obtained the service username and password for creating a Loader job.You have had the permission to access", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from an SFTP Server to HBase,Importing Data,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from an SFTP Server to HBase", "githuburl":"" }, { "uri":"mrs_01_1091.html", + "node_id":"mrs_01_1091.xml", "product_code":"mrs", - "code":"546", + "code":"655", "des":"Use Loader to import data from an SFTP server to Hive.You have obtained the service username and password for creating a Loader job.You have had the permission to access ", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from an SFTP Server to Hive,Importing Data,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from an SFTP Server to Hive", "githuburl":"" }, { "uri":"mrs_01_1092.html", + "node_id":"mrs_01_1092.xml", "product_code":"mrs", - "code":"547", + "code":"656", "des":"Use Loader to import data from an SFTP server to Spark.You have obtained the service username and password for creating a Loader job.You have had the permission to access", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from an SFTP Server to Spark,Importing Data,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from an SFTP Server to Spark", "githuburl":"" }, { "uri":"mrs_01_1093.html", + "node_id":"mrs_01_1093.xml", "product_code":"mrs", - "code":"548", + "code":"657", "des":"Use Loader to import data from an FTP server to HBase.You have obtained the service username and password for creating a Loader job.You have obtained the username and pas", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from an FTP Server to HBase,Importing Data,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from an FTP Server to HBase", "githuburl":"" }, { "uri":"mrs_01_1094.html", + "node_id":"mrs_01_1094.xml", "product_code":"mrs", - "code":"549", + "code":"658", "des":"Use Loader to import data from a relational database to HDFS or 
OBS.You have obtained the service username and password for creating a Loader job.You have had the permiss", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from a Relational Database to HDFS or OBS,Importing Data,Component ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from a Relational Database to HDFS or OBS", "githuburl":"" }, { "uri":"mrs_01_1095.html", + "node_id":"mrs_01_1095.xml", "product_code":"mrs", - "code":"550", + "code":"659", "des":"Use Loader to import data from a relational database to HBase.You have obtained the service username and password for creating a Loader job.You have had the permission to", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from a Relational Database to HBase,Importing Data,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from a Relational Database to HBase", "githuburl":"" }, { "uri":"mrs_01_1096.html", + "node_id":"mrs_01_1096.xml", "product_code":"mrs", - "code":"551", + "code":"660", "des":"Use Loader to import data from a relational database to Hive.You have obtained the service username and password for creating a Loader job.You have had the permission to ", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from a Relational Database to Hive,Importing Data,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from a Relational Database to Hive", "githuburl":"" }, { "uri":"mrs_01_1097.html", + "node_id":"mrs_01_1097.xml", "product_code":"mrs", - "code":"552", + "code":"661", "des":"Use Loader to import data from a relational database to Spark.You have obtained the service username and password for creating a Loader job.You have had the permission to", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from a Relational Database to Spark,Importing Data,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from a Relational Database to Spark", "githuburl":"" }, { "uri":"mrs_01_1098.html", + "node_id":"mrs_01_1098.xml", "product_code":"mrs", - "code":"553", + "code":"662", "des":"Use Loader to import data from HDFS or OBS to HBase.You have obtained the service username and password for creating a Loader job.You have had the permission to access th", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from HDFS or OBS to HBase,Importing Data,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from HDFS or OBS to HBase", "githuburl":"" }, { "uri":"mrs_01_24172.html", + "node_id":"mrs_01_24172.xml", "product_code":"", - "code":"554", + "code":"663", "des":"This section describes how to use Loader to import data from a relational database to ClickHouse using MySQL as an example.You have obtained the service username and pass", "doc_type":"", "kw":"Typical Scenario: Importing Data from a Relational Database to ClickHouse,Importing Data,Component O", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Typical Scenario: Importing Data from a Relational Database to ClickHouse", "githuburl":"" }, { "uri":"mrs_01_24173.html", + 
"node_id":"mrs_01_24173.xml", "product_code":"", - "code":"555", + "code":"664", "des":"Use Loader to import data from HDFS to ClickHouse.You have obtained the service username and password for creating a Loader job.You have had the permission to access the ", "doc_type":"", "kw":"Typical Scenario: Importing Data from HDFS to ClickHouse,Importing Data,Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Typical Scenario: Importing Data from HDFS to ClickHouse", "githuburl":"" }, { "uri":"mrs_01_1100.html", + "node_id":"mrs_01_1100.xml", "product_code":"mrs", - "code":"556", + "code":"665", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Exporting Data", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Exporting Data", "githuburl":"" }, { "uri":"mrs_01_1101.html", + "node_id":"mrs_01_1101.xml", "product_code":"mrs", - "code":"557", + "code":"666", "des":"Loader is an extract, transform, and load (ETL) tool for exchanging data and files between MRS and relational databases and file systems. You can use the Loader to export", "doc_type":"usermanual", - "kw":"Overview,Exporting Data,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"Loader Exporting Data Overview,Exporting Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Loader Exporting Data Overview", "githuburl":"" }, { "uri":"mrs_01_1102.html", + "node_id":"mrs_01_1102.xml", "product_code":"mrs", - "code":"558", + "code":"667", "des":"This task enables you to export data from MRS to external data sources.Generally, users can manually manage data import and export jobs on the Loader UI. 
To use shell scr", "doc_type":"usermanual", "kw":"Using Loader to Export Data,Exporting Data,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Loader to Export Data", "githuburl":"" }, { "uri":"mrs_01_1103.html", + "node_id":"mrs_01_1103.xml", "product_code":"mrs", - "code":"559", + "code":"668", "des":"This section describes how to use Loader to export data from HDFS/OBS to an SFTP server.You have obtained the service username and password for creating a Loader job.You ", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from HDFS/OBS to an SFTP Server,Exporting Data,Component Operation ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from HDFS/OBS to an SFTP Server", "githuburl":"" }, { "uri":"mrs_01_1104.html", + "node_id":"mrs_01_1104.xml", "product_code":"mrs", - "code":"560", + "code":"669", "des":"Use Loader to export data from HBase to an SFTP server.You have obtained the service username and password for creating a Loader job.You have had the permission to access", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from HBase to an SFTP Server,Exporting Data,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from HBase to an SFTP Server", "githuburl":"" }, { "uri":"mrs_01_1105.html", + "node_id":"mrs_01_1105.xml", "product_code":"mrs", - "code":"561", + "code":"670", "des":"Use Loader to export data from Hive to an SFTP server.You have obtained the service username and password for creating a Loader job.You have had the permission to access ", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from Hive to an SFTP Server,Exporting Data,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from Hive to an SFTP Server", "githuburl":"" }, { "uri":"mrs_01_1106.html", + "node_id":"mrs_01_1106.xml", "product_code":"mrs", - "code":"562", + "code":"671", "des":"This section describes how to use Loader to export data from Spark to an SFTP server.You have obtained the service username and password for creating a Loader job.You hav", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from Spark to an SFTP Server,Exporting Data,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from Spark to an SFTP Server", "githuburl":"" }, { "uri":"mrs_01_1107.html", + "node_id":"mrs_01_1107.xml", "product_code":"mrs", - "code":"563", + "code":"672", "des":"This section describes how to use Loader to export data from HDFS/OBS to a relational database.You have obtained the service username and password for creating a Loader j", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from HDFS/OBS to a Relational Database,Exporting Data,Component Ope", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from HDFS/OBS to a Relational Database", "githuburl":"" }, { "uri":"mrs_01_1108.html", + "node_id":"mrs_01_1108.xml", "product_code":"mrs", - "code":"564", + "code":"673", "des":"Use Loader to export data from HBase to a relational 
database.You have obtained the service username and password for creating a Loader job.You have had the permission to", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from HBase to a Relational Database,Exporting Data,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from HBase to a Relational Database", "githuburl":"" }, { "uri":"mrs_01_1109.html", + "node_id":"mrs_01_1109.xml", "product_code":"mrs", - "code":"565", + "code":"674", "des":"Use Loader to export data from Hive to a relational database.You have obtained the service username and password for creating a Loader job.You have had the permission to ", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from Hive to a Relational Database,Exporting Data,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from Hive to a Relational Database", "githuburl":"" }, { "uri":"mrs_01_1110.html", + "node_id":"mrs_01_1110.xml", "product_code":"mrs", - "code":"566", + "code":"675", "des":"This section describes how to use Loader to export data from Spark to a relational database.You have obtained the service username and password for creating a Loader job.", "doc_type":"usermanual", "kw":"Typical Scenario: Exporting Data from Spark to a Relational Database,Exporting Data,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Exporting Data from Spark to a Relational Database", "githuburl":"" }, { "uri":"mrs_01_1111.html", + "node_id":"mrs_01_1111.xml", "product_code":"mrs", - "code":"567", + "code":"676", "des":"This section describes how to use Loader to export data from HBase to HDFS/OBS.You have obtained the service user name and password for creating a Loader job.You have had", "doc_type":"usermanual", "kw":"Typical Scenario: Importing Data from HBase to HDFS/OBS,Exporting Data,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Typical Scenario: Importing Data from HBase to HDFS/OBS", "githuburl":"" }, { "uri":"mrs_01_1113.html", + "node_id":"mrs_01_1113.xml", "product_code":"mrs", - "code":"568", + "code":"677", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Job Management", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Job Management", "githuburl":"" }, { "uri":"mrs_01_1114.html", + "node_id":"mrs_01_1114.xml", "product_code":"mrs", - "code":"569", + "code":"678", "des":"Loader allows jobs to be migrated in batches from a group (source group) to another group (target group).The source group and target group exist.The current user has the ", "doc_type":"usermanual", "kw":"Migrating Loader Jobs in Batches,Job Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Migrating Loader Jobs in Batches", "githuburl":"" }, { "uri":"mrs_01_1115.html", + "node_id":"mrs_01_1115.xml", "product_code":"mrs", - "code":"570", + "code":"679", "des":"Loader allows existing jobs to be deleted in batches.The current user has the Edit permission for the jobs to be deleted or the Jobs Edit permission for the group to whic", "doc_type":"usermanual", "kw":"Deleting Loader Jobs in Batches,Job Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Deleting Loader Jobs in Batches", "githuburl":"" }, { "uri":"mrs_01_1116.html", + "node_id":"mrs_01_1116.xml", "product_code":"mrs", - "code":"571", + "code":"680", "des":"Loader allows all jobs of a configuration file to be imported in batches.The current user has the Jobs Edit permission of the group to which the jobs to be imported belon", "doc_type":"usermanual", "kw":"Importing Loader Jobs in Batches,Job Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Importing Loader Jobs in Batches", "githuburl":"" }, { "uri":"mrs_01_1117.html", + "node_id":"mrs_01_1117.xml", "product_code":"mrs", - "code":"572", + "code":"681", "des":"Loader allows existing jobs to be exported in batches.The current user has the Edit permission for the jobs to be exported or the Jobs Edit permission of the group to whi", "doc_type":"usermanual", "kw":"Exporting Loader Jobs in Batches,Job Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Exporting Loader Jobs in Batches", "githuburl":"" }, { "uri":"mrs_01_1118.html", + "node_id":"mrs_01_1118.xml", "product_code":"mrs", - "code":"573", + "code":"682", "des":"Query the execution status and execution duration of a Loader job during routine maintenance. You can perform the following operations on the job:Dirty Data: Query data t", "doc_type":"usermanual", "kw":"Viewing Historical Job Information,Job Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Historical Job Information", "githuburl":"" }, + { + "uri":"mrs_01_24813.html", + "node_id":"mrs_01_24813.xml", + "product_code":"", + "code":"683", + "des":"This section applies to MRS 3.2.0 or later.Loader accumulates a large amount of historical data during service running. 
The historical data may affect job submission, run", + "doc_type":"", + "kw":"Purging Historical Loader Data,Job Management,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Purging Historical Loader Data", + "githuburl":"" + }, { "uri":"mrs_01_1119.html", + "node_id":"mrs_01_1119.xml", "product_code":"mrs", - "code":"574", + "code":"684", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Operator Help", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Operator Help", "githuburl":"" }, { "uri":"mrs_01_1120.html", + "node_id":"mrs_01_1120.xml", "product_code":"mrs", - "code":"575", + "code":"685", "des":"Loader reads data at the source end, uses an input operator to convert data into fields by certain rules, use a conversion operator to clean or convert the fields, and fi", "doc_type":"usermanual", - "kw":"Overview,Operator Help,Component Operation Guide (LTS)", - "title":"Overview", + "kw":"Loader Operator Overview,Operator Help,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Loader Operator Overview", "githuburl":"" }, { "uri":"mrs_01_1121.html", + "node_id":"mrs_01_1121.xml", "product_code":"mrs", - "code":"576", + "code":"686", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Input Operators", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Input Operators", "githuburl":"" }, { "uri":"mrs_01_1122.html", + "node_id":"mrs_01_1122.xml", "product_code":"mrs", - "code":"577", + "code":"687", "des":"The CSV File Input operator imports all files that can be opened by using a text editor.Input: test filesOutput: fieldsEach data line is separated into multiple fields by", "doc_type":"usermanual", "kw":"CSV File Input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"CSV File Input", "githuburl":"" }, { "uri":"mrs_01_1123.html", + "node_id":"mrs_01_1123.xml", "product_code":"mrs", - "code":"578", + "code":"688", "des":"The Fixed File Input operator converts each line in a file into multiple fields by character or byte of a configurable length.Input: text fileOutput: fieldsThe source fil", "doc_type":"usermanual", "kw":"Fixed File Input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Fixed File Input", "githuburl":"" }, { "uri":"mrs_01_1124.html", + "node_id":"mrs_01_1124.xml", "product_code":"mrs", - "code":"579", + "code":"689", "des":"Table Input operator converts specified columns in a relational database table into input fields of the same quantity.Input: table columnsOutput: fieldsFields are generat", "doc_type":"usermanual", "kw":"Table Input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + 
"prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Table Input", "githuburl":"" }, { "uri":"mrs_01_1125.html", + "node_id":"mrs_01_1125.xml", "product_code":"mrs", - "code":"580", + "code":"690", "des":"The HBase Input operator converts specified columns in an HBase table into input fields of the same quantity.Input: HBase table columnsOutput: fieldsIf the HBase table na", "doc_type":"usermanual", "kw":"HBase Input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HBase Input", "githuburl":"" }, { "uri":"mrs_01_1126.html", + "node_id":"mrs_01_1126.xml", "product_code":"mrs", - "code":"581", + "code":"691", "des":"HTML Input operator imports a regular HTML file and converts elements in the HTML file into input fields.Input: HTML fileOutput: multiple fieldsparent tag is configured f", "doc_type":"usermanual", "kw":"HTML Input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HTML Input", "githuburl":"" }, { "uri":"mrs_01_1128.html", + "node_id":"mrs_01_1128.xml", "product_code":"mrs", - "code":"582", + "code":"692", "des":"The Hive Input operator converts specified columns in an HBase table into input fields of the same quantity.Input: Hive table columnsOutput: fieldsIf the Hive table name ", "doc_type":"usermanual", "kw":"Hive input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive input", "githuburl":"" }, { "uri":"mrs_01_1129.html", + "node_id":"mrs_01_1129.xml", "product_code":"mrs", - "code":"583", + "code":"693", "des":"The Spark Input operator converts specified columns in an SparkSQL table into input fields of the same quantity.Input: SparkSQL table columnOutput: fieldsIf the SparkSQL ", "doc_type":"usermanual", "kw":"Spark Input,Input Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark Input", "githuburl":"" }, { "uri":"mrs_01_1130.html", + "node_id":"mrs_01_1130.xml", "product_code":"mrs", - "code":"584", + "code":"694", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Conversion Operators", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Conversion Operators", "githuburl":"" }, { "uri":"mrs_01_1131.html", + "node_id":"mrs_01_1131.xml", "product_code":"mrs", - "code":"585", + "code":"695", "des":"The Long Date Conversion operator performs long integer and date conversion.Input: fields to be convertedOutput: new fieldsIf the original data includes null values, no c", "doc_type":"usermanual", "kw":"Long Date Conversion,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Long Date Conversion", "githuburl":"" }, { "uri":"mrs_01_1132.html", + "node_id":"mrs_01_1132.xml", "product_code":"mrs", - "code":"586", + "code":"696", "des":"The null value conversion operator replaces null values with specified values.Input: fields with null valuesOutput: original fields with new valuesWhen field values are e", "doc_type":"usermanual", "kw":"Null Value Conversion,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Null Value Conversion", "githuburl":"" }, { "uri":"mrs_01_1133.html", + "node_id":"mrs_01_1133.xml", "product_code":"mrs", - "code":"587", + "code":"697", "des":"The Add Constants operator generates constant fields.Input: noneOutput: constant fieldsThis operator generates constant fields of the specified type.Use the CSV File Inpu", "doc_type":"usermanual", "kw":"Constant Field Addition,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Constant Field Addition", "githuburl":"" }, { "uri":"mrs_01_1134.html", + "node_id":"mrs_01_1134.xml", "product_code":"mrs", - "code":"588", + "code":"698", "des":"Generate Random operator configures new values as random value fields.Input: noneOutput: random value fieldsThe operator generates random value fields of specified type.U", "doc_type":"usermanual", "kw":"Random Value Conversion,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Random Value Conversion", "githuburl":"" }, { "uri":"mrs_01_1135.html", + "node_id":"mrs_01_1135.xml", "product_code":"mrs", - "code":"589", + "code":"699", "des":"The Concat Fields operator concatenates existing fields by using delimiters to generate new fields.Input: fields to be concatenatedOutput: new fieldsUse delimiters to con", "doc_type":"usermanual", "kw":"Concat Fields,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Concat Fields", "githuburl":"" }, { "uri":"mrs_01_1136.html", + "node_id":"mrs_01_1136.xml", "product_code":"mrs", - "code":"590", + "code":"700", "des":"The Extract Fields separates an existing field by using delimiters to generate new fields.Input: field to be separatedOutput: new fieldsThe value of the input field is se", "doc_type":"usermanual", "kw":"Extract Fields,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + 
"documenttype":"cmpntguide" + } + ], "title":"Extract Fields", "githuburl":"" }, { "uri":"mrs_01_1137.html", + "node_id":"mrs_01_1137.xml", "product_code":"mrs", - "code":"591", + "code":"701", "des":"The Modulo Integer operator performs modulo operations on integer fields to generate new fields.Input: integer fieldsOutput: new fieldsThe operator generates new fields a", "doc_type":"usermanual", "kw":"Modulo Integer,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Modulo Integer", "githuburl":"" }, { "uri":"mrs_01_1138.html", + "node_id":"mrs_01_1138.xml", "product_code":"mrs", - "code":"592", + "code":"702", "des":"The String Cut operator cuts existing fields to generate new fields.Input: fields to be cutOutput: new fieldsstart position and end position are used to cut the original ", "doc_type":"usermanual", "kw":"String Cut,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"String Cut", "githuburl":"" }, { "uri":"mrs_01_1139.html", + "node_id":"mrs_01_1139.xml", "product_code":"mrs", - "code":"593", + "code":"703", "des":"The EL Operation operator calculates field values and generates new fields. The algorithms that are currently supported include md5sum, sha1sum, sha256sum, and sha512sum.", "doc_type":"usermanual", "kw":"EL Operation,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"EL Operation", "githuburl":"" }, { "uri":"mrs_01_1140.html", + "node_id":"mrs_01_1140.xml", "product_code":"mrs", - "code":"594", + "code":"704", "des":"The String Operations operator converts the upper and lower cases of existing fields to generate new fields.Input: fields whose case is to be convertedOutput: new fields ", "doc_type":"usermanual", "kw":"String Operations,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"String Operations", "githuburl":"" }, { "uri":"mrs_01_1141.html", + "node_id":"mrs_01_1141.xml", "product_code":"mrs", - "code":"595", + "code":"705", "des":"The String Reverse operator reverses existing fields to generate new fields.Input: fields to be reversedOutput: new fieldsValue reversal conversion is performed for field", "doc_type":"usermanual", "kw":"String Reverse,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"String Reverse", "githuburl":"" }, { "uri":"mrs_01_1142.html", + "node_id":"mrs_01_1142.xml", "product_code":"mrs", - "code":"596", + "code":"706", "des":"The String Trim operator clears spaces contained in existing fields to generate new fields.Input: fields whose spaces are to be clearedOutput: new fieldsClearing spaces a", "doc_type":"usermanual", "kw":"String Trim,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"String Trim", "githuburl":"" }, { "uri":"mrs_01_1143.html", + "node_id":"mrs_01_1143.xml", "product_code":"mrs", - "code":"597", + "code":"707", "des":"This Filter Rows operator filters rows that contain triggering conditions by configuring logic conditions.Input: fields used to create filter 
conditionsOutput: noneWhen t", "doc_type":"usermanual", "kw":"Filter Rows,Conversion Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Filter Rows", "githuburl":"" }, { "uri":"mrs_01_1145.html", + "node_id":"mrs_01_1145.xml", "product_code":"mrs", - "code":"598", + "code":"708", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Output Operators", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Output Operators", "githuburl":"" }, { "uri":"mrs_01_1146.html", + "node_id":"mrs_01_1146.xml", "product_code":"mrs", - "code":"599", + "code":"709", "des":"The Hive Output operator exports existing fields to specified columns of a Hive table.Input: fields to be exportedOutput: Hive tableThe field values are exported to the H", "doc_type":"usermanual", "kw":"Hive output,Output Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive output", "githuburl":"" }, { "uri":"mrs_01_1147.html", + "node_id":"mrs_01_1147.xml", "product_code":"mrs", - "code":"600", + "code":"710", "des":"The Spark Output operator exports existing fields to specified columns of a Spark SQL table.Input: fields to be exportedOutput: SparkSQL tableThe field values are exporte", "doc_type":"usermanual", "kw":"Spark Output,Output Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark Output", "githuburl":"" }, { "uri":"mrs_01_1148.html", + "node_id":"mrs_01_1148.xml", "product_code":"mrs", - "code":"601", + "code":"711", "des":"The Table Output operator exports output fields to specified columns in a relational database table.Input: fields to be exportedOutput: relational database tableThe field", "doc_type":"usermanual", "kw":"Table Output,Output Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Table Output", "githuburl":"" }, { "uri":"mrs_01_1149.html", + "node_id":"mrs_01_1149.xml", "product_code":"mrs", - "code":"602", + "code":"712", "des":"The File Output operator uses delimiters to concatenate existing fields and exports new fields to a file.Input: fields to be exportedOutput: filesThe field is exported to", "doc_type":"usermanual", "kw":"File Output,Output Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"File Output", "githuburl":"" }, { "uri":"mrs_01_1150.html", + "node_id":"mrs_01_1150.xml", "product_code":"mrs", - "code":"603", + "code":"713", "des":"The HBase Output operator exports existing fields to specified columns of an HBase Outputtable.Input: fields to be exportedOutput: HBase tableThe field values are exporte", "doc_type":"usermanual", "kw":"HBase Output,Output Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"HBase Output", "githuburl":"" }, { "uri":"mrs_01_24177.html", + "node_id":"mrs_01_24177.xml", 
"product_code":"", - "code":"604", + "code":"714", "des":"The ClickHouse Output operator exports existing fields to specified columns of a ClickHouse table.Input: fields to be exportedOutput: ClickHouse tableThe field values are", "doc_type":"", "kw":"ClickHouse Output,Output Operators,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"ClickHouse Output", "githuburl":"" }, { "uri":"mrs_01_1152.html", + "node_id":"mrs_01_1152.xml", "product_code":"mrs", - "code":"605", + "code":"715", "des":"This section describes how to associate, import, or export the field configuration information of an operator when creating or editing a Loader job.Associating the field ", "doc_type":"usermanual", "kw":"Associating, Editing, Importing, or Exporting the Field Configuration of an Operator,Operator Help,C", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Associating, Editing, Importing, or Exporting the Field Configuration of an Operator", "githuburl":"" }, { "uri":"mrs_01_1153.html", + "node_id":"mrs_01_1153.xml", "product_code":"mrs", - "code":"606", + "code":"716", "des":"When creating or editing Loader jobs, users can use macro definitions during parameter configuration. Then the parameters can be automatically changed to corresponding ma", "doc_type":"usermanual", "kw":"Using Macro Definitions in Configuration Items,Operator Help,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Macro Definitions in Configuration Items", "githuburl":"" }, { "uri":"mrs_01_1154.html", + "node_id":"mrs_01_1154.xml", "product_code":"mrs", - "code":"607", + "code":"717", "des":"In Loader data import and export tasks, each operator defines different processing rules for null values and empty strings in raw data. Dirty data cannot be imported or e", "doc_type":"usermanual", "kw":"Operator Data Processing Rules,Operator Help,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Operator Data Processing Rules", "githuburl":"" }, { "uri":"mrs_01_1155.html", + "node_id":"mrs_01_1155.xml", "product_code":"mrs", - "code":"608", + "code":"718", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Client Tool Description", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Client Tool Description", "githuburl":"" }, { "uri":"mrs_01_1157.html", + "node_id":"mrs_01_1157.xml", "product_code":"mrs", - "code":"609", + "code":"719", "des":"loader-tool is a Loader client tool. It consists of three tools: lt-ucc, lt-ucj, lt-ctl.Loader supports two modes, parameter mode and job template mode. 
Either mode can b", "doc_type":"usermanual", "kw":"loader-tool Usage Guide,Client Tool Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"loader-tool Usage Guide", "githuburl":"" }, { "uri":"mrs_01_1158.html", + "node_id":"mrs_01_1158.xml", "product_code":"mrs", - "code":"610", + "code":"720", "des":"loader-tool can be used to create, update, query, and delete a connector or job by using a job template or setting parameters.This section describes how to use loader-too", "doc_type":"usermanual", "kw":"loader-tool Usage Example,Client Tool Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"loader-tool Usage Example", "githuburl":"" }, { "uri":"mrs_01_1159.html", + "node_id":"mrs_01_1159.xml", "product_code":"mrs", - "code":"611", + "code":"721", "des":"schedule-tool is used to submit jobs of SFTP data sources. You can modify the input path and file filtering criteria before submitting a job. You can modify the output pa", "doc_type":"usermanual", "kw":"schedule-tool Usage Guide,Client Tool Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"schedule-tool Usage Guide", "githuburl":"" }, { "uri":"mrs_01_1160.html", + "node_id":"mrs_01_1160.xml", "product_code":"mrs", - "code":"612", + "code":"722", "des":"After a job is created using the Loader WebUI or Loader-tool, use schedule-tool to execute the job.The Loader client has been installed and configured.cd /opt/hadoopclien", "doc_type":"usermanual", "kw":"schedule-tool Usage Example,Client Tool Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"schedule-tool Usage Example", "githuburl":"" }, { "uri":"mrs_01_1161.html", + "node_id":"mrs_01_1161.xml", "product_code":"mrs", - "code":"613", + "code":"723", "des":"After a job is created using the Loader WebUI or loader-tool, use loader-backup to back up data.Only Loader jobs of data export support data backup.This tool is an intern", "doc_type":"usermanual", "kw":"Using loader-backup to Back Up Job Data,Client Tool Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using loader-backup to Back Up Job Data", "githuburl":"" }, { "uri":"mrs_01_1162.html", + "node_id":"mrs_01_1162.xml", "product_code":"mrs", - "code":"614", + "code":"724", "des":"Sqoop-shell is a shell tool of Loader. 
All its functions are implemented by executing the sqoop2-shell script.The sqoop-shell tool provides the following functions:Creati", "doc_type":"usermanual", "kw":"Open Source sqoop-shell Tool Usage Guide,Client Tool Description,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Open Source sqoop-shell Tool Usage Guide", "githuburl":"" }, { "uri":"mrs_01_1163.html", + "node_id":"mrs_01_1163.xml", "product_code":"mrs", - "code":"615", + "code":"725", "des":"Taking importing data from SFTP to HDFS as an example, this section introduces how to use the sqoop-shell tool to create and start Loader jobs in the interaction mode and", "doc_type":"usermanual", "kw":"Example for Using the Open-Source sqoop-shell Tool (SFTP-HDFS),Client Tool Description,Component Ope", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Example for Using the Open-Source sqoop-shell Tool (SFTP-HDFS)", "githuburl":"" }, { "uri":"mrs_01_1164.html", + "node_id":"mrs_01_1164.xml", "product_code":"mrs", - "code":"616", + "code":"726", "des":"Taking Importing Data from Oracle to HBase as an example, this section introduces how to use the sqoop-shell tool to create and start Loader jobs in the interaction mode ", "doc_type":"usermanual", "kw":"Example for Using the Open-Source sqoop-shell Tool (Oracle-HBase),Client Tool Description,Component ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Example for Using the Open-Source sqoop-shell Tool (Oracle-HBase)", "githuburl":"" }, { "uri":"mrs_01_1165.html", + "node_id":"mrs_01_1165.xml", "product_code":"mrs", - "code":"617", + "code":"727", "des":"Log path: The default storage path of Loader log files is /var/log/Bigdata/loader/Log category.runlog: /var/log/Bigdata/loader/runlog (run logs)scriptlog: /var/log/Bigdat", "doc_type":"usermanual", "kw":"Loader Log Overview,Using Loader,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Loader Log Overview", "githuburl":"" }, { "uri":"mrs_01_1785.html", + "node_id":"mrs_01_1785.xml", "product_code":"mrs", - "code":"618", + "code":"728", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Loader", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Loader", "githuburl":"" }, { "uri":"mrs_01_1786.html", + "node_id":"mrs_01_1786.xml", "product_code":"mrs", - "code":"619", + "code":"729", "des":"Internet Explorer 11 or Internet Explorer 10 is used to access the web UI of Loader. 
After data is submitted, an error occurs.SymptomWhen the submitted data is saved, a s", "doc_type":"usermanual", "kw":"How to Resolve the Problem that Failed to Save Data When Using Internet Explorer 10 or Internet Expl", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Resolve the Problem that Failed to Save Data When Using Internet Explorer 10 or Internet Explorer 11 ?", "githuburl":"" }, { "uri":"mrs_01_1787.html", + "node_id":"mrs_01_1787.xml", "product_code":"mrs", - "code":"620", + "code":"730", "des":"Three types of connectors are available for importing data from the Oracle database to HDFS using Loader. That is, generic-jdbc-connector, oracle-connector, and oracle-pa", "doc_type":"usermanual", "kw":"Differences Among Connectors Used During the Process of Importing Data from the Oracle Database to H", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Differences Among Connectors Used During the Process of Importing Data from the Oracle Database to HDFS", "githuburl":"" }, { "uri":"mrs_01_0834.html", + "node_id":"mrs_01_0834.xml", "product_code":"mrs", - "code":"621", + "code":"731", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using MapReduce", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using MapReduce", "githuburl":"" }, { "uri":"mrs_01_0835.html", + "node_id":"mrs_01_0835.xml", "product_code":"mrs", - "code":"622", + "code":"732", "des":"The JobHistoryServer service of MapReduce is a single instance, or the single instance is used to install the MapReduce service during cluster installation. 
To avoid the ", "doc_type":"usermanual", "kw":"Converting MapReduce from the Single Instance Mode to the HA Mode,Using MapReduce,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Converting MapReduce from the Single Instance Mode to the HA Mode", "githuburl":"" }, { "uri":"mrs_01_0836.html", + "node_id":"mrs_01_0836.xml", "product_code":"mrs", - "code":"623", + "code":"733", "des":"Job and task logs are generated during execution of a MapReduce application.Job logs are generated by the MRApplicationMaster, which record details about the start and ru", "doc_type":"usermanual", "kw":"Configuring the Log Archiving and Clearing Mechanism,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Log Archiving and Clearing Mechanism", "githuburl":"" }, { "uri":"mrs_01_0837.html", + "node_id":"mrs_01_0837.xml", "product_code":"mrs", - "code":"624", + "code":"734", "des":"When the network is unstable or the cluster I/O and CPU are overloaded, client applications might encounter running failures.Adjust the following parameters in the mapred", "doc_type":"usermanual", "kw":"Reducing Client Application Failure Rate,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Reducing Client Application Failure Rate", "githuburl":"" }, { "uri":"mrs_01_0838.html", + "node_id":"mrs_01_0838.xml", "product_code":"mrs", - "code":"625", + "code":"735", "des":"To submit MapReduce tasks from Windows to Linux, set mapreduce.app-submission.cross-platform to true. If this parameter does not exist in the cluster or the value of this", "doc_type":"usermanual", "kw":"Transmitting MapReduce Tasks from Windows to Linux,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Transmitting MapReduce Tasks from Windows to Linux", "githuburl":"" }, { "uri":"mrs_01_0839.html", + "node_id":"mrs_01_0839.xml", "product_code":"mrs", - "code":"626", + "code":"736", "des":"Distributed caching is useful in the following scenarios:Rolling UpgradeDuring the upgrade, applications must keep the text content (JAR file or configuration file) uncha", "doc_type":"usermanual", "kw":"Configuring the Distributed Cache,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Distributed Cache", "githuburl":"" }, { "uri":"mrs_01_0840.html", + "node_id":"mrs_01_0840.xml", "product_code":"mrs", - "code":"627", + "code":"737", "des":"When the MapReduce shuffle service is started, it attempts to bind an IP address based on local host. 
If the MapReduce shuffle service is required to connect to a specifi", "doc_type":"usermanual", "kw":"Configuring the MapReduce Shuffle Address,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the MapReduce Shuffle Address", "githuburl":"" }, { "uri":"mrs_01_0841.html", + "node_id":"mrs_01_0841.xml", "product_code":"mrs", - "code":"628", + "code":"738", "des":"This function is used to specify the MapReduce cluster administrator.The system administrator list is specified by mapreduce.cluster.administrators. The cluster administr", "doc_type":"usermanual", "kw":"Configuring the Cluster Administrator List,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Cluster Administrator List", "githuburl":"" }, { "uri":"mrs_01_0842.html", + "node_id":"mrs_01_0842.xml", "product_code":"mrs", - "code":"629", + "code":"739", "des":"Log paths:JobhistoryServer: /var/log/Bigdata/mapreduce/jobhistory (run log) and /var/log/Bigdata/audit/mapreduce/jobhistory (audit log)Container: /srv/BigData/hadoop/data", "doc_type":"usermanual", "kw":"Introduction to MapReduce Logs,Using MapReduce,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Introduction to MapReduce Logs", "githuburl":"" }, { "uri":"mrs_01_0843.html", + "node_id":"mrs_01_0843.xml", "product_code":"mrs", - "code":"630", + "code":"740", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"MapReduce Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"MapReduce Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_0844.html", + "node_id":"mrs_01_0844.xml", "product_code":"mrs", - "code":"631", + "code":"741", "des":"Optimization can be performed when the number of CPU cores is large, for example, the number of CPU cores is three times the number of disks.You can set the following par", "doc_type":"usermanual", "kw":"Optimization Configuration for Multiple CPU Cores,MapReduce Performance Tuning,Component Operation G", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimization Configuration for Multiple CPU Cores", "githuburl":"" }, { "uri":"mrs_01_0845.html", + "node_id":"mrs_01_0845.xml", "product_code":"mrs", - "code":"632", + "code":"742", "des":"The performance optimization effect is verified by comparing actual values with the baseline data. 
Therefore, determining optimal job baseline is critical to performance ", "doc_type":"usermanual", "kw":"Determining the Job Baseline,MapReduce Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Determining the Job Baseline", "githuburl":"" }, { "uri":"mrs_01_0846.html", + "node_id":"mrs_01_0846.xml", "product_code":"mrs", - "code":"633", + "code":"743", "des":"During the shuffle procedure of MapReduce, the Map task writes intermediate data into disks, and the Reduce task copies and adds the data to the reduce function. Hadoop p", "doc_type":"usermanual", "kw":"Streamlining Shuffle,MapReduce Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Streamlining Shuffle", "githuburl":"" }, { "uri":"mrs_01_0847.html", + "node_id":"mrs_01_0847.xml", "product_code":"mrs", - "code":"634", + "code":"744", "des":"A big job containing 100,000 Map tasks fails. It is found that the failure is triggered by the slow response of ApplicationMaster (AM).When the number of tasks increases,", "doc_type":"usermanual", "kw":"AM Optimization for Big Tasks,MapReduce Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"AM Optimization for Big Tasks", "githuburl":"" }, { "uri":"mrs_01_0848.html", + "node_id":"mrs_01_0848.xml", "product_code":"mrs", - "code":"635", + "code":"745", "des":"If a cluster has hundreds or thousands of nodes, the hardware or software fault of a node may prolong the execution time of the entire task (as most tasks are already com", "doc_type":"usermanual", "kw":"Speculative Execution,MapReduce Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Speculative Execution", "githuburl":"" }, { "uri":"mrs_01_0849.html", + "node_id":"mrs_01_0849.xml", "product_code":"mrs", - "code":"636", + "code":"746", "des":"The Slow Start feature specifies the proportion of Map tasks to be completed before Reduce tasks are started. If the Reduce tasks are started too early, resources will be", "doc_type":"usermanual", "kw":"Using Slow Start,MapReduce Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Slow Start", "githuburl":"" }, { "uri":"mrs_01_0850.html", + "node_id":"mrs_01_0850.xml", "product_code":"mrs", - "code":"637", + "code":"747", "des":"By default, if an MR job generates a large number of output files, it takes a long time for the job to commit the temporary outputs of a task to the final output director", "doc_type":"usermanual", "kw":"Optimizing Performance for Committing MR Jobs,MapReduce Performance Tuning,Component Operation Guide", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Performance for Committing MR Jobs", "githuburl":"" }, { "uri":"mrs_01_1788.html", + "node_id":"mrs_01_1788.xml", "product_code":"mrs", - "code":"638", + "code":"748", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About MapReduce", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About MapReduce", "githuburl":"" }, { "uri":"mrs_01_1789.html", + "node_id":"mrs_01_1789.xml", "product_code":"mrs", - "code":"639", + "code":"749", "des":"MapReduce job takes a very long time (more than 10minutes) when the ResourceManager switch while the job is running.This is because, ResorceManager HA is enabled but the ", "doc_type":"usermanual", "kw":"Why Does It Take a Long Time to Run a Task Upon ResourceManager Active/Standby Switchover?,Common Is", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does It Take a Long Time to Run a Task Upon ResourceManager Active/Standby Switchover?", "githuburl":"" }, { "uri":"mrs_01_1790.html", + "node_id":"mrs_01_1790.xml", "product_code":"mrs", - "code":"640", + "code":"750", "des":"MapReduce job is not progressing for long timeThis is because of less memory. When the memory is less, the time taken by the job to copy the map output increases signific", "doc_type":"usermanual", "kw":"Why Does a MapReduce Task Stay Unchanged for a Long Time?,Common Issues About MapReduce,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does a MapReduce Task Stay Unchanged for a Long Time?", "githuburl":"" }, { "uri":"mrs_01_1791.html", + "node_id":"mrs_01_1791.xml", "product_code":"mrs", - "code":"641", + "code":"751", "des":"Why is the client unavailable when the MR ApplicationMaster or ResourceManager is moved to the D state during job running?When a task is running, the MR ApplicationMaster", "doc_type":"usermanual", "kw":"Why the Client Hangs During Job Running?,Common Issues About MapReduce,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why the Client Hangs During Job Running?", "githuburl":"" }, { "uri":"mrs_01_1792.html", + "node_id":"mrs_01_1792.xml", "product_code":"mrs", - "code":"642", + "code":"752", "des":"In security mode, why delegation token HDFS_DELEGATION_TOKEN is not found in the cache?In MapReduce, by default HDFS_DELEGATION_TOKEN will be canceled after the job compl", "doc_type":"usermanual", "kw":"Why Cannot HDFS_DELEGATION_TOKEN Be Found in the Cache?,Common Issues About MapReduce,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot HDFS_DELEGATION_TOKEN Be Found in the Cache?", "githuburl":"" }, { "uri":"mrs_01_1793.html", + "node_id":"mrs_01_1793.xml", "product_code":"mrs", - "code":"643", + "code":"753", "des":"How do I set the job priority when submitting a MapReduce task?You can add the parameter -Dmapreduce.job.priority= in the command to set task priority when subm", "doc_type":"usermanual", "kw":"How Do I Set the Task Priority When Submitting a MapReduce Task?,Common Issues About MapReduce,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Set the Task Priority When Submitting a MapReduce Task?", "githuburl":"" }, { "uri":"mrs_01_1797.html", + "node_id":"mrs_01_1797.xml", "product_code":"mrs", - "code":"644", + "code":"754", 
"des":"After the address of MapReduce JobHistoryServer is changed, why the wrong page is displayed when I click the tracking URL on the ResourceManager WebUI?JobHistoryServer ad", "doc_type":"usermanual", "kw":"After the Address of MapReduce JobHistoryServer Is Changed, Why the Wrong Page is Displayed When I C", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"After the Address of MapReduce JobHistoryServer Is Changed, Why the Wrong Page is Displayed When I Click the Tracking URL on the ResourceManager WebUI?", "githuburl":"" }, { "uri":"mrs_01_1799.html", + "node_id":"mrs_01_1799.xml", "product_code":"mrs", - "code":"645", + "code":"755", "des":"MapReduce or Yarn job fails in multiple nameService environment using viewFS.When using viewFS only the mount directories are accessible, so the most possible cause is th", "doc_type":"usermanual", "kw":"MapReduce Job Failed in Multiple NameService Environment,Common Issues About MapReduce,Component Ope", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"MapReduce Job Failed in Multiple NameService Environment", "githuburl":"" }, { "uri":"mrs_01_1800.html", + "node_id":"mrs_01_1800.xml", "product_code":"mrs", - "code":"646", + "code":"756", "des":"MapReduce task fails and the ratio of fault nodes to all nodes is smaller than the blacklist threshold configured by yarn.resourcemanager.am-scheduling.node-blacklisting-", "doc_type":"usermanual", "kw":"Why a Fault MapReduce Node Is Not Blacklisted?,Common Issues About MapReduce,Component Operation Gui", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why a Fault MapReduce Node Is Not Blacklisted?", "githuburl":"" }, { "uri":"mrs_01_1807.html", + "node_id":"mrs_01_1807.xml", "product_code":"mrs", - "code":"647", + "code":"757", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Oozie", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Oozie", "githuburl":"" }, { "uri":"mrs_01_1808.html", + "node_id":"mrs_01_1808.xml", "product_code":"mrs", - "code":"648", + "code":"758", "des":"Oozie is an open-source workflow engine that is used to schedule and coordinate Hadoop jobs.Oozie can be used to submit a wide array of jobs, such as Hive, Spark2x, Loade", "doc_type":"usermanual", "kw":"Using Oozie from Scratch,Using Oozie,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Oozie from Scratch", "githuburl":"" }, { "uri":"mrs_01_1810.html", + "node_id":"mrs_01_1810.xml", "product_code":"mrs", - "code":"649", + "code":"759", "des":"This section describes how to use the Oozie client in an O&M scenario or service scenario.The client has been installed. 
For example, the installation directory is /opt/h", "doc_type":"usermanual", "kw":"Using the Oozie Client,Using Oozie,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Oozie Client", "githuburl":"" }, { "uri":"mrs_01_24233.html", + "node_id":"mrs_01_24233.xml", "product_code":"", - "code":"650", + "code":"760", "des":"When multiple Oozie nodes provide services at the same time, you can use ZooKeeper to provide high availability (HA), which helps avoid single points of failure (SPOFs) a", "doc_type":"", "kw":"Enabling Oozie High Availability (HA),Using Oozie,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Enabling Oozie High Availability (HA)", "githuburl":"" }, { "uri":"mrs_01_1812.html", + "node_id":"mrs_01_1812.xml", "product_code":"mrs", - "code":"651", + "code":"761", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Oozie Client to Submit an Oozie Job", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Oozie Client to Submit an Oozie Job", "githuburl":"" }, { "uri":"mrs_01_1813.html", + "node_id":"mrs_01_1813.xml", "product_code":"mrs", - "code":"652", + "code":"762", "des":"This section describes how to use the Oozie client to submit a Hive job.Hive jobs are divided into the following types:Hive jobHive job that is connected in JDBC modeHive", "doc_type":"usermanual", - "kw":"Submitting a Hive Job,Using Oozie Client to Submit an Oozie Job,Component Operation Guide (LTS)", - "title":"Submitting a Hive Job", + "kw":"Submitting a Hive Job with Oozie Client,Using Oozie Client to Submit an Oozie Job,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Hive Job with Oozie Client", "githuburl":"" }, { "uri":"mrs_01_1814.html", + "node_id":"mrs_01_1814.xml", "product_code":"mrs", - "code":"653", + "code":"763", "des":"This section describes how to submit a Spark2x job using the Oozie client.You are advised to download the latest client.The Spark2x and Oozie components and clients have ", "doc_type":"usermanual", - "kw":"Submitting a Spark2x Job,Using Oozie Client to Submit an Oozie Job,Component Operation Guide (LTS)", - "title":"Submitting a Spark2x Job", + "kw":"Submitting a Spark2x Job with Oozie Client,Using Oozie Client to Submit an Oozie Job,Component Opera", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Spark2x Job with Oozie Client", "githuburl":"" }, { "uri":"mrs_01_1815.html", + "node_id":"mrs_01_1815.xml", "product_code":"mrs", - "code":"654", + "code":"764", "des":"This section describes how to submit a Loader job using the Oozie client.You are advised to download the latest client.The Hive and Oozie components and clients have been", "doc_type":"usermanual", - "kw":"Submitting a Loader Job,Using Oozie Client to Submit an Oozie Job,Component Operation Guide (LTS)", - "title":"Submitting a Loader Job", + "kw":"Submitting a Loader Job with Oozie Client,Using Oozie Client to Submit an Oozie Job,Component Operat", + "search_title":"", + "metedata":[ + { + 
"prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Loader Job with Oozie Client", "githuburl":"" }, { "uri":"mrs_01_2392.html", + "node_id":"mrs_01_2392.xml", "product_code":"mrs", - "code":"655", + "code":"765", "des":"This section describes how to submit a DistCp job using the Oozie client.You are advised to download the latest client.The HDFS and Oozie components and clients have been", "doc_type":"usermanual", - "kw":"Submitting a DistCp Job,Using Oozie Client to Submit an Oozie Job,Component Operation Guide (LTS)", - "title":"Submitting a DistCp Job", + "kw":"Submitting a DistCp Job with Oozie Client,Using Oozie Client to Submit an Oozie Job,Component Operat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a DistCp Job with Oozie Client", "githuburl":"" }, { "uri":"mrs_01_1816.html", + "node_id":"mrs_01_1816.xml", "product_code":"mrs", - "code":"656", + "code":"766", "des":"In addition to Hive, Spark2x, and Loader jobs, MapReduce, Java, Shell, HDFS, SSH, SubWorkflow, Streaming, and scheduled jobs can be submitted using the Oozie client.You a", "doc_type":"usermanual", - "kw":"Submitting Other Jobs,Using Oozie Client to Submit an Oozie Job,Component Operation Guide (LTS)", - "title":"Submitting Other Jobs", + "kw":"Submitting Other Jobs with Oozie Client,Using Oozie Client to Submit an Oozie Job,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting Other Jobs with Oozie Client", "githuburl":"" }, { "uri":"mrs_01_1817.html", + "node_id":"mrs_01_1817.xml", "product_code":"mrs", - "code":"657", + "code":"767", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Hue to Submit an Oozie Job", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Hue to Submit an Oozie Job", "githuburl":"" }, { "uri":"mrs_01_1818.html", + "node_id":"mrs_01_1818.xml", "product_code":"mrs", - "code":"658", + "code":"768", "des":"You can submit an Oozie job on the Hue management page, but a workflow must be created before the job is submitted.Before using Hue to submit an Oozie job, configure the ", "doc_type":"usermanual", "kw":"Creating a Workflow,Using Hue to Submit an Oozie Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Workflow", "githuburl":"" }, { "uri":"mrs_01_1819.html", + "node_id":"mrs_01_1819.xml", "product_code":"mrs", - "code":"659", + "code":"769", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Submitting a Workflow Job", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Submitting a Workflow Job", "githuburl":"" }, { "uri":"mrs_01_1820.html", + "node_id":"mrs_01_1820.xml", "product_code":"mrs", - "code":"660", + "code":"770", "des":"This section describes how to submit an Oozie job of the Hive2 type on the Hue web UI.For example, if the input parameter is INPUT=/user/admin/examples/input-data/table, ", "doc_type":"usermanual", - "kw":"Submitting a Hive2 Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Hive2 Job", + "kw":"Submitting a Hive2 Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Hive2 Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1821.html", + "node_id":"mrs_01_1821.xml", "product_code":"mrs", - "code":"661", + "code":"771", "des":"This section describes how to submit an Oozie job of the Spark2x type on Hue.For example, add the following parameters:hdfs://hacluster/user/admin/examples/input-data/tex", "doc_type":"usermanual", - "kw":"Submitting a Spark2x Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Spark2x Job", + "kw":"Submitting a Spark2x Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Spark2x Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1822.html", + "node_id":"mrs_01_1822.xml", "product_code":"mrs", - "code":"662", + "code":"772", "des":"This section describes how to submit an Oozie job of the Java type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow)", "doc_type":"usermanual", - "kw":"Submitting a Java Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Java Job", + "kw":"Submitting a Java Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Java Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1823.html", + "node_id":"mrs_01_1823.xml", "product_code":"mrs", - "code":"663", + "code":"773", "des":"This section describes how to submit an Oozie job of the Loader type on the Hue web UI.Job id is the ID of the Loader job to be orchestrated and can be obtained from the ", "doc_type":"usermanual", - "kw":"Submitting a Loader Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Loader Job", + "kw":"Submitting a Loader Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Loader Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1824.html", + "node_id":"mrs_01_1824.xml", "product_code":"mrs", - "code":"664", + "code":"774", "des":"This section describes how to submit an Oozie job of the MapReduce type on the Hue web UI.For example, set the value of mapred.input.dir to /user/admin/examples/input-dat", "doc_type":"usermanual", - "kw":"Submitting a MapReduce Job,Submitting a Workflow 
Job,Component Operation Guide (LTS)", - "title":"Submitting a MapReduce Job", + "kw":"Submitting a MapReduce Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a MapReduce Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1825.html", + "node_id":"mrs_01_1825.xml", "product_code":"mrs", - "code":"665", + "code":"775", "des":"This section describes how to submit an Oozie job of the Sub-workflow type on the Hue web UI.If you need to modify the job name before saving the job (default value: My W", "doc_type":"usermanual", - "kw":"Submitting a Sub-workflow Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Sub-workflow Job", + "kw":"Submitting a Sub-workflow Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Sub-workflow Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1826.html", + "node_id":"mrs_01_1826.xml", "product_code":"mrs", - "code":"666", + "code":"776", "des":"This section describes how to submit an Oozie job of the Shell type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow", "doc_type":"usermanual", - "kw":"Submitting a Shell Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Shell Job", + "kw":"Submitting a Shell Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Shell Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1827.html", + "node_id":"mrs_01_1827.xml", "product_code":"mrs", - "code":"667", + "code":"777", "des":"This section describes how to submit an Oozie job of the HDFS type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow)", "doc_type":"usermanual", - "kw":"Submitting an HDFS Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting an HDFS Job", + "kw":"Submitting an HDFS Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting an HDFS Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1829.html", + "node_id":"mrs_01_1829.xml", "product_code":"mrs", - "code":"668", + "code":"778", "des":"This section describes how to submit an Oozie job of the DistCp type on the Hue web UI.If yes, go to 4.If no, go to 7.source_ip: service address of the HDFS NameNode in t", "doc_type":"usermanual", - "kw":"Submitting a DistCp Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a DistCp Job", + "kw":"Submitting a DistCp Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a DistCp Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1830.html", + "node_id":"mrs_01_1830.xml", "product_code":"mrs", - "code":"669", + "code":"779", "des":"This section guides you to enable unidirectional password-free mutual trust when Oozie nodes are used to execute shell scripts of external nodes through SSH jobs.You have", "doc_type":"usermanual", - "kw":"Example of Mutual Trust 
Operations,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Example of Mutual Trust Operations", + "kw":"Example of Mutual Trust Operations in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Example of Mutual Trust Operations in Hue", "githuburl":"" }, { "uri":"mrs_01_1831.html", + "node_id":"mrs_01_1831.xml", "product_code":"mrs", - "code":"670", + "code":"780", "des":"This section guides you to submit an Oozie job of the SSH type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow), cl", "doc_type":"usermanual", - "kw":"Submitting an SSH Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting an SSH Job", + "kw":"Submitting an SSH Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting an SSH Job in Hue", "githuburl":"" }, { "uri":"mrs_01_2372.html", + "node_id":"mrs_01_2372.xml", "product_code":"mrs", - "code":"671", + "code":"781", "des":"This section describes how to submit a Hive job on the Hue web UI.After the job is submitted, you can view the related contents of the job, such as the detailed informati", "doc_type":"usermanual", - "kw":"Submitting a Hive Script,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting a Hive Script", + "kw":"Submitting a Hive Script in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting a Hive Script in Hue", "githuburl":"" }, { "uri":"mrs_01_24114.html", + "node_id":"mrs_01_24114.xml", "product_code":"mrs", - "code":"672", + "code":"782", "des":"This section describes how to add an email job on the Hue web UI.To addresses: specifies the recipient email address. Separate multiple email addresses with commas (,).Su", "doc_type":"usermanual", - "kw":"Submitting an Email Job,Submitting a Workflow Job,Component Operation Guide (LTS)", - "title":"Submitting an Email Job", + "kw":"Submitting an Email Job in Hue,Submitting a Workflow Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], + "title":"Submitting an Email Job in Hue", "githuburl":"" }, { "uri":"mrs_01_1840.html", + "node_id":"mrs_01_1840.xml", "product_code":"mrs", - "code":"673", + "code":"783", "des":"This section describes how to submit a job of the periodic scheduling type on the Hue web UI.Required workflow jobs have been configured before the coordinator task is su", "doc_type":"usermanual", "kw":"Submitting a Coordinator Periodic Scheduling Job,Using Hue to Submit an Oozie Job,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Submitting a Coordinator Periodic Scheduling Job", "githuburl":"" }, { "uri":"mrs_01_1841.html", + "node_id":"mrs_01_1841.xml", "product_code":"mrs", - "code":"674", + "code":"784", "des":"In the case that multiple scheduled jobs exist at the same time, you can manage the jobs in batches over the Bundle task. 
This section describes how to submit a job of th", "doc_type":"usermanual", "kw":"Submitting a Bundle Batch Processing Job,Using Hue to Submit an Oozie Job,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Submitting a Bundle Batch Processing Job", "githuburl":"" }, { "uri":"mrs_01_1842.html", + "node_id":"mrs_01_1842.xml", "product_code":"mrs", - "code":"675", + "code":"785", "des":"After the jobs are submitted, you can view the execution status of a specific job on Hue.", "doc_type":"usermanual", "kw":"Querying the Operation Results,Using Hue to Submit an Oozie Job,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Querying the Operation Results", "githuburl":"" }, { "uri":"mrs_01_1843.html", + "node_id":"mrs_01_1843.xml", "product_code":"mrs", - "code":"676", + "code":"786", "des":"Log path: The default storage paths of Oozie log files are as follows:Run log: /var/log/Bigdata/oozieAudit log: /var/log/Bigdata/audit/oozieLog archiving rule: Oozie logs", "doc_type":"usermanual", "kw":"Oozie Log Overview,Using Oozie,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Oozie Log Overview", "githuburl":"" }, { "uri":"mrs_01_1844.html", + "node_id":"mrs_01_1844.xml", "product_code":"mrs", - "code":"677", + "code":"787", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Oozie", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Oozie", "githuburl":"" }, { "uri":"mrs_01_1845.html", + "node_id":"mrs_01_1845.xml", "product_code":"mrs", - "code":"678", + "code":"788", "des":"The Oozie client fails to submit a MapReduce job and a message \"Error: AUTHENTICATION: Could not authenticate, Authentication failed, status: 403, message: Forbidden\" is ", "doc_type":"usermanual", "kw":"How Do I Resolve the Problem that the Oozie Client Fails to Submit a MapReduce Job?,Common Issues Ab", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Resolve the Problem that the Oozie Client Fails to Submit a MapReduce Job?", "githuburl":"" }, { "uri":"mrs_01_1846.html", + "node_id":"mrs_01_1846.xml", "product_code":"mrs", - "code":"679", + "code":"789", "des":"Why are not Coordinator scheduled jobs executed on time on the Hue or Oozie client?Use UTC time. 
For example, set start=2016-12-20T09:00Z in job.properties file.", "doc_type":"usermanual", "kw":"Oozie Scheduled Tasks Are Not Executed on Time,Common Issues About Oozie,Component Operation Guide (", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Oozie Scheduled Tasks Are Not Executed on Time", "githuburl":"" }, { "uri":"mrs_01_1847.html", + "node_id":"mrs_01_1847.xml", "product_code":"mrs", - "code":"680", + "code":"790", "des":"Why cannot a class error be found during task execution after a new JAR file is uploaded to the /user/oozie/share/lib directory on HDFS?Restart Oozie to make the director", "doc_type":"usermanual", "kw":"The Update of the share lib Directory of Oozie Does Not Take Effect,Common Issues About Oozie,Compon", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"The Update of the share lib Directory of Oozie Does Not Take Effect", "githuburl":"" }, { "uri":"mrs_01_1849.html", + "node_id":"mrs_01_1849.xml", "product_code":"mrs", - "code":"681", + "code":"791", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Ranger", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Ranger", "githuburl":"" }, { "uri":"mrs_01_1850.html", + "node_id":"mrs_01_1850.xml", "product_code":"mrs", - "code":"682", + "code":"792", "des":"Ranger provides a centralized permission management framework to implement fine-grained permission control on components such as HDFS, HBase, Hive, and Yarn. In addition,", "doc_type":"usermanual", "kw":"Logging In to the Ranger Web UI,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Logging In to the Ranger Web UI", "githuburl":"" }, { "uri":"mrs_01_2393.html", + "node_id":"mrs_01_2393.xml", "product_code":"mrs", - "code":"683", + "code":"793", "des":"This section guides you how to enable Ranger authentication. Ranger authentication is enabled by default in security mode and disabled by default in normal mode.If Enable", "doc_type":"usermanual", "kw":"Enabling Ranger Authentication,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Enabling Ranger Authentication", "githuburl":"" }, { "uri":"mrs_01_1851.html", + "node_id":"mrs_01_1851.xml", "product_code":"mrs", - "code":"684", + "code":"794", "des":"In the newly installed MRS cluster, Ranger is installed by default, with the Ranger authentication model enabled. 
The system administrator can set fine-grained security p", "doc_type":"usermanual", "kw":"Configuring Component Permission Policies,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Component Permission Policies", "githuburl":"" }, { "uri":"mrs_01_1852.html", + "node_id":"mrs_01_1852.xml", "product_code":"mrs", - "code":"685", + "code":"795", "des":"The system administrator can view audit logs of the Ranger running and the permission control after Ranger authentication is enabled on the Ranger web UI.", "doc_type":"usermanual", "kw":"Viewing Ranger Audit Information,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Ranger Audit Information", "githuburl":"" }, { "uri":"mrs_01_1853.html", + "node_id":"mrs_01_1853.xml", "product_code":"mrs", - "code":"686", + "code":"796", "des":"Security zone can be configured using Ranger. Ranger administrators can divide resources of each component into multiple security zones where administrators set security ", "doc_type":"usermanual", "kw":"Configuring a Security Zone,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a Security Zone", "githuburl":"" }, { "uri":"mrs_01_2394.html", + "node_id":"mrs_01_2394.xml", "product_code":"mrs", - "code":"687", + "code":"797", "des":"By default, the Ranger data source of the security cluster can be accessed by FusionInsight Manager LDAP users. By default, the Ranger data source of a common cluster can", "doc_type":"usermanual", "kw":"Changing the Ranger Data Source to LDAP for a Normal Cluster,Using Ranger,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Changing the Ranger Data Source to LDAP for a Normal Cluster", "githuburl":"" }, { "uri":"mrs_01_1854.html", + "node_id":"mrs_01_1854.xml", "product_code":"mrs", - "code":"688", + "code":"798", "des":"You can view Ranger permission settings, such as users, user groups, and roles.Users: displays all user information synchronized from LDAP or OS to Ranger.Groups: display", "doc_type":"usermanual", "kw":"Viewing Ranger Permission Information,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Ranger Permission Information", "githuburl":"" }, { - "uri":"mrs_01_1856.html", + "uri":"mrs_01_24245.html", + "node_id":"mrs_01_24245.xml", "product_code":"mrs", - "code":"689", + "code":"799", + "des":"Ranger administrators can use Ranger to configure creation, execution, query, and deletion permissions for CDL users.The Ranger service has been installed and is running ", + "doc_type":"cmpntguide", + "kw":"Adding a Ranger Access Permission Policy for CDL,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "documenttype":"cmpntguide", + "prodname":"mrs" + } + ], + "title":"Adding a Ranger Access Permission Policy for CDL", + "githuburl":"" + }, + { + "uri":"mrs_01_1856.html", + "node_id":"mrs_01_1856.xml", + "product_code":"mrs", + "code":"800", "des":"The Ranger administrator can use Ranger to configure the read, write, and execution permissions on HDFS directories or files for HDFS users.The Ranger service 
has been in", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for HDFS,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for HDFS", "githuburl":"" }, { "uri":"mrs_01_1857.html", + "node_id":"mrs_01_1857.xml", "product_code":"mrs", - "code":"690", + "code":"801", "des":"Ranger administrators can use Ranger to configure permissions on HBase tables, column families, and columns for HBase users.The Ranger service has been installed and is r", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for HBase,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for HBase", "githuburl":"" }, { "uri":"mrs_01_1858.html", + "node_id":"mrs_01_1858.xml", "product_code":"mrs", - "code":"691", + "code":"802", "des":"The Ranger administrator can use Ranger to set permissions for Hive users. The default administrator account of Hive is hive and the initial password is Hive@123.The Rang", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for Hive,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for Hive", "githuburl":"" }, { "uri":"mrs_01_1859.html", + "node_id":"mrs_01_1859.xml", "product_code":"mrs", - "code":"692", + "code":"803", "des":"The Ranger administrator can use Ranger to configure Yarn administrator permissions for Yarn users, allowing them to manage Yarn queue resources.The Ranger service has be", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for Yarn,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for Yarn", "githuburl":"" }, { "uri":"mrs_01_1860.html", + "node_id":"mrs_01_1860.xml", "product_code":"mrs", - "code":"693", + "code":"804", "des":"The Ranger administrator can use Ranger to set permissions for Spark2x users.After Ranger authentication is enabled or disabled on Spark2x, you need to restart Spark2x.Do", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for Spark2x,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for Spark2x", "githuburl":"" }, { "uri":"mrs_01_1861.html", + "node_id":"mrs_01_1861.xml", "product_code":"mrs", - "code":"694", + "code":"805", "des":"The Ranger administrator can use Ranger to configure the read, write, and management permissions of the Kafka topic and the management permission of the cluster for the K", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for Kafka,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for Kafka", "githuburl":"" }, { "uri":"mrs_01_1862.html", + "node_id":"mrs_01_1862.xml", "product_code":"mrs", - "code":"695", + "code":"806", "des":"Ranger administrators can use Ranger to configure the permission to manage databases, tables, and columns of 
data sources for HetuEngine users.The Ranger service has been", "doc_type":"usermanual", "kw":"Adding a Ranger Access Permission Policy for HetuEngine,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adding a Ranger Access Permission Policy for HetuEngine", "githuburl":"" }, + { + "uri":"mrs_01_24767.html", + "node_id":"mrs_01_24767.xml", + "product_code":"", + "code":"807", + "des":"Ranger provides permission policies for services. When the number of service instances using Ranger increases, you need to adjust the specifications of Ranger.This sectio", + "doc_type":"", + "kw":"Configuring Ranger Specifications,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring Ranger Specifications", + "githuburl":"" + }, { "uri":"mrs_01_1865.html", + "node_id":"mrs_01_1865.xml", "product_code":"mrs", - "code":"696", + "code":"808", "des":"Log path: The default storage path of Ranger logs is /var/log/Bigdata/ranger/Role name.RangerAdmin: /var/log/Bigdata/ranger/rangeradmin (run logs)TagSync: /var/log/Bigdat", "doc_type":"usermanual", "kw":"Ranger Log Overview,Using Ranger,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Ranger Log Overview", "githuburl":"" }, { "uri":"mrs_01_1866.html", + "node_id":"mrs_01_1866.xml", "product_code":"mrs", - "code":"697", + "code":"809", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Ranger", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Ranger", "githuburl":"" }, { "uri":"mrs_01_1867.html", + "node_id":"mrs_01_1867.xml", "product_code":"mrs", - "code":"698", + "code":"810", "des":"During cluster installation, Ranger fails to be started, and the error message \"ERROR: cannot drop sequence X_POLICY_REF_ACCESS_TYPE_SEQ \" is displayed in the task list o", "doc_type":"usermanual", "kw":"Why Ranger Startup Fails During the Cluster Installation?,Common Issues About Ranger,Component Opera", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Ranger Startup Fails During the Cluster Installation?", "githuburl":"" }, { "uri":"mrs_01_1868.html", + "node_id":"mrs_01_1868.xml", "product_code":"mrs", - "code":"699", + "code":"811", "des":"How do I determine whether the Ranger authentication is enabled for a service that supports the authentication?Log in to FusionInsight Manager and choose Cluster > Servic", "doc_type":"usermanual", "kw":"How Do I Determine Whether the Ranger Authentication Is Used for a Service?,Common Issues About Rang", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Determine Whether the Ranger Authentication Is Used for a Service?", "githuburl":"" }, { "uri":"mrs_01_2300.html", + "node_id":"mrs_01_2300.xml", "product_code":"mrs", - "code":"700", + "code":"812", "des":"When a new user logs in to Ranger, why is the 401 error reported after the password is changed?The UserSync synchronizes user data at an interval of 5 minutes by 
default.", "doc_type":"usermanual", "kw":"Why Cannot a New User Log In to Ranger After Changing the Password?,Common Issues About Ranger,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot a New User Log In to Ranger After Changing the Password?", "githuburl":"" }, { "uri":"mrs_01_2355.html", + "node_id":"mrs_01_2355.xml", "product_code":"mrs", - "code":"701", + "code":"813", "des":"When a Ranger access permission policy is added for HBase and wildcard characters are used to search for an existing HBase table in the policy, the table cannot be found.", "doc_type":"usermanual", "kw":"When an HBase Policy Is Added or Modified on Ranger, Wildcard Characters Cannot Be Used to Search fo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"When an HBase Policy Is Added or Modified on Ranger, Wildcard Characters Cannot Be Used to Search for Existing HBase Tables", "githuburl":"" }, { "uri":"mrs_01_1926.html", + "node_id":"mrs_01_1926.xml", "product_code":"mrs", - "code":"702", + "code":"814", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Spark2x", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Spark2x", "githuburl":"" }, { "uri":"mrs_01_1928.html", + "node_id":"mrs_01_1928.xml", "product_code":"mrs", - "code":"703", + "code":"815", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Basic Operation", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Basic Operation", "githuburl":"" }, { "uri":"mrs_01_1929.html", + "node_id":"mrs_01_1929.xml", "product_code":"mrs", - "code":"704", + "code":"816", "des":"This section describes how to use Spark2x to submit Spark applications, including Spark Core and Spark SQL. Spark Core is the kernel module of Spark. It executes tasks an", "doc_type":"usermanual", "kw":"Getting Started,Basic Operation,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Getting Started", "githuburl":"" }, { "uri":"mrs_01_1930.html", + "node_id":"mrs_01_1930.xml", "product_code":"mrs", - "code":"705", + "code":"817", "des":"This section describes how to quickly configure common parameters and lists parameters that are not recommended to be modified when Spark2x is used.Some parameters have b", "doc_type":"usermanual", "kw":"Configuring Parameters Rapidly,Basic Operation,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Parameters Rapidly", "githuburl":"" }, { "uri":"mrs_01_1931.html", + "node_id":"mrs_01_1931.xml", "product_code":"mrs", - "code":"706", + "code":"818", "des":"This section describes common configuration items used in Spark. 
This section is divided into sub-sections based on features to help you quickly find required configurati", "doc_type":"usermanual", "kw":"Common Parameters,Basic Operation,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Parameters", "githuburl":"" }, { "uri":"mrs_01_1933.html", + "node_id":"mrs_01_1933.xml", "product_code":"mrs", - "code":"707", + "code":"819", "des":"Spark on HBase allows users to query HBase tables in Spark SQL and to store data for HBase tables by using the Beeline tool. You can use HBase APIs to create, read data f", "doc_type":"usermanual", "kw":"Spark on HBase Overview and Basic Applications,Basic Operation,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark on HBase Overview and Basic Applications", "githuburl":"" }, { "uri":"mrs_01_1934.html", + "node_id":"mrs_01_1934.xml", "product_code":"mrs", - "code":"708", + "code":"820", "des":"Spark on HBase V2 allows users to query HBase tables in Spark SQL and to store data for HBase tables by using the Beeline tool. You can use HBase APIs to create, read dat", "doc_type":"usermanual", "kw":"Spark on HBase V2 Overview and Basic Applications,Basic Operation,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark on HBase V2 Overview and Basic Applications", "githuburl":"" }, { "uri":"mrs_01_1935.html", + "node_id":"mrs_01_1935.xml", "product_code":"mrs", - "code":"709", + "code":"821", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"SparkSQL Permission Management(Security Mode)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SparkSQL Permission Management(Security Mode)", "githuburl":"" }, { "uri":"mrs_01_1936.html", + "node_id":"mrs_01_1936.xml", "product_code":"mrs", - "code":"710", + "code":"822", "des":"Similar to Hive, Spark SQL is a data warehouse framework built on Hadoop, providing storage of structured data like structured query language (SQL).MRS supports users, us", "doc_type":"usermanual", "kw":"Spark SQL Permissions,SparkSQL Permission Management(Security Mode),Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark SQL Permissions", "githuburl":"" }, { "uri":"mrs_01_1937.html", + "node_id":"mrs_01_1937.xml", "product_code":"mrs", - "code":"711", + "code":"823", "des":"This section describes how to create and configure a SparkSQL role on Manager as the system administrator. 
The Spark SQL role can be configured with the spark dministrato", "doc_type":"usermanual", "kw":"Creating a Spark SQL Role,SparkSQL Permission Management(Security Mode),Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating a Spark SQL Role", "githuburl":"" }, { "uri":"mrs_01_1938.html", + "node_id":"mrs_01_1938.xml", "product_code":"mrs", - "code":"712", + "code":"824", "des":"You can configure related permissions if you need to access tables or databases created by other users. SparkSQL supports column-based permission control. If a user needs", "doc_type":"usermanual", "kw":"Configuring Permissions for SparkSQL Tables, Columns, and Databases,SparkSQL Permission Management(S", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Permissions for SparkSQL Tables, Columns, and Databases", "githuburl":"" }, { "uri":"mrs_01_1939.html", + "node_id":"mrs_01_1939.xml", "product_code":"mrs", - "code":"713", + "code":"825", "des":"SparkSQL may need to be associated with other components. For example, Spark on HBase requires HBase permissions. The following describes how to associate SparkSQL with H", "doc_type":"usermanual", "kw":"Configuring Permissions for SparkSQL to Use Other Components,SparkSQL Permission Management(Security", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Permissions for SparkSQL to Use Other Components", "githuburl":"" }, { "uri":"mrs_01_1940.html", + "node_id":"mrs_01_1940.xml", "product_code":"mrs", - "code":"714", + "code":"826", "des":"This section describes how to configure SparkSQL permission management functions (client configuration is similar to server configuration). To enable table permission, ad", "doc_type":"usermanual", "kw":"Configuring the Client and Server,SparkSQL Permission Management(Security Mode),Component Operation ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Client and Server", "githuburl":"" }, { "uri":"mrs_01_1941.html", + "node_id":"mrs_01_1941.xml", "product_code":"mrs", - "code":"715", + "code":"827", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Scenario-Specific Configuration", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Scenario-Specific Configuration", "githuburl":"" }, { "uri":"mrs_01_1942.html", + "node_id":"mrs_01_1942.xml", "product_code":"mrs", - "code":"716", + "code":"828", "des":"In this mode, multiple ThriftServers coexist in the cluster and the client can randomly connect any ThriftServer to perform service operations. 
When one or multiple Thrif", "doc_type":"usermanual", "kw":"Configuring Multi-active Instance Mode,Scenario-Specific Configuration,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Multi-active Instance Mode", "githuburl":"" }, { "uri":"mrs_01_1943.html", + "node_id":"mrs_01_1943.xml", "product_code":"mrs", - "code":"717", + "code":"829", "des":"In multi-tenant mode, JDBCServers are bound with tenants. Each tenant corresponds to one or more JDBCServers, and a JDBCServer provides services for only one tenant. Diff", "doc_type":"usermanual", "kw":"Configuring the Multi-tenant Mode,Scenario-Specific Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Multi-tenant Mode", "githuburl":"" }, { "uri":"mrs_01_1944.html", + "node_id":"mrs_01_1944.xml", "product_code":"mrs", - "code":"718", + "code":"830", "des":"When using a cluster, if you want to switch between multi-active instance mode and multi-tenant mode, the following configurations are required.Switch from multi-tenant m", "doc_type":"usermanual", "kw":"Configuring the Switchover Between the Multi-active Instance Mode and the Multi-tenant Mode,Scenario", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Switchover Between the Multi-active Instance Mode and the Multi-tenant Mode", "githuburl":"" }, { "uri":"mrs_01_1945.html", + "node_id":"mrs_01_1945.xml", "product_code":"mrs", - "code":"719", + "code":"831", "des":"Functions such as UI, EventLog, and dynamic resource scheduling in Spark are implemented through event transfer. Events include SparkListenerJobStart and SparkListenerJob", "doc_type":"usermanual", "kw":"Configuring the Size of the Event Queue,Scenario-Specific Configuration,Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Size of the Event Queue", "githuburl":"" }, { "uri":"mrs_01_1947.html", + "node_id":"mrs_01_1947.xml", "product_code":"mrs", - "code":"720", + "code":"832", "des":"When the executor off-heap memory is too small, or processes with higher priority preempt resources, the physical memory usage will exceed the maximal value. To prevent t", "doc_type":"usermanual", "kw":"Configuring Executor Off-Heap Memory,Scenario-Specific Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Executor Off-Heap Memory", "githuburl":"" }, { "uri":"mrs_01_1948.html", + "node_id":"mrs_01_1948.xml", "product_code":"mrs", - "code":"721", + "code":"833", "des":"A large amount of memory is required when Spark SQL executes a query, especially during Aggregate and Join operations. 
If the memory is limited, OutOfMemoryError may occu", "doc_type":"usermanual", "kw":"Enhancing Stability in a Limited Memory Condition,Scenario-Specific Configuration,Component Operatio", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Enhancing Stability in a Limited Memory Condition", "githuburl":"" }, { "uri":"mrs_01_1949.html", + "node_id":"mrs_01_1949.xml", "product_code":"mrs", - "code":"722", + "code":"834", "des":"When yarn.log-aggregation-enable of Yarn is set to true, the container log aggregation function is enabled. Log aggregation indicates that after applications are run on Y", "doc_type":"usermanual", "kw":"Viewing Aggregated Container Logs on the Web UI,Scenario-Specific Configuration,Component Operation ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Viewing Aggregated Container Logs on the Web UI", "githuburl":"" }, { "uri":"mrs_01_1950.html", + "node_id":"mrs_01_1950.xml", "product_code":"mrs", - "code":"723", + "code":"835", "des":"SQL statements executed by users may contain sensitive information (such as passwords). Disclosure of such information may incur security risks. You can configure the spa", "doc_type":"usermanual", "kw":"Configuring Whether to Display Spark SQL Statements Containing Sensitive Words,Scenario-Specific Con", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Whether to Display Spark SQL Statements Containing Sensitive Words", "githuburl":"" }, { "uri":"mrs_01_1951.html", + "node_id":"mrs_01_1951.xml", "product_code":"mrs", - "code":"724", + "code":"836", "des":"Values of some configuration parameters of Spark client vary depending on its work mode (YARN-Client or YARN-Cluster). If you switch Spark client between different modes ", "doc_type":"usermanual", "kw":"Configuring Environment Variables in Yarn-Client and Yarn-Cluster Modes,Scenario-Specific Configurat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Environment Variables in Yarn-Client and Yarn-Cluster Modes", "githuburl":"" }, { "uri":"mrs_01_1952.html", + "node_id":"mrs_01_1952.xml", "product_code":"mrs", - "code":"725", + "code":"837", "des":"By default, SparkSQL divides data into 200 data blocks during shuffle. In data-intensive scenarios, each data block may have excessive size. 
If a single data block of a t", "doc_type":"usermanual", "kw":"Configuring the Default Number of Data Blocks Divided by SparkSQL,Scenario-Specific Configuration,Co", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Default Number of Data Blocks Divided by SparkSQL", "githuburl":"" }, { "uri":"mrs_01_1953.html", + "node_id":"mrs_01_1953.xml", "product_code":"mrs", - "code":"726", + "code":"838", "des":"The compression format of a Parquet table can be configured as follows:If the Parquet table is a partitioned one, set the parquet.compression parameter of the Parquet tab", "doc_type":"usermanual", "kw":"Configuring the Compression Format of a Parquet Table,Scenario-Specific Configuration,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Compression Format of a Parquet Table", "githuburl":"" }, { "uri":"mrs_01_1954.html", + "node_id":"mrs_01_1954.xml", "product_code":"mrs", - "code":"727", + "code":"839", "des":"In Spark WebUI, the Executor page can display information about Lost Executor. Executors are dynamically recycled. If the JDBCServer tasks are large, there may be too man", "doc_type":"usermanual", "kw":"Configuring the Number of Lost Executors Displayed in WebUI,Scenario-Specific Configuration,Componen", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Number of Lost Executors Displayed in WebUI", "githuburl":"" }, { "uri":"mrs_01_1957.html", + "node_id":"mrs_01_1957.xml", "product_code":"mrs", - "code":"728", + "code":"840", "des":"In some scenarios, to locate problems or check information by changing the log level,you can add the -Dlog4j.configuration.watch=true parameter to the JVM parameter of a ", "doc_type":"usermanual", "kw":"Setting the Log Level Dynamically,Scenario-Specific Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Setting the Log Level Dynamically", "githuburl":"" }, { "uri":"mrs_01_1958.html", + "node_id":"mrs_01_1958.xml", "product_code":"mrs", - "code":"729", + "code":"841", "des":"When Spark is used to submit tasks, the driver obtains tokens from HBase by default. 
To access HBase, you need to configure the jaas.conf file for security authentication", "doc_type":"usermanual", "kw":"Configuring Whether Spark Obtains HBase Tokens,Scenario-Specific Configuration,Component Operation G", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Whether Spark Obtains HBase Tokens", "githuburl":"" }, { "uri":"mrs_01_1959.html", + "node_id":"mrs_01_1959.xml", "product_code":"mrs", - "code":"730", + "code":"842", "des":"If the Spark Streaming application is connected to Kafka, after the Spark Streaming application is terminated abnormally and restarted from the checkpoint, the system pre", "doc_type":"usermanual", "kw":"Configuring LIFO for Kafka,Scenario-Specific Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring LIFO for Kafka", "githuburl":"" }, { "uri":"mrs_01_1960.html", + "node_id":"mrs_01_1960.xml", "product_code":"mrs", - "code":"731", + "code":"843", "des":"When the Spark Streaming application is connected to Kafka and the application is restarted, the application reads data from Kafka based on the last read topic offset and", "doc_type":"usermanual", "kw":"Configuring Reliability for Connected Kafka,Scenario-Specific Configuration,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Reliability for Connected Kafka", "githuburl":"" }, { "uri":"mrs_01_1961.html", + "node_id":"mrs_01_1961.xml", "product_code":"mrs", - "code":"732", + "code":"844", "des":"When a query statement is executed, the returned result may be large (containing more than 100,000 records). In this case, JDBCServer out of memory (OOM) may occur. There", "doc_type":"usermanual", "kw":"Configuring Streaming Reading of Driver Execution Results,Scenario-Specific Configuration,Component ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Streaming Reading of Driver Execution Results", "githuburl":"" }, { "uri":"mrs_01_1962.html", + "node_id":"mrs_01_1962.xml", "product_code":"mrs", - "code":"733", + "code":"845", "des":"When you perform the select query in Hive partitioned tables, the FileNotFoundException exception is displayed if a specified partition path does not exist in HDFS. To av", "doc_type":"usermanual", "kw":"Filtering Partitions without Paths in Partitioned Tables,Scenario-Specific Configuration,Component O", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Filtering Partitions without Paths in Partitioned Tables", "githuburl":"" }, { "uri":"mrs_01_1963.html", + "node_id":"mrs_01_1963.xml", "product_code":"mrs", - "code":"734", + "code":"846", "des":"Users need to implement security protection for Spark2x web UI when some data on the UI cannot be viewed by other users. 
Once a user attempts to log in to the UI, Spark2x", "doc_type":"usermanual", "kw":"Configuring Spark2x Web UI ACLs,Scenario-Specific Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Spark2x Web UI ACLs", "githuburl":"" }, { "uri":"mrs_01_1964.html", + "node_id":"mrs_01_1964.xml", "product_code":"mrs", - "code":"735", + "code":"847", "des":"ORC is a column-based storage format in the Hadoop ecosystem. It originates from Apache Hive and is used to reduce the Hadoop data storage space and accelerate the Hive q", "doc_type":"usermanual", "kw":"Configuring Vector-based ORC Data Reading,Scenario-Specific Configuration,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Vector-based ORC Data Reading", "githuburl":"" }, { "uri":"mrs_01_1965.html", + "node_id":"mrs_01_1965.xml", "product_code":"mrs", - "code":"736", + "code":"848", "des":"In earlier versions, the predicate for pruning Hive table partitions is pushed down. Only comparison expressions between column names and integers or character strings ca", "doc_type":"usermanual", "kw":"Broaden Support for Hive Partition Pruning Predicate Pushdown,Scenario-Specific Configuration,Compon", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Broaden Support for Hive Partition Pruning Predicate Pushdown", "githuburl":"" }, { "uri":"mrs_01_1966.html", + "node_id":"mrs_01_1966.xml", "product_code":"mrs", - "code":"737", + "code":"849", "des":"In earlier versions, when the insert overwrite syntax is used to overwrite partition tables, only partitions with specified expressions are matched, and partitions withou", "doc_type":"usermanual", "kw":"Hive Dynamic Partition Overwriting Syntax,Scenario-Specific Configuration,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Hive Dynamic Partition Overwriting Syntax", "githuburl":"" }, { "uri":"mrs_01_1967.html", + "node_id":"mrs_01_1967.xml", "product_code":"mrs", - "code":"738", + "code":"850", "des":"The execution plan for SQL statements is optimized in Spark. Common optimization rules are heuristic optimization rules. 
Heuristic optimization rules are provided based o", "doc_type":"usermanual", "kw":"Configuring the Column Statistics Histogram to Enhance the CBO Accuracy,Scenario-Specific Configurat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Column Statistics Histogram to Enhance the CBO Accuracy", "githuburl":"" }, { "uri":"mrs_01_1969.html", + "node_id":"mrs_01_1969.xml", "product_code":"mrs", - "code":"739", + "code":"851", "des":"JobHistory can use local disks to cache the historical data of Spark applications to prevent the JobHistory memory from loading a large amount of application data, reduci", "doc_type":"usermanual", "kw":"Configuring Local Disk Cache for JobHistory,Scenario-Specific Configuration,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Local Disk Cache for JobHistory", "githuburl":"" }, { "uri":"mrs_01_1970.html", + "node_id":"mrs_01_1970.xml", "product_code":"mrs", - "code":"740", + "code":"852", "des":"The Spark SQL adaptive execution feature enables Spark SQL to optimize subsequent execution processes based on intermediate results to improve overall execution efficienc", "doc_type":"usermanual", "kw":"Configuring Spark SQL to Enable the Adaptive Execution Feature,Scenario-Specific Configuration,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Spark SQL to Enable the Adaptive Execution Feature", "githuburl":"" }, { "uri":"mrs_01_24170.html", + "node_id":"mrs_01_24170.xml", "product_code":"mrs", - "code":"741", + "code":"853", "des":"When the event log mode is enabled for Spark, that is, spark.eventLog.enabled is set to true, events are written to a configured log file to record the program running pr", "doc_type":"usermanual", "kw":"Configuring Event Log Rollover,Scenario-Specific Configuration,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Event Log Rollover", "githuburl":"" }, + { + "uri":"mrs_01_24745.html", + "node_id":"mrs_01_24745.xml", + "product_code":"", + "code":"854", + "des":"This section applies only to MRS 3.2.0 or later.Currently, the Drop Partition command in Spark supports partition deletion using only the equal sign (=). 
This configurati", + "doc_type":"", + "kw":"Configuring the Drop Partition Command to Support Batch Deletion,Scenario-Specific Configuration,Com", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring the Drop Partition Command to Support Batch Deletion", + "githuburl":"" + }, + { + "uri":"mrs_01_24805.html", + "node_id":"mrs_01_24805.xml", + "product_code":"", + "code":"855", + "des":"This section applies only to MRS 3.2.0 or later.You can configure the following parameters to execute custom code when Executor exits.Configure the following parameters i", + "doc_type":"", + "kw":"Enabling an Executor to Execute Custom Code When Exiting,Scenario-Specific Configuration,Component O", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Enabling an Executor to Execute Custom Code When Exiting", + "githuburl":"" + }, { "uri":"mrs_01_2317.html", + "node_id":"mrs_01_2317.xml", "product_code":"mrs", - "code":"742", + "code":"856", "des":"When Ranger is used as the permission management service of Spark SQL, the certificate in the cluster is required for accessing RangerAdmin. If you use a third-party JDK ", "doc_type":"usermanual", "kw":"Adapting to the Third-party JDK When Ranger Is Used,Basic Operation,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Adapting to the Third-party JDK When Ranger Is Used", "githuburl":"" }, { "uri":"mrs_01_1971.html", + "node_id":"mrs_01_1971.xml", "product_code":"mrs", - "code":"743", + "code":"857", "des":"Log paths:Executor run log: ${BIGDATA_DATA_HOME}/hadoop/data${i}/nm/containerlogs/application_${appid}/container_{$contid}The logs of running tasks are stored in the prec", "doc_type":"usermanual", "kw":"Spark2x Logs,Using Spark2x,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark2x Logs", "githuburl":"" }, { "uri":"mrs_01_1972.html", + "node_id":"mrs_01_1972.xml", "product_code":"mrs", - "code":"744", + "code":"858", "des":"Container logs of running Spark applications are distributed on multiple nodes. 
This section describes how to quickly obtain container logs.You can run the yarn logs comm", "doc_type":"usermanual", "kw":"Obtaining Container Logs of a Running Spark Application,Using Spark2x,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Obtaining Container Logs of a Running Spark Application", "githuburl":"" }, { "uri":"mrs_01_1973.html", + "node_id":"mrs_01_1973.xml", "product_code":"mrs", - "code":"745", + "code":"859", "des":"In a large-scale Hadoop production cluster, HDFS metadata is stored in the NameNode memory, and the cluster scale is restricted by the memory limitation of each NameNode.", "doc_type":"usermanual", "kw":"Small File Combination Tools,Using Spark2x,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Small File Combination Tools", "githuburl":"" }, { "uri":"mrs_01_2362.html", + "node_id":"mrs_01_2362.xml", "product_code":"mrs", - "code":"746", + "code":"860", "des":"The first query of CarbonData is slow, which may cause a delay for nodes that have high requirements on real-time performance.The tool provides the following functions:Pr", "doc_type":"usermanual", "kw":"Using CarbonData for First Query,Using Spark2x,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using CarbonData for First Query", "githuburl":"" }, { "uri":"mrs_01_1974.html", + "node_id":"mrs_01_1974.xml", "product_code":"mrs", - "code":"747", + "code":"861", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Spark2x Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark2x Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_1975.html", + "node_id":"mrs_01_1975.xml", "product_code":"mrs", - "code":"748", + "code":"862", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Spark Core Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark Core Tuning", "githuburl":"" }, { "uri":"mrs_01_1976.html", + "node_id":"mrs_01_1976.xml", "product_code":"mrs", - "code":"749", + "code":"863", "des":"Spark supports the following types of serialization:JavaSerializerKryoSerializerData serialization affects the Spark application performance. In specific data format, Kry", "doc_type":"usermanual", "kw":"Data Serialization,Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Data Serialization", "githuburl":"" }, { "uri":"mrs_01_1977.html", + "node_id":"mrs_01_1977.xml", "product_code":"mrs", - "code":"750", + "code":"864", "des":"Spark is a memory-based computing frame. If the memory is insufficient during computing, the Spark execution efficiency will be adversely affected. 
You can determine whet", "doc_type":"usermanual", "kw":"Optimizing Memory Configuration,Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Memory Configuration", "githuburl":"" }, { "uri":"mrs_01_1978.html", + "node_id":"mrs_01_1978.xml", "product_code":"mrs", - "code":"751", + "code":"865", "des":"The degree of parallelism (DOP) specifies the number of tasks to be executed concurrently. It determines the number of data blocks after the shuffle operation. Configure ", "doc_type":"usermanual", "kw":"Setting the DOP,Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Setting the DOP", "githuburl":"" }, { "uri":"mrs_01_1979.html", + "node_id":"mrs_01_1979.xml", "product_code":"mrs", - "code":"752", + "code":"866", "des":"Broadcast distributes data sets to each node. It allows data to be obtained locally when a data set is needed during a Spark task. If broadcast is not used, data serializ", "doc_type":"usermanual", "kw":"Using Broadcast Variables,Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Broadcast Variables", "githuburl":"" }, { "uri":"mrs_01_1980.html", + "node_id":"mrs_01_1980.xml", "product_code":"mrs", - "code":"753", + "code":"867", "des":"When the Spark system runs applications that contain a shuffle process, an executor process also writes shuffle data and provides shuffle data for other executors in addi", "doc_type":"usermanual", "kw":"Using the external shuffle service to improve performance,Spark Core Tuning,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the external shuffle service to improve performance", "githuburl":"" }, { "uri":"mrs_01_1981.html", + "node_id":"mrs_01_1981.xml", "product_code":"mrs", - "code":"754", + "code":"868", "des":"Resources are a key factor that affects Spark execution efficiency. When a long-running service (such as the JDBCServer) is allocated with multiple executors without task", "doc_type":"usermanual", "kw":"Configuring Dynamic Resource Scheduling in Yarn Mode,Spark Core Tuning,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Dynamic Resource Scheduling in Yarn Mode", "githuburl":"" }, { "uri":"mrs_01_1982.html", + "node_id":"mrs_01_1982.xml", "product_code":"mrs", - "code":"755", + "code":"869", "des":"There are three processes in Spark on Yarn mode: driver, ApplicationMaster, and executor. The Driver and Executor handle the scheduling and running of the task. The Appli", "doc_type":"usermanual", "kw":"Configuring Process Parameters,Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Process Parameters", "githuburl":"" }, { "uri":"mrs_01_1983.html", + "node_id":"mrs_01_1983.xml", "product_code":"mrs", - "code":"756", + "code":"870", "des":"Optimal program structure helps increase execution efficiency. 
During application programming, avoid shuffle operations and combine narrow-dependency operations.This topi", "doc_type":"usermanual", "kw":"Designing the Direction Acyclic Graph (DAG),Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Designing the Direction Acyclic Graph (DAG)", "githuburl":"" }, { "uri":"mrs_01_1984.html", + "node_id":"mrs_01_1984.xml", "product_code":"mrs", - "code":"757", + "code":"871", "des":"If the overhead of each record is high, for example:Use mapPartitions to calculate data by partition.Use mapPartitions to flexibly operate data. For example, to calculate", "doc_type":"usermanual", "kw":"Experience,Spark Core Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Experience", "githuburl":"" }, { "uri":"mrs_01_1985.html", + "node_id":"mrs_01_1985.xml", "product_code":"mrs", - "code":"758", + "code":"872", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Spark SQL and DataFrame Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark SQL and DataFrame Tuning", "githuburl":"" }, { "uri":"mrs_01_1986.html", + "node_id":"mrs_01_1986.xml", "product_code":"mrs", - "code":"759", + "code":"873", "des":"When two tables are joined in Spark SQL, the broadcast function (see section \"Using Broadcast Variables\") can be used to broadcast tables to each node. This minimizes shu", "doc_type":"usermanual", "kw":"Optimizing the Spark SQL Join Operation,Spark SQL and DataFrame Tuning,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing the Spark SQL Join Operation", "githuburl":"" }, { "uri":"mrs_01_1987.html", + "node_id":"mrs_01_1987.xml", "product_code":"mrs", - "code":"760", + "code":"874", "des":"When multiple tables are joined in Spark SQL, skew occurs in join keys and the data volume in some Hash buckets is much higher than that in other buckets. As a result, so", "doc_type":"usermanual", "kw":"Improving Spark SQL Calculation Performance Under Data Skew,Spark SQL and DataFrame Tuning,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Improving Spark SQL Calculation Performance Under Data Skew", "githuburl":"" }, { "uri":"mrs_01_1988.html", + "node_id":"mrs_01_1988.xml", "product_code":"mrs", - "code":"761", + "code":"875", "des":"A Spark SQL table may have many small files (far smaller than an HDFS block), each of which maps to a partition on the Spark by default. 
In other words, each small file i", "doc_type":"usermanual", "kw":"Optimizing Spark SQL Performance in the Small File Scenario,Spark SQL and DataFrame Tuning,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Spark SQL Performance in the Small File Scenario", "githuburl":"" }, { "uri":"mrs_01_1989.html", + "node_id":"mrs_01_1989.xml", "product_code":"mrs", - "code":"762", + "code":"876", "des":"The INSERT...SELECT operation needs to be optimized if any of the following conditions is true:Many small files need to be queried.A few large files need to be queried.Th", "doc_type":"usermanual", "kw":"Optimizing the INSERT...SELECT Operation,Spark SQL and DataFrame Tuning,Component Operation Guide (L", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing the INSERT...SELECT Operation", "githuburl":"" }, { "uri":"mrs_01_1990.html", + "node_id":"mrs_01_1990.xml", "product_code":"mrs", - "code":"763", + "code":"877", "des":"Multiple clients can be connected to JDBCServer at the same time. However, if the number of concurrent tasks is too large, the default configuration of JDBCServer must be", "doc_type":"usermanual", "kw":"Multiple JDBC Clients Concurrently Connecting to JDBCServer,Spark SQL and DataFrame Tuning,Component", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Multiple JDBC Clients Concurrently Connecting to JDBCServer", "githuburl":"" }, { "uri":"mrs_01_1992.html", + "node_id":"mrs_01_1992.xml", "product_code":"mrs", - "code":"764", + "code":"878", "des":"When SparkSQL inserts data to dynamic partitioned tables, the more partitions there are, the more HDFS files a single task generates and the more memory metadata occupies", "doc_type":"usermanual", "kw":"Optimizing Memory when Data Is Inserted into Dynamic Partitioned Tables,Spark SQL and DataFrame Tuni", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Memory when Data Is Inserted into Dynamic Partitioned Tables", "githuburl":"" }, { "uri":"mrs_01_1995.html", + "node_id":"mrs_01_1995.xml", "product_code":"mrs", - "code":"765", + "code":"879", "des":"A Spark SQL table may have many small files (far smaller than an HDFS block), each of which maps to a partition on the Spark by default. In other words, each small file i", "doc_type":"usermanual", "kw":"Optimizing Small Files,Spark SQL and DataFrame Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Small Files", "githuburl":"" }, { "uri":"mrs_01_1996.html", + "node_id":"mrs_01_1996.xml", "product_code":"mrs", - "code":"766", + "code":"880", "des":"Spark SQL supports hash aggregate algorithm. Namely, use fast aggregate hashmap as cache to improve aggregate performance. 
The hashmap replaces the previous ColumnarBatch", "doc_type":"usermanual", "kw":"Optimizing the Aggregate Algorithms,Spark SQL and DataFrame Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing the Aggregate Algorithms", "githuburl":"" }, { "uri":"mrs_01_1997.html", + "node_id":"mrs_01_1997.xml", "product_code":"mrs", - "code":"767", + "code":"881", "des":"Save the partition information about the datasource table to the Metastore and process partition information in the Metastore.Optimize the datasource tables, support synt", "doc_type":"usermanual", "kw":"Optimizing Datasource Tables,Spark SQL and DataFrame Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Datasource Tables", "githuburl":"" }, { "uri":"mrs_01_1998.html", + "node_id":"mrs_01_1998.xml", "product_code":"mrs", - "code":"768", + "code":"882", "des":"Spark SQL supports rule-based optimization by default. However, the rule-based optimization cannot ensure that Spark selects the optimal query plan. Cost-Based Optimizer ", "doc_type":"usermanual", "kw":"Merging CBO,Spark SQL and DataFrame Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Merging CBO", "githuburl":"" }, { "uri":"mrs_01_1999.html", + "node_id":"mrs_01_1999.xml", "product_code":"mrs", - "code":"769", + "code":"883", "des":"This section describes how to enable or disable the query optimization for inter-source complex SQL.(Optional) Prepare for connecting to the MPPDB data source.If the data", "doc_type":"usermanual", "kw":"Optimizing SQL Query of Data of Multiple Sources,Spark SQL and DataFrame Tuning,Component Operation ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing SQL Query of Data of Multiple Sources", "githuburl":"" }, { "uri":"mrs_01_2000.html", + "node_id":"mrs_01_2000.xml", "product_code":"mrs", - "code":"770", + "code":"884", "des":"This section describes the optimization suggestions for SQL statements in multi-level nesting and hybrid join scenarios.The following provides an example of complex query", "doc_type":"usermanual", "kw":"SQL Optimization for Multi-level Nesting and Hybrid Join,Spark SQL and DataFrame Tuning,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"SQL Optimization for Multi-level Nesting and Hybrid Join", "githuburl":"" }, { "uri":"mrs_01_2001.html", + "node_id":"mrs_01_2001.xml", "product_code":"mrs", - "code":"771", + "code":"885", "des":"Streaming is a mini-batch streaming processing framework that features second-level delay and high throughput. 
To optimize Streaming is to improve its throughput while ma", "doc_type":"usermanual", "kw":"Spark Streaming Tuning,Spark2x Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark Streaming Tuning", "githuburl":"" }, { "uri":"mrs_01_24056.html", + "node_id":"mrs_01_24056.xml", "product_code":"mrs", - "code":"772", + "code":"886", "des":"In the scenario where a small number of requests are frequently sent from Spark on OBS to OBS, you can disable OBS monitoring to improve performance.Modify the configurat", "doc_type":"usermanual", "kw":"Spark on OBS Tuning,Spark2x Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark on OBS Tuning", "githuburl":"" }, { "uri":"mrs_01_2002.html", + "node_id":"mrs_01_2002.xml", "product_code":"mrs", - "code":"773", + "code":"887", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Spark2x", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Spark2x", "githuburl":"" }, { "uri":"mrs_01_2003.html", + "node_id":"mrs_01_2003.xml", "product_code":"mrs", - "code":"774", + "code":"888", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Spark Core", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark Core", "githuburl":"" }, { "uri":"mrs_01_2004.html", + "node_id":"mrs_01_2004.xml", "product_code":"mrs", - "code":"775", + "code":"889", "des":"How do I view the aggregated container logs on the page when the log aggregation function is enabled on YARN?For details, see Viewing Aggregated Container Logs on the Web", "doc_type":"usermanual", "kw":"How Do I View Aggregated Spark Application Logs?,Spark Core,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I View Aggregated Spark Application Logs?", "githuburl":"" }, { "uri":"mrs_01_2005.html", + "node_id":"mrs_01_2005.xml", "product_code":"mrs", - "code":"776", + "code":"890", "des":"Communication between ApplicationMaster and ResourceManager remains abnormal for a long time. 
Why is the driver return code inconsistent with application status on Resour", "doc_type":"usermanual", "kw":"Why Is the Return Code of Driver Inconsistent with Application State Displayed on ResourceManager We", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is the Return Code of Driver Inconsistent with Application State Displayed on ResourceManager WebUI?", "githuburl":"" }, { "uri":"mrs_01_2006.html", + "node_id":"mrs_01_2006.xml", "product_code":"mrs", - "code":"777", + "code":"891", "des":"Why cannot exit the Driver process after running the yarn application -kill applicationID command to stop the Spark Streaming application?Running the yarn application -ki", "doc_type":"usermanual", "kw":"Why Cannot Exit the Driver Process?,Spark Core,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot Exit the Driver Process?", "githuburl":"" }, { "uri":"mrs_01_2007.html", + "node_id":"mrs_01_2007.xml", "product_code":"mrs", - "code":"778", + "code":"892", "des":"On a large cluster of 380 nodes, run the ScalaSort test case in the HiBench test that runs the 29T data, and configure Executor as --executor-cores 4. The following abnor", "doc_type":"usermanual", "kw":"Why Does FetchFailedException Occur When the Network Connection Is Timed out,Spark Core,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does FetchFailedException Occur When the Network Connection Is Timed out", "githuburl":"" }, { "uri":"mrs_01_2008.html", + "node_id":"mrs_01_2008.xml", "product_code":"mrs", - "code":"779", + "code":"893", "des":"How to configure the event queue size if the following Driver log information is displayed indicating that the event queue overflows?Common applicationsDropping SparkList", "doc_type":"usermanual", "kw":"How to Configure Event Queue Size If Event Queue Overflows?,Spark Core,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Configure Event Queue Size If Event Queue Overflows?", "githuburl":"" }, { "uri":"mrs_01_2009.html", + "node_id":"mrs_01_2009.xml", "product_code":"mrs", - "code":"780", + "code":"894", "des":"During Spark application execution, if the driver fails to connect to ResourceManager, the following error is reported and it does not exit for a long time. What can I do", "doc_type":"usermanual", "kw":"What Can I Do If the getApplicationReport Exception Is Recorded in Logs During Spark Application Exe", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do If the getApplicationReport Exception Is Recorded in Logs During Spark Application Execution and the Application Does Not Exit for a Long Time?", "githuburl":"" }, { "uri":"mrs_01_2010.html", + "node_id":"mrs_01_2010.xml", "product_code":"mrs", - "code":"781", + "code":"895", "des":"When Spark executes an application, an error similar to the following is reported and the application ends. 
What can I do?Symptom: The value of spark.rpc.io.connectionTim", "doc_type":"usermanual", "kw":"What Can I Do If \"Connection to ip:port has been quiet for xxx ms while there are outstanding reques", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do If \"Connection to ip:port has been quiet for xxx ms while there are outstanding requests\" Is Reported When Spark Executes an Application and the Application Ends?", "githuburl":"" }, { "uri":"mrs_01_2011.html", + "node_id":"mrs_01_2011.xml", "product_code":"mrs", - "code":"782", + "code":"896", "des":"If the NodeManager is shut down with the Executor dynamic allocation enabled, the Executors on the node where the NodeManeger is shut down fail to be removed from the dri", "doc_type":"usermanual", "kw":"Why Do Executors Fail to be Removed After the NodeManeger Is Shut Down?,Spark Core,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do Executors Fail to be Removed After the NodeManeger Is Shut Down?", "githuburl":"" }, { "uri":"mrs_01_2012.html", + "node_id":"mrs_01_2012.xml", "product_code":"mrs", - "code":"783", + "code":"897", "des":"ExternalShuffle is enabled for the application that runs Spark. Task loss occurs in the application because the message \"java.lang.NullPointerException: Password cannot b", "doc_type":"usermanual", "kw":"What Can I Do If the Message \"Password cannot be null if SASL is enabled\" Is Displayed?,Spark Core,C", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do If the Message \"Password cannot be null if SASL is enabled\" Is Displayed?", "githuburl":"" }, { "uri":"mrs_01_2013.html", + "node_id":"mrs_01_2013.xml", "product_code":"mrs", - "code":"784", + "code":"898", "des":"When inserting data into the dynamic partition table, a large number of shuffle files are damaged due to the disk disconnection, node error, and the like. 
In this case, w", "doc_type":"usermanual", "kw":"What Should I Do If the Message \"Failed to CREATE_FILE\" Is Displayed in the Restarted Tasks When Dat", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Should I Do If the Message \"Failed to CREATE_FILE\" Is Displayed in the Restarted Tasks When Data Is Inserted Into the Dynamic Partition Table?", "githuburl":"" }, { "uri":"mrs_01_2014.html", + "node_id":"mrs_01_2014.xml", "product_code":"mrs", - "code":"785", + "code":"899", "des":"When Hash shuffle is used to run a job that consists of 1000000 map tasks x 100000 reduce tasks, run logs report many message failures and Executor heartbeat timeout, lea", "doc_type":"usermanual", "kw":"Why Tasks Fail When Hash Shuffle Is Used?,Spark Core,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Tasks Fail When Hash Shuffle Is Used?", "githuburl":"" }, { "uri":"mrs_01_2015.html", + "node_id":"mrs_01_2015.xml", "product_code":"mrs", - "code":"786", + "code":"900", "des":"When the http(s)://: mode is used to access the Spark JobHistory page, if the displayed Spark JobHistory page is not the page of FusionInsight Manag", "doc_type":"usermanual", "kw":"What Can I Do If the Error Message \"DNS query failed\" Is Displayed When I Access the Aggregated Logs", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do If the Error Message \"DNS query failed\" Is Displayed When I Access the Aggregated Logs Page of Spark Applications?", "githuburl":"" }, { "uri":"mrs_01_2016.html", + "node_id":"mrs_01_2016.xml", "product_code":"mrs", - "code":"787", + "code":"901", "des":"When I execute a 100 TB TPC-DS test suite in the JDBCServer mode, the \"Timeout waiting for task\" is displayed. As a result, shuffle fetch fails, the stage keeps retrying,", "doc_type":"usermanual", "kw":"What Can I Do If Shuffle Fetch Fails Due to the \"Timeout Waiting for Task\" Exception?,Spark Core,Com", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Can I Do If Shuffle Fetch Fails Due to the \"Timeout Waiting for Task\" Exception?", "githuburl":"" }, { "uri":"mrs_01_2017.html", + "node_id":"mrs_01_2017.xml", "product_code":"mrs", - "code":"788", + "code":"902", "des":"When I run Spark tasks with a large data volume, for example, 100 TB TPCDS test suite, why does the Stage retry due to Executor loss sometimes? The message \"Executor 532 ", "doc_type":"usermanual", "kw":"Why Does the Stage Retry due to the Crash of the Executor?,Spark Core,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the Stage Retry due to the Crash of the Executor?", "githuburl":"" }, { "uri":"mrs_01_2018.html", + "node_id":"mrs_01_2018.xml", "product_code":"mrs", - "code":"789", + "code":"903", "des":"When more than 50 terabytes of data is shuffled, some executors fail to register shuffle services due to timeout. The shuffle tasks then fail. Why? 
The error log is as fo", "doc_type":"usermanual", "kw":"Why Do the Executors Fail to Register Shuffle Services During the Shuffle of a Large Amount of Data?", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do the Executors Fail to Register Shuffle Services During the Shuffle of a Large Amount of Data?", "githuburl":"" }, { "uri":"mrs_01_2019.html", + "node_id":"mrs_01_2019.xml", "product_code":"mrs", - "code":"790", + "code":"904", "des":"During the execution of Spark applications, if the YARN External Shuffle service is enabled and there are too many shuffle tasks, the java.lang.OutofMemoryError: Direct b", "doc_type":"usermanual", "kw":"Why Does the Out of Memory Error Occur in NodeManager During the Execution of Spark Applications,Spa", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the Out of Memory Error Occur in NodeManager During the Execution of Spark Applications", "githuburl":"" }, { "uri":"mrs_01_2021.html", + "node_id":"mrs_01_2021.xml", "product_code":"mrs", - "code":"791", + "code":"905", "des":"Execution of the sparkbench task (for example, Wordcount) of HiBench6 fails. The bench.log indicates that the Yarn task fails to be executed. The failure information disp", "doc_type":"usermanual", "kw":"Why Does the Realm Information Fail to Be Obtained When SparkBench is Run on HiBench for the Cluster", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the Realm Information Fail to Be Obtained When SparkBench is Run on HiBench for the Cluster in Security Mode?", "githuburl":"" }, { "uri":"mrs_01_2022.html", + "node_id":"mrs_01_2022.xml", "product_code":"mrs", - "code":"792", + "code":"906", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Spark SQL and DataFrame", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Spark SQL and DataFrame", "githuburl":"" }, { "uri":"mrs_01_2023.html", + "node_id":"mrs_01_2023.xml", "product_code":"mrs", - "code":"793", + "code":"907", "des":"Suppose that there is a table src(d1, d2, m) with the following data:The results for statement \"select d1, sum(d1) from src group by d1, d2 with rollup\" are shown as belo", "doc_type":"usermanual", "kw":"What Do I have to Note When Using Spark SQL ROLLUP and CUBE?,Spark SQL and DataFrame,Component Opera", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Do I have to Note When Using Spark SQL ROLLUP and CUBE?", "githuburl":"" }, { "uri":"mrs_01_2024.html", + "node_id":"mrs_01_2024.xml", "product_code":"mrs", - "code":"794", + "code":"908", "des":"Why temporary tables of the previous database are displayed after the database is switched?Create a temporary DataSource table, for example:create temporary table ds_parq", "doc_type":"usermanual", "kw":"Why Spark SQL Is Displayed as a Temporary Table in Different Databases?,Spark SQL and DataFrame,Comp", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Spark SQL Is Displayed as a Temporary Table in Different Databases?", "githuburl":"" }, { "uri":"mrs_01_2025.html", + "node_id":"mrs_01_2025.xml", "product_code":"mrs", - "code":"795", + "code":"909", "des":"Is it possible to assign parameter values through Spark commands, in addition to through a user interface or a configuration file?Spark configuration options can be defin", "doc_type":"usermanual", "kw":"How to Assign a Parameter Value in a Spark Command?,Spark SQL and DataFrame,Component Operation Guid", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Assign a Parameter Value in a Spark Command?", "githuburl":"" }, { "uri":"mrs_01_2026.html", + "node_id":"mrs_01_2026.xml", "product_code":"mrs", - "code":"796", + "code":"910", "des":"The following error information is displayed when a new user creates a table using SparkSQL:When you create a table using Spark SQL, the interface of Hive is called by th", "doc_type":"usermanual", "kw":"What Directory Permissions Do I Need to Create a Table Using SparkSQL?,Spark SQL and DataFrame,Compo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Directory Permissions Do I Need to Create a Table Using SparkSQL?", "githuburl":"" }, { "uri":"mrs_01_2027.html", + "node_id":"mrs_01_2027.xml", "product_code":"mrs", - "code":"797", + "code":"911", "des":"Why do I fail to delete the UDF using another service, for example, delete the UDF created by Hive using Spark SQL.The UDF can be created using any of the following servi", "doc_type":"usermanual", "kw":"Why Do I Fail to Delete the UDF Using Another Service?,Spark SQL and DataFrame,Component Operation G", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do I Fail to Delete the UDF Using Another Service?", "githuburl":"" }, { "uri":"mrs_01_2028.html", + "node_id":"mrs_01_2028.xml", "product_code":"mrs", - "code":"798", + "code":"912", 
"des":"Why cannot I query newly inserted data in a parquet Hive table using SparkSQL? This problem occurs in the following scenarios:For partitioned tables and non-partitioned t", "doc_type":"usermanual", "kw":"Why Cannot I Query Newly Inserted Data in a Parquet Hive Table Using SparkSQL?,Spark SQL and DataFra", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot I Query Newly Inserted Data in a Parquet Hive Table Using SparkSQL?", "githuburl":"" }, { "uri":"mrs_01_2029.html", + "node_id":"mrs_01_2029.xml", "product_code":"mrs", - "code":"799", + "code":"913", "des":"What is cache table used for? Which point should I pay attention to while using cache table?Spark SQL caches tables into memory so that data can be directly read from mem", "doc_type":"usermanual", "kw":"How to Use Cache Table?,Spark SQL and DataFrame,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How to Use Cache Table?", "githuburl":"" }, { "uri":"mrs_01_2030.html", + "node_id":"mrs_01_2030.xml", "product_code":"mrs", - "code":"800", + "code":"914", "des":"During the repartition operation, the number of blocks (spark.sql.shuffle.partitions) is set to 4,500, and the number of keys used by repartition exceeds 4,000. It is exp", "doc_type":"usermanual", "kw":"Why Are Some Partitions Empty During Repartition?,Spark SQL and DataFrame,Component Operation Guide ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Are Some Partitions Empty During Repartition?", "githuburl":"" }, { "uri":"mrs_01_2031.html", + "node_id":"mrs_01_2031.xml", "product_code":"mrs", - "code":"801", + "code":"915", "des":"When the default configuration is used, 16 terabytes of text data fails to be converted into 4 terabytes of parquet data, and the error information below is displayed. Wh", "doc_type":"usermanual", "kw":"Why Does 16 Terabytes of Text Data Fails to Be Converted into 4 Terabytes of Parquet Data?,Spark SQL", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does 16 Terabytes of Text Data Fails to Be Converted into 4 Terabytes of Parquet Data?", "githuburl":"" }, { "uri":"mrs_01_2033.html", + "node_id":"mrs_01_2033.xml", "product_code":"mrs", - "code":"802", + "code":"916", "des":"When the table name is set to table, why the error information similar to the following is displayed after the drop table table command or other command is run?The word t", "doc_type":"usermanual", "kw":"Why the Operation Fails When the Table Name Is TABLE?,Spark SQL and DataFrame,Component Operation Gu", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why the Operation Fails When the Table Name Is TABLE?", "githuburl":"" }, { "uri":"mrs_01_2034.html", + "node_id":"mrs_01_2034.xml", "product_code":"mrs", - "code":"803", + "code":"917", "des":"When the analyze table statement is executed using spark-sql, the task is suspended and the information below is displayed. 
Why?When the statement is executed, the SQL st", "doc_type":"usermanual", "kw":"Why Is a Task Suspended When the ANALYZE TABLE Statement Is Executed and Resources Are Insufficient?", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is a Task Suspended When the ANALYZE TABLE Statement Is Executed and Resources Are Insufficient?", "githuburl":"" }, { "uri":"mrs_01_2035.html", + "node_id":"mrs_01_2035.xml", "product_code":"mrs", - "code":"804", + "code":"918", "des":"If I access a parquet table on which I do not have permission, why a job is run before \"Missing Privileges\" is displayed?The execution sequence of Spark SQL statement par", "doc_type":"usermanual", "kw":"If I Access a parquet Table on Which I Do not Have Permission, Why a Job Is Run Before \"Missing Priv", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"If I Access a parquet Table on Which I Do not Have Permission, Why a Job Is Run Before \"Missing Privileges\" Is Displayed?", "githuburl":"" }, { "uri":"mrs_01_2036.html", + "node_id":"mrs_01_2036.xml", "product_code":"mrs", - "code":"805", + "code":"919", "des":"When do I fail to modify the metadata in the datasource and Spark on HBase table by running the Hive command?The current Spark version does not support modifying the meta", "doc_type":"usermanual", "kw":"Why Do I Fail to Modify MetaData by Running the Hive Command?,Spark SQL and DataFrame,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do I Fail to Modify MetaData by Running the Hive Command?", "githuburl":"" }, { "uri":"mrs_01_2037.html", + "node_id":"mrs_01_2037.xml", "product_code":"mrs", - "code":"806", + "code":"920", "des":"After successfully running Spark tasks with large data volume, for example, 2-TB TPCDS test suite, why is the abnormal stack information \"RejectedExecutionException\" disp", "doc_type":"usermanual", "kw":"Why Is \"RejectedExecutionException\" Displayed When I Exit Spark SQL?,Spark SQL and DataFrame,Compone", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is \"RejectedExecutionException\" Displayed When I Exit Spark SQL?", "githuburl":"" }, { "uri":"mrs_01_2038.html", + "node_id":"mrs_01_2038.xml", "product_code":"mrs", - "code":"807", + "code":"921", "des":"During a health check, if the concurrent statements exceed the threshold of the thread pool, the health check statements fail to be executed, the health check program tim", "doc_type":"usermanual", "kw":"What Should I Do If the JDBCServer Process is Mistakenly Killed During a Health Check?,Spark SQL and", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Should I Do If the JDBCServer Process is Mistakenly Killed During a Health Check?", "githuburl":"" }, { "uri":"mrs_01_2039.html", + "node_id":"mrs_01_2039.xml", "product_code":"mrs", - "code":"808", + "code":"922", "des":"Why no result is found when 2016-6-30 is set in the date field as the filter condition?As shown in the following figure, trx_dte_par in the select count (*) from trxfintr", "doc_type":"usermanual", "kw":"Why No Result Is found When 2016-6-30 Is Set in the Date Field as the Filter Condition?,Spark SQL an", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why No Result Is found When 
2016-6-30 Is Set in the Date Field as the Filter Condition?", "githuburl":"" }, { "uri":"mrs_01_2040.html", + "node_id":"mrs_01_2040.xml", "product_code":"mrs", - "code":"809", + "code":"923", "des":"Why does the --hivevaroption I specified in the command for starting spark-beeline fail to take effect?In the V100R002C60 version, if I use the --hivevar = Service > Tez > Configuration > All Configurations. Enter a parameter name in the search box.", "doc_type":"usermanual", "kw":"Common Tez Parameters,Using Tez,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Tez Parameters", "githuburl":"" }, { "uri":"mrs_01_2070.html", + "node_id":"mrs_01_2070.xml", "product_code":"mrs", - "code":"835", + "code":"950", "des":"Tez displays the Tez task execution process on a GUI. You can view the task execution details on the GUI.The TimelineServer instance of the Yarn service has been installe", "doc_type":"usermanual", "kw":"Accessing TezUI,Using Tez,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Accessing TezUI", "githuburl":"" }, { "uri":"mrs_01_2071.html", + "node_id":"mrs_01_2071.xml", "product_code":"mrs", - "code":"836", + "code":"951", "des":"Log path: The default save path of Tez logs is /var/log/Bigdata/tez/role name.TezUI: /var/log/Bigdata/tez/tezui (run logs) and /var/log/Bigdata/audit/tez/tezui (audit log", "doc_type":"usermanual", "kw":"Log Overview,Using Tez,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Log Overview", "githuburl":"" }, { "uri":"mrs_01_2072.html", + "node_id":"mrs_01_2072.xml", "product_code":"mrs", - "code":"837", + "code":"952", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues", "githuburl":"" }, { "uri":"mrs_01_2073.html", + "node_id":"mrs_01_2073.xml", "product_code":"mrs", - "code":"838", + "code":"953", "des":"After a user logs in to Manager and switches to the Tez web UI, the submitted Tez tasks are not displayed.The Tez task data displayed on the Tez WebUI requires the suppor", "doc_type":"usermanual", "kw":"TezUI Cannot Display Tez Task Execution Details,Common Issues,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"TezUI Cannot Display Tez Task Execution Details", "githuburl":"" }, { "uri":"mrs_01_2074.html", + "node_id":"mrs_01_2074.xml", "product_code":"mrs", - "code":"839", + "code":"954", "des":"When a user logs in to Manager and switches to the Tez web UI, error 404 or 503 is displayed.The Tez web UI depends on the TimelineServer instance of Yarn. 
Therefore, Tim", "doc_type":"usermanual", "kw":"Error Occurs When a User Switches to the Tez Web UI,Common Issues,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Error Occurs When a User Switches to the Tez Web UI", "githuburl":"" }, { "uri":"mrs_01_2075.html", + "node_id":"mrs_01_2075.xml", "product_code":"mrs", - "code":"840", + "code":"955", "des":"A user logs in to the Tez web UI and clicks Logs, but the Yarn log page fails to be displayed and data cannot be loaded.Currently, the hostname is used for the access to ", "doc_type":"usermanual", "kw":"Yarn Logs Cannot Be Viewed on the TezUI Page,Common Issues,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Yarn Logs Cannot Be Viewed on the TezUI Page", "githuburl":"" }, { "uri":"mrs_01_2076.html", + "node_id":"mrs_01_2076.xml", "product_code":"mrs", - "code":"841", + "code":"956", "des":"A user logs in to Manager and switches to the Tez web UI page, but no data for the submitted task is displayed on the Hive Queries page.To display Hive Queries task data ", "doc_type":"usermanual", "kw":"Table Data Is Empty on the TezUI HiveQueries Page,Common Issues,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Table Data Is Empty on the TezUI HiveQueries Page", "githuburl":"" }, { "uri":"mrs_01_0851.html", + "node_id":"mrs_01_0851.xml", "product_code":"mrs", - "code":"842", + "code":"957", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using Yarn", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using Yarn", "githuburl":"" }, { "uri":"mrs_01_0852.html", + "node_id":"mrs_01_0852.xml", "product_code":"mrs", - "code":"843", + "code":"958", "des":"The Yarn service provides one queue (default) for users. Users allocate system resources to each queue. After the configuration is complete, you can click Refresh Queue o", "doc_type":"usermanual", "kw":"Common Yarn Parameters,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Yarn Parameters", "githuburl":"" }, { "uri":"mrs_01_0853.html", + "node_id":"mrs_01_0853.xml", "product_code":"mrs", - "code":"844", + "code":"959", "des":"This section describes how to create and configure a Yarn role. The Yarn role can be assigned with Yarn administrator permission and manage Yarn queue resources.If the cu", "doc_type":"usermanual", "kw":"Creating Yarn Roles,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Creating Yarn Roles", "githuburl":"" }, { "uri":"mrs_01_0854.html", + "node_id":"mrs_01_0854.xml", "product_code":"mrs", - "code":"845", + "code":"960", "des":"This section guides users to use a Yarn client in an O&M or service scenario.The client has been installed.For example, the installation directory is /opt/hadoopclient. 
T", "doc_type":"usermanual", "kw":"Using the Yarn Client,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using the Yarn Client", "githuburl":"" }, { "uri":"mrs_01_0855.html", + "node_id":"mrs_01_0855.xml", "product_code":"mrs", - "code":"846", + "code":"961", "des":"If the hardware resources (such as the number of CPU cores and memory size) of the nodes for deploying NodeManagers are different but the NodeManager available hardware r", "doc_type":"usermanual", "kw":"Configuring Resources for a NodeManager Role Instance,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Resources for a NodeManager Role Instance", "githuburl":"" }, { "uri":"mrs_01_0856.html", + "node_id":"mrs_01_0856.xml", "product_code":"mrs", - "code":"847", + "code":"962", "des":"If the storage directories defined by the Yarn NodeManager are incorrect or the Yarn storage plan changes, the system administrator needs to modify the NodeManager storag", "doc_type":"usermanual", "kw":"Changing NodeManager Storage Directories,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Changing NodeManager Storage Directories", "githuburl":"" }, { "uri":"mrs_01_0857.html", + "node_id":"mrs_01_0857.xml", "product_code":"mrs", - "code":"848", + "code":"963", "des":"In the multi-tenant scenario in security mode, a cluster can be used by multiple users, and tasks of multiple users can be submitted and executed. Users are invisible to ", "doc_type":"usermanual", "kw":"Configuring Strict Permission Control for Yarn,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Strict Permission Control for Yarn", "githuburl":"" }, { "uri":"mrs_01_0858.html", + "node_id":"mrs_01_0858.xml", "product_code":"mrs", - "code":"849", + "code":"964", "des":"Yarn provides the container log aggregation function to collect logs generated by containers on each node to HDFS to release local disk space. You can collect logs in eit", "doc_type":"usermanual", "kw":"Configuring Container Log Aggregation,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Container Log Aggregation", "githuburl":"" }, { "uri":"mrs_01_0859.html", + "node_id":"mrs_01_0859.xml", "product_code":"mrs", - "code":"850", + "code":"965", "des":"CGroups is a Linux kernel feature. In YARN this feature allows containers to be limited in their resource usage (example, CPU usage). 
Without CGroups, it is hard to limit", "doc_type":"usermanual", "kw":"Using CGroups with YARN,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using CGroups with YARN", "githuburl":"" }, { "uri":"mrs_01_0860.html", + "node_id":"mrs_01_0860.xml", "product_code":"mrs", - "code":"851", + "code":"966", "des":"When resources are insufficient or ApplicationMaster fails to start, a client probably encounters running errors.Go to the All Configurations page of Yarn and enter a par", "doc_type":"usermanual", "kw":"Configuring the Number of ApplicationMaster Retries,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Number of ApplicationMaster Retries", "githuburl":"" }, { "uri":"mrs_01_0861.html", + "node_id":"mrs_01_0861.xml", "product_code":"mrs", - "code":"852", + "code":"967", "des":"During the process of starting the configuration, when the ApplicationMaster creates a container, the allocated memory is automatically adjusted according to the total nu", "doc_type":"usermanual", "kw":"Configure the ApplicationMaster to Automatically Adjust the Allocated Memory,Using Yarn,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configure the ApplicationMaster to Automatically Adjust the Allocated Memory", "githuburl":"" }, { "uri":"mrs_01_0862.html", + "node_id":"mrs_01_0862.xml", "product_code":"mrs", - "code":"853", + "code":"968", "des":"The value of the yarn.http.policy parameter must be consistent on both the server and clients. Web UIs on clients will be garbled if an inconsistency exists, for example,", "doc_type":"usermanual", "kw":"Configuring the Access Channel Protocol,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Access Channel Protocol", "githuburl":"" }, { "uri":"mrs_01_0863.html", + "node_id":"mrs_01_0863.xml", "product_code":"mrs", - "code":"854", + "code":"969", "des":"If memory usage of the submitted application cannot be estimated, you can modify the configuration on the server to determine whether to check the memory usage.If the mem", "doc_type":"usermanual", "kw":"Configuring Memory Usage Detection,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Memory Usage Detection", "githuburl":"" }, { "uri":"mrs_01_0864.html", + "node_id":"mrs_01_0864.xml", "product_code":"mrs", - "code":"855", + "code":"970", "des":"If the custom scheduler is set in ResourceManager, you can set the corresponding web page and other Web applications for the custom scheduler.Go to the All Configurations", "doc_type":"usermanual", "kw":"Configuring the Additional Scheduler WebUI,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Additional Scheduler WebUI", "githuburl":"" }, { "uri":"mrs_01_0865.html", + "node_id":"mrs_01_0865.xml", "product_code":"mrs", - "code":"856", + "code":"971", "des":"The Yarn Restart feature includes ResourceManager Restart and NodeManager Restart.When ResourceManager Restart is enabled, the new active ResourceManager node loads the i", 
"doc_type":"usermanual", "kw":"Configuring Yarn Restart,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Yarn Restart", "githuburl":"" }, { "uri":"mrs_01_0866.html", + "node_id":"mrs_01_0866.xml", "product_code":"mrs", - "code":"857", + "code":"972", "des":"In YARN, ApplicationMasters run on NodeManagers just like every other container (ignoring unmanaged ApplicationMasters in this context). ApplicationMasters may break down", "doc_type":"usermanual", "kw":"Configuring ApplicationMaster Work Preserving,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring ApplicationMaster Work Preserving", "githuburl":"" }, { "uri":"mrs_01_0867.html", + "node_id":"mrs_01_0867.xml", "product_code":"mrs", - "code":"858", + "code":"973", "des":"The default log level of localized container is INFO. You can change the log level by configuring yarn.nodemanager.container-localizer.java.opts.On Manager, choose Cluste", "doc_type":"usermanual", "kw":"Configuring the Localized Log Levels,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Localized Log Levels", "githuburl":"" }, { "uri":"mrs_01_0868.html", + "node_id":"mrs_01_0868.xml", "product_code":"mrs", - "code":"859", + "code":"974", "des":"Currently, YARN allows the user that starts the NodeManager to run the task submitted by all other users, or the users to run the task submitted by themselves.On Manager,", "doc_type":"usermanual", "kw":"Configuring Users That Run Tasks,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring Users That Run Tasks", "githuburl":"" }, + { + "uri":"mrs_01_24814.html", + "node_id":"mrs_01_24814.xml", + "product_code":"", + "code":"975", + "des":"As a role of the Yarn service, TimelineServer supports the HA mode since the current version. To prevent a single point of failure of TimelineServer, you can enable Timel", + "doc_type":"", + "kw":"Configuring HA for TimelineServer,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], + "title":"Configuring HA for TimelineServer", + "githuburl":"" + }, { "uri":"mrs_01_0870.html", + "node_id":"mrs_01_0870.xml", "product_code":"mrs", - "code":"860", + "code":"976", "des":"The default paths for saving Yarn logs are as follows:ResourceManager: /var/log/Bigdata/yarn/rm (run logs) and /var/log/Bigdata/audit/yarn/rm (audit logs)NodeManager: /va", "doc_type":"usermanual", "kw":"Yarn Log Overview,Using Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Yarn Log Overview", "githuburl":"" }, { "uri":"mrs_01_0871.html", + "node_id":"mrs_01_0871.xml", "product_code":"mrs", - "code":"861", + "code":"977", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Yarn Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Yarn Performance Tuning", "githuburl":"" }, { "uri":"mrs_01_0872.html", + "node_id":"mrs_01_0872.xml", "product_code":"mrs", - "code":"862", + "code":"978", "des":"The capacity scheduler of ResourceManager implements job preemption to simplify job running in queues and improve resource utilization. The process is as follows:Assume t", "doc_type":"usermanual", "kw":"Preempting a Task,Yarn Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Preempting a Task", "githuburl":"" }, { "uri":"mrs_01_0873.html", + "node_id":"mrs_01_0873.xml", "product_code":"mrs", - "code":"863", + "code":"979", "des":"The resource contention scenarios of a cluster are as follows:Submit two jobs (Job 1 and Job 2) with lower priorities.Some tasks of running Job 1 and Job 2 are in the run", "doc_type":"usermanual", "kw":"Setting the Task Priority,Yarn Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Setting the Task Priority", "githuburl":"" }, { "uri":"mrs_01_0874.html", + "node_id":"mrs_01_0874.xml", "product_code":"mrs", - "code":"864", + "code":"980", "des":"After the scheduler of a big data cluster is properly configured, you can adjust the available memory, CPU resources, and local disk of each node to optimize the performa", "doc_type":"usermanual", "kw":"Optimizing Node Configuration,Yarn Performance Tuning,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Optimizing Node Configuration", "githuburl":"" }, { "uri":"mrs_01_2077.html", + "node_id":"mrs_01_2077.xml", "product_code":"mrs", - "code":"865", + "code":"981", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About Yarn", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About Yarn", "githuburl":"" }, { "uri":"mrs_01_2078.html", + "node_id":"mrs_01_2078.xml", "product_code":"mrs", - "code":"866", + "code":"982", "des":"Why mounted directory for Container is not cleared after the completion of the job while using CGroups?The mounted path for the Container should be cleared even if job is", "doc_type":"usermanual", "kw":"Why Mounted Directory for Container is Not Cleared After the Completion of the Job While Using CGrou", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Mounted Directory for Container is Not Cleared After the Completion of the Job While Using CGroups?", "githuburl":"" }, { "uri":"mrs_01_2079.html", + "node_id":"mrs_01_2079.xml", "product_code":"mrs", - "code":"867", + "code":"983", "des":"Why is the HDFS_DELEGATION_TOKEN expired exception reported when a job fails in security mode?HDFS_DELEGATION_TOKEN expires because the token is not updated or it is acce", "doc_type":"usermanual", "kw":"Why the Job Fails with HDFS_DELEGATION_TOKEN Expired Exception?,Common Issues About Yarn,Component O", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why the Job Fails with HDFS_DELEGATION_TOKEN Expired Exception?", "githuburl":"" }, { "uri":"mrs_01_2080.html", + "node_id":"mrs_01_2080.xml", "product_code":"mrs", - "code":"868", + "code":"984", "des":"If Yarn is restarted in either of the following scenarios, local logs will not be deleted as scheduled and will be retained permanently:When Yarn is restarted during task", "doc_type":"usermanual", "kw":"Why Are Local Logs Not Deleted After YARN Is Restarted?,Common Issues About Yarn,Component Operation", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Are Local Logs Not Deleted After YARN Is Restarted?", "githuburl":"" }, { "uri":"mrs_01_2081.html", + "node_id":"mrs_01_2081.xml", "product_code":"mrs", - "code":"869", + "code":"985", "des":"Why the task does not fail even though AppAttempts restarts due to failure for more than two times?During the task execution process, if the ContainerExitStatus returns v", "doc_type":"usermanual", "kw":"Why the Task Does Not Fail Even Though AppAttempts Restarts for More Than Two Times?,Common Issues A", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why the Task Does Not Fail Even Though AppAttempts Restarts for More Than Two Times?", "githuburl":"" }, { "uri":"mrs_01_2082.html", + "node_id":"mrs_01_2082.xml", "product_code":"mrs", - "code":"870", + "code":"986", "des":"After I moved an application from one queue to another, why is it moved back to the original queue after ResourceManager restarts?This problem is caused by the constraint", "doc_type":"usermanual", "kw":"Why Is an Application Moved Back to the Original Queue After ResourceManager Restarts?,Common Issues", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is an Application Moved Back to the Original Queue After ResourceManager Restarts?", "githuburl":"" }, { "uri":"mrs_01_2083.html", 
+ "node_id":"mrs_01_2083.xml", "product_code":"mrs", - "code":"871", + "code":"987", "des":"Why does Yarn not release the blacklist even all nodes are added to the blacklist?In Yarn, when the number of application nodes added to the blacklist by ApplicationMaste", "doc_type":"usermanual", "kw":"Why Does Yarn Not Release the Blacklist Even All Nodes Are Added to the Blacklist?,Common Issues Abo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does Yarn Not Release the Blacklist Even All Nodes Are Added to the Blacklist?", "githuburl":"" }, { "uri":"mrs_01_2084.html", + "node_id":"mrs_01_2084.xml", "product_code":"mrs", - "code":"872", + "code":"988", "des":"The switchover of ResourceManager occurs continuously when multiple, for example 2,000, tasks are running concurrently, causing the Yarn service unavailable.The cause is ", "doc_type":"usermanual", "kw":"Why Does the Switchover of ResourceManager Occur Continuously?,Common Issues About Yarn,Component Op", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the Switchover of ResourceManager Occur Continuously?", "githuburl":"" }, { "uri":"mrs_01_2085.html", + "node_id":"mrs_01_2085.xml", "product_code":"mrs", - "code":"873", + "code":"989", "des":"Why does a new application fail if a NodeManager has been in unhealthy status for 10 minutes?When nodeSelectPolicy is set to SEQUENCE and the first NodeManager connected ", "doc_type":"usermanual", "kw":"Why Does a New Application Fail If a NodeManager Has Been in Unhealthy Status for 10 Minutes?,Common", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does a New Application Fail If a NodeManager Has Been in Unhealthy Status for 10 Minutes?", "githuburl":"" }, { "uri":"mrs_01_2086.html", + "node_id":"mrs_01_2086.xml", "product_code":"mrs", - "code":"874", + "code":"990", "des":"If a user belongs to multiple user groups with different default queue configurations, which queue will be selected as the default queue when an application is submitted?", "doc_type":"usermanual", "kw":"What Is the Queue Replacement Policy?,Common Issues About Yarn,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Is the Queue Replacement Policy?", "githuburl":"" }, { "uri":"mrs_01_2087.html", + "node_id":"mrs_01_2087.xml", "product_code":"mrs", - "code":"875", + "code":"991", "des":"Why does an error occur when I query the applicationID of a completed or non-existing application using the RESTful APIs?The Superior scheduler only stores the applicatio", "doc_type":"usermanual", "kw":"Why Does an Error Occur When I Query the ApplicationID of a Completed or Non-existing Application Us", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does an Error Occur When I Query the ApplicationID of a Completed or Non-existing Application Using the RESTful APIs?", "githuburl":"" }, { "uri":"mrs_01_2088.html", + "node_id":"mrs_01_2088.xml", "product_code":"mrs", - "code":"876", + "code":"992", "des":"In Superior scheduling mode, if a single NodeManager is faulty, why may the MapReduce tasks fail?In normal cases, when the attempt of a single task of an application fail", "doc_type":"usermanual", "kw":"Why May A Single NodeManager Fault Cause MapReduce Task Failures in the Superior 
Scheduling Mode?,Co", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why May A Single NodeManager Fault Cause MapReduce Task Failures in the Superior Scheduling Mode?", "githuburl":"" }, { "uri":"mrs_01_2089.html", + "node_id":"mrs_01_2089.xml", "product_code":"mrs", - "code":"877", + "code":"993", "des":"When a queue is deleted when there are applications running in it, these applications are moved to the \"lost_and_found\" queue. When these applications are moved back to a", "doc_type":"usermanual", "kw":"Why Are Applications Suspended After They Are Moved From Lost_and_Found Queue to Another Queue?,Comm", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Are Applications Suspended After They Are Moved From Lost_and_Found Queue to Another Queue?", "githuburl":"" }, { "uri":"mrs_01_2090.html", + "node_id":"mrs_01_2090.xml", "product_code":"mrs", - "code":"878", + "code":"994", "des":"How do I limit the size of application diagnostic messages stored in the ZKstore?In some cases, it has been observed that diagnostic messages may grow infinitely. Because", "doc_type":"usermanual", "kw":"How Do I Limit the Size of Application Diagnostic Messages Stored in the ZKstore?,Common Issues Abou", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Limit the Size of Application Diagnostic Messages Stored in the ZKstore?", "githuburl":"" }, { "uri":"mrs_01_2091.html", + "node_id":"mrs_01_2091.xml", "product_code":"mrs", - "code":"879", + "code":"995", "des":"Why does a MapReduce job fail to run when a non-ViewFS file system is configured as ViewFS?When a non-ViewFS file system is configured as a ViewFS using cluster, the user", "doc_type":"usermanual", "kw":"Why Does a MapReduce Job Fail to Run When a Non-ViewFS File System Is Configured as ViewFS?,Common I", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does a MapReduce Job Fail to Run When a Non-ViewFS File System Is Configured as ViewFS?", "githuburl":"" }, { "uri":"mrs_01_24051.html", + "node_id":"mrs_01_24051.xml", "product_code":"mrs", - "code":"880", + "code":"996", "des":"After the Native Task feature is enabled, Reduce tasks fail to run in some OSs.When -Dmapreduce.job.map.output.collector.class=org.apache.hadoop.mapred.nativetask.NativeM", "doc_type":"usermanual", "kw":"Why Do Reduce Tasks Fail to Run in Some OSs After the Native Task Feature is Enabled?,Common Issues ", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do Reduce Tasks Fail to Run in Some OSs After the Native Task Feature is Enabled?", "githuburl":"" }, { "uri":"mrs_01_2092.html", + "node_id":"mrs_01_2092.xml", "product_code":"mrs", - "code":"881", + "code":"997", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using ZooKeeper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using ZooKeeper", "githuburl":"" }, { "uri":"mrs_01_2093.html", + "node_id":"mrs_01_2093.xml", "product_code":"mrs", - "code":"882", + "code":"998", "des":"ZooKeeper is an open-source, highly reliable, and distributed consistency coordination service. ZooKeeper is designed to solve the problem that data consistency cannot be", "doc_type":"usermanual", "kw":"Using ZooKeeper from Scratch,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using ZooKeeper from Scratch", "githuburl":"" }, { "uri":"mrs_01_2094.html", + "node_id":"mrs_01_2094.xml", "product_code":"mrs", - "code":"883", + "code":"999", "des":"Navigation path for setting parameters:Go to the All Configurations page of ZooKeeper by referring to Modifying Cluster Service Configuration Parameters. Enter a paramete", "doc_type":"usermanual", "kw":"Common ZooKeeper Parameters,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common ZooKeeper Parameters", "githuburl":"" }, { "uri":"mrs_01_2095.html", + "node_id":"mrs_01_2095.xml", "product_code":"mrs", - "code":"884", + "code":"1000", "des":"Use a ZooKeeper client in an O&M scenario or service scenario.You have installed the client. For example, the installation directory is /opt/client. The client directory ", "doc_type":"usermanual", "kw":"Using a ZooKeeper Client,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using a ZooKeeper Client", "githuburl":"" }, { "uri":"mrs_01_2097.html", + "node_id":"mrs_01_2097.xml", "product_code":"mrs", - "code":"885", + "code":"1001", "des":"Configure znode permission of ZooKeeper.ZooKeeper uses an access control list (ACL) to implement znode access control. The ZooKeeper client specifies a znode ACL, and the", "doc_type":"usermanual", "kw":"Configuring the ZooKeeper Permissions,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the ZooKeeper Permissions", "githuburl":"" }, { "uri":"mrs_01_2096.html", + "node_id":"mrs_01_2096.xml", "product_code":"mrs", - "code":"886", + "code":"1002", "des":"When the defined storage directory of ZooKeeper is incorrect, or when the storage plan of ZooKeeper is changed, log in to FusionInsight Manager to change the storage dire", "doc_type":"usermanual", "kw":"Changing the ZooKeeper Storage Directory,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Changing the ZooKeeper Storage Directory", "githuburl":"" }, { "uri":"mrs_01_2098.html", + "node_id":"mrs_01_2098.xml", "product_code":"mrs", - "code":"887", + "code":"1003", "des":"ZooKeeper has maxClientCnxn configuration at the server side, and this configuration will verify the connections from each client IP address. 
But many clients can create ", "doc_type":"usermanual", "kw":"Configuring the ZooKeeper Connection,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the ZooKeeper Connection", "githuburl":"" }, { "uri":"mrs_01_2099.html", + "node_id":"mrs_01_2099.xml", "product_code":"mrs", - "code":"888", + "code":"1004", "des":"The ZooKeeper client uses the FIFO queue to send a request to the server and waits for a response from the server. The client maintains the FIFO queue until it acknowledg", "doc_type":"usermanual", "kw":"Configuring ZooKeeper Response Timeout Interval,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring ZooKeeper Response Timeout Interval", "githuburl":"" }, { "uri":"mrs_01_2100.html", + "node_id":"mrs_01_2100.xml", "product_code":"mrs", - "code":"889", + "code":"1005", "des":"To prevent multiple IP nodes, bind the current ZooKeeper client to any available IP address. The data flow layer, management layer, and other network layers in the produc", "doc_type":"usermanual", "kw":"Binding the Client to an IP Address,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Binding the Client to an IP Address", "githuburl":"" }, { "uri":"mrs_01_2101.html", + "node_id":"mrs_01_2101.xml", "product_code":"mrs", - "code":"890", + "code":"1006", "des":"When the ZooKeeper client is started, it is bound to a random port. In most cases, you want to bind the ZooKeeper client to a specific port. For example, for the client c", "doc_type":"usermanual", "kw":"Configuring the Port Range Bound to the Client,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring the Port Range Bound to the Client", "githuburl":"" }, { "uri":"mrs_01_2102.html", + "node_id":"mrs_01_2102.xml", "product_code":"mrs", - "code":"891", + "code":"1007", "des":"Currently, ZooKeeper client properties can be configured only through Java system properties. Therefore, all clients in the same JVM have the same configuration. 
In some ", "doc_type":"usermanual", "kw":"Performing Special Configuration on ZooKeeper Clients in the Same JVM,Using ZooKeeper,Component Oper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Performing Special Configuration on ZooKeeper Clients in the Same JVM", "githuburl":"" }, { "uri":"mrs_01_2104.html", + "node_id":"mrs_01_2104.xml", "product_code":"mrs", - "code":"892", + "code":"1008", "des":"Set a quota for Znodes in ZooKeeper of a security cluster in O&M scenarios or service scenarios to restrict the quantity and byte space of Znodes and subnodes.Two modes a", "doc_type":"usermanual", "kw":"Configuring a Quota for a Znode,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Configuring a Quota for a Znode", "githuburl":"" }, { "uri":"mrs_01_2106.html", + "node_id":"mrs_01_2106.xml", "product_code":"mrs", - "code":"893", + "code":"1009", "des":"Log path: /var/log/Bigdata/zookeeper/quorumpeer (Run log), /var/log/Bigdata/audit/zookeeper/quorumpeer (Audit log)Log archive rule: The automatic ZooKeeper log compressio", "doc_type":"usermanual", "kw":"ZooKeeper Log Overview,Using ZooKeeper,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"ZooKeeper Log Overview", "githuburl":"" }, { "uri":"mrs_01_2107.html", + "node_id":"mrs_01_2107.xml", "product_code":"mrs", - "code":"894", + "code":"1010", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Common Issues About ZooKeeper", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Common Issues About ZooKeeper", "githuburl":"" }, { "uri":"mrs_01_2108.html", + "node_id":"mrs_01_2108.xml", "product_code":"mrs", - "code":"895", + "code":"1011", "des":"After a large number of znodes are created, ZooKeeper servers in the ZooKeeper cluster become faulty and cannot be automatically recovered or restarted.Logs of followers:", "doc_type":"usermanual", "kw":"Why Do ZooKeeper Servers Fail to Start After Many znodes Are Created?,Common Issues About ZooKeeper,", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Do ZooKeeper Servers Fail to Start After Many znodes Are Created?", "githuburl":"" }, { "uri":"mrs_01_2109.html", + "node_id":"mrs_01_2109.xml", "product_code":"mrs", - "code":"896", + "code":"1012", "des":"After a large number of znodes are created in a parent directory, the ZooKeeper client will fail to fetch all child nodes of this parent directory in a single request.Log", "doc_type":"usermanual", "kw":"Why Does the ZooKeeper Server Display the java.io.IOException: Len Error Log?,Common Issues About Zo", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Does the ZooKeeper Server Display the java.io.IOException: Len Error Log?", "githuburl":"" }, { "uri":"mrs_01_2110.html", + "node_id":"mrs_01_2110.xml", "product_code":"mrs", - "code":"897", + "code":"1013", "des":"Why four letter commands do not work with linux netcat command when secure netty configurations are enabled at 
Zookeeper server?For example,echo stat |netcat host portLin", "doc_type":"usermanual", "kw":"Why Four Letter Commands Don't Work With Linux netcat Command When Secure Netty Configurations Are E", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Four Letter Commands Don't Work With Linux netcat Command When Secure Netty Configurations Are Enabled at Zookeeper Server?", "githuburl":"" }, { "uri":"mrs_01_2111.html", + "node_id":"mrs_01_2111.xml", "product_code":"mrs", - "code":"898", + "code":"1014", "des":"How to check whether the role of a ZooKeeper instance is a leader or follower.Log in to Manager and choose Cluster > Name of the desired cluster > Service > ZooKeeper > I", "doc_type":"usermanual", "kw":"How Do I Check Which ZooKeeper Instance Is a Leader?,Common Issues About ZooKeeper,Component Operati", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"How Do I Check Which ZooKeeper Instance Is a Leader?", "githuburl":"" }, { "uri":"mrs_01_2112.html", + "node_id":"mrs_01_2112.xml", "product_code":"mrs", - "code":"899", + "code":"1015", "des":"When the IBM JDK is used, the client fails to connect to ZooKeeper.The possible cause is that the jaas.conf file format of the IBM JDK is different from that of the commo", "doc_type":"usermanual", "kw":"Why Cannot the Client Connect to ZooKeeper using the IBM JDK?,Common Issues About ZooKeeper,Componen", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Cannot the Client Connect to ZooKeeper using the IBM JDK?", "githuburl":"" }, { "uri":"mrs_01_2113.html", + "node_id":"mrs_01_2113.xml", "product_code":"mrs", - "code":"900", + "code":"1016", "des":"The ZooKeeper client fails to refresh a TGT and therefore ZooKeeper cannot be accessed. The error message is as follows:ZooKeeper uses the system command kinit – R to ref", "doc_type":"usermanual", "kw":"What Should I Do When the ZooKeeper Client Fails to Refresh a TGT?,Common Issues About ZooKeeper,Com", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"What Should I Do When the ZooKeeper Client Fails to Refresh a TGT?", "githuburl":"" }, { "uri":"mrs_01_2114.html", + "node_id":"mrs_01_2114.xml", "product_code":"mrs", - "code":"901", + "code":"1017", "des":"When the client connects to a non-leader instance, run the deleteall command to delete a large number of znodes, the error message \"Node does not exist\" is displayed, but", "doc_type":"usermanual", "kw":"Why Is Message \"Node does not exist\" Displayed when A Large Number of Znodes Are Deleted Using the d", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Why Is Message \"Node does not exist\" Displayed when A Large Number of Znodes Are Deleted Using the deleteallCommand", "githuburl":"" }, { "uri":"mrs_01_2122.html", + "node_id":"mrs_01_2122.xml", "product_code":"mrs", - "code":"902", + "code":"1018", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Appendix", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Appendix", "githuburl":"" }, { "uri":"mrs_01_1293.html", + "node_id":"mrs_01_1293.xml", "product_code":"mrs", - "code":"903", + "code":"1019", "des":"Modify the configuration parameters of each service on FusionInsight Manager.The Basic Configuration tab page is displayed by default. To modify more parameters, click th", "doc_type":"usermanual", "kw":"Modifying Cluster Service Configuration Parameters,Appendix,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Modifying Cluster Service Configuration Parameters", "githuburl":"" }, { "uri":"mrs_01_2124.html", + "node_id":"mrs_01_2124.xml", "product_code":"mrs", - "code":"904", + "code":"1020", "des":"FusionInsight Manager is used to monitor, configure, and manage clusters. After the cluster is installed, you can use the account to log in to FusionInsight Manager.Curre", "doc_type":"usermanual", "kw":"Accessing FusionInsight Manager,Appendix,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Accessing FusionInsight Manager", "githuburl":"" }, { "uri":"mrs_01_0787.html", + "node_id":"mrs_01_0787.xml", "product_code":"mrs", - "code":"905", + "code":"1021", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Using an MRS Client", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"cmpntguide" + } + ], "title":"Using an MRS Client", "githuburl":"" }, { "uri":"mrs_01_0788.html", + "node_id":"mrs_01_0788.xml", "product_code":"mrs", - "code":"906", + "code":"1022", "des":"Before using the client, you need to install the client. For example, the installation directory is /opt/hadoopclient.cd /opt/hadoopclientsource bigdata_envkinit MRS clus", "doc_type":"usermanual", "kw":"Using an MRS Client on Nodes Inside a MRS Cluster,Using an MRS Client,Component Operation Guide (LTS", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Using an MRS Client on Nodes Inside a MRS Cluster", "githuburl":"" }, { "uri":"mrs_01_0800.html", + "node_id":"mrs_01_0800.xml", "product_code":"mrs", - "code":"907", + "code":"1023", "des":"After a client is installed, you can use the client on a node outside an MRS cluster.A Linux ECS has been prepared. For details about the OS and its version of the ECS, s", "doc_type":"usermanual", "kw":"Using an MRS Client on Nodes Outside a MRS Cluster,Using an MRS Client,Component Operation Guide (LT", + "search_title":"", + "metedata":[ + { + "prodname":"mrs", + "documenttype":"component" + } + ], "title":"Using an MRS Client on Nodes Outside a MRS Cluster", "githuburl":"" }, { - "uri":"en-us_topic_0000001298722056.html", + "uri":"mrs_01_17512.html", + "node_id":"mrs_01_17512.xml", "product_code":"", - "code":"908", + "code":"1024", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"", "kw":"Change History,Component Operation Guide (LTS)", + "search_title":"", + "metedata":[ + { + + } + ], "title":"Change History", "githuburl":"" } diff --git a/docs/mrs/component-operation-guide-lts/CLASS.TXT.json b/docs/mrs/component-operation-guide-lts/CLASS.TXT.json index 6cf886c7..60d0ce92 100644 --- a/docs/mrs/component-operation-guide-lts/CLASS.TXT.json +++ b/docs/mrs/component-operation-guide-lts/CLASS.TXT.json @@ -4,16 +4,16 @@ "product_code":"mrs", "title":"Using CarbonData", "uri":"mrs_01_1400.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", "code":"1" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", - "title":"Overview", + "title":"Spark CarbonData Overview", "uri":"mrs_01_1401.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"2" }, @@ -22,7 +22,7 @@ "product_code":"mrs", "title":"CarbonData Overview", "uri":"mrs_01_1402.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"2", "code":"3" }, @@ -31,7 +31,7 @@ "product_code":"mrs", "title":"Main Specifications of CarbonData", "uri":"mrs_01_1403.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"2", "code":"4" }, @@ -40,7 +40,7 @@ "product_code":"mrs", "title":"Configuration Reference", "uri":"mrs_01_1404.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"5" }, @@ -49,7 +49,7 @@ "product_code":"mrs", "title":"CarbonData Operation Guide", "uri":"mrs_01_1405.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"6" }, @@ -58,7 +58,7 @@ "product_code":"mrs", "title":"CarbonData Quick Start", "uri":"mrs_01_1406.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"6", "code":"7" }, @@ -67,7 +67,7 @@ "product_code":"mrs", "title":"CarbonData Table Management", "uri":"mrs_01_1407.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"6", "code":"8" }, @@ -76,7 +76,7 @@ "product_code":"mrs", "title":"About CarbonData Table", "uri":"mrs_01_1408.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"8", "code":"9" }, @@ -85,7 +85,7 @@ "product_code":"mrs", "title":"Creating a CarbonData Table", "uri":"mrs_01_1409.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"8", "code":"10" }, @@ -94,7 +94,7 @@ "product_code":"mrs", "title":"Deleting a CarbonData Table", "uri":"mrs_01_1410.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"8", "code":"11" }, @@ -103,7 +103,7 @@ "product_code":"mrs", "title":"Modify the CarbonData Table", "uri":"mrs_01_1411.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"8", "code":"12" }, @@ -112,7 +112,7 @@ "product_code":"mrs", "title":"CarbonData Table Data Management", "uri":"mrs_01_1412.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"6", "code":"13" }, @@ -121,7 +121,7 @@ "product_code":"mrs", "title":"Loading Data", "uri":"mrs_01_1413.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"13", "code":"14" }, @@ -130,7 +130,7 @@ "product_code":"mrs", 
"title":"Deleting Segments", "uri":"mrs_01_1414.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"13", "code":"15" }, @@ -139,7 +139,7 @@ "product_code":"mrs", "title":"Combining Segments", "uri":"mrs_01_1415.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"13", "code":"16" }, @@ -148,7 +148,7 @@ "product_code":"mrs", "title":"CarbonData Data Migration", "uri":"mrs_01_1416.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"6", "code":"17" }, @@ -157,7 +157,7 @@ "product_code":"mrs", "title":"Migrating Data on CarbonData from Spark1.5 to Spark2x", "uri":"mrs_01_2301.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"6", "code":"18" }, @@ -166,7 +166,7 @@ "product_code":"mrs", "title":"CarbonData Performance Tuning", "uri":"mrs_01_1417.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"19" }, @@ -175,7 +175,7 @@ "product_code":"mrs", "title":"Tuning Guidelines", "uri":"mrs_01_1418.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"19", "code":"20" }, @@ -184,7 +184,7 @@ "product_code":"mrs", "title":"Suggestions for Creating CarbonData Tables", "uri":"mrs_01_1419.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"19", "code":"21" }, @@ -193,7 +193,7 @@ "product_code":"mrs", "title":"Configurations for Performance Tuning", "uri":"mrs_01_1421.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"19", "code":"22" }, @@ -202,7 +202,7 @@ "product_code":"mrs", "title":"CarbonData Access Control", "uri":"mrs_01_1422.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"23" }, @@ -211,7 +211,7 @@ "product_code":"mrs", "title":"CarbonData Syntax Reference", "uri":"mrs_01_1423.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"24" }, @@ -220,7 +220,7 @@ "product_code":"mrs", "title":"DDL", "uri":"mrs_01_1424.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"24", "code":"25" }, @@ -229,7 +229,7 @@ "product_code":"mrs", "title":"CREATE TABLE", "uri":"mrs_01_1425.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"26" }, @@ -238,7 +238,7 @@ "product_code":"mrs", "title":"CREATE TABLE As SELECT", "uri":"mrs_01_1426.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"27" }, @@ -247,7 +247,7 @@ "product_code":"mrs", "title":"DROP TABLE", "uri":"mrs_01_1427.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"28" }, @@ -256,7 +256,7 @@ "product_code":"mrs", "title":"SHOW TABLES", "uri":"mrs_01_1428.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"29" }, @@ -265,7 +265,7 @@ "product_code":"mrs", "title":"ALTER TABLE COMPACTION", "uri":"mrs_01_1429.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"30" }, @@ -274,7 +274,7 @@ "product_code":"mrs", "title":"TABLE RENAME", "uri":"mrs_01_1430.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"31" }, @@ -283,7 +283,7 @@ "product_code":"mrs", "title":"ADD COLUMNS", "uri":"mrs_01_1431.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"32" }, @@ -292,7 +292,7 @@ "product_code":"mrs", "title":"DROP COLUMNS", "uri":"mrs_01_1432.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"33" }, @@ 
-301,7 +301,7 @@ "product_code":"mrs", "title":"CHANGE DATA TYPE", "uri":"mrs_01_1433.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"34" }, @@ -310,7 +310,7 @@ "product_code":"mrs", "title":"REFRESH TABLE", "uri":"mrs_01_1434.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"35" }, @@ -319,7 +319,7 @@ "product_code":"mrs", "title":"REGISTER INDEX TABLE", "uri":"mrs_01_1435.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"36" }, @@ -328,7 +328,7 @@ "product_code":"mrs", "title":"REFRESH INDEX", "uri":"mrs_01_1436.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"25", "code":"37" }, @@ -337,7 +337,7 @@ "product_code":"mrs", "title":"DML", "uri":"mrs_01_1437.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"24", "code":"38" }, @@ -346,7 +346,7 @@ "product_code":"mrs", "title":"LOAD DATA", "uri":"mrs_01_1438.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"39" }, @@ -355,7 +355,7 @@ "product_code":"mrs", "title":"UPDATE CARBON TABLE", "uri":"mrs_01_1439.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"40" }, @@ -364,7 +364,7 @@ "product_code":"mrs", "title":"DELETE RECORDS from CARBON TABLE", "uri":"mrs_01_1440.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"41" }, @@ -373,7 +373,7 @@ "product_code":"mrs", "title":"INSERT INTO CARBON TABLE", "uri":"mrs_01_1441.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"42" }, @@ -382,7 +382,7 @@ "product_code":"mrs", "title":"DELETE SEGMENT by ID", "uri":"mrs_01_1442.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"43" }, @@ -391,7 +391,7 @@ "product_code":"mrs", "title":"DELETE SEGMENT by DATE", "uri":"mrs_01_1443.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"44" }, @@ -400,7 +400,7 @@ "product_code":"mrs", "title":"SHOW SEGMENTS", "uri":"mrs_01_1444.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"45" }, @@ -409,7 +409,7 @@ "product_code":"mrs", "title":"CREATE SECONDARY INDEX", "uri":"mrs_01_1445.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"46" }, @@ -418,7 +418,7 @@ "product_code":"mrs", "title":"SHOW SECONDARY INDEXES", "uri":"mrs_01_1446.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"47" }, @@ -427,7 +427,7 @@ "product_code":"mrs", "title":"DROP SECONDARY INDEX", "uri":"mrs_01_1447.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"48" }, @@ -436,7 +436,7 @@ "product_code":"mrs", "title":"CLEAN FILES", "uri":"mrs_01_1448.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"49" }, @@ -445,7 +445,7 @@ "product_code":"mrs", "title":"SET/RESET", "uri":"mrs_01_1449.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"38", "code":"50" }, @@ -454,7 +454,7 @@ "product_code":"mrs", "title":"Operation Concurrent Execution", "uri":"mrs_01_24046.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"24", "code":"51" }, @@ -463,7 +463,7 @@ "product_code":"mrs", "title":"API", "uri":"mrs_01_1450.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"24", "code":"52" }, @@ -472,7 +472,7 @@ "product_code":"mrs", 
"title":"Spatial Indexes", "uri":"mrs_01_1451.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"24", "code":"53" }, @@ -481,7 +481,7 @@ "product_code":"mrs", "title":"CarbonData Troubleshooting", "uri":"mrs_01_1454.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"54" }, @@ -490,7 +490,7 @@ "product_code":"mrs", "title":"Filter Result Is not Consistent with Hive when a Big Double Type Value Is Used in Filter", "uri":"mrs_01_1455.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"54", "code":"55" }, @@ -499,7 +499,7 @@ "product_code":"mrs", "title":"Query Performance Deterioration", "uri":"mrs_01_1456.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"54", "code":"56" }, @@ -508,7 +508,7 @@ "product_code":"mrs", "title":"CarbonData FAQ", "uri":"mrs_01_1457.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"1", "code":"57" }, @@ -517,7 +517,7 @@ "product_code":"mrs", "title":"Why Is Incorrect Output Displayed When I Perform Query with Filter on Decimal Data Type Values?", "uri":"mrs_01_1458.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"58" }, @@ -526,7 +526,7 @@ "product_code":"mrs", "title":"How to Avoid Minor Compaction for Historical Data?", "uri":"mrs_01_1459.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"59" }, @@ -535,7 +535,7 @@ "product_code":"mrs", "title":"How to Change the Default Group Name for CarbonData Data Loading?", "uri":"mrs_01_1460.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"60" }, @@ -544,7 +544,7 @@ "product_code":"mrs", "title":"Why Does INSERT INTO CARBON TABLE Command Fail?", "uri":"mrs_01_1461.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"61" }, @@ -553,7 +553,7 @@ "product_code":"mrs", "title":"Why Is the Data Logged in Bad Records Different from the Original Input Data with Escape Characters?", "uri":"mrs_01_1462.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"62" }, @@ -562,7 +562,7 @@ "product_code":"mrs", "title":"Why Data Load Performance Decreases due to Bad Records?", "uri":"mrs_01_1463.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"63" }, @@ -571,7 +571,7 @@ "product_code":"mrs", "title":"Why INSERT INTO/LOAD DATA Task Distribution Is Incorrect and the Opened Tasks Are Less Than the Available Executors when the Number of Initial Executors Is Zero?", "uri":"mrs_01_1464.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"64" }, @@ -580,7 +580,7 @@ "product_code":"mrs", "title":"Why Does CarbonData Require Additional Executors Even Though the Parallelism Is Greater Than the Number of Blocks to Be Processed?", "uri":"mrs_01_1465.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"65" }, @@ -589,7 +589,7 @@ "product_code":"mrs", "title":"Why Data loading Fails During off heap?", "uri":"mrs_01_1466.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"66" }, @@ -598,7 +598,7 @@ "product_code":"mrs", "title":"Why Do I Fail to Create a Hive Table?", "uri":"mrs_01_1467.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"67" }, @@ -607,7 +607,7 @@ "product_code":"mrs", "title":"Why CarbonData tables created in V100R002C50RC1 not reflecting the 
privileges provided in Hive Privileges for non-owner?", "uri":"mrs_01_1468.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"68" }, @@ -616,7 +616,7 @@ "product_code":"mrs", "title":"How Do I Logically Split Data Across Different Namespaces?", "uri":"mrs_01_1469.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"69" }, @@ -625,7 +625,7 @@ "product_code":"mrs", "title":"Why Missing Privileges Exception is Reported When I Perform Drop Operation on Databases?", "uri":"mrs_01_1470.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"70" }, @@ -634,7 +634,7 @@ "product_code":"mrs", "title":"Why the UPDATE Command Cannot Be Executed in Spark Shell?", "uri":"mrs_01_1471.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"71" }, @@ -643,7 +643,7 @@ "product_code":"mrs", "title":"How Do I Configure Unsafe Memory in CarbonData?", "uri":"mrs_01_1472.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"72" }, @@ -652,7 +652,7 @@ "product_code":"mrs", "title":"Why Exception Occurs in CarbonData When Disk Space Quota is Set for Storage Directory in HDFS?", "uri":"mrs_01_1473.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"73" }, @@ -661,7514 +661,8558 @@ "product_code":"mrs", "title":"Why Does Data Query or Loading Fail and \"org.apache.carbondata.core.memory.MemoryException: Not enough memory\" Is Displayed?", "uri":"mrs_01_1474.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"57", "code":"74" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", - "title":"Using ClickHouse", - "uri":"mrs_01_2344.html", - "doc_type":"cmpntguide-lts", + "title":"Using CDL", + "uri":"mrs_01_24123.html", + "doc_type":"cmpntguide", "p_code":"", "code":"75" }, + { + "desc":"CDL is a simple and efficient real-time data integration service. It captures data change events from various OLTP databases and pushes them to Kafka. The Sink Connector ", + "product_code":"mrs", + "title":"CDL Usage Instructions", + "uri":"mrs_01_24124.html", + "doc_type":"cmpntguide", + "p_code":"75", + "code":"76" + }, + { + "desc":"CDL supports data synchronization or comparison tasks in multiple scenarios. This section describes how to import data from PgSQL to Kafka on the CDLService WebUI of a cl", + "product_code":"", + "title":"Using CDL from Scratch", + "uri":"mrs_01_24232.html", + "doc_type":"", + "p_code":"75", + "code":"77" + }, + { + "desc":"Before using the CDL service, a cluster administrator needs to create a user and grant operation permissions to the user to meet service requirements.CDL users are classi", + "product_code":"", + "title":"Creating a CDL User", + "uri":"mrs_01_24234.html", + "doc_type":"", + "p_code":"75", + "code":"78" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Preparing for Creating a CDL Job", + "uri":"mrs_01_24235.html", + "doc_type":"cmpntguide", + "p_code":"75", + "code":"79" + }, + { + "desc":"After CDL is installed in an MRS cluster, you can manage data connections and visualized jobs using the CDL web UI.This section describes how to access the CDL web UI in ", + "product_code":"mrs", + "title":"Logging In to the CDLService WebUI", + "uri":"mrs_01_24236.html", + "doc_type":"cmpntguide", + "p_code":"79", + "code":"80" + }, + { + "desc":"CDL is a simple and efficient real-time data integration service. It captures events from various OLTP databases and pushes them to Kafka. When creating a database connec", + "product_code":"", + "title":"Uploading a Driver File", + "uri":"mrs_01_24237.html", + "doc_type":"", + "p_code":"79", + "code":"81" + }, + { + "desc":"Create a database link on the CDLService web UI.You have obtained the driver JAR package of the data to be connected.A user with the CDL management permission has been cr", + "product_code":"", + "title":"Creating a Database Link", + "uri":"mrs_01_24238.html", + "doc_type":"", + "p_code":"79", + "code":"82" + }, + { + "desc":"To capture data to or from Hudi, create and manage Hudi environment variables by performing the operations in this section.A user with the CDL management permission has b", + "product_code":"", + "title":"Managing ENV", + "uri":"mrs_01_24255.html", + "doc_type":"", + "p_code":"79", + "code":"83" + }, + { + "desc":"The heartbeat and data consistency check function is used to collect full-link information about CDL synchronization tasks, including the time required for sending data f", + "product_code":"", + "title":"Configuring Heartbeat and Data Consistency Check for a Synchronization Task", + "uri":"mrs_01_24811.html", + "doc_type":"", + "p_code":"79", + "code":"84" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"Creating a CDL Job", + "uri":"mrs_01_24774.html", + "doc_type":"", + "p_code":"75", + "code":"85" + }, + { + "desc":"The CDLService web UI provides a visualized page for users to quickly create CDL jobs and import real-time data into the data lake.A user with the CDL management permissi", + "product_code":"", + "title":"Creating a CDL Data Synchronization Job", + "uri":"mrs_01_24239.html", + "doc_type":"", + "p_code":"85", + "code":"86" + }, + { + "desc":"Data comparison checks the consistency between data in the source database and that in the target Hive. If the data is inconsistent, CDL can attempt to repair the inconsi", + "product_code":"", + "title":"Creating a CDL Data Comparison Job", + "uri":"mrs_01_24775.html", + "doc_type":"", + "p_code":"85", + "code":"87" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"Common CDL Jobs", + "uri":"mrs_01_24240.html", + "doc_type":"", + "p_code":"85", + "code":"88" + }, + { + "desc":"This section describes how to import data from PgSQL to Kafka by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Kafka services ", + "product_code":"", + "title":"Synchronizing Data from PgSQL to Kafka", + "uri":"mrs_01_24254.html", + "doc_type":"", + "p_code":"88", + "code":"89" + }, + { + "desc":"This section describes how to import data from MySQL to Hudi by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi services ha", + "product_code":"", + "title":"Synchronizing Data from MySQL to Hudi", + "uri":"mrs_01_24751.html", + "doc_type":"", + "p_code":"88", + "code":"90" + }, + { + "desc":"This section describes how to import data from PgSQL to Hudi by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi services ha", + "product_code":"", + "title":"Synchronizing Data from PgSQL to Hudi", + "uri":"mrs_01_24752.html", + "doc_type":"", + "p_code":"88", + "code":"91" + }, + { + "desc":"This section describes how to import data from ThirdKafka to Hudi by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi servic", + "product_code":"", + "title":"Synchronizing Data from ThirdKafka to Hudi", + "uri":"mrs_01_24763.html", + "doc_type":"", + "p_code":"88", + "code":"92" + }, + { + "desc":"This section describes how to import data from Hudi to DWS by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL and Hudi services have", + "product_code":"", + "title":"Synchronizing Data from Hudi to DWS", + "uri":"mrs_01_24753.html", + "doc_type":"", + "p_code":"88", + "code":"93" + }, + { + "desc":"This section describes how to import data from Hudi to ClickHouse by using the CDLService web UI of a cluster with Kerberos authentication enabled.The CDL, Hudi, and Clic", + "product_code":"", + "title":"Synchronizing Data from Hudi to ClickHouse", + "uri":"mrs_01_24754.html", + "doc_type":"", + "p_code":"88", + "code":"94" + }, + { + "desc":"Log path: The default log storage path of CDL is /var/log/Bigdata/cdl/Role name abbreviation.CDLService: /var/log/Bigdata/cdl/service (run logs) and /var/log/Bigdata/audi", + "product_code":"mrs", + "title":"CDL Log Overview", + "uri":"mrs_01_24129.html", + "doc_type":"cmpntguide", + "p_code":"75", + "code":"95" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"CDL FAQs", + "uri":"mrs_01_24288.html", + "doc_type":"cmpntguide", + "p_code":"75", + "code":"96" + }, + { + "desc":"After the CDL job for capturing data to Hudi is executed, related data exists in Kafka, but no record exists in Spark RDD, no related data exists in Hudi, and the error m", + "product_code":"", + "title":"Hudi Does Not Receive Data After a CDL Job Is Executed", + "uri":"mrs_01_24793.html", + "doc_type":"", + "p_code":"96", + "code":"97" + }, + { + "desc":"After an CDL job runs for a period of time, the YARN job fails and the status code 104 or 143 is returned.A large amount of data is captured to Hudi. As a result, the mem", + "product_code":"", + "title":"Error 104 or 143 Is Reported After a CDL Job Runs for a Period of Time", + "uri":"mrs_01_24794.html", + "doc_type":"", + "p_code":"96", + "code":"98" + }, + { + "desc":"The error message \"Record key is empty\" is displayed when the job of capturing data from PgSQL to Hudi is started.The primary key parameter table.primarykey.mapping of th", + "product_code":"", + "title":"Error Is Reported When the Job of Capturing Data From PgSQL to Hudi Is Started", + "uri":"mrs_01_24795.html", + "doc_type":"", + "p_code":"96", + "code":"99" + }, + { + "desc":"The error message \"parameter exception with code: 403\" is displayed when a CDL job is stopped on the CDLService web UI.The current user does not have the permission to st", + "product_code":"", + "title":"Error 403 Is Reported When a CDL Job Is Stopped", + "uri":"mrs_01_24796.html", + "doc_type":"", + "p_code":"96", + "code":"100" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Using ClickHouse", + "uri":"mrs_01_2344.html", + "doc_type":"usermanual", + "p_code":"", + "code":"101" + }, { "desc":"ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and provides good query performance. The aggregation analysis and ", "product_code":"mrs", "title":"Using ClickHouse from Scratch", "uri":"mrs_01_2345.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"76" + "doc_type":"usermanual", + "p_code":"101", + "code":"102" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.During data migration, one-click balancing, decommissioning and capacity reduction, ClickHouse allows you to set the only_", + "product_code":"", + "title":"Enabling the Read-Only Mode of the ClickHouse Table", + "uri":"mrs_01_24451.html", + "doc_type":"", + "p_code":"101", + "code":"103" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common ClickHouse SQL Syntax", "uri":"mrs_01_24199.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"77" + "doc_type":"cmpntguide", + "p_code":"101", + "code":"104" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for creating a ClickHouse database.CREATE DATABASE [IF NOT EXISTS] Database_name [ON CLUSTERClickHo", "product_code":"mrs", "title":"CREATE DATABASE: Creating a Database", "uri":"mrs_01_24200.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"78" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"105" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for creating a ClickHouse table.Method 1: Creating a table named table_name in the specified databa", "product_code":"mrs", "title":"CREATE TABLE: Creating a Table", "uri":"mrs_01_24201.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"79" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"106" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for inserting data to a table in ClickHouse.Method 1: Inserting data in standard formatINSERT INTO ", "product_code":"mrs", "title":"INSERT INTO: Inserting Data into a Table", "uri":"mrs_01_24202.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"80" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"107" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for querying table data in ClickHouse.SELECT [DISTINCT] expr_list[FROM[database_name.]table| (subqu", "product_code":"mrs", "title":"SELECT: Querying Table Data", "uri":"mrs_01_24203.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"81" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"108" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for modifying a table structure in ClickHouse.ALTER TABLE [database_name].name[ON CLUSTER cluster] ", "product_code":"mrs", "title":"ALTER TABLE: Modifying a Table Structure", "uri":"mrs_01_24204.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"82" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"109" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for querying a table structure in ClickHouse.DESC|DESCRIBETABLE[database_name.]table[INTOOUTFILE fi", "product_code":"mrs", "title":"DESC: Querying a Table Structure", "uri":"mrs_01_24205.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"83" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"110" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for deleting a ClickHouse table.DROP[TEMPORARY] TABLE[IF EXISTS] [database_name.]name[ON CLUSTER cl", "product_code":"mrs", "title":"DROP: Deleting a Table", "uri":"mrs_01_24208.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"84" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"111" }, { "desc":"This section describes the basic syntax and usage of the SQL statement for displaying information about databases and tables in ClickHouse.show databasesshow tables", "product_code":"mrs", "title":"SHOW: Displaying Information About Databases and Tables", "uri":"mrs_01_24207.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"85" - }, - { - "desc":"This section 
describes the basic syntax and usage of the SQL statement for importing and exporting file data in ClickHouse.Importing data in CSV formatclickhouse client -", - "product_code":"mrs", - "title":"Importing and Exporting File Data", - "uri":"mrs_01_24206.html", - "doc_type":"cmpntguide-lts", - "p_code":"77", - "code":"86" + "doc_type":"cmpntguide", + "p_code":"104", + "code":"112" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"User Management and Authentication", "uri":"mrs_01_24251.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"87" + "doc_type":"cmpntguide", + "p_code":"101", + "code":"113" }, { "desc":"ClickHouse user permission management enables unified management of users, roles, and permissions on each ClickHouse instance in the cluster. You can use the permission m", "product_code":"mrs", "title":"ClickHouse User and Permission Management", "uri":"mrs_01_24057.html", - "doc_type":"cmpntguide-lts", - "p_code":"87", - "code":"88" + "doc_type":"usermanual", + "p_code":"113", + "code":"114" }, { "desc":"After a ClickHouse cluster is created, you can use the ClickHouse client to connect to the ClickHouse server. The default username is default.This section describes how t", "product_code":"mrs", - "title":"Setting the ClickHouse Username and Password", + "title":"Configuring the Password of the Default Account of a ClickHouse Cluster(for MRS 3.1.2)", "uri":"mrs_01_2395.html", - "doc_type":"cmpntguide-lts", - "p_code":"87", - "code":"89" + "doc_type":"cmpntguide", + "p_code":"113", + "code":"115" + }, + { + "desc":"After a ClickHouse cluster is created, you can use the ClickHouse client to connect to the ClickHouse server.Configure the passwords of the default accounts default and c", + "product_code":"", + "title":"Configuring the Password of the Default Account of a ClickHouse Cluster", + "uri":"mrs_01_24575.html", + "doc_type":"", + "p_code":"113", + "code":"116" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"ClickHouse Multi-Tenancy", + "uri":"mrs_01_24784.html", + "doc_type":"", + "p_code":"101", + "code":"117" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.The ClickHouse multi-tenancy feature enables you to manage cluster resources through the user > tenant role > resource pro", + "product_code":"", + "title":"ClickHouse Multi-Tenancy Overview", + "uri":"mrs_01_24790.html", + "doc_type":"", + "p_code":"117", + "code":"118" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.ClickHouse tenants support CPU priorities. This feature depends on CAP_SYS_NICE of the OS and takes effect only after bein", + "product_code":"", + "title":"Enabling the CPU Priority Feature", + "uri":"mrs_01_24789.html", + "doc_type":"", + "p_code":"117", + "code":"119" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.On FusionInsight Manager, cluster administrators can create a ClickHouse tenant and associate it with a logical cluster. 
A", + "product_code":"", + "title":"Managing ClickHouse Tenants", + "uri":"mrs_01_24791.html", + "doc_type":"", + "p_code":"117", + "code":"120" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.Modify the maximum memory allowed for ClickHouse on a ClickHouseServer node to ensure the normal use of other service inst", + "product_code":"", + "title":"Modifying the Memory Limit of ClickHouse on a ClickHouseServer Node", + "uri":"mrs_01_24786.html", + "doc_type":"", + "p_code":"117", + "code":"121" }, { "desc":"Table engines play a key role in ClickHouse to determine:Where to write and read dataSupported query modesWhether concurrent data access is supportedWhether indexes can b", "product_code":"mrs", "title":"ClickHouse Table Engine Overview", "uri":"mrs_01_24105.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"90" + "doc_type":"usermanual", + "p_code":"101", + "code":"122" }, { "desc":"ClickHouse implements the replicated table mechanism based on the ReplicatedMergeTree engine and ZooKeeper. When creating a table, you can specify an engine to determine ", "product_code":"mrs", "title":"Creating a ClickHouse Table", "uri":"mrs_01_2398.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"91" + "doc_type":"usermanual", + "p_code":"101", + "code":"123" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Migrating ClickHouse Data", + "uri":"mrs_01_24250.html", + "doc_type":"cmpntguide", + "p_code":"101", + "code":"124" + }, + { + "desc":"Use the ClickHouse client to import and export data.Importing data in CSV formatclickhouse client --hostHost name or IP address of the ClickHouse instance--databaseDataba", + "product_code":"mrs", + "title":"Using ClickHouse to Import and Export Data", + "uri":"mrs_01_24206.html", + "doc_type":"cmpntguide", + "p_code":"124", + "code":"125" + }, + { + "desc":"This section describes how to create a Kafka table to automatically synchronize Kafka data to the ClickHouse cluster.You have created a Kafka cluster. The Kafka client ha", + "product_code":"", + "title":"Synchronizing Kafka Data to ClickHouse", + "uri":"mrs_01_24377.html", + "doc_type":"", + "p_code":"124", + "code":"126" }, { "desc":"The ClickHouse data migration tool can migrate some partitions of one or more partitioned MergeTree tables on several ClickHouseServer nodes to the same tables on other C", "product_code":"mrs", "title":"Using the ClickHouse Data Migration Tool", "uri":"mrs_01_24053.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"92" + "doc_type":"usermanual", + "p_code":"124", + "code":"127" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.Scenario 1: As the number of MRS ClickHouse services increases, the storage and compute resources of clusters cannot meet ", + "product_code":"", + "title":"Using the Migration Tool to Quickly Migrate ClickHouse Cluster Data", + "uri":"mrs_01_24508.html", + "doc_type":"", + "p_code":"124", + "code":"128" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Monitoring of Slow ClickHouse Query Statements and Replication Table Data Synchronization", "uri":"mrs_01_24229.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"93" + "doc_type":"cmpntguide", + "p_code":"101", + "code":"129" }, { "desc":"The SQL statement query in ClickHouse is slow because the conditions such as partitions, where conditions, and indexes of SQL statements are set improperly. As a result, ", "product_code":"mrs", "title":"Slow Query Statement Monitoring", "uri":"mrs_01_24230.html", - "doc_type":"cmpntguide-lts", - "p_code":"93", - "code":"94" + "doc_type":"cmpntguide", + "p_code":"129", + "code":"130" }, { "desc":"MRS monitors the synchronization between multiple copies of data in the same shard of a Replicated*MergeTree table.Currently, you can monitor and query only Replicated*Me", "product_code":"mrs", "title":"Replication Table Data Synchronization Monitoring", "uri":"mrs_01_24231.html", - "doc_type":"cmpntguide-lts", - "p_code":"93", - "code":"95" + "doc_type":"cmpntguide", + "p_code":"129", + "code":"131" }, { "desc":"Materialized views (MVs) are used in ClickHouse to save the precomputed result of time-consuming operations. When querying data, you can query the materialized views rath", - "product_code":"mrs", + "product_code":"", "title":"Adaptive MV Usage in ClickHouse", "uri":"mrs_01_24287.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"96" + "doc_type":"", + "p_code":"101", + "code":"132" }, { "desc":"Log path: The default storage path of ClickHouse log files is as follows: ${BIGDATA_LOG_HOME}/clickhouseLog archive rule: The automatic ClickHouse log compression functio", "product_code":"mrs", "title":"ClickHouse Log Overview", "uri":"mrs_01_2399.html", - "doc_type":"cmpntguide-lts", - "p_code":"75", - "code":"97" + "doc_type":"usermanual", + "p_code":"101", + "code":"133" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"ClickHouse Performance Tuning", + "uri":"mrs_01_24848.html", + "doc_type":"", + "p_code":"101", + "code":"134" + }, + { + "desc":"Log in to the ClickHouse client and check whether abnormal merge exists.select database, table, elapsed, progress, merge_type from system.merges;select database, table, e", + "product_code":"", + "title":"Solution to the \"Too many parts\" Error in Data Tables", + "uri":"mrs_01_24849.html", + "doc_type":"", + "p_code":"134", + "code":"135" + }, + { + "desc":"To accelerate background tasks, adjust the ZooKeeper service configuration first. Otherwise, the ClickHouse service and background tasks will be abnormal due to insuffici", + "product_code":"", + "title":"Accelerating Merge Operations", + "uri":"mrs_01_24853.html", + "doc_type":"", + "p_code":"134", + "code":"136" + }, + { + "desc":"When TTL is triggered in ClickHouse, a large amount of CPU and memory are consumed.Log in to FusionInsight Manager and choose Cluster > Services > ClickHouse. 
Click Confi", + "product_code":"", + "title":"Accelerating TTL Operations", + "uri":"mrs_01_24855.html", + "doc_type":"", + "p_code":"134", + "code":"137" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"ClickHouse FAQ", + "uri":"mrs_01_24777.html", + "doc_type":"", + "p_code":"101", + "code":"138" + }, + { + "desc":"How do I do if the disk status displayed in the System.disks table is fault or abnormal?This problem is caused by I/O errors on the disk. To rectify the fault, perform th", + "product_code":"", + "title":"How Do I Do If the Disk Status Displayed in the System.disks Table Is fault or abnormal?", + "uri":"mrs_01_24778.html", + "doc_type":"", + "p_code":"138", + "code":"139" + }, + { + "desc":"How do I migrate Hive/HDFS data to ClickHouse?You can export data from Hive as CSV files and import the CSV files to ClickHouse.Export data from Hive as CSV files.hive -e", + "product_code":"", + "title":"How Do I Migrate Data from Hive/HDFS to ClickHouse?", + "uri":"mrs_01_24831.html", + "doc_type":"", + "p_code":"138", + "code":"140" + }, + { + "desc":"An error is reported in logs when the auxiliary ZooKeeper or replica data is used to synchronize table data.The versions of replication table replicas are inconsistent, c", + "product_code":"", + "title":"An Error Is Reported in Logs When the Auxiliary ZooKeeper or Replica Data Is Used to Synchronize Table Data", + "uri":"mrs_01_24837.html", + "doc_type":"", + "p_code":"138", + "code":"141" + }, + { + "desc":"su - ommsource{Client installation directory}/bigdata_envkinitComponent user (You do not need to run the kinit command for normal clusters.)clickhouse client --hostIP ad", + "product_code":"", + "title":"How Do I Grant the Select Permission at the Database Level to ClickHouse Users?", + "uri":"mrs_01_24846.html", + "doc_type":"", + "p_code":"138", + "code":"142" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using DBService", "uri":"mrs_01_2356.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"98" + "code":"143" }, { "desc":"This section describes how to manually configure SSL for the HA module of DBService in the cluster where DBService is installed.After this operation is performed, if you ", "product_code":"mrs", "title":"Configuring SSL for the HA Module", "uri":"mrs_01_2346.html", - "doc_type":"cmpntguide-lts", - "p_code":"98", - "code":"99" + "doc_type":"usermanual", + "p_code":"143", + "code":"144" }, { "desc":"This section describes how to restore SSL for the HA module of DBService in the cluster where DBService is installed.SSL has been enabled for the HA module of DBService.C", "product_code":"mrs", "title":"Restoring SSL for the HA Module", "uri":"mrs_01_2347.html", - "doc_type":"cmpntguide-lts", - "p_code":"98", - "code":"100" + "doc_type":"usermanual", + "p_code":"143", + "code":"145" }, { "desc":"The default timeout interval of DBService backup tasks is 2 hours. 
When the data volume in DBService is too large, the backup task may fail to be executed because the tim", - "product_code":"mrs", + "product_code":"", "title":"Configuring the Timeout Interval of DBService Backup Tasks", "uri":"mrs_01_24283.html", - "doc_type":"cmpntguide-lts", - "p_code":"98", - "code":"101" + "doc_type":"", + "p_code":"143", + "code":"146" }, { "desc":"Log path: The default storage path of DBService log files is /var/log/Bigdata/dbservice.GaussDB: /var/log/Bigdata/dbservice/DB (GaussDB run log directory), /var/log/Bigda", "product_code":"mrs", "title":"DBService Log Overview", "uri":"mrs_01_0789.html", - "doc_type":"cmpntguide-lts", - "p_code":"98", - "code":"102" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Using Flink", - "uri":"mrs_01_0591.html", - "doc_type":"cmpntguide-lts", - "p_code":"", - "code":"103" - }, - { - "desc":"This section describes how to use Flink to run wordcount jobs.Flink has been installed in the MRS cluster and all components in the cluster are running properly.The clust", - "product_code":"mrs", - "title":"Using Flink from Scratch", - "uri":"mrs_01_0473.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"104" - }, - { - "desc":"You can view Flink job information on the Yarn web UI.The Flink service has been installed in a cluster.Log in to FusionInsight Manager. For details, see Accessing Fusion", - "product_code":"mrs", - "title":"Viewing Flink Job Information", - "uri":"mrs_01_0784.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"105" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Flink Configuration Management", - "uri":"mrs_01_0592.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"106" - }, - { - "desc":"All parameters of Flink must be set on a client. The path of a configuration file is as follows: Client installation path/Flink/flink/conf/flink-conf.yaml.You are advised", - "product_code":"mrs", - "title":"Configuring Parameter Paths", - "uri":"mrs_01_1565.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"107" - }, - { - "desc":"JobManager and TaskManager are main components of Flink. You can configure the parameters for different security and performance scenarios on the client.Main configuratio", - "product_code":"mrs", - "title":"JobManager & TaskManager", - "uri":"mrs_01_1566.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"108" - }, - { - "desc":"The Blob server on the JobManager node is used to receive JAR files uploaded by users on the client, send JAR files to TaskManager, and transfer log files. Flink provides", - "product_code":"mrs", - "title":"Blob", - "uri":"mrs_01_1567.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"109" - }, - { - "desc":"The Akka actor model is the basis of communications between the Flink client and JobManager, JobManager and TaskManager, as well as TaskManager and TaskManager. 
Flink ena", - "product_code":"mrs", - "title":"Distributed Coordination (via Akka)", - "uri":"mrs_01_1568.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"110" - }, - { - "desc":"When the secure Flink cluster is required, SSL-related configuration items must be set.Configuration items include the SSL switch, certificate, password, and encryption a", - "product_code":"mrs", - "title":"SSL", - "uri":"mrs_01_1569.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"111" - }, - { - "desc":"When Flink runs a job, data transmission and reverse pressure detection between tasks depend on Netty. In certain environments, Netty parameters should be configured.For ", - "product_code":"mrs", - "title":"Network communication (via Netty)", - "uri":"mrs_01_1570.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"112" - }, - { - "desc":"When JobManager is started, the web server in the same process is also started.You can access the web server to obtain information about the current Flink cluster, includ", - "product_code":"mrs", - "title":"JobManager Web Frontend", - "uri":"mrs_01_1571.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"113" - }, - { - "desc":"Result files are created when tasks are running. Flink enables you to configure parameters for file creation.Configuration items include overwriting policy and directory ", - "product_code":"mrs", - "title":"File Systems", - "uri":"mrs_01_1572.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"114" - }, - { - "desc":"Flink enables HA and job exception, as well as job pause and recovery during version upgrade. Flink depends on state backend to store job states and on the restart strate", - "product_code":"mrs", - "title":"State Backend", - "uri":"mrs_01_1573.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"115" - }, - { - "desc":"Flink Kerberos configuration items must be configured in security mode.The configuration items include keytab, principal, and cookie of Kerberos.", - "product_code":"mrs", - "title":"Kerberos-based Security", - "uri":"mrs_01_1574.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"116" - }, - { - "desc":"The Flink HA mode depends on ZooKeeper. Therefore, ZooKeeper-related configuration items must be set.Configuration items include the ZooKeeper address, path, and security", - "product_code":"mrs", - "title":"HA", - "uri":"mrs_01_1575.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"117" - }, - { - "desc":"In scenarios raising special requirements on JVM configuration, users can use configuration items to transfer JVM parameters to the client, JobManager, and TaskManager.Co", - "product_code":"mrs", - "title":"Environment", - "uri":"mrs_01_1576.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"118" - }, - { - "desc":"Flink runs on a Yarn cluster and JobManager runs on ApplicationMaster. Certain configuration parameters of JobManager depend on Yarn. By setting Yarn-related configuratio", - "product_code":"mrs", - "title":"Yarn", - "uri":"mrs_01_1577.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"119" - }, - { - "desc":"The Netty connection is used among multiple jobs to reduce latency. 
In this case, NettySink is used on the server and NettySource is used on the client for data transmiss", - "product_code":"mrs", - "title":"Pipeline", - "uri":"mrs_01_1578.html", - "doc_type":"cmpntguide-lts", - "p_code":"106", - "code":"120" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Security Configuration", - "uri":"mrs_01_0593.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"121" - }, - { - "desc":"All Flink cluster components support authentication.The Kerberos authentication is supported between Flink cluster components and external components, such as Yarn, HDFS,", - "product_code":"mrs", - "title":"Security Features", - "uri":"mrs_01_1579.html", - "doc_type":"cmpntguide-lts", - "p_code":"121", - "code":"122" - }, - { - "desc":"Sample project data of Flink is stored in Kafka. A user with Kafka permission can send data to Kafka and receive data from it.Run Linux command line to create a topic. Be", - "product_code":"mrs", - "title":"Configuring Kafka", - "uri":"mrs_01_1580.html", - "doc_type":"cmpntguide-lts", - "p_code":"121", - "code":"123" - }, - { - "desc":"File configurationnettyconnector.registerserver.topic.storage: (Mandatory) Configures the path (on a third-party server) to information about IP address, port numbers, an", - "product_code":"mrs", - "title":"Configuring Pipeline", - "uri":"mrs_01_1581.html", - "doc_type":"cmpntguide-lts", - "p_code":"121", - "code":"124" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Security Hardening", - "uri":"mrs_01_0594.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"125" - }, - { - "desc":"Flink uses the following three authentication modes:Kerberos authentication: It is used between the Flink Yarn client and Yarn ResourceManager, JobManager and ZooKeeper, ", - "product_code":"mrs", - "title":"Authentication and Encryption", - "uri":"mrs_01_1583.html", - "doc_type":"cmpntguide-lts", - "p_code":"125", - "code":"126" - }, - { - "desc":"In HA mode of Flink, ZooKeeper can be used to manage clusters and discover services. Zookeeper supports SASL ACL control. Only users who have passed the SASL (Kerberos) a", - "product_code":"mrs", - "title":"ACL Control", - "uri":"mrs_01_1584.html", - "doc_type":"cmpntguide-lts", - "p_code":"125", - "code":"127" - }, - { - "desc":"Note: The same coding mode is used on the web service client and server to prevent garbled characters and to enable input verification.Security hardening: apply UTF-8 to ", - "product_code":"mrs", - "title":"Web Security", - "uri":"mrs_01_1585.html", - "doc_type":"cmpntguide-lts", - "p_code":"125", - "code":"128" - }, - { - "desc":"All security functions of Flink are provided by the open source community or self-developed. 
Security features that need to be configured by users, such as authentication", - "product_code":"mrs", - "title":"Security Statement", - "uri":"mrs_01_1586.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"129" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Using the Flink Web UI", - "uri":"mrs_01_24014.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"130" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Overview", - "uri":"mrs_01_24015.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"131" - }, - { - "desc":"Flink web UI provides a web-based visual development platform. You only need to compile SQL statements to develop jobs, slashing the job development threshold. In additio", - "product_code":"mrs", - "title":"Introduction to Flink Web UI", - "uri":"mrs_01_24016.html", - "doc_type":"cmpntguide-lts", - "p_code":"131", - "code":"132" - }, - { - "desc":"The Flink web UI application process is shown as follows:", - "product_code":"mrs", - "title":"Flink Web UI Application Process", - "uri":"mrs_01_24017.html", - "doc_type":"cmpntguide-lts", - "p_code":"131", - "code":"133" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"FlinkServer Permissions Management", - "uri":"mrs_01_24047.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"134" - }, - { - "desc":"User admin of Manager does not have the FlinkServer service operation permission. To perform FlinkServer service operations, you need to grant related permission to the u", - "product_code":"mrs", - "title":"Overview", - "uri":"mrs_01_24048.html", - "doc_type":"cmpntguide-lts", - "p_code":"134", - "code":"135" - }, - { - "desc":"This section describes how to create and configure a FlinkServer role on Manager as the system administrator. 
A FlinkServer role can be configured with FlinkServer admini", - "product_code":"mrs", - "title":"Authentication Based on Users and Roles", - "uri":"mrs_01_24049.html", - "doc_type":"cmpntguide-lts", - "p_code":"134", - "code":"136" - }, - { - "desc":"After Flink is installed in an MRS cluster, you can connect to clusters and data as well as manage stream tables and jobs using the Flink web UI.This section describes ho", - "product_code":"mrs", - "title":"Accessing the Flink Web UI", - "uri":"mrs_01_24019.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"137" - }, - { - "desc":"Applications can be used to isolate different upper-layer services.After the application is created, you can switch to the application to be operated in the upper left co", - "product_code":"mrs", - "title":"Creating an Application on the Flink Web UI", - "uri":"mrs_01_24020.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"138" - }, - { - "desc":"Different clusters can be accessed by configuring the cluster connection.To obtain the cluster client configuration files, perform the following steps:Log in to FusionIns", - "product_code":"mrs", - "title":"Creating a Cluster Connection on the Flink Web UI", - "uri":"mrs_01_24021.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"139" - }, - { - "desc":"Different data services can be accessed through data connections. Currently, FlinkServer supports HDFS, Kafka data connections.", - "product_code":"mrs", - "title":"Creating a Data Connection on the Flink Web UI", - "uri":"mrs_01_24022.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"140" - }, - { - "desc":"Data tables can be used to define basic attributes and parameters of source tables, dimension tables, and output tables.", - "product_code":"mrs", - "title":"Managing Tables on the Flink Web UI", - "uri":"mrs_01_24023.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"141" - }, - { - "desc":"Define Flink jobs, including Flink SQL and Flink JAR jobs.Creating a Flink SQL jobDevelop the job on the job development page.Click Check Semantic to check the input cont", - "product_code":"mrs", - "title":"Managing Jobs on the Flink Web UI", - "uri":"mrs_01_24024.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"142" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"Managing UDFs on the Flink Web UI", - "uri":"mrs_01_24223.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", - "code":"143" - }, - { - "desc":"You can customize functions to extend SQL statements to meet personalized requirements. These functions are called user-defined functions (UDFs). You can upload and manag", - "product_code":"mrs", - "title":"Managing UDFs on the Flink Web UI", - "uri":"mrs_01_24211.html", - "doc_type":"cmpntguide-lts", - "p_code":"143", - "code":"144" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"UDF Java and SQL Examples", - "uri":"mrs_01_24224.html", - "doc_type":"cmpntguide-lts", - "p_code":"143", - "code":"145" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"UDAF Java and SQL Examples", - "uri":"mrs_01_24225.html", - "doc_type":"cmpntguide-lts", - "p_code":"143", - "code":"146" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"UDTF Java and SQL Examples", - "uri":"mrs_01_24227.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"143", "code":"147" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", - "title":"Interconnecting FlinkServer with External Components", - "uri":"mrs_01_24226.html", - "doc_type":"cmpntguide-lts", - "p_code":"130", + "title":"Using Flink", + "uri":"mrs_01_0591.html", + "doc_type":"usermanual", + "p_code":"", "code":"148" }, { - "desc":"Flink interconnects with the ClickHouseBalancer instance of ClickHouse to read and write data, preventing ClickHouse traffic distribution problems.Services such as ClickH", + "desc":"This section describes how to use Flink to run wordcount jobs.Flink has been installed in the MRS cluster and all components in the cluster are running properly.The clust", + "product_code":"mrs", + "title":"Using Flink from Scratch", + "uri":"mrs_01_0473.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"149" + }, + { + "desc":"You can view Flink job information on the Yarn web UI.The Flink service has been installed in a cluster.Log in to FusionInsight Manager. For details, see Accessing Fusion", + "product_code":"mrs", + "title":"Viewing Flink Job Information", + "uri":"mrs_01_0784.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"150" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Flink Configuration Management", + "uri":"mrs_01_0592.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"151" + }, + { + "desc":"All parameters of Flink must be set on a client. The path of a configuration file is as follows: Client installation path/Flink/flink/conf/flink-conf.yaml.You are advised", + "product_code":"mrs", + "title":"Configuring Parameter Paths", + "uri":"mrs_01_1565.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"152" + }, + { + "desc":"JobManager and TaskManager are main components of Flink. 
You can configure the parameters for different security and performance scenarios on the client.Main configuratio", + "product_code":"mrs", + "title":"JobManager & TaskManager", + "uri":"mrs_01_1566.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"153" + }, + { + "desc":"The Blob server on the JobManager node is used to receive JAR files uploaded by users on the client, send JAR files to TaskManager, and transfer log files. Flink provides", + "product_code":"mrs", + "title":"Blob", + "uri":"mrs_01_1567.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"154" + }, + { + "desc":"The Akka actor model is the basis of communications between the Flink client and JobManager, JobManager and TaskManager, as well as TaskManager and TaskManager. Flink ena", + "product_code":"mrs", + "title":"Distributed Coordination (via Akka)", + "uri":"mrs_01_1568.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"155" + }, + { + "desc":"When the secure Flink cluster is required, SSL-related configuration items must be set.Configuration items include the SSL switch, certificate, password, and encryption a", + "product_code":"mrs", + "title":"SSL", + "uri":"mrs_01_1569.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"156" + }, + { + "desc":"When Flink runs a job, data transmission and reverse pressure detection between tasks depend on Netty. In certain environments, Netty parameters should be configured.For ", + "product_code":"mrs", + "title":"Network communication (via Netty)", + "uri":"mrs_01_1570.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"157" + }, + { + "desc":"When JobManager is started, the web server in the same process is also started.You can access the web server to obtain information about the current Flink cluster, includ", + "product_code":"mrs", + "title":"JobManager Web Frontend", + "uri":"mrs_01_1571.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"158" + }, + { + "desc":"Result files are created when tasks are running. Flink enables you to configure parameters for file creation.Configuration items include overwriting policy and directory ", + "product_code":"mrs", + "title":"File Systems", + "uri":"mrs_01_1572.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"159" + }, + { + "desc":"Flink enables HA and job exception, as well as job pause and recovery during version upgrade. Flink depends on state backend to store job states and on the restart strate", + "product_code":"mrs", + "title":"State Backend", + "uri":"mrs_01_1573.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"160" + }, + { + "desc":"Flink Kerberos configuration items must be configured in security mode.The configuration items include keytab, principal, and cookie of Kerberos.", + "product_code":"mrs", + "title":"Kerberos-based Security", + "uri":"mrs_01_1574.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"161" + }, + { + "desc":"The Flink HA mode depends on ZooKeeper. 
Therefore, ZooKeeper-related configuration items must be set.Configuration items include the ZooKeeper address, path, and security", + "product_code":"mrs", + "title":"HA", + "uri":"mrs_01_1575.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"162" + }, + { + "desc":"In scenarios raising special requirements on JVM configuration, users can use configuration items to transfer JVM parameters to the client, JobManager, and TaskManager.Co", + "product_code":"mrs", + "title":"Environment", + "uri":"mrs_01_1576.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"163" + }, + { + "desc":"Flink runs on a Yarn cluster and JobManager runs on ApplicationMaster. Certain configuration parameters of JobManager depend on Yarn. By setting Yarn-related configuratio", + "product_code":"mrs", + "title":"Yarn", + "uri":"mrs_01_1577.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"164" + }, + { + "desc":"The Netty connection is used among multiple jobs to reduce latency. In this case, NettySink is used on the server and NettySource is used on the client for data transmiss", + "product_code":"mrs", + "title":"Pipeline", + "uri":"mrs_01_1578.html", + "doc_type":"usermanual", + "p_code":"151", + "code":"165" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Security Configuration", + "uri":"mrs_01_0593.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"166" + }, + { + "desc":"All Flink cluster components support authentication.The Kerberos authentication is supported between Flink cluster components and external components, such as Yarn, HDFS,", + "product_code":"mrs", + "title":"Security Features", + "uri":"mrs_01_1579.html", + "doc_type":"usermanual", + "p_code":"166", + "code":"167" + }, + { + "desc":"Sample project data of Flink is stored in Kafka. A user with Kafka permission can send data to Kafka and receive data from it.Run Linux command line to create a topic. Be", + "product_code":"mrs", + "title":"Configuring Kafka", + "uri":"mrs_01_1580.html", + "doc_type":"usermanual", + "p_code":"166", + "code":"168" + }, + { + "desc":"File configurationnettyconnector.registerserver.topic.storage: (Mandatory) Configures the path (on a third-party server) to information about IP address, port numbers, an", + "product_code":"mrs", + "title":"Configuring Pipeline", + "uri":"mrs_01_1581.html", + "doc_type":"usermanual", + "p_code":"166", + "code":"169" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Security Hardening", + "uri":"mrs_01_0594.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"170" + }, + { + "desc":"Flink uses the following three authentication modes:Kerberos authentication: It is used between the Flink Yarn client and Yarn ResourceManager, JobManager and ZooKeeper, ", + "product_code":"mrs", + "title":"Authentication and Encryption", + "uri":"mrs_01_1583.html", + "doc_type":"usermanual", + "p_code":"170", + "code":"171" + }, + { + "desc":"In HA mode of Flink, ZooKeeper can be used to manage clusters and discover services. 
Zookeeper supports SASL ACL control. Only users who have passed the SASL (Kerberos) a", + "product_code":"mrs", + "title":"ACL Control", + "uri":"mrs_01_1584.html", + "doc_type":"usermanual", + "p_code":"170", + "code":"172" + }, + { + "desc":"Note: The same coding mode is used on the web service client and server to prevent garbled characters and to enable input verification.Security hardening: apply UTF-8 to ", + "product_code":"mrs", + "title":"Web Security", + "uri":"mrs_01_1585.html", + "doc_type":"usermanual", + "p_code":"170", + "code":"173" + }, + { + "desc":"All security functions of Flink are provided by the open source community or self-developed. Security features that need to be configured by users, such as authentication", + "product_code":"mrs", + "title":"Security Statement", + "uri":"mrs_01_1586.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"174" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Using the Flink Web UI", + "uri":"mrs_01_24014.html", + "doc_type":"usermanual", + "p_code":"148", + "code":"175" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Flink Web UI Overview", + "uri":"mrs_01_24015.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"176" + }, + { + "desc":"Flink web UI provides a web-based visual development platform. You only need to compile SQL statements to develop jobs, slashing the job development threshold. In additio", + "product_code":"mrs", + "title":"Introduction to Flink Web UI", + "uri":"mrs_01_24016.html", + "doc_type":"usermanual", + "p_code":"176", + "code":"177" + }, + { + "desc":"The Flink web UI application process is shown as follows:", + "product_code":"mrs", + "title":"Flink Web UI Application Process", + "uri":"mrs_01_24017.html", + "doc_type":"usermanual", + "p_code":"176", + "code":"178" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"FlinkServer Permissions Management", + "uri":"mrs_01_24047.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"179" + }, + { + "desc":"User admin of Manager does not have the FlinkServer service operation permission. To perform FlinkServer service operations, you need to grant related permission to the u", + "product_code":"mrs", + "title":"FlinkServer Permissions Overview", + "uri":"mrs_01_24048.html", + "doc_type":"usermanual", + "p_code":"179", + "code":"180" + }, + { + "desc":"This section describes how to create and configure a FlinkServer role on Manager as the system administrator. 
A FlinkServer role can be configured with FlinkServer admini", + "product_code":"mrs", + "title":"Authentication Based on Users and Roles", + "uri":"mrs_01_24049.html", + "doc_type":"usermanual", + "p_code":"179", + "code":"181" + }, + { + "desc":"After Flink is installed in an MRS cluster, you can connect to clusters and data as well as manage stream tables and jobs using the Flink web UI.This section describes ho", + "product_code":"mrs", + "title":"Accessing the Flink Web UI", + "uri":"mrs_01_24019.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"182" + }, + { + "desc":"Applications can be used to isolate different upper-layer services.After the application is created, you can switch to the application to be operated in the upper left co", + "product_code":"mrs", + "title":"Creating an Application on the Flink Web UI", + "uri":"mrs_01_24020.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"183" + }, + { + "desc":"Different clusters can be accessed by configuring the cluster connection.To obtain the cluster client configuration files, perform the following steps:Log in to FusionIns", + "product_code":"mrs", + "title":"Creating a Cluster Connection on the Flink Web UI", + "uri":"mrs_01_24021.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"184" + }, + { + "desc":"Different data services can be accessed through data connections. Currently, FlinkServer supports HDFS, Kafka data connections.", + "product_code":"mrs", + "title":"Creating a Data Connection on the Flink Web UI", + "uri":"mrs_01_24022.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"185" + }, + { + "desc":"Data tables can be used to define basic attributes and parameters of source tables, dimension tables, and output tables.", + "product_code":"mrs", + "title":"Managing Tables on the Flink Web UI", + "uri":"mrs_01_24023.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"186" + }, + { + "desc":"Define Flink jobs, including Flink SQL and Flink JAR jobs.Creating a Flink SQL jobDevelop the job on the job development page.Click Check Semantic to check the input cont", + "product_code":"mrs", + "title":"Managing Jobs on the Flink Web UI", + "uri":"mrs_01_24024.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"187" + }, + { + "desc":"The FlinkServer web UI enables you only to import and export jobs, UDFs, andstream tables.Jobs, flow tables, and UDFs with the same name cannot be imported to the same cl", + "product_code":"", + "title":"Importing and Exporting Jobs", + "uri":"mrs_01_24481.html", + "doc_type":"", + "p_code":"175", + "code":"188" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Managing UDFs", + "uri":"mrs_01_24223.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"189" + }, + { + "desc":"You can customize functions to extend SQL statements to meet personalized requirements. These functions are called user-defined functions (UDFs). You can upload and manag", + "product_code":"mrs", + "title":"Managing UDFs on the Flink Web UI", + "uri":"mrs_01_24211.html", + "doc_type":"usermanual", + "p_code":"189", + "code":"190" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"UDF Java and SQL Examples", + "uri":"mrs_01_24224.html", + "doc_type":"usermanual", + "p_code":"189", + "code":"191" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"UDAF Java and SQL Examples", + "uri":"mrs_01_24225.html", + "doc_type":"usermanual", + "p_code":"189", + "code":"192" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"UDTF Java and SQL Examples", + "uri":"mrs_01_24227.html", + "doc_type":"usermanual", + "p_code":"189", + "code":"193" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Interconnecting FlinkServer with External Components", + "uri":"mrs_01_24226.html", + "doc_type":"usermanual", + "p_code":"175", + "code":"194" + }, + { + "desc":"Flink interconnects with the ClickHouseBalancer instance of ClickHouse to read and write data, preventing ClickHouse traffic distribution problems.When \"FlinkSQL\" is disp", "product_code":"mrs", "title":"Interconnecting FlinkServer with ClickHouse", "uri":"mrs_01_24148.html", - "doc_type":"cmpntguide-lts", - "p_code":"148", - "code":"149" + "doc_type":"usermanual", + "p_code":"194", + "code":"195" }, { "desc":"FlinkServer can be interconnected with HBase. The details are as follows:It can be interconnected with dimension tables and sink tables.When HBase and Flink are in the sa", "product_code":"mrs", "title":"Interconnecting FlinkServer with HBase", "uri":"mrs_01_24120.html", - "doc_type":"cmpntguide-lts", - "p_code":"148", - "code":"150" + "doc_type":"usermanual", + "p_code":"194", + "code":"196" }, { "desc":"This section describes the data definition language (DDL) of HDFS as a sink table, as well as the WITH parameters and example code for creating a sink table, and provides", "product_code":"mrs", "title":"Interconnecting FlinkServer with HDFS", "uri":"mrs_01_24247.html", - "doc_type":"cmpntguide-lts", - "p_code":"148", - "code":"151" + "doc_type":"cmpntguide", + "p_code":"194", + "code":"197" }, { "desc":"Currently, FlinkServer interconnects with Hive MetaStore. Therefore, the MetaStore function must be enabled for Hive. 
Hive can be used as source, sink, and dimension tabl", "product_code":"mrs", "title":"Interconnecting FlinkServer with Hive", "uri":"mrs_01_24179.html", - "doc_type":"cmpntguide-lts", - "p_code":"148", - "code":"152" + "doc_type":"usermanual", + "p_code":"194", + "code":"198" }, { "desc":"This section describes how to interconnect FlinkServer with Hudi through Flink SQL jobs.The HDFS, Yarn, Flink, and Hudi services have been installed in a cluster.The clie", "product_code":"mrs", "title":"Interconnecting FlinkServer with Hudi", "uri":"mrs_01_24180.html", - "doc_type":"cmpntguide-lts", - "p_code":"148", - "code":"153" + "doc_type":"usermanual", + "p_code":"194", + "code":"199" }, { "desc":"This section describes the data definition language (DDL) of Kafka as a source or sink table, as well as the WITH parameters and example code for creating a table, and pr", "product_code":"mrs", "title":"Interconnecting FlinkServer with Kafka", "uri":"mrs_01_24248.html", - "doc_type":"cmpntguide-lts", - "p_code":"148", - "code":"154" + "doc_type":"cmpntguide", + "p_code":"194", + "code":"200" }, { "desc":"If a Flink task stops unexpectedly, some directories may reside in the ZooKeeper and HDFS services. To delete the residual directories, set ClearUpEnabled to true.A Flink", "product_code":"mrs", "title":"Deleting Residual Information About Flink Tasks", "uri":"mrs_01_24256.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"155" + "doc_type":"usermanual", + "p_code":"148", + "code":"201" }, { "desc":"Log path:Run logs of a Flink job: ${BIGDATA_DATA_HOME}/hadoop/data${i}/nm/containerlogs/application_${appid}/container_{$contid}The logs of executing tasks are stored in ", "product_code":"mrs", "title":"Flink Log Overview", "uri":"mrs_01_0596.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"156" + "doc_type":"usermanual", + "p_code":"148", + "code":"202" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Flink Performance Tuning", "uri":"mrs_01_0597.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"157" + "doc_type":"usermanual", + "p_code":"148", + "code":"203" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Optimization DataStream", "uri":"mrs_01_1587.html", - "doc_type":"cmpntguide-lts", - "p_code":"157", - "code":"158" + "doc_type":"usermanual", + "p_code":"203", + "code":"204" }, { "desc":"The computing of Flink depends on memory. If the memory is insufficient, the performance of Flink will be greatly deteriorated. One solution is to monitor garbage collect", "product_code":"mrs", "title":"Memory Configuration Optimization", "uri":"mrs_01_1588.html", - "doc_type":"cmpntguide-lts", - "p_code":"158", - "code":"159" + "doc_type":"usermanual", + "p_code":"204", + "code":"205" }, { "desc":"The degree of parallelism (DOP) indicates the number of tasks to be executed concurrently. It determines the number of data blocks after the operation. 
Configuring the DO", "product_code":"mrs", "title":"Configuring DOP", "uri":"mrs_01_1589.html", - "doc_type":"cmpntguide-lts", - "p_code":"158", - "code":"160" + "doc_type":"usermanual", + "p_code":"204", + "code":"206" }, { "desc":"In Flink on Yarn mode, there are JobManagers and TaskManagers. JobManagers and TaskManagers schedule and run tasks.Therefore, configuring parameters of JobManagers and Ta", "product_code":"mrs", - "title":"Configuring Process Parameters", + "title":"Configuring Flink Process Parameters", "uri":"mrs_01_1590.html", - "doc_type":"cmpntguide-lts", - "p_code":"158", - "code":"161" + "doc_type":"usermanual", + "p_code":"204", + "code":"207" }, { "desc":"The divide of tasks can be optimized by optimizing the partitioning method. If data skew occurs in a certain task, the whole execution process is delayed. Therefore, when", "product_code":"mrs", "title":"Optimizing the Design of Partitioning Method", "uri":"mrs_01_1591.html", - "doc_type":"cmpntguide-lts", - "p_code":"158", - "code":"162" + "doc_type":"usermanual", + "p_code":"204", + "code":"208" }, { "desc":"The communication of Flink is based on Netty network. The network performance determines the data switching speed and task execution efficiency. Therefore, the performanc", "product_code":"mrs", "title":"Configuring the Netty Network Communication", "uri":"mrs_01_1592.html", - "doc_type":"cmpntguide-lts", - "p_code":"158", - "code":"163" + "doc_type":"usermanual", + "p_code":"204", + "code":"209" }, { "desc":"If data skew occurs (certain data volume is large), the execution time of tasks is inconsistent even if no garbage collection is performed.Redefine keys. Use keys of smal", "product_code":"mrs", "title":"Summarization", "uri":"mrs_01_1593.html", - "doc_type":"cmpntguide-lts", - "p_code":"158", - "code":"164" + "doc_type":"usermanual", + "p_code":"204", + "code":"210" }, { "desc":"Before running the Flink shell commands, perform the following steps:source /opt/client/bigdata_envkinit Service user", "product_code":"mrs", "title":"Common Flink Shell Commands", "uri":"mrs_01_0598.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"165" + "doc_type":"usermanual", + "p_code":"148", + "code":"211" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Reference", "uri":"mrs_01_0620.html", - "doc_type":"cmpntguide-lts", - "p_code":"103", - "code":"166" + "doc_type":"usermanual", + "p_code":"148", + "code":"212" }, { "desc":"Generate the generate_keystore.sh script based on the sample code and save the script to the bin directory on the Flink client.Run the sh generate_keystore.sh c", "product_code":"mrs", "title":"Example of Issuing a Certificate", "uri":"mrs_01_0621.html", - "doc_type":"cmpntguide-lts", - "p_code":"166", - "code":"167" + "doc_type":"usermanual", + "p_code":"212", + "code":"213" + }, + { + "desc":"Flink supports different restart policies to control whether and how to restart a job when a fault occurs. If no restart policy is specified, the cluster uses the default", + "product_code":"", + "title":"Flink Restart Policy", + "uri":"mrs_01_24779.html", + "doc_type":"", + "p_code":"148", + "code":"214" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Flume", "uri":"mrs_01_0390.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"168" + "code":"215" }, { "desc":"You can use Flume to import collected log information to Kafka.A streaming cluster with Kerberos authentication enabled has been created.The Flume client has been install", "product_code":"mrs", "title":"Using Flume from Scratch", "uri":"mrs_01_0397.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"169" + "doc_type":"usermanual", + "p_code":"215", + "code":"216" }, { "desc":"Flume is a distributed, reliable, and highly available system for aggregating massive logs, which can efficiently collect, aggregate, and move massive log data from diffe", "product_code":"mrs", - "title":"Overview", + "title":"Flume Overview", "uri":"mrs_01_0391.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"170" + "doc_type":"usermanual", + "p_code":"215", + "code":"217" }, { "desc":"To use Flume to collect logs, you must install the Flume client on a log host.A cluster with the Flume component has been created.The log host is in the same VPC and subn", "product_code":"mrs", "title":"Installing the Flume Client on Clusters", "uri":"mrs_01_1595.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"171" + "doc_type":"usermanual", + "p_code":"215", + "code":"218" }, { "desc":"You can view logs to locate faults.The Flume client has been installed.ls -lR flume-client-*A log file is shown as follows:In the log file, FlumeClient.log is the run log", "product_code":"mrs", "title":"Viewing Flume Client Logs", "uri":"mrs_01_0393.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"172" + "doc_type":"usermanual", + "p_code":"215", + "code":"219" }, { "desc":"You can stop and start the Flume client or uninstall the Flume client when the Flume data ingestion channel is not required.Stop the Flume client of the Flume role.Assume", "product_code":"mrs", "title":"Stopping or Uninstalling the Flume Client", "uri":"mrs_01_0394.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"173" + "doc_type":"usermanual", + "p_code":"215", + "code":"220" }, { "desc":"You can use the encryption tool provided by the Flume client to encrypt some parameter values in the configuration file.The Flume client has been installed.cd fusioninsig", "product_code":"mrs", "title":"Using the Encryption Tool of the Flume Client", "uri":"mrs_01_0395.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"174" + "doc_type":"usermanual", + "p_code":"215", + "code":"221" }, { "desc":"This configuration guide describes how to configure common Flume services. 
For non-common Source, Channel, and Sink configuration, see the user manual provided by the Flu", "product_code":"mrs", "title":"Flume Service Configuration Guide", "uri":"mrs_01_1057.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"175" + "doc_type":"usermanual", + "p_code":"215", + "code":"222" }, { "desc":"Some parameters can be configured on Manager.This section describes how to configure the sources, channels, and sinks of Flume, and modify the configuration items of each", "product_code":"mrs", "title":"Flume Configuration Parameter Description", "uri":"mrs_01_0396.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"176" + "doc_type":"usermanual", + "p_code":"215", + "code":"223" }, { "desc":"This section describes how to use environment variables in the properties.properties configuration file.The Flume service is running properly and the Flume client has bee", "product_code":"mrs", "title":"Using Environment Variables in the properties.properties File", "uri":"mrs_01_1058.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"177" + "doc_type":"usermanual", + "p_code":"215", + "code":"224" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Non-Encrypted Transmission", "uri":"mrs_01_1059.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"178" + "doc_type":"usermanual", + "p_code":"215", + "code":"225" }, { "desc":"This section describes how to configure Flume server and client parameters after the cluster and the Flume service are installed to ensure proper running of the service.B", "product_code":"mrs", "title":"Configuring Non-encrypted Transmission", "uri":"mrs_01_1060.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"179" + "doc_type":"usermanual", + "p_code":"225", + "code":"226" }, { "desc":"This section describes how to use Flume client to collect static logs from a local host and save them to the topic list (test1) of Kafka.By default, the cluster network e", "product_code":"mrs", "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to Kafka", "uri":"mrs_01_1061.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"180" + "doc_type":"usermanual", + "p_code":"225", + "code":"227" }, { "desc":"This section describes how to use Flume client to collect static logs from a local PC and save them to the /flume/test directory on HDFS.By default, the cluster network e", "product_code":"mrs", "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS", "uri":"mrs_01_1063.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"181" + "doc_type":"usermanual", + "p_code":"225", + "code":"228" }, { "desc":"This section describes how to use Flume client to collect dynamic logs from a local PC and save them to the /flume/test directory on HDFS.By default, the cluster network ", "product_code":"mrs", "title":"Typical Scenario: Collecting Local Dynamic Logs and Uploading Them to HDFS", "uri":"mrs_01_1064.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"182" + "doc_type":"usermanual", + "p_code":"225", + "code":"229" }, { "desc":"This section describes how to use Flume client to collect logs from the Topic list (test1) of Kafka and save them to the /flume/test directory on HDFS.By default, 
the clu", "product_code":"mrs", "title":"Typical Scenario: Collecting Logs from Kafka and Uploading Them to HDFS", "uri":"mrs_01_1065.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"183" + "doc_type":"usermanual", + "p_code":"225", + "code":"230" }, { "desc":"This section describes how to use Flume client to collect logs from the Topic list (test1) of Kafka client and save them to the /flume/test directory on HDFS.By default, ", "product_code":"mrs", "title":"Typical Scenario: Collecting Logs from Kafka and Uploading Them to HDFS Through the Flume Client", "uri":"mrs_01_1066.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"184" + "doc_type":"usermanual", + "p_code":"225", + "code":"231" }, { "desc":"This section describes how to use Flume client to collect static logs from a local computer and upload them to the flume_test table of HBase.By default, the cluster netwo", "product_code":"mrs", "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HBase", "uri":"mrs_01_1067.html", - "doc_type":"cmpntguide-lts", - "p_code":"178", - "code":"185" + "doc_type":"usermanual", + "p_code":"225", + "code":"232" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Encrypted Transmission", "uri":"mrs_01_1068.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"186" + "doc_type":"usermanual", + "p_code":"215", + "code":"233" }, { "desc":"This section describes how to configure the server and client parameters of the Flume service (including the Flume and MonitorServer roles) after the cluster is installed", "product_code":"mrs", "title":"Configuring the Encrypted Transmission", "uri":"mrs_01_1069.html", - "doc_type":"cmpntguide-lts", - "p_code":"186", - "code":"187" + "doc_type":"usermanual", + "p_code":"233", + "code":"234" }, { "desc":"This section describes how to use Flume client to collect static logs from a local PC and save them to the /flume/test directory on HDFS.The cluster, HDFS and Flume servi", "product_code":"mrs", - "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS", + "title":"Typical Scenario: Collecting Local Static Logs and Uploading Them to HDFS Encrypted Transmission", "uri":"mrs_01_1070.html", - "doc_type":"cmpntguide-lts", - "p_code":"186", - "code":"188" + "doc_type":"usermanual", + "p_code":"233", + "code":"235" }, { "desc":"The Flume client outside the FusionInsight cluster is a part of the end-to-end data collection. Both the Flume client outside the cluster and the Flume server in the clus", "product_code":"mrs", "title":"Viewing Flume Client Monitoring Information", "uri":"mrs_01_1596.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"189" + "doc_type":"usermanual", + "p_code":"215", + "code":"236" }, { "desc":"This section describes how to connect to Kafka using the Flume client in security mode.Set keyTab and principal based on site requirements. 
The configured principal must ", "product_code":"mrs", "title":"Connecting Flume to Kafka in Security Mode", "uri":"mrs_01_1071.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"190" + "doc_type":"usermanual", + "p_code":"215", + "code":"237" }, { "desc":"This section describes how to use Flume to connect to Hive (version 3.1.0) in the cluster.Flume and Hive have been correctly installed in the cluster. The services are ru", "product_code":"mrs", "title":"Connecting Flume with Hive in Security Mode", "uri":"mrs_01_1072.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"191" + "doc_type":"usermanual", + "p_code":"215", + "code":"238" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Configuring the Flume Service Model", "uri":"mrs_01_1073.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"192" + "doc_type":"usermanual", + "p_code":"215", + "code":"239" }, { "desc":"Guide a reasonable Flume service configuration by providing performance differences between Flume common modules, to avoid a nonstandard overall service performance cause", "product_code":"mrs", - "title":"Overview", + "title":"Flume Service Model Overview", "uri":"mrs_01_1074.html", - "doc_type":"cmpntguide-lts", - "p_code":"192", - "code":"193" + "doc_type":"usermanual", + "p_code":"239", + "code":"240" }, { "desc":"During Flume service configuration and module selection, the ultimate throughput of a sink must be greater than the maximum throughput of a source. Otherwise, in extreme ", "product_code":"mrs", "title":"Service Model Configuration Guide", "uri":"mrs_01_1075.html", - "doc_type":"cmpntguide-lts", - "p_code":"192", - "code":"194" + "doc_type":"usermanual", + "p_code":"239", + "code":"241" }, { "desc":"Log path: The default path of Flume log files is /var/log/Bigdata/Role name.FlumeServer: /var/log/Bigdata/flume/flumeFlumeClient: /var/log/Bigdata/flume-client-n/flumeMon", "product_code":"mrs", "title":"Introduction to Flume Logs", "uri":"mrs_01_1081.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"195" + "doc_type":"usermanual", + "p_code":"215", + "code":"242" }, { "desc":"This section describes how to join and log out of a cgroup, query the cgroup status, and change the cgroup CPU threshold.Join CgroupAssume that the Flume client installat", "product_code":"mrs", "title":"Flume Client Cgroup Usage Guide", "uri":"mrs_01_1082.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"196" + "doc_type":"usermanual", + "p_code":"215", + "code":"243" }, { "desc":"This section describes how to perform secondary development for third-party plug-ins.You have obtained the third-party JAR package.You have installed Flume server or clie", "product_code":"mrs", "title":"Secondary Development Guide for Flume Third-Party Plug-ins", "uri":"mrs_01_1083.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"197" + "doc_type":"usermanual", + "p_code":"215", + "code":"244" }, { "desc":"Flume logs are stored in /var/log/Bigdata/flume/flume/flumeServer.log. Most data transmission exceptions and data transmission failures are recorded in logs. 
You can run ", "product_code":"mrs", "title":"Common Issues About Flume", "uri":"mrs_01_1598.html", - "doc_type":"cmpntguide-lts", - "p_code":"168", - "code":"198" + "doc_type":"usermanual", + "p_code":"215", + "code":"245" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using HBase", "uri":"mrs_01_0500.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"199" + "code":"246" }, { "desc":"HBase is a column-based distributed storage system that features high reliability, performance, and scalability. This section describes how to use HBase from scratch, inc", "product_code":"mrs", "title":"Using HBase from Scratch", "uri":"mrs_01_0368.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"200" + "doc_type":"usermanual", + "p_code":"246", + "code":"247" }, { "desc":"This section guides the system administrator to create and configure an HBase role on Manager. The HBase role can set HBase administrator permissions and read (R), write ", "product_code":"mrs", "title":"Creating HBase Roles", "uri":"mrs_01_1608.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"201" + "doc_type":"usermanual", + "p_code":"246", + "code":"248" }, { "desc":"This section describes how to use the HBase client in an O&M scenario or a service scenario.The client has been installed. For example, the installation directory is /opt", "product_code":"mrs", "title":"Using an HBase Client", "uri":"mrs_01_24041.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"202" + "doc_type":"usermanual", + "p_code":"246", + "code":"249" }, { "desc":"As a key feature to ensure high availability of the HBase cluster system, HBase cluster replication provides HBase with remote data replication in real time. It provides ", "product_code":"mrs", "title":"Configuring HBase Replication", "uri":"mrs_01_0501.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"203" + "doc_type":"usermanual", + "p_code":"246", + "code":"250" }, { "desc":"DistCp is used to copy the data stored on HDFS from a cluster to another cluster. DistCp depends on the cross-cluster copy function, which is disabled by default. 
This fu", "product_code":"mrs", "title":"Enabling Cross-Cluster Copy", "uri":"mrs_01_0502.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"204" + "doc_type":"usermanual", + "p_code":"246", + "code":"251" }, { "desc":"You can create tables and indexes using createTable of org.apache.luna.client.LunaAdmin and specify table names, column family names, requests for creating indexes, as we", "product_code":"mrs", "title":"Supporting Full-Text Index", "uri":"mrs_01_0493.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"205" + "doc_type":"usermanual", + "p_code":"246", + "code":"252" }, { "desc":"Active and standby clusters have been installed and started.Time is consistent between the active and standby clusters and the NTP service on the active and standby clust", "product_code":"mrs", "title":"Using the ReplicationSyncUp Tool", "uri":"mrs_01_0510.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"206" + "doc_type":"usermanual", + "p_code":"246", + "code":"253" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"In-House Enhanced Phoenix", + "uri":"mrs_01_24579.html", + "doc_type":"", + "p_code":"246", + "code":"254" + }, + { + "desc":"Phoenix provides CsvBulkloadTool, a batch data import tool. This tool supports import of user-defined delimiters. Specifically, users can use any visible characters withi", + "product_code":"", + "title":"CsvBulkloadTool Supports Parsing User-Defined Delimiters in Data Files", + "uri":"mrs_01_24580.html", + "doc_type":"", + "p_code":"254", + "code":"255" }, { "desc":"HBase disaster recovery (DR), a key feature that is used to ensure high availability (HA) of the HBase cluster system, provides the real-time remote DR function for HBase", "product_code":"mrs", "title":"Configuring HBase DR", "uri":"mrs_01_1609.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"207" + "doc_type":"usermanual", + "p_code":"246", + "code":"256" }, { "desc":"The system administrator can configure HBase cluster DR to improve system availability. If the active cluster in the DR environment is faulty and the connection to the HB", "product_code":"mrs", "title":"Performing an HBase DR Service Switchover", "uri":"mrs_01_1610.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"208" + "doc_type":"usermanual", + "p_code":"246", + "code":"257" }, { "desc":"HBase encodes data blocks in HFiles to reduce duplicate keys in KeyValues, reducing used space. Currently, the following data block encoding modes are supported: NONE, PR", "product_code":"mrs", "title":"Configuring HBase Data Compression and Encoding", - "uri":"en-us_topic_0000001295898904.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"209" + "uri":"mrs_01_24112.html", + "doc_type":"usermanual", + "p_code":"246", + "code":"258" }, { "desc":"The HBase cluster in the current environment is a DR cluster. Due to some reasons, the active and standby clusters need to be switched over. 
That is, the standby cluster ", "product_code":"mrs", "title":"Performing an HBase DR Active/Standby Cluster Switchover", "uri":"mrs_01_1611.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"210" + "doc_type":"usermanual", + "p_code":"246", + "code":"259" }, { "desc":"The Apache HBase official website provides the function of importing data in batches. For details, see the description of the Import and ImportTsv tools at http://hbase.a", "product_code":"mrs", "title":"Community BulkLoad Tool", "uri":"mrs_01_1612.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"211" + "doc_type":"usermanual", + "p_code":"246", + "code":"260" }, { "desc":"In the actual application scenario, data in various sizes needs to be stored, for example, image data and documents. Data whose size is smaller than 10 MB can be stored i", "product_code":"mrs", "title":"Configuring the MOB", "uri":"mrs_01_1631.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"212" + "doc_type":"usermanual", + "p_code":"246", + "code":"261" }, { "desc":"This topic provides the procedure to configure the secure HBase replication during cross-realm Kerberos setup in security mode.Mapping for all the FQDNs to their realms s", "product_code":"mrs", "title":"Configuring Secure HBase Replication", "uri":"mrs_01_1009.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"213" + "doc_type":"usermanual", + "p_code":"246", + "code":"262" }, { "desc":"In a faulty environment, there are possibilities that a region may be stuck in transition for longer duration due to various reasons like slow region server response, uns", "product_code":"mrs", "title":"Configuring Region In Transition Recovery Chore Service", "uri":"mrs_01_1010.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"214" + "doc_type":"usermanual", + "p_code":"246", + "code":"263" }, { "desc":"HIndex enables HBase indexing based on specific column values, making the retrieval of data highly efficient and fast.Column families are separated by semicolons (;).Colu", "product_code":"mrs", "title":"Using a Secondary Index", "uri":"mrs_01_1635.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"215" + "doc_type":"usermanual", + "p_code":"246", + "code":"264" }, { "desc":"Log path: The default storage path of HBase logs is /var/log/Bigdata/hbase/Role name.HMaster: /var/log/Bigdata/hbase/hm (run logs) and /var/log/Bigdata/audit/hbase/hm (au", "product_code":"mrs", "title":"HBase Log Overview", "uri":"mrs_01_1056.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"216" + "doc_type":"usermanual", + "p_code":"246", + "code":"265" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HBase Performance Tuning", "uri":"mrs_01_1013.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"217" + "doc_type":"usermanual", + "p_code":"246", + "code":"266" }, { "desc":"BulkLoad uses MapReduce jobs to directly generate files that comply with the internal data format of HBase, and then loads the generated StoreFiles to a running cluster. 
", "product_code":"mrs", "title":"Improving the BulkLoad Efficiency", "uri":"mrs_01_1636.html", - "doc_type":"cmpntguide-lts", - "p_code":"217", - "code":"218" + "doc_type":"usermanual", + "p_code":"266", + "code":"267" }, { "desc":"In the scenario where a large number of requests are continuously put, setting the following two parameters to false can greatly improve the Put performance.hbase.regions", "product_code":"mrs", "title":"Improving Put Performance", "uri":"mrs_01_1637.html", - "doc_type":"cmpntguide-lts", - "p_code":"217", - "code":"219" + "doc_type":"usermanual", + "p_code":"266", + "code":"268" }, { "desc":"HBase has many configuration parameters related to read and write performance. The configuration parameters need to be adjusted based on the read/write request loads. Thi", "product_code":"mrs", "title":"Optimizing Put and Scan Performance", "uri":"mrs_01_1016.html", - "doc_type":"cmpntguide-lts", - "p_code":"217", - "code":"220" + "doc_type":"usermanual", + "p_code":"266", + "code":"269" }, { "desc":"Scenarios where data needs to be written to HBase in real time, or large-scale and consecutive put scenariosThe HBase put or delete interface can be used to save data to ", "product_code":"mrs", "title":"Improving Real-time Data Write Performance", "uri":"mrs_01_1017.html", - "doc_type":"cmpntguide-lts", - "p_code":"217", - "code":"221" + "doc_type":"usermanual", + "p_code":"266", + "code":"270" }, { "desc":"HBase data needs to be read.The get or scan interface of HBase has been invoked and data is read in real time from HBase.Data reading server tuningParameter portal:Go to ", "product_code":"mrs", "title":"Improving Real-time Data Read Performance", "uri":"mrs_01_1018.html", - "doc_type":"cmpntguide-lts", - "p_code":"217", - "code":"222" + "doc_type":"usermanual", + "p_code":"266", + "code":"271" }, { "desc":"When the number of clusters reaches a certain scale, the default settings of the Java virtual machine (JVM) cannot meet the cluster requirements. In this case, the cluste", "product_code":"mrs", - "title":"Optimizing JVM Parameters", + "title":"Optimizing HBase JVM Parameters", "uri":"mrs_01_1019.html", - "doc_type":"cmpntguide-lts", - "p_code":"217", - "code":"223" + "doc_type":"usermanual", + "p_code":"266", + "code":"272" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About HBase", "uri":"mrs_01_1638.html", - "doc_type":"cmpntguide-lts", - "p_code":"199", - "code":"224" + "doc_type":"usermanual", + "p_code":"246", + "code":"273" }, { "desc":"A HBase server is faulty and cannot provide services. 
In this case, when a table operation is performed on the HBase client, why is the operation suspended and no respons", "product_code":"mrs", "title":"Why Does a Client Keep Failing to Connect to a Server for a Long Time?", "uri":"mrs_01_1639.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"225" + "doc_type":"usermanual", + "p_code":"273", + "code":"274" }, { "desc":"Why submitted operations fail by stopping BulkLoad on the client during BulkLoad data importing?When BulkLoad is enabled on the client, a partitioner file is generated an", "product_code":"mrs", "title":"Operation Failures Occur in Stopping BulkLoad On the Client", "uri":"mrs_01_1640.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"226" + "doc_type":"usermanual", + "p_code":"273", + "code":"275" }, { "desc":"When HBase consecutively deletes and creates the same table, why may a table creation exception occur?Execution process: Disable Table > Drop Table > Create Table > Disab", "product_code":"mrs", "title":"Why May a Table Creation Exception Occur When HBase Deletes or Creates the Same Table Consecutively?", "uri":"mrs_01_1641.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"227" + "doc_type":"usermanual", + "p_code":"273", + "code":"276" }, { "desc":"Why other services become unstable if HBase sets up a large number of connections over the network port?When the OS command lsof or netstat is run, it is found that many ", "product_code":"mrs", "title":"Why Other Services Become Unstable If HBase Sets up A Large Number of Connections over the Network Port?", "uri":"mrs_01_1642.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"228" + "doc_type":"usermanual", + "p_code":"273", + "code":"277" }, { "desc":"The HBase bulkLoad task (a single table contains 26 TB data) has 210,000 maps and 10,000 reduce tasks, and the task fails.ZooKeeper I/O bottleneck observation methods:On ", "product_code":"mrs", "title":"Why Does the HBase BulkLoad Task (One Table Has 26 TB Data) Consisting of 210,000 Map Tasks and 10,000 Reduce Tasks Fail?", "uri":"mrs_01_1643.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"229" + "doc_type":"usermanual", + "p_code":"273", + "code":"278" }, { "desc":"How do I restore a region in the RIT state for a long time?Log in to the HMaster WebUI, choose Procedure & Locks in the navigation tree, and check whether any process ID ", "product_code":"mrs", "title":"How Do I Restore a Region in the RIT State for a Long Time?", "uri":"mrs_01_1644.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"230" + "doc_type":"usermanual", + "p_code":"273", + "code":"279" }, { "desc":"Why does HMaster exit due to timeout when waiting for the namespace table to go online?During the HMaster active/standby switchover or startup, HMaster performs WAL split", "product_code":"mrs", "title":"Why Does HMaster Exits Due to Timeout When Waiting for the Namespace Table to Go Online?", "uri":"mrs_01_1645.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"231" + "doc_type":"usermanual", + "p_code":"273", + "code":"280" }, { "desc":"Why does the following exception occur on the client when I use the HBase client to operate table data?At the same time, the following log is displayed on RegionServer:Th", "product_code":"mrs", "title":"Why Does SocketTimeoutException Occur When a Client Queries HBase?", "uri":"mrs_01_1646.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"232" + "doc_type":"usermanual", + "p_code":"273", + 
"code":"281" }, { "desc":"Why modified and deleted data can still be queried by using the scan command?Because of the scalability of HBase, all values specific to the versions in the queried colum", "product_code":"mrs", "title":"Why Modified and Deleted Data Can Still Be Queried by Using the Scan Command?", "uri":"mrs_01_1647.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"233" + "doc_type":"usermanual", + "p_code":"273", + "code":"282" }, { "desc":"Why \"java.lang.UnsatisfiedLinkError: Permission denied\" exception thrown while starting HBase shell?During HBase shell execution JRuby create temporary files under java.i", "product_code":"mrs", "title":"Why \"java.lang.UnsatisfiedLinkError: Permission denied\" exception thrown while starting HBase shell?", "uri":"mrs_01_1648.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"234" + "doc_type":"usermanual", + "p_code":"273", + "code":"283" }, { "desc":"When does the RegionServers listed under \"Dead Region Servers\" on HMaster WebUI gets cleared?When an online RegionServer goes down abruptly, it is displayed under \"Dead R", "product_code":"mrs", "title":"When does the RegionServers listed under \"Dead Region Servers\" on HMaster WebUI gets cleared?", "uri":"mrs_01_1649.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"235" + "doc_type":"usermanual", + "p_code":"273", + "code":"284" }, { "desc":"If the data to be imported by HBase bulkload has identical rowkeys, the data import is successful but identical query criteria produce different query results.Data with a", "product_code":"mrs", "title":"Why Are Different Query Results Returned After I Use Same Query Criteria to Query Data Successfully Imported by HBase bulkload?", "uri":"mrs_01_1650.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"236" + "doc_type":"usermanual", + "p_code":"273", + "code":"285" }, { "desc":"What should I do if I fail to create tables due to the FAILED_OPEN state of Regions?If a network, HDFS, or Active HMaster fault occurs during the creation of tables, some", "product_code":"mrs", "title":"What Should I Do If I Fail to Create Tables Due to the FAILED_OPEN State of Regions?", "uri":"mrs_01_1651.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"237" + "doc_type":"usermanual", + "p_code":"273", + "code":"286" }, { "desc":"In security mode, names of tables that failed to be created are unnecessarily retained in the table-lock node (default directory is /hbase/table-lock) of ZooKeeper. 
How d", "product_code":"mrs", "title":"How Do I Delete Residual Table Names in the /hbase/table-lock Directory of ZooKeeper?", "uri":"mrs_01_1652.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"238" + "doc_type":"usermanual", + "p_code":"273", + "code":"287" }, { "desc":"Why does HBase become faulty when I set quota for the directory used by HBase in HDFS?The flush operation of a table is to write memstore data to HDFS.If the HDFS directo", "product_code":"mrs", "title":"Why Does HBase Become Faulty When I Set a Quota for the Directory Used by HBase in HDFS?", "uri":"mrs_01_1653.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"239" + "doc_type":"usermanual", + "p_code":"273", + "code":"288" }, { "desc":"Why HMaster times out while waiting for namespace table to be assigned after rebuilding meta using OfflineMetaRepair tool and startups failed?HMaster abort with following", "product_code":"mrs", "title":"Why HMaster Times Out While Waiting for Namespace Table to be Assigned After Rebuilding Meta Using OfflineMetaRepair Tool and Startups Failed", "uri":"mrs_01_1654.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"240" + "doc_type":"usermanual", + "p_code":"273", + "code":"289" }, { "desc":"Why messages containing FileNotFoundException and no lease are frequently displayed in the HMaster logs during the WAL splitting process?During the WAL splitting process,", "product_code":"mrs", "title":"Why Messages Containing FileNotFoundException and no lease Are Frequently Displayed in the HMaster Logs During the WAL Splitting Process?", "uri":"mrs_01_1655.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"241" + "doc_type":"usermanual", + "p_code":"273", + "code":"290" }, { "desc":"When a tenant accesses Phoenix, a message is displayed indicating that the tenant has insufficient rights.You need to associate the HBase service and Yarn queues when cre", "product_code":"mrs", "title":"Insufficient Rights When a Tenant Accesses Phoenix", "uri":"mrs_01_1657.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"242" + "doc_type":"usermanual", + "p_code":"273", + "code":"291" }, { "desc":"The system automatically rolls back data after an HBase recovery task fails. If \"Rollback recovery failed\" is displayed, the rollback fails. After the rollback fails, dat", "product_code":"mrs", "title":"What Can I Do When HBase Fails to Recover a Task and a Message Is Displayed Stating \"Rollback recovery failed\"?", "uri":"mrs_01_1659.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"243" + "doc_type":"usermanual", + "p_code":"273", + "code":"292" }, { "desc":"When the HBaseFsck tool is used to check the region status, if the log contains ERROR: (regions region1 and region2) There is an overlap in the region chain or ERROR: (re", "product_code":"mrs", "title":"How Do I Fix Region Overlapping?", "uri":"mrs_01_1660.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"244" + "doc_type":"usermanual", + "p_code":"273", + "code":"293" }, { "desc":"Check the hbase-omm-*.out log of the node where RegionServer fails to be started. 
It is found that the log contains An error report file with more information is saved as", "product_code":"mrs", "title":"Why Does RegionServer Fail to Be Started When GC Parameters Xms and Xmx of HBase RegionServer Are Set to 31 GB?", "uri":"mrs_01_1661.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"245" + "doc_type":"usermanual", + "p_code":"273", + "code":"294" }, { "desc":"Why does the LoadIncrementalHFiles tool fail to be executed and \"Permission denied\" is displayed when a Linux user is manually created in a normal cluster and DataNode in", "product_code":"mrs", "title":"Why Does the LoadIncrementalHFiles Tool Fail to Be Executed and \"Permission denied\" Is Displayed When Nodes in a Cluster Are Used to Import Data in Batches?", "uri":"mrs_01_0625.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"246" + "doc_type":"usermanual", + "p_code":"273", + "code":"295" }, { "desc":"When the sqlline script is used on the client, the error message \"import argparse\" is displayed.", "product_code":"mrs", "title":"Why Is the Error Message \"import argparse\" Displayed When the Phoenix sqlline Script Is Used?", "uri":"mrs_01_2210.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"247" + "doc_type":"usermanual", + "p_code":"273", + "code":"296" }, { "desc":"When the indexed field data is updated, if a batch of data exists in the user table, the BulkLoad tool cannot update the global and partial mutable indexes.Problem Analys", "product_code":"mrs", "title":"How Do I Deal with the Restrictions of the Phoenix BulkLoad Tool?", "uri":"mrs_01_2211.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"248" + "doc_type":"usermanual", + "p_code":"273", + "code":"297" }, { "desc":"When CTBase accesses the HBase service with the Ranger plug-ins enabled and you are creating a cluster table, a message is displayed indicating that the permission is ins", "product_code":"mrs", "title":"Why a Message Is Displayed Indicating that the Permission is Insufficient When CTBase Connects to the Ranger Plug-ins?", "uri":"mrs_01_2212.html", - "doc_type":"cmpntguide-lts", - "p_code":"224", - "code":"249" + "doc_type":"usermanual", + "p_code":"273", + "code":"298" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using HDFS", "uri":"mrs_01_0790.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"250" + "code":"299" }, { "desc":"In HDFS, each file object needs to register corresponding information in the NameNode and occupies certain storage space. As the number of files increases, if the origina", "product_code":"mrs", "title":"Configuring Memory Management", "uri":"mrs_01_0791.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"251" + "doc_type":"usermanual", + "p_code":"299", + "code":"300" }, { "desc":"This section describes how to create and configure an HDFS role on FusionInsight Manager. 
The HDFS role is granted the rights to read, write, and execute HDFS directories", "product_code":"mrs", "title":"Creating an HDFS Role", "uri":"mrs_01_1662.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"252" + "doc_type":"usermanual", + "p_code":"299", + "code":"301" }, { "desc":"This section describes how to use the HDFS client in an O&M scenario or service scenario.The client has been installed.For example, the installation directory is /opt/had", "product_code":"mrs", "title":"Using the HDFS Client", "uri":"mrs_01_1663.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"253" + "doc_type":"usermanual", + "p_code":"299", + "code":"302" }, { "desc":"DistCp is a tool used to perform large-amount data replication between clusters or in a cluster. It uses MapReduce tasks to implement distributed copy of a large amount o", "product_code":"mrs", "title":"Running the DistCp Command", "uri":"mrs_01_0794.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"254" + "doc_type":"usermanual", + "p_code":"299", + "code":"303" }, { "desc":"This section describes the directory structure in HDFS, as shown in the following table.", "product_code":"mrs", "title":"Overview of HDFS File System Directories", "uri":"mrs_01_0795.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"255" + "doc_type":"usermanual", + "p_code":"299", + "code":"304" }, { "desc":"If the storage directory defined by the HDFS DataNode is incorrect or the HDFS storage plan changes, the system administrator needs to modify the DataNode storage directo", "product_code":"mrs", "title":"Changing the DataNode Storage Directory", "uri":"mrs_01_1664.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"256" + "doc_type":"usermanual", + "p_code":"299", + "code":"305" }, { "desc":"The permission for some HDFS directories is 777 or 750 by default, which brings potential security risks. You are advised to modify the permission for the HDFS directorie", "product_code":"mrs", "title":"Configuring HDFS Directory Permission", "uri":"mrs_01_0797.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"257" + "doc_type":"usermanual", + "p_code":"299", + "code":"306" }, { "desc":"Before deploying a cluster, you can deploy a Network File System (NFS) server based on requirements to store NameNode metadata to enhance data reliability.If the NFS serv", "product_code":"mrs", "title":"Configuring NFS", "uri":"mrs_01_1665.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"258" + "doc_type":"usermanual", + "p_code":"299", + "code":"307" }, { "desc":"In HDFS, DataNode stores user files and directories as blocks, and file objects are generated on the NameNode to map each file, directory, and block on the DataNode.The f", "product_code":"mrs", "title":"Planning HDFS Capacity", "uri":"mrs_01_0799.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"259" + "doc_type":"usermanual", + "p_code":"299", + "code":"308" }, { "desc":"When you open an HDFS file, an error occurs due to the limit on the number of file handles. Information similar to the following is displayed.You can contact the system a", "product_code":"mrs", "title":"Configuring ulimit for HBase and HDFS", "uri":"mrs_01_0801.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"260" + "doc_type":"usermanual", + "p_code":"299", + "code":"309" }, { "desc":"In the HDFS cluster, unbalanced disk usage among DataNodes may occur, for example, when new DataNodes are added to the cluster. 
Unbalanced disk usage may result in multip", "product_code":"mrs", "title":"Balancing DataNode Capacity", "uri":"mrs_01_1667.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"261" + "doc_type":"usermanual", + "p_code":"299", + "code":"310" }, { "desc":"By default, NameNode randomly selects a DataNode to write files. If the disk capacity of some DataNodes in a cluster is inconsistent (the total disk capacity of some node", "product_code":"mrs", "title":"Configuring Replica Replacement Policy for Heterogeneous Capacity Among DataNodes", "uri":"mrs_01_0804.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"262" + "doc_type":"usermanual", + "p_code":"299", + "code":"311" }, { "desc":"Generally, multiple services are deployed in a cluster, and the storage of most services depends on the HDFS file system. Different components such as Spark and Yarn or c", "product_code":"mrs", "title":"Configuring the Number of Files in a Single HDFS Directory", "uri":"mrs_01_0805.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"263" + "doc_type":"usermanual", + "p_code":"299", + "code":"312" }, { "desc":"On HDFS, deleted files are moved to the recycle bin (trash can) so that the data deleted by mistake can be restored.You can set the time threshold for storing files in th", "product_code":"mrs", "title":"Configuring the Recycle Bin Mechanism", "uri":"mrs_01_0806.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"264" + "doc_type":"usermanual", + "p_code":"299", + "code":"313" }, { "desc":"HDFS allows users to modify the default permissions of files and directories. The default mask provided by the HDFS for creating file and directory permissions is 022. If", "product_code":"mrs", "title":"Setting Permissions on Files and Directories", "uri":"mrs_01_0807.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"265" + "doc_type":"usermanual", + "p_code":"299", + "code":"314" }, { "desc":"In security mode, users can flexibly set the maximum token lifetime and token renewal interval in HDFS based on cluster requirements.Navigation path for setting parameter", "product_code":"mrs", "title":"Setting the Maximum Lifetime and Renewal Interval of a Token", "uri":"mrs_01_0808.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"266" + "doc_type":"usermanual", + "p_code":"299", + "code":"315" }, { "desc":"In the open source version, if multiple data storage volumes are configured for a DataNode, the DataNode stops providing services by default if one of the volumes is dama", "product_code":"mrs", "title":"Configuring the Damaged Disk Volume", "uri":"mrs_01_1669.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"267" + "doc_type":"usermanual", + "p_code":"299", + "code":"316" }, { "desc":"Encrypted channel is an encryption protocol of remote procedure call (RPC) in HDFS. When a user invokes RPC, the user's login name will be transmitted to RPC through RPC ", "product_code":"mrs", "title":"Configuring Encrypted Channels", "uri":"mrs_01_0810.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"268" + "doc_type":"usermanual", + "p_code":"299", + "code":"317" }, { "desc":"Clients probably encounter running errors when the network is not stable. 
Users can adjust the following parameter values to improve the running efficiency.Go to the All ", "product_code":"mrs", "title":"Reducing the Probability of Abnormal Client Application Operation When the Network Is Not Stable", "uri":"mrs_01_0811.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"269" + "doc_type":"usermanual", + "p_code":"299", + "code":"318" }, { "desc":"In the existing default DFSclient failover proxy provider, if a NameNode in a process is faulty, all HDFS client instances in the same process attempt to connect to the N", "product_code":"mrs", "title":"Configuring the NameNode Blacklist", "uri":"mrs_01_1670.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"270" + "doc_type":"usermanual", + "p_code":"299", + "code":"319" }, { "desc":"Several finished Hadoop clusters are faulty because the NameNode is overloaded and unresponsive.Such problem is caused by the initial design of Hadoop: In Hadoop, the Nam", "product_code":"mrs", "title":"Optimizing HDFS NameNode RPC QoS", "uri":"mrs_01_1672.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"271" + "doc_type":"usermanual", + "p_code":"299", + "code":"320" }, { "desc":"When the speed at which the client writes data to the HDFS is greater than the disk bandwidth of the DataNode, the disk bandwidth is fully occupied. As a result, the Data", "product_code":"mrs", "title":"Optimizing HDFS DataNode RPC QoS", "uri":"mrs_01_1673.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"272" + "doc_type":"usermanual", + "p_code":"299", + "code":"321" }, { "desc":"When the Yarn local directory and DataNode directory are on the same disk, the disk with larger capacity can run more tasks. Therefore, more intermediate data is stored i", "product_code":"mrs", "title":"Configuring Reserved Percentage of Disk Usage on DataNodes", "uri":"mrs_01_1675.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"273" + "doc_type":"usermanual", + "p_code":"299", + "code":"322" }, { "desc":"You need to configure the nodes for storing HDFS file data blocks based on data features. You can configure a label expression to an HDFS directory or file and assign one", "product_code":"mrs", "title":"Configuring HDFS NodeLabel", "uri":"mrs_01_1676.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"274" + "doc_type":"usermanual", + "p_code":"299", + "code":"323" }, { "desc":"DiskBalancer is an online disk balancer that balances disk data on running DataNodes based on various indicators. It works in the similar way of the HDFS Balancer. The di", "product_code":"mrs", "title":"Configuring HDFS DiskBalancer", "uri":"mrs_01_1678.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"275" + "doc_type":"usermanual", + "p_code":"299", + "code":"324" }, { "desc":"Performing this operation can concurrently modify file and directory permissions and access control tools in a cluster.Performing concurrent file modification operations ", "product_code":"mrs", "title":"Performing Concurrent Operations on HDFS Files", "uri":"mrs_01_1684.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"276" + "doc_type":"cmpntguide", + "p_code":"299", + "code":"325" + }, + { + "desc":"By default, an HDFS file can be closed only if all blocks are reported (in the COMPLETED state). 
Therefore, the write performance of HDFS is affected by waiting for DataN", + "product_code":"", + "title":"Closing HDFS Files", + "uri":"mrs_01_24485.html", + "doc_type":"", + "p_code":"299", + "code":"326" }, { "desc":"Log path: The default path of HDFS logs is /var/log/Bigdata/hdfs/Role name.NameNode: /var/log/Bigdata/hdfs/nn (run logs) and /var/log/Bigdata/audit/hdfs/nn (audit logs)Da", "product_code":"mrs", "title":"Introduction to HDFS Logs", "uri":"mrs_01_0828.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"277" + "doc_type":"usermanual", + "p_code":"299", + "code":"327" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HDFS Performance Tuning", "uri":"mrs_01_0829.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"278" + "doc_type":"usermanual", + "p_code":"299", + "code":"328" }, { "desc":"Improve the HDFS write performance by modifying the HDFS attributes.Navigation path for setting parameters:On FusionInsight Manager, choose Cluster >Name of the desired c", "product_code":"mrs", "title":"Improving Write Performance", "uri":"mrs_01_1687.html", - "doc_type":"cmpntguide-lts", - "p_code":"278", - "code":"279" + "doc_type":"usermanual", + "p_code":"328", + "code":"329" }, { "desc":"Improve the HDFS read performance by using the client to cache the metadata for block locations.This function is recommended only for reading files that are not modified ", "product_code":"mrs", "title":"Improving Read Performance Using Client Metadata Cache", "uri":"mrs_01_1688.html", - "doc_type":"cmpntguide-lts", - "p_code":"278", - "code":"280" + "doc_type":"usermanual", + "p_code":"328", + "code":"330" }, { "desc":"When HDFS is deployed in high availability (HA) mode with multiple NameNode instances, the HDFS client needs to connect to each NameNode in sequence to determine which is", "product_code":"mrs", "title":"Improving the Connection Between the Client and NameNode Using Current Active Cache", "uri":"mrs_01_1689.html", - "doc_type":"cmpntguide-lts", - "p_code":"278", - "code":"281" + "doc_type":"usermanual", + "p_code":"328", + "code":"331" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"FAQ", "uri":"mrs_01_1690.html", - "doc_type":"cmpntguide-lts", - "p_code":"250", - "code":"282" + "doc_type":"usermanual", + "p_code":"299", + "code":"332" }, { "desc":"The NameNode startup is slow when it is restarted immediately after a large number of files (for example, 1 million files) are deleted.It takes time for the DataNode to d", "product_code":"mrs", "title":"NameNode Startup Is Slow", "uri":"mrs_01_1691.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"283" + "doc_type":"usermanual", + "p_code":"332", + "code":"333" }, { "desc":"Why MapReduce or Yarn tasks using the viewFS function fail to be executed in the environment with multiple NameServices?When viewFS is used, only directories mounted to v", "product_code":"mrs", "title":"Why MapReduce Tasks Fails in the Environment with Multiple NameServices?", "uri":"mrs_01_1692.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"284" + "doc_type":"usermanual", + "p_code":"332", + "code":"334" }, { "desc":"The DataNode is normal, but cannot report data blocks. As a result, the existing data blocks cannot be used.This error may occur when the number of data blocks in a data ", "product_code":"mrs", "title":"DataNode Is Normal but Cannot Report Data Blocks", "uri":"mrs_01_1693.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"285" + "doc_type":"usermanual", + "p_code":"332", + "code":"335" }, { "desc":"When errors occur in the dfs.datanode.data.dir directory of DataNode due to the permission or disk damage, HDFS WebUI does not display information about damaged data.Afte", "product_code":"mrs", "title":"HDFS WebUI Cannot Properly Update Information About Damaged Data", "uri":"mrs_01_1694.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"286" + "doc_type":"usermanual", + "p_code":"332", + "code":"336" }, { "desc":"Why distcp command fails in the secure cluster with the following error displayed?Client side exceptionServer side exceptionThe preceding error may occur if webhdfs:// is", "product_code":"mrs", "title":"Why Does the Distcp Command Fail in the Secure Cluster, Causing an Exception?", "uri":"mrs_01_1695.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"287" + "doc_type":"usermanual", + "p_code":"332", + "code":"337" }, { "desc":"If the number of disks specified by dfs.datanode.data.dir is equal to the value of dfs.datanode.failed.volumes.tolerated, DataNode startup will fail.By default, the failu", "product_code":"mrs", "title":"Why Does DataNode Fail to Start When the Number of Disks Specified by dfs.datanode.data.dir Equals dfs.datanode.failed.volumes.tolerated?", "uri":"mrs_01_1696.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"288" + "doc_type":"usermanual", + "p_code":"332", + "code":"338" }, { "desc":"DataNode capacity count incorrect if several data.dir configured in one disk partition.Currently calculation will be done based on the disk like df command in linux. 
Idea", "product_code":"mrs", "title":"Why Does an Error Occur During DataNode Capacity Calculation When Multiple data.dir Are Configured in a Partition?", "uri":"mrs_01_1697.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"289" + "doc_type":"usermanual", + "p_code":"332", + "code":"339" }, { "desc":"When the standby NameNode is powered off during metadata (namespace) storage, it fails to be started and the following error information is displayed.When the standby Nam", "product_code":"mrs", "title":"Standby NameNode Fails to Be Restarted When the System Is Powered off During Metadata (Namespace) Storage", "uri":"mrs_01_1698.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"290" + "doc_type":"usermanual", + "p_code":"332", + "code":"340" }, { "desc":"Why data in the buffer is lost if a power outage occurs during storage of small files?Because of a power outage, the blocks in the buffer are not written to the disk imme", "product_code":"mrs", "title":"Why Data in the Buffer Is Lost If a Power Outage Occurs During Storage of Small Files", "uri":"mrs_01_1699.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"291" + "doc_type":"usermanual", + "p_code":"332", + "code":"341" }, { "desc":"When HDFS calls the FileInputFormat getSplit method, the ArrayIndexOutOfBoundsException: 0 appears in the following log:The elements of each block correspondent frame are", "product_code":"mrs", "title":"Why Does Array Border-crossing Occur During FileInputFormat Split?", "uri":"mrs_01_1700.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"292" + "doc_type":"usermanual", + "p_code":"332", + "code":"342" }, { "desc":"When the storage policy of the file is set to LAZY_PERSIST, the storage type of the first replica should be RAM_DISK, and the storage type of other replicas should be DIS", "product_code":"mrs", "title":"Why Is the Storage Type of File Copies DISK When the Tiered Storage Policy Is LAZY_PERSIST?", "uri":"mrs_01_1701.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"293" + "doc_type":"usermanual", + "p_code":"332", + "code":"343" }, { "desc":"When the NameNode node is overloaded (100% of the CPU is occupied), the NameNode is unresponsive. The HDFS clients that are connected to the overloaded NameNode fail to r", "product_code":"mrs", "title":"The HDFS Client Is Unresponsive When the NameNode Is Overloaded for a Long Time", "uri":"mrs_01_1702.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"294" + "doc_type":"usermanual", + "p_code":"332", + "code":"344" }, { "desc":"In DataNode, the storage directory of data blocks is specified by dfs.datanode.data.dir.Can I modify dfs.datanode.data.dir tomodify the data storage directory?Can I modif", "product_code":"mrs", "title":"Can I Delete or Modify the Data Storage Directory in DataNode?", "uri":"mrs_01_1703.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"295" + "doc_type":"usermanual", + "p_code":"332", + "code":"345" }, { "desc":"Why are some blocks missing on the NameNode UI after the rollback is successful?This problem occurs because blocks with new IDs or genstamps may exist on the DataNode. 
Th", "product_code":"mrs", "title":"Blocks Miss on the NameNode UI After the Successful Rollback", "uri":"mrs_01_1704.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"296" + "doc_type":"usermanual", + "p_code":"332", + "code":"346" }, { "desc":"Why is an \"java.net.SocketException: No buffer space available\" exception reported when data is written to HDFS?This problem occurs when files are written to the HDFS. Ch", "product_code":"mrs", "title":"Why Is \"java.net.SocketException: No buffer space available\" Reported When Data Is Written to HDFS", "uri":"mrs_01_1705.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"297" + "doc_type":"usermanual", + "p_code":"332", + "code":"347" }, { "desc":"Why are there two standby NameNodes after the active NameNode is restarted?When this problem occurs, check the ZooKeeper and ZooKeeper FC logs. You can find that the sess", "product_code":"mrs", "title":"Why are There Two Standby NameNodes After the active NameNode Is Restarted?", "uri":"mrs_01_1706.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"298" + "doc_type":"usermanual", + "p_code":"332", + "code":"348" }, { "desc":"After I start a Balance process in HDFS, the process is shut down abnormally. If I attempt to execute the Balance process again, it fails again.After a Balance process is", "product_code":"mrs", "title":"When Does a Balance Process in HDFS, Shut Down and Fail to be Executed Again?", "uri":"mrs_01_1707.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"299" + "doc_type":"usermanual", + "p_code":"332", + "code":"349" }, { "desc":"Occasionally, nternet Explorer 9, Explorer 10, or Explorer 11 fails to access the native HDFS UI.Internet Explorer 9, Explorer 10, or Explorer 11 fails to access the nati", "product_code":"mrs", "title":"\"This page can't be displayed\" Is Displayed When Internet Explorer Fails to Access the Native HDFS UI", "uri":"mrs_01_1708.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"300" + "doc_type":"usermanual", + "p_code":"332", + "code":"350" }, { "desc":"If a JournalNode server is powered off, the data directory disk is fully occupied, and the network is abnormal, the EditLog sequence number on the JournalNode is inconsec", "product_code":"mrs", "title":"NameNode Fails to Be Restarted Due to EditLog Discontinuity", "uri":"mrs_01_1709.html", - "doc_type":"cmpntguide-lts", - "p_code":"282", - "code":"301" + "doc_type":"usermanual", + "p_code":"332", + "code":"351" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using HetuEngine", "uri":"mrs_01_1710.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"302" + "code":"352" }, { "desc":"This section describes how to use HetuEngine to connect to the Hive data source and query database tables of the Hive data source of the cluster through HetuEngine.The He", "product_code":"mrs", "title":"Using HetuEngine from Scratch", "uri":"mrs_01_1711.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"303" + "doc_type":"usermanual", + "p_code":"352", + "code":"353" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HetuEngine Permission Management", "uri":"mrs_01_1721.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"304" + "doc_type":"usermanual", + "p_code":"352", + "code":"354" }, { "desc":"HetuEngine supports permission control for clusters in security mode. For clusters in non-security mode, permission control is not performed.In security mode, HetuEngine ", "product_code":"mrs", "title":"HetuEngine Permission Management Overview", "uri":"mrs_01_1722.html", - "doc_type":"cmpntguide-lts", - "p_code":"304", - "code":"305" + "doc_type":"usermanual", + "p_code":"354", + "code":"355" }, { "desc":"Before using the HetuEngine service in a security cluster, a cluster administrator needs to create a user and grant operation permissions to the user to meet service requ", "product_code":"mrs", "title":"Creating a HetuEngine User", "uri":"mrs_01_1714.html", - "doc_type":"cmpntguide-lts", - "p_code":"304", - "code":"306" + "doc_type":"cmpntguide", + "p_code":"354", + "code":"356" }, { "desc":"Newly installed clusters use Ranger for authentication by default. System administrators can use Ranger to configure the permissions to manage databases, tables, and colu", "product_code":"mrs", "title":"HetuEngine Ranger-based Permission Control", "uri":"mrs_01_1723.html", - "doc_type":"cmpntguide-lts", - "p_code":"304", - "code":"307" + "doc_type":"usermanual", + "p_code":"354", + "code":"357" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HetuEngine MetaStore-based Permission Control", "uri":"mrs_01_1724.html", - "doc_type":"cmpntguide-lts", - "p_code":"304", - "code":"308" + "doc_type":"usermanual", + "p_code":"354", + "code":"358" }, { "desc":"Constraints: This parameter applies only to the Hive data source.When multiple HetuEngine clusters are deployed for collaborative computing, the metadata is centrally man", "product_code":"mrs", - "title":"Overview", + "title":"MetaStore Permission Overview", "uri":"mrs_01_1725.html", - "doc_type":"cmpntguide-lts", - "p_code":"308", - "code":"309" + "doc_type":"usermanual", + "p_code":"358", + "code":"359" }, { "desc":"The system administrator can create and set a HetuEngine role on FusionInsight Manager. The HetuEngine role can be configured with the HetuEngine administrator permission", "product_code":"mrs", "title":"Creating a HetuEngine Role", "uri":"mrs_01_2350.html", - "doc_type":"cmpntguide-lts", - "p_code":"308", - "code":"310" + "doc_type":"usermanual", + "p_code":"358", + "code":"360" }, { "desc":"If a user needs to access HetuEngine tables or databases created by other users, the user needs to be granted with related permissions. 
HetuEngine supports permission con", "product_code":"mrs", "title":"Configuring Permissions for Tables, Columns, and Databases", "uri":"mrs_01_2352.html", - "doc_type":"cmpntguide-lts", - "p_code":"308", - "code":"311" + "doc_type":"usermanual", + "p_code":"358", + "code":"361" }, { "desc":"Access data sources in the same cluster using HetuEngineIf Ranger authentication is enabled for HetuEngine, the PBAC permission policy of Ranger is used for authenticatio", "product_code":"mrs", "title":"Permission Principles and Constraints", "uri":"mrs_01_1728.html", - "doc_type":"cmpntguide-lts", - "p_code":"304", - "code":"312" + "doc_type":"usermanual", + "p_code":"354", + "code":"362" }, { "desc":"This section describes how to create a HetuEngine compute instance. If you want to stop the cluster where compute instances are successfully created, you need to manually", "product_code":"mrs", "title":"Creating HetuEngine Compute Instances", "uri":"mrs_01_1731.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"313" + "doc_type":"usermanual", + "p_code":"352", + "code":"363" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Configuring Data Sources", "uri":"mrs_01_2314.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"314" + "doc_type":"usermanual", + "p_code":"352", + "code":"364" }, { "desc":"HetuEngine supports quick joint query of multiple data sources and GUI-based data source configuration and management. You can quickly add a data source on the HSConsole ", "product_code":"mrs", "title":"Before You Start", "uri":"mrs_01_2315.html", - "doc_type":"cmpntguide-lts", - "p_code":"314", - "code":"315" + "doc_type":"usermanual", + "p_code":"364", + "code":"365" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Configuring a Hive Data Source", "uri":"mrs_01_24174.html", - "doc_type":"cmpntguide-lts", - "p_code":"314", - "code":"316" + "doc_type":"usermanual", + "p_code":"364", + "code":"366" }, { "desc":"This section describes how to add a Hive data source of the same Hadoop cluster as HetuEngine on HSConsole.Currently, HetuEngine supports data sources of the following tr", "product_code":"mrs", "title":"Configuring a Co-deployed Hive Data Source", "uri":"mrs_01_24253.html", - "doc_type":"cmpntguide-lts", - "p_code":"316", - "code":"317" + "doc_type":"cmpntguide", + "p_code":"366", + "code":"367" }, { "desc":"This section describes how to add a Hive data source on HSConsole.Currently, HetuEngine supports data sources of the following traditional data formats: AVRO, TEXT, RCTEX", "product_code":"mrs", "title":"Configuring a Traditional Data Source", "uri":"mrs_01_2348.html", - "doc_type":"cmpntguide-lts", - "p_code":"316", - "code":"318" + "doc_type":"usermanual", + "p_code":"366", + "code":"368" }, { "desc":"HetuEngine can be connected to the Hudi data source of the cluster of MRS 3.1.1 or later.HetuEngine does not support the reading of Hudi bootstrap tables.You have created", "product_code":"mrs", "title":"Configuring a Hudi Data Source", "uri":"mrs_01_2363.html", - "doc_type":"cmpntguide-lts", - "p_code":"316", - "code":"319" + "doc_type":"usermanual", + "p_code":"366", + "code":"369" }, { "desc":"This section describes how to add an HBase data source on HSConsole.The domain name of the cluster where the data source is located must be different from the HetuEngine ", "product_code":"mrs", "title":"Configuring an HBase Data Source", "uri":"mrs_01_2349.html", - "doc_type":"cmpntguide-lts", - "p_code":"314", - "code":"320" + "doc_type":"usermanual", + "p_code":"364", + "code":"370" }, { "desc":"This section describes how to add a GaussDB JDBC data source on the HSConsole page.The domain name of the cluster where the data source is located must be different from ", "product_code":"mrs", "title":"Configuring a GaussDB Data Source", "uri":"mrs_01_2351.html", - "doc_type":"cmpntguide-lts", - "p_code":"314", - "code":"321" + "doc_type":"usermanual", + "p_code":"364", + "code":"371" }, { "desc":"This section describes how to add another HetuEngine data source on the HSConsole page for a cluster in security mode.Currently, the following data sources are supported:", "product_code":"mrs", "title":"Configuring a HetuEngine Data Source", "uri":"mrs_01_1719.html", - "doc_type":"cmpntguide-lts", - "p_code":"314", - "code":"322" + "doc_type":"usermanual", + "p_code":"364", + "code":"372" }, { "desc":"Currently, HetuEngine supports the interconnection with the ClickHouse data source in the cluster of MRS 3.1.1 or later.The HetuEngine cluster in security mode supports t", "product_code":"mrs", "title":"Configuring a ClickHouse Data Source", "uri":"mrs_01_24146.html", - "doc_type":"cmpntguide-lts", - "p_code":"314", - "code":"323" + "doc_type":"usermanual", + "p_code":"364", + "code":"373" + }, + { + "desc":"This section applies to MRS 3.2.0 or later.Add an IoTDB JDBC data source on HSConsole of a cluster in security mode.The domain name of the cluster where the data source i", + "product_code":"mrs", + "title":"Configuring an IoTDB Data Source", + "uri":"mrs_01_24743.html", + "doc_type":"cmpntguide", + "p_code":"364", + "code":"374" }, { 
"desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Managing Data Sources", "uri":"mrs_01_1720.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"324" + "doc_type":"usermanual", + "p_code":"352", + "code":"375" }, { "desc":"On the HetuEngine web UI, you can view, edit, and delete an added data source.You have created a HetuEngine administrator for accessing the HetuEngine web UI. For details", "product_code":"mrs", "title":"Managing an External Data Source", "uri":"mrs_01_24061.html", - "doc_type":"cmpntguide-lts", - "p_code":"324", - "code":"325" + "doc_type":"usermanual", + "p_code":"375", + "code":"376" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Managing Compute Instances", "uri":"mrs_01_1729.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"326" + "doc_type":"usermanual", + "p_code":"352", + "code":"377" }, { "desc":"The resource group mechanism controls the overall query load of the instance from the perspective of resource allocation and implements queuing policies for queries. Mult", "product_code":"mrs", "title":"Configuring Resource Groups", "uri":"mrs_01_1732.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"327" + "doc_type":"usermanual", + "p_code":"377", + "code":"378" }, { "desc":"On the HetuEngine web UI, you can adjust the number of worker nodes for a compute instance. In this way, resources can be expanded for the compute instance when resources", "product_code":"mrs", "title":"Adjusting the Number of Worker Nodes", "uri":"mrs_01_2320.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"328" + "doc_type":"usermanual", + "p_code":"377", + "code":"379" }, { "desc":"On the HetuEngine web UI, you can start, stop, delete, and roll-restart a single compute instance or compute instances in batches.Restarting HetuEngineDuring the restart ", "product_code":"mrs", "title":"Managing a HetuEngine Compute Instance", "uri":"mrs_01_1736.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"329" + "doc_type":"usermanual", + "p_code":"377", + "code":"380" }, { "desc":"On the HetuEngine web UI, you can import or export the instance configuration file and download the instance configuration template.You have created a user for accessing ", "product_code":"mrs", "title":"Importing and Exporting Compute Instance Configurations", "uri":"mrs_01_1733.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"330" + "doc_type":"usermanual", + "p_code":"377", + "code":"381" }, { "desc":"On the HetuEngine web UI, you can view the detailed information about a specified service, including the execution status of each SQL statement. 
If the current cluster us", "product_code":"mrs", "title":"Viewing the Instance Monitoring Page", "uri":"mrs_01_1734.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"331" + "doc_type":"usermanual", + "p_code":"377", + "code":"382" }, { "desc":"On the HetuEngine web UI, you can view Coordinator and Worker logs on the Yarn web UI.You have created a user for accessing the HetuEngine web UI. For details, see Creati", "product_code":"mrs", "title":"Viewing Coordinator and Worker Logs", "uri":"mrs_01_1735.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"332" + "doc_type":"usermanual", + "p_code":"377", + "code":"383" }, { "desc":"By default, coordinator and worker nodes randomly start on Yarn NodeManager nodes, and you have to open all ports on all NodeManager nodes. Using resource labels of Yarn,", "product_code":"mrs", "title":"Using Resource Labels to Specify on Which Node Coordinators Should Run", "uri":"mrs_01_24260.html", - "doc_type":"cmpntguide-lts", - "p_code":"326", - "code":"333" + "doc_type":"usermanual", + "p_code":"377", + "code":"384" }, { "desc":"If a compute instance is not created or started, you can log in to the HetuEngine client to create or start the compute instance. This section describes how to manage a c", "product_code":"mrs", "title":"Using the HetuEngine Client", "uri":"mrs_01_1737.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"334" + "doc_type":"usermanual", + "p_code":"352", + "code":"385" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using the HetuEngine Cross-Source Function", "uri":"mrs_01_1738.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"335" + "doc_type":"usermanual", + "p_code":"352", + "code":"386" }, { "desc":"Enterprises usually store massive data, such as from various databases and warehouses, for management and information collection. However, diversified data sources, hybri", "product_code":"mrs", "title":"Introduction to HetuEngine Cross-Source Function", "uri":"mrs_01_1739.html", - "doc_type":"cmpntguide-lts", - "p_code":"335", - "code":"336" + "doc_type":"usermanual", + "p_code":"386", + "code":"387" }, { "desc":"The format of the statement for creating a mapping table is as follows:CREATE TABLE schemaName.tableName (\n rowId VARCHAR,\n qualifier1 TINYINT,\n qualifier2 SMALLINT,\n ", "product_code":"mrs", "title":"Usage Guide of HetuEngine Cross-Source Function", "uri":"mrs_01_2341.html", - "doc_type":"cmpntguide-lts", - "p_code":"335", - "code":"337" + "doc_type":"usermanual", + "p_code":"386", + "code":"388" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using HetuEngine Cross-Domain Function", "uri":"mrs_01_2342.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"338" + "doc_type":"usermanual", + "p_code":"352", + "code":"389" }, { "desc":"HetuEngine provide unified standard SQL to implement efficient access to multiple data sources distributed in multiple regions (or data centers), shields data differences", "product_code":"mrs", - "title":"Introduction to HetuEngine Cross-Source Function", + "title":"Introduction to HetuEngine Cross-Domain Function", "uri":"mrs_01_2334.html", - "doc_type":"cmpntguide-lts", - "p_code":"338", - "code":"339" + "doc_type":"usermanual", + "p_code":"389", + "code":"390" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HetuEngine Cross-Domain Function Usage", "uri":"mrs_01_2335.html", - "doc_type":"cmpntguide-lts", - "p_code":"338", - "code":"340" + "doc_type":"usermanual", + "p_code":"389", + "code":"391" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HetuEngine Cross-Domain Rate Limit Function", "uri":"mrs_01_24284.html", - "doc_type":"cmpntguide-lts", - "p_code":"338", - "code":"341" + "doc_type":"usermanual", + "p_code":"389", + "code":"392" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"Using HetuEngine Materialized Views", + "uri":"mrs_01_24533.html", + "doc_type":"", + "p_code":"352", + "code":"393" + }, + { + "desc":"Materialized Views applies to MRS 3.2.0 or later.HetuEngine provides the materialized view capability. It enables you to pre-compute frequently accessed and time-consumin", + "product_code":"", + "title":"Overview of Materialized Views", + "uri":"mrs_01_24541.html", + "doc_type":"", + "p_code":"393", + "code":"394" + }, + { + "desc":"For details about the SQL statements for materialized views, see Table 1.The AS SELECT clause for creating materialized views cannot contain reserved keywords in Calcite ", + "product_code":"", + "title":"SQL Statement Example of Materialized Views", + "uri":"mrs_01_24545.html", + "doc_type":"", + "p_code":"393", + "code":"395" + }, + { + "desc":"A maintenance instance is a special compute instance that performs automatic tasks. 
Maintenance instances are used to automatically refresh, create, and delete materializ", + "product_code":"", + "title":"Configuring a HetuEngine Maintenance Instance", + "uri":"mrs_01_24535.html", + "doc_type":"", + "p_code":"393", + "code":"396" + }, + { + "desc":"HetuEngine provides the materialized view rewriting capability at the system or session level.Enabling the materialized view rewriting capability at the session level:Run", + "product_code":"", + "title":"Configuring Rewriting of Materialized Views", + "uri":"mrs_01_24543.html", + "doc_type":"", + "p_code":"393", + "code":"397" + }, + { + "desc":"HetuEngine QAS module provides automatic detection, learning, and diagnosis of historical SQL execution records. After the materialized view recommendation function is en", + "product_code":"", + "title":"Configuring Recommendation of Materialized Views", + "uri":"mrs_01_24776.html", + "doc_type":"", + "p_code":"393", + "code":"398" + }, + { + "desc":"After a materialized view is created for an SQL statement, the SQL statement is rewritten to be queried through the materialized view when the SQL statement is executed. ", + "product_code":"", + "title":"Configuring Caching of Materialized Views", + "uri":"mrs_01_24544.html", + "doc_type":"", + "p_code":"393", + "code":"399" + }, + { + "desc":"The mv_validity field for creating a materialized view indicates the validity period of the materialized view. HetuEngine allows you to rewrite the SQL statements using o", + "product_code":"", + "title":"Configuring the Validity Period and Data Update of Materialized Views", + "uri":"mrs_01_24546.html", + "doc_type":"", + "p_code":"393", + "code":"400" + }, + { + "desc":"HetuEngine intelligent materialized views provide intelligent precalculation and cache acceleration. The HetuEngine QAS role can automatically extract historical SQL stat", + "product_code":"", + "title":"Configuring Intelligent Materialized Views", + "uri":"mrs_01_24798.html", + "doc_type":"", + "p_code":"393", + "code":"401" + }, + { + "desc":"View the status and execution result of an automatic HetuEngine task on HSConsol. You can periodically view the task execution status and evaluate the cluster health stat", + "product_code":"", + "title":"Viewing Automatic Tasks of Materialized Views", + "uri":"mrs_01_24505.html", + "doc_type":"", + "p_code":"393", + "code":"402" + }, + { + "desc":"This section applies to MRS 3.2.0 or later.The HetuEngine QAS module provides automatic detection, learning, and diagnosis of historical SQL execution records for more ef", + "product_code":"", + "title":"Using HetuEngine SQL Diagnosis", + "uri":"mrs_01_24838.html", + "doc_type":"", + "p_code":"352", + "code":"403" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using a Third-Party Visualization Tool to Access HetuEngine", "uri":"mrs_01_2336.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"342" + "doc_type":"usermanual", + "p_code":"352", + "code":"404" }, { "desc":"To access the dual-plane environment, the cluster service plane must be able to communicate with the local Windows environment.", "product_code":"mrs", "title":"Usage Instruction", "uri":"mrs_01_24178.html", - "doc_type":"cmpntguide-lts", - "p_code":"342", - "code":"343" + "doc_type":"usermanual", + "p_code":"404", + "code":"405" }, { - "desc":"This section uses DBeaver 6.3.5 as an example to describe how to perform operations on HetuEngine.The DBeaver has been installed properly. Download the DBeaver software f", + "desc":"Use DBeaver 7.2.0 as an example to describe how to access HetuEngine.The DBeaver has been installed properly. Download the DBeaver software from https://dbeaver.io/files/", "product_code":"mrs", "title":"Using DBeaver to Access HetuEngine", "uri":"mrs_01_2337.html", - "doc_type":"cmpntguide-lts", - "p_code":"342", - "code":"344" + "doc_type":"usermanual", + "p_code":"404", + "code":"406" }, { "desc":"Tableau has been installed.The JDBC JAR file has been obtained. For details, see 1.A human-machine user has been created in the cluster. For details about how to create a", "product_code":"mrs", "title":"Using Tableau to Access HetuEngine", "uri":"mrs_01_24010.html", - "doc_type":"cmpntguide-lts", - "p_code":"342", - "code":"345" - }, - { - "desc":"PowerBI has been installed.The JDBC JAR file has been obtained. For details, see 1.A human-machine user has been created in the cluster. For details about how to create a", - "product_code":"mrs", - "title":"Using PowerBI to Access HetuEngine", - "uri":"mrs_01_24012.html", - "doc_type":"cmpntguide-lts", - "p_code":"342", - "code":"346" + "doc_type":"usermanual", + "p_code":"404", + "code":"407" }, { "desc":"Yonghong BI has been installed.The JDBC JAR file has been obtained. For details, see 1.A human-machine user has been created in the cluster. For details about how to crea", "product_code":"mrs", "title":"Using Yonghong BI to Access HetuEngine", "uri":"mrs_01_24013.html", - "doc_type":"cmpntguide-lts", - "p_code":"342", - "code":"347" + "doc_type":"usermanual", + "p_code":"404", + "code":"408" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Function & UDF Development and Application", "uri":"mrs_01_2338.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"348" + "doc_type":"usermanual", + "p_code":"352", + "code":"409" }, { "desc":"You can customize functions to extend SQL statements to meet personalized requirements. These functions are called UDFs.This section describes how to develop and apply He", "product_code":"mrs", "title":"HetuEngine Function Plugin Development and Application", "uri":"mrs_01_2339.html", - "doc_type":"cmpntguide-lts", - "p_code":"348", - "code":"349" + "doc_type":"usermanual", + "p_code":"409", + "code":"410" }, { "desc":"You can customize functions to extend SQL statements to meet personalized requirements. 
These functions are called UDFs.This section describes how to develop and apply Hi", "product_code":"mrs", "title":"Hive UDF Development and Application", "uri":"mrs_01_1743.html", - "doc_type":"cmpntguide-lts", - "p_code":"348", - "code":"350" + "doc_type":"usermanual", + "p_code":"409", + "code":"411" }, { "desc":"Log paths:The HetuEngine logs are stored in /var/log/Bigdata/hetuengine/ and /var/log/Bigdata/audit/hetuengine/.Log archiving rules:Log archiving rules use the FixedWindo", "product_code":"mrs", "title":"Introduction to HetuEngine Logs", "uri":"mrs_01_1744.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"351" + "doc_type":"usermanual", + "p_code":"352", + "code":"412" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"HetuEngine Performance Tuning", "uri":"mrs_01_1745.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"352" + "doc_type":"usermanual", + "p_code":"352", + "code":"413" }, { "desc":"HetuEngine depends on the resource allocation and control capabilities provided by Yarn. You need to adjust the Yarn service configuration based on the actual service and", "product_code":"mrs", "title":"Adjusting the Yarn Service Configuration", "uri":"mrs_01_1740.html", - "doc_type":"cmpntguide-lts", - "p_code":"352", - "code":"353" + "doc_type":"usermanual", + "p_code":"413", + "code":"414" }, { "desc":"The default memory size and disk overflow path of HetuEngine are not the best. You need to adjust node resources in the cluster based on the actual service and server con", "product_code":"mrs", "title":"Adjusting Cluster Node Resource Configurations", "uri":"mrs_01_1741.html", - "doc_type":"cmpntguide-lts", - "p_code":"352", - "code":"354" + "doc_type":"usermanual", + "p_code":"413", + "code":"415" }, { "desc":"HetuEngine provides the execution plan cache function. For the same query that needs to be executed for multiple times, this function reduces the time required for genera", "product_code":"mrs", "title":"Adjusting Execution Plan Cache", "uri":"mrs_01_1742.html", - "doc_type":"cmpntguide-lts", - "p_code":"352", - "code":"355" + "doc_type":"usermanual", + "p_code":"413", + "code":"416" }, { "desc":"When HetuEngine accesses the Hive data source, it needs to access the Hive metastore to obtain the metadata information. HetuEngine provides the metadata cache function. ", "product_code":"mrs", "title":"Adjusting Metadata Cache", "uri":"mrs_01_1746.html", - "doc_type":"cmpntguide-lts", - "p_code":"352", - "code":"356" + "doc_type":"usermanual", + "p_code":"413", + "code":"417" }, { "desc":"If a table or common table expression (CTE) contained in a query appears multiple times and has the same projection and filter, you can enable the CTE reuse function to c", - "product_code":"mrs", + "product_code":"", "title":"Modifying the CTE Configuration", "uri":"mrs_01_24181.html", - "doc_type":"cmpntguide-lts", - "p_code":"352", - "code":"357" + "doc_type":"", + "p_code":"413", + "code":"418" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About HetuEngine", "uri":"mrs_01_1747.html", - "doc_type":"cmpntguide-lts", - "p_code":"302", - "code":"358" + "doc_type":"usermanual", + "p_code":"352", + "code":"419" }, { "desc":"After the domain name is changed, the installed client configuration and data source configuration become invalid, and the created cluster is unavailable. When data sourc", "product_code":"mrs", "title":"How Do I Perform Operations After the Domain Name Is Changed?", "uri":"mrs_01_2321.html", - "doc_type":"cmpntguide-lts", - "p_code":"358", - "code":"359" + "doc_type":"usermanual", + "p_code":"419", + "code":"420" }, { "desc":"If the cluster startup on the client takes a long time, the waiting times out and the waiting page exits.If the cluster startup times out, the waiting page automatically ", "product_code":"mrs", "title":"What Do I Do If Starting a Cluster on the Client Times Out?", "uri":"mrs_01_2322.html", - "doc_type":"cmpntguide-lts", - "p_code":"358", - "code":"360" + "doc_type":"usermanual", + "p_code":"419", + "code":"421" }, { "desc":"Why is the data source lost when I log in to the client to check the data source connected to the HSConsole page?The possible cause of data source loss is that the DBServ", "product_code":"mrs", "title":"How Do I Handle Data Source Loss?", "uri":"mrs_01_2323.html", - "doc_type":"cmpntguide-lts", - "p_code":"358", - "code":"361" + "doc_type":"usermanual", + "p_code":"419", + "code":"422" }, { "desc":"Log in to FusionInsight Manager and HetuEngine alarms are generated for the cluster.Log in to FusionInsight Manager, go to the O&M page, and view alarm details. You can c", "product_code":"mrs", "title":"How Do I Handle HetuEngine Alarms?", "uri":"mrs_01_2329.html", - "doc_type":"cmpntguide-lts", - "p_code":"358", - "code":"362" + "doc_type":"usermanual", + "p_code":"419", + "code":"423" }, { "desc":"A new host is added to the cluster in security mode, the NodeManager instance is added, and the parameters of the HetuEngine compute instance are adjusted. After the Hetu", "product_code":"mrs", "title":"How Do I Do If Coordinators and Workers Cannot Be Started on the New Node?", "uri":"mrs_01_24050.html", - "doc_type":"cmpntguide-lts", - "p_code":"358", - "code":"363" + "doc_type":"usermanual", + "p_code":"419", + "code":"424" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Hive", "uri":"mrs_01_0581.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"364" + "code":"425" }, { "desc":"Hive is a data warehouse framework built on Hadoop. It maps structured data files to a database table and provides SQL-like functions to analyze and process data. 
It also", "product_code":"mrs", "title":"Using Hive from Scratch", "uri":"mrs_01_0442.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"365" + "doc_type":"usermanual", + "p_code":"425", + "code":"426" }, { "desc":"Go to the Hive configurations page by referring to Modifying Cluster Service Configuration Parameters.", "product_code":"mrs", "title":"Configuring Hive Parameters", "uri":"mrs_01_0582.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"366" + "doc_type":"usermanual", + "p_code":"425", + "code":"427" }, { "desc":"Hive SQL supports all features of Hive-3.1.0. For details, see https://cwiki.apache.org/confluence/display/hive/languagemanual.Table 1 describes the extended Hive stateme", "product_code":"mrs", "title":"Hive SQL", "uri":"mrs_01_2330.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"367" + "doc_type":"usermanual", + "p_code":"425", + "code":"428" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Permission Management", "uri":"mrs_01_0947.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"368" + "doc_type":"usermanual", + "p_code":"425", + "code":"429" }, { "desc":"Hive is a data warehouse framework built on Hadoop. It provides basic data analysis services using the Hive query language (HQL), a language like the structured query lan", "product_code":"mrs", "title":"Hive Permission", "uri":"mrs_01_0948.html", - "doc_type":"cmpntguide-lts", - "p_code":"368", - "code":"369" + "doc_type":"usermanual", + "p_code":"429", + "code":"430" }, { "desc":"This section describes how to create and configure a Hive role on Manager as the system administrator. The Hive role can be granted the permissions of the Hive administra", "product_code":"mrs", "title":"Creating a Hive Role", "uri":"mrs_01_0949.html", - "doc_type":"cmpntguide-lts", - "p_code":"368", - "code":"370" + "doc_type":"usermanual", + "p_code":"429", + "code":"431" }, { "desc":"You can configure related permissions if you need to access tables or databases created by other users. Hive supports column-based permission control. If a user needs to ", "product_code":"mrs", "title":"Configuring Permissions for Hive Tables, Columns, or Databases", "uri":"mrs_01_0950.html", - "doc_type":"cmpntguide-lts", - "p_code":"368", - "code":"371" + "doc_type":"usermanual", + "p_code":"429", + "code":"432" }, { "desc":"Hive may need to be associated with other components. For example, Yarn permissions are required in the scenario of using HQL statements to trigger MapReduce jobs, and HB", "product_code":"mrs", "title":"Configuring Permissions to Use Other Components for Hive", "uri":"mrs_01_0951.html", - "doc_type":"cmpntguide-lts", - "p_code":"368", - "code":"372" + "doc_type":"usermanual", + "p_code":"429", + "code":"433" }, { "desc":"This section guides users to use a Hive client in an O&M or service scenario.The client has been installed. For example, the client is installed in the /opt/hadoopclient ", "product_code":"mrs", "title":"Using a Hive Client", "uri":"mrs_01_0952.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"373" + "doc_type":"usermanual", + "p_code":"425", + "code":"434" }, { "desc":"HDFS Colocation is the data location control function provided by HDFS. 
The HDFS Colocation API stores associated data or data on which associated operations are performe", "product_code":"mrs", "title":"Using HDFS Colocation to Store Hive Tables", "uri":"mrs_01_0953.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"374" + "doc_type":"usermanual", + "p_code":"425", + "code":"435" }, { "desc":"Hive supports encryption of one or more columns in a table. When creating a Hive table, you can specify the columns to be encrypted and encryption algorithm. When data is", "product_code":"mrs", "title":"Using the Hive Column Encryption Function", "uri":"mrs_01_0954.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"375" + "doc_type":"usermanual", + "p_code":"425", + "code":"436" }, { "desc":"In most cases, a carriage return character is used as the row delimiter in Hive tables stored in text files, that is, the carriage return character is used as the termina", "product_code":"mrs", "title":"Customizing Row Separators", "uri":"mrs_01_0955.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"376" + "doc_type":"usermanual", + "p_code":"425", + "code":"437" }, { "desc":"Due to the limitations of underlying storage systems, Hive does not support the ability to delete a single piece of table data. In Hive on HBase, MRS Hive supports the ab", "product_code":"mrs", "title":"Deleting Single-Row Records from Hive on HBase", "uri":"mrs_01_0956.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"377" + "doc_type":"usermanual", + "p_code":"425", + "code":"438" }, { "desc":"WebHCat provides external REST APIs for Hive. By default, the open-source community version uses the HTTP protocol.MRS Hive supports the HTTPS protocol that is more secur", "product_code":"mrs", "title":"Configuring HTTPS/HTTP-based REST APIs", "uri":"mrs_01_0957.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"378" + "doc_type":"usermanual", + "p_code":"425", + "code":"439" }, { "desc":"The Transform function is not allowed by Hive of the open source version.MRS Hive supports the configuration of the Transform function. The function is disabled by defaul", "product_code":"mrs", "title":"Enabling or Disabling the Transform Function", "uri":"mrs_01_0958.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"379" + "doc_type":"usermanual", + "p_code":"425", + "code":"440" }, { "desc":"This section describes how to create a view on Hive when MRS is configured in security mode, authorize access permissions to different users, and specify that different u", "product_code":"mrs", "title":"Access Control of a Dynamic Table View on Hive", "uri":"mrs_01_0959.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"380" + "doc_type":"usermanual", + "p_code":"425", + "code":"441" }, { "desc":"You must have ADMIN permission when creating temporary functions on Hive of the open source community version.MRS Hive supports the configuration of the function for crea", "product_code":"mrs", "title":"Specifying Whether the ADMIN Permissions Is Required for Creating Temporary Functions", "uri":"mrs_01_0960.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"381" + "doc_type":"usermanual", + "p_code":"425", + "code":"442" }, { "desc":"Hive allows users to create external tables to associate with other relational databases. 
External tables read data from associated relational databases and support Join ", "product_code":"mrs", "title":"Using Hive to Read Data in a Relational Database", "uri":"mrs_01_0961.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"382" + "doc_type":"usermanual", + "p_code":"425", + "code":"443" }, { "desc":"Hive supports the following types of traditional relational database syntax:GroupingEXCEPT and INTERSECTSyntax description:Grouping takes effect only when the Group by st", "product_code":"mrs", "title":"Supporting Traditional Relational Database Syntax in Hive", "uri":"mrs_01_0962.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"383" + "doc_type":"usermanual", + "p_code":"425", + "code":"444" }, { "desc":"When built-in functions of Hive cannot meet requirements, you can compile user-defined functions (UDFs) and use them for query.According to implementation methods, UDFs a", "product_code":"mrs", "title":"Creating User-Defined Hive Functions", "uri":"mrs_01_0963.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"384" + "doc_type":"usermanual", + "p_code":"425", + "code":"445" }, { "desc":"When the beeline client is disconnected due to network exceptions during the execution of a batch processing task, tasks submitted before beeline is disconnected can be p", "product_code":"mrs", "title":"Enhancing beeline Reliability", "uri":"mrs_01_0965.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"385" + "doc_type":"usermanual", + "p_code":"425", + "code":"446" }, { "desc":"This function is applicable to Hive and Spark2x in.With this function enabled, if the select permission is granted to a user during Hive table creation, the user can run ", "product_code":"mrs", "title":"Viewing Table Structures Using the show create Statement as Users with the select Permission", "uri":"mrs_01_0966.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"386" + "doc_type":"usermanual", + "p_code":"425", + "code":"447" }, { "desc":"This function applies to Hive.After this function is enabled, run the following command to write a directory into Hive: insert overwrite directory \"/path1\".... After the ", "product_code":"mrs", "title":"Writing a Directory into Hive with the Old Data Removed to the Recycle Bin", "uri":"mrs_01_0967.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"387" + "doc_type":"usermanual", + "p_code":"425", + "code":"448" }, { "desc":"This function applies to Hive.With this function enabled, run the insert overwrite directory/path1/path2/path3... command to write a subdirectory. The permission of the /", "product_code":"mrs", "title":"Inserting Data to a Directory That Does Not Exist", "uri":"mrs_01_0968.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"388" + "doc_type":"usermanual", + "p_code":"425", + "code":"449" }, { "desc":"This function is applicable to Hive and Spark2x.After this function is enabled, only the Hive administrator can create databases and tables in the default database. Other", "product_code":"mrs", "title":"Creating Databases and Creating Tables in the Default Database Only as the Hive Administrator", "uri":"mrs_01_0969.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"389" + "doc_type":"usermanual", + "p_code":"425", + "code":"450" }, { "desc":"This function is applicable to Hive and Spark2x.After this function is enabled, the location keyword cannot be specified when a Hive internal table is created. 
Specifical", "product_code":"mrs", "title":"Disabling of Specifying the location Keyword When Creating an Internal Hive Table", "uri":"mrs_01_0970.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"390" + "doc_type":"usermanual", + "p_code":"425", + "code":"451" }, { "desc":"This function is applicable to Hive and Spark2x.After this function is enabled, the user or user group that has the read and execute permissions on a directory can create", "product_code":"mrs", "title":"Enabling the Function of Creating a Foreign Table in a Directory That Can Only Be Read", "uri":"mrs_01_0971.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"391" + "doc_type":"usermanual", + "p_code":"425", + "code":"452" }, { "desc":"This function applies to Hive.The number of OS user groups is limited, and the number of roles that can be created in Hive cannot exceed 32. After this function is enable", "product_code":"mrs", "title":"Authorizing Over 32 Roles in Hive", "uri":"mrs_01_0972.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"392" + "doc_type":"usermanual", + "p_code":"425", + "code":"453" }, { "desc":"This function applies to Hive.This function is used to limit the maximum number of maps for Hive tasks on the server to avoid performance deterioration caused by overload", "product_code":"mrs", "title":"Restricting the Maximum Number of Maps for Hive Tasks", "uri":"mrs_01_0973.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"393" + "doc_type":"usermanual", + "p_code":"425", + "code":"454" }, { "desc":"This function applies to Hive.This function can be enabled to specify specific users to access HiveServer services on specific nodes, achieving HiveServer resource isolat", "product_code":"mrs", "title":"HiveServer Lease Isolation", "uri":"mrs_01_0974.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"394" + "doc_type":"usermanual", + "p_code":"425", + "code":"455" + }, + { + "desc":"This function restricts components in a cluster to connect to specified Hive Metastore instances. By default, components can connect to all Metastore instances. This func", + "product_code":"", + "title":"Hive Supports Isolation of Metastore instances Based on Components", + "uri":"mrs_01_24467.html", + "doc_type":"", + "p_code":"425", + "code":"456" }, { "desc":"Hive supports transactions at the table and partition levels. When the transaction mode is enabled, transaction tables can be incrementally updated, deleted, and read, im", "product_code":"mrs", "title":"Hive Supporting Transactions", "uri":"mrs_01_0975.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"395" + "doc_type":"usermanual", + "p_code":"425", + "code":"457" }, { "desc":"Hive can use the Tez engine to process data computing tasks. Before executing a task, you can manually switch the execution engine to Tez.The TimelineServer role of the Y", "product_code":"mrs", "title":"Switching the Hive Execution Engine to Tez", "uri":"mrs_01_1750.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"396" + "doc_type":"usermanual", + "p_code":"425", + "code":"458" }, { "desc":"RDS indicates the relational database in this section. 
This section describes how to connect Hive with the open-source MySQL and Postgres databases.After an external meta", "product_code":"mrs", "title":"Connecting Hive with External RDS", + "uri":"mrs_01_17511.html", + "doc_type":"usermanual", + "p_code":"425", + "code":"459" + }, + { + "desc":"This section describes how to connect Hive with built-in relational databases open-source MySQL and Postgres.After an external metadata database is deployed in a cluster ", + "product_code":"mrs", + "title":"Interconnecting Hive with External Self-Built Relational Databases", "uri":"mrs_01_1751.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"397" + "doc_type":"cmpntguide", + "p_code":"425", + "code":"460" }, { "desc":"The MetaStore service of Hive can cache the metadata of some tables in Redis.The Redis service has been installed in a cluster.If the cluster is installed in non-security", "product_code":"mrs", "title":"Redis-based CacheStore of HiveMetaStore", "uri":"mrs_01_2302.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"398" - }, - { - "desc":"A Hive materialized view is a special table obtained based on the query results of Hive internal tables. A materialized view can be considered as an intermediate table th", - "product_code":"mrs", - "title":"Hive Materialized View", - "uri":"mrs_01_2311.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"399" + "doc_type":"usermanual", + "p_code":"425", + "code":"461" }, { "desc":"A Hudi source table corresponds to a copy of HDFS data. The Hudi table data can be mapped to a Hive external table through the Spark component, Flink component, or Hudi c", "product_code":"mrs", "title":"Hive Supporting Reading Hudi Tables", "uri":"mrs_01_24040.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"400" + "doc_type":"usermanual", + "p_code":"425", + "code":"462" }, { "desc":"The metadata that have not been used for a long time is moved to a backup table to reduce the pressure on metadata databases. This process is called partitioned data free", "product_code":"mrs", "title":"Hive Supporting Cold and Hot Storage of Partitioned Metadata", "uri":"mrs_01_24118.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"401" + "doc_type":"usermanual", + "p_code":"425", + "code":"463" }, { "desc":"Zstandard (ZSTD) is an open-source lossless data compression algorithm. Its compression performance and compression ratio are better than those of other compression algor", "product_code":"mrs", "title":"Hive Supporting ZSTD Compression Formats", "uri":"mrs_01_24121.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"402" + "doc_type":"usermanual", + "p_code":"425", + "code":"464" + }, + { + "desc":"Data files stored in Hive are abnormal due to misoperations or disk damage, thereby causing task execution failures or incorrect data results.Common non-text data files c", + "product_code":"", + "title":"Locating Abnormal Hive Files", + "uri":"mrs_01_24480.html", + "doc_type":"", + "p_code":"425", + "code":"465" + }, + { + "desc":"ZSTD_JNI is a native implementation of the ZSTD compression algorithm. Compared with ZSTD, ZSTD_JNI has higher compression read/write efficiency and compression ratio, an", + "product_code":"", + "title":"Using the ZSTD_JNI Compression Algorithm to Compress Hive ORC Tables", + "uri":"mrs_01_24507.html", + "doc_type":"", + "p_code":"425", + "code":"466" + }, + { + "desc":"The client connection of Hive MetaStore supports load balancing. 
That is, heavy load of a single MetaStore node during heavy service traffic can be avoided by connecting ", + "product_code":"", + "title":"Load Balancing for Hive MetaStore Client Connection", + "uri":"mrs_01_24738.html", + "doc_type":"", + "p_code":"425", + "code":"467" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"Data Import and Export in Hive", + "uri":"mrs_01_24744.html", + "doc_type":"", + "p_code":"425", + "code":"468" + }, + { + "desc":"In big data application scenarios, data tables in Hive usually need to be migrated to another cluster. You can run the Hive import and export commands to migrate data in ", + "product_code":"", + "title":"Importing and Exporting Table/Partition Data in Hive", + "uri":"mrs_01_24741.html", + "doc_type":"", + "p_code":"468", + "code":"469" + }, + { + "desc":"In big data application scenarios, Hive databases and all tables in these databases are usually migrated to another cluster. You can run the Hive database export and impo", + "product_code":"", + "title":"Importing and Exporting Hive Databases", + "uri":"mrs_01_24742.html", + "doc_type":"", + "p_code":"468", + "code":"470" }, { "desc":"Log path: The default save path of Hive logs is /var/log/Bigdata/hive/role name, the default save path of Hive1 logs is /var/log/Bigdata/hive1/role name, and the others f", "product_code":"mrs", "title":"Hive Log Overview", "uri":"mrs_01_0976.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"403" + "doc_type":"usermanual", + "p_code":"425", + "code":"471" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Hive Performance Tuning", "uri":"mrs_01_0977.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"404" + "doc_type":"usermanual", + "p_code":"425", + "code":"472" }, { "desc":"During the Select query, Hive generally scans the entire table, which is time-consuming. To improve query efficiency, create table partitions based on service requirement", "product_code":"mrs", "title":"Creating Table Partitions", "uri":"mrs_01_0978.html", - "doc_type":"cmpntguide-lts", - "p_code":"404", - "code":"405" + "doc_type":"usermanual", + "p_code":"472", + "code":"473" }, { "desc":"When the Join statement is used, the command execution speed and query speed may be slow in case of large data volume. 
To resolve this problem, you can optimize Join.Join", "product_code":"mrs", "title":"Optimizing Join", "uri":"mrs_01_0979.html", - "doc_type":"cmpntguide-lts", - "p_code":"404", - "code":"406" + "doc_type":"usermanual", + "p_code":"472", + "code":"474" }, { "desc":"Optimize the Group by statement to accelerate the command execution and query speed.During the Group by operation, Map performs grouping and distributes the groups to Red", "product_code":"mrs", "title":"Optimizing Group By", "uri":"mrs_01_0980.html", - "doc_type":"cmpntguide-lts", - "p_code":"404", - "code":"407" + "doc_type":"usermanual", + "p_code":"472", + "code":"475" }, { "desc":"ORC is an efficient column storage format and has higher compression ratio and reading efficiency than other file formats.You are advised to use ORC as the default Hive t", "product_code":"mrs", "title":"Optimizing Data Storage", "uri":"mrs_01_0981.html", - "doc_type":"cmpntguide-lts", - "p_code":"404", - "code":"408" + "doc_type":"usermanual", + "p_code":"472", + "code":"476" }, { "desc":"When SQL statements are executed on Hive, if the (a&b) or (a&c) logic exists in the statements, you are advised to change the logic to a & (b or c).If condition a is p_pa", "product_code":"mrs", "title":"Optimizing SQL Statements", "uri":"mrs_01_0982.html", - "doc_type":"cmpntguide-lts", - "p_code":"404", - "code":"409" + "doc_type":"usermanual", + "p_code":"472", + "code":"477" }, { "desc":"When joining multiple tables in Hive, Hive supports Cost-Based Optimization (CBO). The system automatically selects the optimal plan based on the table statistics, such a", "product_code":"mrs", "title":"Optimizing the Query Function Using Hive CBO", "uri":"mrs_01_0983.html", - "doc_type":"cmpntguide-lts", - "p_code":"404", - "code":"410" + "doc_type":"usermanual", + "p_code":"472", + "code":"478" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Hive", "uri":"mrs_01_1752.html", - "doc_type":"cmpntguide-lts", - "p_code":"364", - "code":"411" + "doc_type":"usermanual", + "p_code":"425", + "code":"479" }, { "desc":"How can I delete permanent user-defined functions (UDFs) on multiple HiveServers at the same time?Multiple HiveServers share one MetaStore database. 
Therefore, there is a", "product_code":"mrs", "title":"How Do I Delete UDFs on Multiple HiveServers at the Same Time?", "uri":"mrs_01_1753.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"412" + "doc_type":"usermanual", + "p_code":"479", + "code":"480" }, { "desc":"Why cannot the DROP operation be performed for a backed up Hive table?Snapshots have been created for an HDFS directory mapping to the backed up Hive table, so the HDFS d", "product_code":"mrs", "title":"Why Cannot the DROP operation Be Performed on a Backed-up Hive Table?", "uri":"mrs_01_1754.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"413" + "doc_type":"usermanual", + "p_code":"479", + "code":"481" }, { "desc":"How to perform operations on local files (such as reading the content of a file) with Hive user-defined functions?By default, you can perform operations on local files wi", "product_code":"mrs", "title":"How to Perform Operations on Local Files with Hive User-Defined Functions", "uri":"mrs_01_1755.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"414" + "doc_type":"usermanual", + "p_code":"479", + "code":"482" }, { "desc":"How do I stop a MapReduce task manually if the task is suspended for a long time?", "product_code":"mrs", "title":"How Do I Forcibly Stop MapReduce Jobs Executed by Hive?", "uri":"mrs_01_1756.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"415" + "doc_type":"usermanual", + "p_code":"479", + "code":"483" }, { "desc":"How do I monitor the Hive table size?The HDFS refined monitoring function allows you to monitor the size of a specified table directory.The Hive and HDFS components are r", "product_code":"mrs", "title":"How Do I Monitor the Hive Table Size?", "uri":"mrs_01_1758.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"416" + "doc_type":"usermanual", + "p_code":"479", + "code":"484" }, { "desc":"How do I prevent key directories from data loss caused by misoperations of the insert overwrite statement?During monitoring of key Hive databases, tables, or directories,", "product_code":"mrs", "title":"How Do I Prevent Key Directories from Data Loss Caused by Misoperations of the insert overwrite Statement?", "uri":"mrs_01_1759.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"417" + "doc_type":"usermanual", + "p_code":"479", + "code":"485" }, { "desc":"This function applies to Hive.Perform the following operations to configure parameters. When Hive on Spark tasks are executed in the environment where the HBase is not in", "product_code":"mrs", "title":"Why Is Hive on Spark Task Freezing When HBase Is Not Installed?", "uri":"mrs_01_1760.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"418" + "doc_type":"usermanual", + "p_code":"479", + "code":"486" }, { "desc":"When a table with more than 32,000 partitions is created in Hive, an exception occurs during the query with the WHERE partition. In addition, the exception information pr", "product_code":"mrs", "title":"Error Reported When the WHERE Condition Is Used to Query Tables with Excessive Partitions in FusionInsight Hive", "uri":"mrs_01_1761.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"419" + "doc_type":"usermanual", + "p_code":"479", + "code":"487" }, { "desc":"When users check the JDK version used by the client, if the JDK version is IBM JDK, the Beeline client needs to be reconstructed. 
Otherwise, the client will fail to conne", "product_code":"mrs", "title":"Why Cannot I Connect to HiveServer When I Use IBM JDK to Access the Beeline Client?", "uri":"mrs_01_1762.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"420" + "doc_type":"usermanual", + "p_code":"479", + "code":"488" }, { "desc":"Does a Hive Table Can Be Stored Either in OBS or HDFS?The location of a common Hive table stored on OBS can be set to an HDFS path.In the same Hive service, you can creat", "product_code":"mrs", "title":"Description of Hive Table Location (Either Be an OBS or HDFS Path)", "uri":"mrs_01_1763.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"421" + "doc_type":"usermanual", + "p_code":"479", + "code":"489" }, { "desc":"Hive uses the Tez engine to execute union-related statements to write data. After Hive is switched to the MapReduce engine for query, no data is found.When Hive uses the ", "product_code":"mrs", "title":"Why Cannot Data Be Queried After the MapReduce Engine Is Switched After the Tez Engine Is Used to Execute Union-related Statements?", "uri":"mrs_01_2309.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"422" + "doc_type":"usermanual", + "p_code":"479", + "code":"490" }, { "desc":"Why Does Data Inconsistency Occur When Data Is Concurrently Written to a Hive Table Through an API?Hive does not support concurrent data insertion for the same table or p", "product_code":"mrs", "title":"Why Does Hive Not Support Concurrent Data Writing to the Same Table or Partition?", "uri":"mrs_01_2310.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"423" + "doc_type":"usermanual", + "p_code":"479", + "code":"491" }, { "desc":"When the vectorized parameterhive.vectorized.execution.enabled is set to true, why do some null pointers or type conversion exceptions occur occasionally when Hive on Tez", "product_code":"mrs", "title":"Why Does Hive Not Support Vectorized Query?", "uri":"mrs_01_2325.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"424" + "doc_type":"usermanual", + "p_code":"479", + "code":"492" }, { "desc":"The error message \"java.lang.OutOfMemoryError: Java heap space.\" is displayed during Hive SQL execution.Solution:For MapReduce tasks, increase the values of the following", "product_code":"mrs", "title":"Hive Configuration Problems", "uri":"mrs_01_24117.html", - "doc_type":"cmpntguide-lts", - "p_code":"411", - "code":"425" + "doc_type":"usermanual", + "p_code":"479", + "code":"493" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Hudi", "uri":"mrs_01_24025.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"426" + "code":"494" }, { "desc":"This section describes capabilities of Hudi using spark-shell. Using the Spark data source, this section describes how to insert and update a Hudi dataset of the default ", "product_code":"mrs", "title":"Quick Start", "uri":"mrs_01_24033.html", - "doc_type":"cmpntguide-lts", - "p_code":"426", - "code":"427" + "doc_type":"usermanual", + "p_code":"494", + "code":"495" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Basic Operations", "uri":"mrs_01_24062.html", - "doc_type":"cmpntguide-lts", - "p_code":"426", - "code":"428" + "doc_type":"usermanual", + "p_code":"494", + "code":"496" }, { "desc":"When writing data, Hudi generates a Hudi table based on attributes such as the storage path, table name, and partition structure.Hudi table data files can be stored in th", "product_code":"mrs", "title":"Hudi Table Schema", "uri":"mrs_01_24103.html", - "doc_type":"cmpntguide-lts", - "p_code":"428", - "code":"429" + "doc_type":"usermanual", + "p_code":"496", + "code":"497" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Write", "uri":"mrs_01_24034.html", - "doc_type":"cmpntguide-lts", - "p_code":"428", - "code":"430" + "doc_type":"usermanual", + "p_code":"496", + "code":"498" }, { "desc":"Hudi provides multiple write modes. For details, see the configuration item hoodie.datasource.write.operation. This section describes upsert, insert, and bulk_insert.inse", "product_code":"mrs", "title":"Batch Write", "uri":"mrs_01_24035.html", - "doc_type":"cmpntguide-lts", - "p_code":"430", - "code":"431" + "doc_type":"usermanual", + "p_code":"498", + "code":"499" }, { "desc":"The HoodieDeltaStreamer tool provided by Hudi supports stream write. You can also use SparkStreaming to write data in microbatch mode. HoodieDeltaStreamer provides the fo", "product_code":"mrs", "title":"Stream Write", "uri":"mrs_01_24036.html", - "doc_type":"cmpntguide-lts", - "p_code":"430", - "code":"432" + "doc_type":"usermanual", + "p_code":"498", + "code":"500" }, { "desc":"The bootstrapping function provided by Hudi converts historical tables into Hudi tables without any change by generating Hoodie management files based on historical Parqu", "product_code":"mrs", "title":"Bootstrapping", "uri":"mrs_01_24069.html", - "doc_type":"cmpntguide-lts", - "p_code":"430", - "code":"433" + "doc_type":"usermanual", + "p_code":"498", + "code":"501" }, { "desc":"You can run run_hive_sync_tool.sh to synchronize data in the Hudi table to Hive.For example, run the following command to synchronize the Hudi table in the hdfs://haclust", "product_code":"mrs", "title":"Synchronizing Hudi Table Data to Hive", "uri":"mrs_01_24064.html", - "doc_type":"cmpntguide-lts", - "p_code":"430", - "code":"434" + "doc_type":"usermanual", + "p_code":"498", + "code":"502" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Read", "uri":"mrs_01_24037.html", - "doc_type":"cmpntguide-lts", - "p_code":"428", - "code":"435" + "doc_type":"usermanual", + "p_code":"496", + "code":"503" }, { "desc":"Reading the real-time view (using Hive and SparkSQL as an example): Directly read the Hudi table stored in Hive.select count(*) from test;Reading the real-time view (usin", "product_code":"mrs", "title":"Reading COW Table Views", "uri":"mrs_01_24098.html", - "doc_type":"cmpntguide-lts", - "p_code":"435", - "code":"436" + "doc_type":"usermanual", + "p_code":"503", + "code":"504" }, { "desc":"After the MOR table is synchronized to Hive, the following two tables are synchronized to Hive: Table name_rt and Table name_ro. The table suffixed with rt indicates the ", "product_code":"mrs", "title":"Reading MOR Table Views", "uri":"mrs_01_24099.html", - "doc_type":"cmpntguide-lts", - "p_code":"435", - "code":"437" + "doc_type":"usermanual", + "p_code":"503", + "code":"505" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Data Management and Maintenance", "uri":"mrs_01_24038.html", - "doc_type":"cmpntguide-lts", - "p_code":"428", - "code":"438" + "doc_type":"usermanual", + "p_code":"496", + "code":"506" }, { "desc":"IntroductionA metadata table is a special Hudi metadata table, which is hidden from users. The table stores metadata of a common Hudi table.The metadata table is included", "product_code":"mrs", "title":"Metadata Table", "uri":"mrs_01_24164.html", - "doc_type":"cmpntguide-lts", - "p_code":"438", - "code":"439" + "doc_type":"usermanual", + "p_code":"506", + "code":"507" }, { "desc":"Clustering reorganizes data layout to improve query performance without affecting the ingestion speed.Hudi provides different operations, such as insert, upsert, and bulk", "product_code":"mrs", "title":"Clustering", "uri":"mrs_01_24088.html", - "doc_type":"cmpntguide-lts", - "p_code":"438", - "code":"440" + "doc_type":"usermanual", + "p_code":"506", + "code":"508" }, { "desc":"Cleaning is used to delete data of versions that are no longer required.Hudi uses the cleaner working in the background to continuously delete unnecessary data of old ver", "product_code":"mrs", "title":"Cleaning", "uri":"mrs_01_24089.html", - "doc_type":"cmpntguide-lts", - "p_code":"438", - "code":"441" + "doc_type":"usermanual", + "p_code":"506", + "code":"509" }, { "desc":"A compaction merges base and log files of MOR tables.For MOR tables, data is stored in columnar Parquet files and row-based Avro files, updates are recorded in incrementa", "product_code":"mrs", "title":"Compaction", "uri":"mrs_01_24090.html", - "doc_type":"cmpntguide-lts", - "p_code":"438", - "code":"442" + "doc_type":"usermanual", + "p_code":"506", + "code":"510" }, { "desc":"Savepoints are used to save and restore data of the customized version.Savepoints provided by Hudi can save different commits so that the cleaner program does not delete ", "product_code":"mrs", "title":"Savepoint", "uri":"mrs_01_24091.html", - "doc_type":"cmpntguide-lts", - "p_code":"438", - "code":"443" + "doc_type":"usermanual", + "p_code":"506", + "code":"511" }, { "desc":"Uses an external service 
(ZooKeeper or Hive MetaStore) as the distributed mutex lock service.Files can be concurrently written, but commits cannot be concurrent. The comm", "product_code":"mrs", "title":"Single-Table Concurrent Write", "uri":"mrs_01_24165.html", - "doc_type":"cmpntguide-lts", - "p_code":"438", - "code":"444" + "doc_type":"usermanual", + "p_code":"506", + "code":"512" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using the Hudi Client", "uri":"mrs_01_24100.html", - "doc_type":"cmpntguide-lts", - "p_code":"428", - "code":"445" + "doc_type":"usermanual", + "p_code":"496", + "code":"513" }, { "desc":"You have created a user and added the user to user groups hadoop and hive on Manager.The Hudi client has been downloaded and installed.Log in to the client node as user r", "product_code":"mrs", "title":"Operating a Hudi Table Using hudi-cli.sh", "uri":"mrs_01_24063.html", - "doc_type":"cmpntguide-lts", - "p_code":"445", - "code":"446" + "doc_type":"usermanual", + "p_code":"513", + "code":"514" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", - "title":"Configuration Reference", + "title":"Hudi Configuration Reference", "uri":"mrs_01_24032.html", - "doc_type":"cmpntguide-lts", - "p_code":"428", - "code":"447" + "doc_type":"usermanual", + "p_code":"496", + "code":"515" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Write Configuration", "uri":"mrs_01_24093.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"448" + "doc_type":"usermanual", + "p_code":"515", + "code":"516" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Configuration of Hive Table Synchronization", "uri":"mrs_01_24094.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"449" + "doc_type":"usermanual", + "p_code":"515", + "code":"517" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Index Configuration", "uri":"mrs_01_24095.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"450" + "doc_type":"usermanual", + "p_code":"515", + "code":"518" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Storage Configuration", "uri":"mrs_01_24096.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"451" + "doc_type":"usermanual", + "p_code":"515", + "code":"519" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Compaction and Cleaning Configurations", "uri":"mrs_01_24097.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"452" + "doc_type":"usermanual", + "p_code":"515", + "code":"520" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Metadata Table Configuration", "uri":"mrs_01_24166.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"453" + "doc_type":"usermanual", + "p_code":"515", + "code":"521" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Single-Table Concurrent Write Configuration", "uri":"mrs_01_24167.html", - "doc_type":"cmpntguide-lts", - "p_code":"447", - "code":"454" + "doc_type":"usermanual", + "p_code":"515", + "code":"522" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.Clustering has two strategies: hoodie.clustering.plan.strategy.class and hoodie.clustering.execution.strategy.class. Typic", + "product_code":"", + "title":"Clustering Configuration", + "uri":"mrs_01_24804.html", + "doc_type":"", + "p_code":"515", + "code":"523" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Hudi Performance Tuning", "uri":"mrs_01_24039.html", - "doc_type":"cmpntguide-lts", - "p_code":"426", - "code":"455" + "doc_type":"usermanual", + "p_code":"494", + "code":"524" }, { "desc":"In the current version, Spark is recommended for Hudi write operations. Therefore, the tuning methods of Hudi are similar to those of Spark. 
For details, see Spark2x Perf", "product_code":"mrs", "title":"Performance Tuning Methods", "uri":"mrs_01_24101.html", - "doc_type":"cmpntguide-lts", - "p_code":"455", - "code":"456" + "doc_type":"usermanual", + "p_code":"524", + "code":"525" }, { "desc":"For MOR tables:The essence of MOR tables is to write incremental files, so the tuning is based on the data size (dataSize) of Hudi.If dataSize is only several GBs, you ar", "product_code":"mrs", "title":"Recommended Resource Configuration", "uri":"mrs_01_24102.html", - "doc_type":"cmpntguide-lts", - "p_code":"455", - "code":"457" + "doc_type":"usermanual", + "p_code":"524", + "code":"526" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", + "product_code":"", "title":"Hudi SQL Syntax Reference", "uri":"mrs_01_24261.html", - "doc_type":"cmpntguide-lts", - "p_code":"426", - "code":"458" + "doc_type":"", + "p_code":"494", + "code":"527" }, { "desc":"Hudi 0.9.0 adds Spark SQL DDL and DML statements for using Hudi, making it easier for all users (including non-engineers or analysts) to access and operate Hudi.You can u", - "product_code":"mrs", + "product_code":"", "title":"Constraints", "uri":"mrs_01_24262.html", - "doc_type":"cmpntguide-lts", - "p_code":"458", - "code":"459" + "doc_type":"", + "p_code":"527", + "code":"528" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"DDL", + "product_code":"", + "title":"Hudi DDL", "uri":"mrs_01_24263.html", - "doc_type":"cmpntguide-lts", - "p_code":"458", - "code":"460" + "doc_type":"", + "p_code":"527", + "code":"529" }, { "desc":"This command is used to create a Hudi table by specifying the list of fields along with the table options.CREATE TABLE [ IF NOT EXISTS] [database_name.]table_name[ (colu", - "product_code":"mrs", - "title":"CREATE TABLE", + "product_code":"", + "title":"CREATE Hudi TABLE", "uri":"mrs_01_24264.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"461" + "doc_type":"", + "p_code":"529", + "code":"530" }, { "desc":"This command is used to create a Hudi table by specifying the list of fields along with the table options.CREATE TABLE [ IF NOT EXISTS] [database_name.]table_nameUSING h", - "product_code":"mrs", - "title":"CREATE TABLE AS SELECT", + "product_code":"", + "title":"CREATE Hudi TABLE AS SELECT", "uri":"mrs_01_24265.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"462" + "doc_type":"", + "p_code":"529", + "code":"531" }, { "desc":"This command is used to delete an existing table.DROP TABLE [IF EXISTS] [db_name.]table_name;In this command, IF EXISTS and db_name are optional.DROP TABLE IF EXISTS hudi", - "product_code":"mrs", - "title":"DROP TABLE", + "product_code":"", + "title":"DROP Hudi TABLE", "uri":"mrs_01_24266.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"463" + "doc_type":"", + "p_code":"529", + "code":"532" }, { "desc":"This command is used to display all tables in current database or all tables in a specific database.SHOW TABLES [IN db_name];IN db_Name is optional. 
It is required only w", - "product_code":"mrs", + "product_code":"", "title":"SHOW TABLE", "uri":"mrs_01_24267.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"464" + "doc_type":"", + "p_code":"529", + "code":"533" }, { "desc":"This command is used to rename an existing table.ALTERTABLEoldTableName RENAMETO newTableNameThe table name is changed. You can run the SHOW TABLES command to display the", - "product_code":"mrs", + "product_code":"", "title":"ALTER RENAME TABLE", "uri":"mrs_01_24268.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"465" + "doc_type":"", + "p_code":"529", + "code":"534" }, { "desc":"This command is used to add columns to an existing table.ALTER TABLEtableIdentifierADD COLUMNS(colAndType (,colAndType)*)The columns are added to the table. You can run t", - "product_code":"mrs", + "product_code":"", "title":"ALTER ADD COLUMNS", "uri":"mrs_01_24269.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"466" + "doc_type":"", + "p_code":"529", + "code":"535" }, { "desc":"This command is used to clear all data in a specific table.TRUNCATE TABLEtableIdentifierData in the table is cleared. You can run the QUERY statement to check whether dat", - "product_code":"mrs", - "title":"TRUNCATE TABLE", + "product_code":"", + "title":"TRUNCATE Hudi TABLE", "uri":"mrs_01_24271.html", - "doc_type":"cmpntguide-lts", - "p_code":"460", - "code":"467" + "doc_type":"", + "p_code":"529", + "code":"536" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", - "title":"DML", + "product_code":"", + "title":"Hudi DML", "uri":"mrs_01_24272.html", - "doc_type":"cmpntguide-lts", - "p_code":"458", - "code":"468" + "doc_type":"", + "p_code":"527", + "code":"537" }, { "desc":"This command is used to insert the output of the SELECT statement to a Hudi table.INSERT INTOtableIndentifier select query;Insert mode: Hudi supports three insert modes f", - "product_code":"mrs", + "product_code":"", "title":"INSERT INTO", "uri":"mrs_01_24273.html", - "doc_type":"cmpntguide-lts", - "p_code":"468", - "code":"469" + "doc_type":"", + "p_code":"537", + "code":"538" }, { "desc":"This command is used to query another table based on the join condition of a table or subquery. 
If UPDATE or DELETE is executed for the table matching the join condition,", - "product_code":"mrs", + "product_code":"", "title":"MERGE INTO", "uri":"mrs_01_24274.html", - "doc_type":"cmpntguide-lts", - "p_code":"468", - "code":"470" + "doc_type":"", + "p_code":"537", + "code":"539" }, { "desc":"This command is used to update the Hudi table based on the column expression and optional filtering conditions.UPDATE tableIdentifier SET column = EXPRESSION(,column = EX", - "product_code":"mrs", - "title":"UPDATE", + "product_code":"", + "title":"UPDATE Hudi Data", "uri":"mrs_01_24275.html", - "doc_type":"cmpntguide-lts", - "p_code":"468", - "code":"471" + "doc_type":"", + "p_code":"537", + "code":"540" }, { "desc":"This command is used to delete records from a Hudi table.DELETE from tableIdentifier [ WHERE boolExpression]Example 1:delete from h0 where column1 = 'country';Example 2:d", - "product_code":"mrs", - "title":"DELETE", + "product_code":"", + "title":"DELETE Hudi Data", "uri":"mrs_01_24276.html", - "doc_type":"cmpntguide-lts", - "p_code":"468", - "code":"472" + "doc_type":"", + "p_code":"537", + "code":"541" }, { "desc":"This command is used to convert row-based log files in MOR tables into column-based data files in parquet tables to accelerate record search.SCHEDULE COMPACTION on tableI", - "product_code":"mrs", - "title":"COMPACTION", + "product_code":"", + "title":"COMPACTION Hudi Data", "uri":"mrs_01_24277.html", - "doc_type":"cmpntguide-lts", - "p_code":"468", - "code":"473" + "doc_type":"", + "p_code":"537", + "code":"542" }, { "desc":"This command is used to dynamically add, update, display, or reset Hudi parameters without restarting the driver.Add or update a parameter value:SET parameter_name=parame", - "product_code":"mrs", - "title":"SET/RESET", + "product_code":"", + "title":"SET/RESET Hudi Data", "uri":"mrs_01_24278.html", - "doc_type":"cmpntguide-lts", - "p_code":"468", - "code":"474" + "doc_type":"", + "p_code":"537", + "code":"543" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.Archives instants on the Timeline based on configurations and deletes archived instants from the Timeline to reduce the op", + "product_code":"", + "title":"ARCHIVELOG", + "uri":"mrs_01_24783.html", + "doc_type":"", + "p_code":"537", + "code":"544" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.Cleans instants on the Timeline based on configurations and deletes historical version files to reduce the data storage an", + "product_code":"", + "title":"CLEAN", + "uri":"mrs_01_24801.html", + "doc_type":"", + "p_code":"537", + "code":"545" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"CALL COMMAND", + "uri":"mrs_01_24739.html", + "doc_type":"", + "p_code":"527", + "code":"546" + }, + { + "desc":"The CHANGE_TABLE command can be used to modify the type and index of a table. Key parameters such as the type and index of Hudi tables cannot be modified. 
Therefore, this", + "product_code":"", + "title":"CHANGE_TABLE", + "uri":"mrs_01_24740.html", + "doc_type":"", + "p_code":"546", + "code":"547" + }, + { + "desc":"Cleans invalid data files from the Hudi table directory.call clean_file(table => '[table_name]', mode=>'[op_type]', backup_path=>'[backup_path]', start_instant_time=>'[st", + "product_code":"", + "title":"CLEAN_FILE", + "uri":"mrs_01_24781.html", + "doc_type":"", + "p_code":"546", + "code":"548" + }, + { + "desc":"Displays the effective or archived Hudi timelines and details of a specified instant time.Viewing the list of effective timelines of a table:call show_active_instant_list", + "product_code":"", + "title":"SHOW_TIME_LINE", + "uri":"mrs_01_24782.html", + "doc_type":"", + "p_code":"546", + "code":"549" + }, + { + "desc":"Displays the configuration in the hoodie.properties file of a specified Hudi table.call show_hoodie_properties(table => '[table_name]');You can view query results on the ", + "product_code":"", + "title":"SHOW_HOODIE_PROPERTIES", + "uri":"mrs_01_24799.html", + "doc_type":"", + "p_code":"546", + "code":"550" + }, + { + "desc":"Manages savepoints of Hudi tables.Creating a savepoint:call create_savepoints('[table_name]', '[commit_Time]', '[user]', '[comments]');call create_savepoints('[table_name", + "product_code":"", + "title":"SAVE_POINT", + "uri":"mrs_01_24800.html", + "doc_type":"", + "p_code":"546", + "code":"551" + }, + { + "desc":"Rolls back a specified commit.call rollback_to_instant(table => '[table_name]', instant_time => '[instant]');Only the latest commit timestamps can be rolled back in seque", + "product_code":"", + "title":"ROLL_BACK", + "uri":"mrs_01_24803.html", + "doc_type":"", + "p_code":"546", + "code":"552" + }, + { + "desc":"Performs the clustering operation on Hudi tables. For details, see Clustering.Creating a savepoint:call run_clustering(table=>'[table]', path=>'[path]', predicate=>'[pred", + "product_code":"", + "title":"Hudi CLUSTERING", + "uri":"mrs_01_24802.html", + "doc_type":"", + "p_code":"546", + "code":"553" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"Hudi Schema Evolution", + "uri":"mrs_01_24492.html", + "doc_type":"", + "p_code":"494", + "code":"554" + }, + { + "desc":"Schema evolution allows users to easily change the current schema of a Hudi table to adapt to the data that is changing over time.This section applies only to MRS 3.1.3 o", + "product_code":"", + "title":"Evolution Introduction", + "uri":"mrs_01_24493.html", + "doc_type":"", + "p_code":"554", + "code":"555" + }, + { + "desc":"Schema evolution scenariosColumns (including nested columns) can be added, deleted, modified, and moved.Partition columns cannot be evolved.You cannot add, delete, or per", + "product_code":"", + "title":"Schema Evolution Scenarios", + "uri":"mrs_01_24494.html", + "doc_type":"", + "p_code":"554", + "code":"556" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"SparkSQL Schema Evolution and Syntax Description", + "uri":"mrs_01_24495.html", + "doc_type":"", + "p_code":"554", + "code":"557" + }, + { + "desc":"Schema evolution cannot be disabled once being enabled.To use spark-beeline, log in to FusionInsight Manager, choose Cluster > Services > Spark2x, and click the Configura", + "product_code":"", + "title":"Enabling Schema Evolution", + "uri":"mrs_01_24496.html", + "doc_type":"", + "p_code":"557", + "code":"558" + }, + { + "desc":"The ADD COLUMNS command is used to add a column to an existing table.ALTER TABLETable nameADD COLUMNS(col_spec[, col_spec ...])You can run the DESCRIBE command to view th", + "product_code":"", + "title":"Adding a Column", + "uri":"mrs_01_24498.html", + "doc_type":"", + "p_code":"557", + "code":"559" + }, + { + "desc":"The ALTER TABLE ... ALTER COLUMN command is used to change the attributes of a column, such as the column type, position, and comment.ALTER TABLETable name ALTER[COLUMN]c", + "product_code":"", + "title":"Altering a Column", + "uri":"mrs_01_24499.html", + "doc_type":"", + "p_code":"557", + "code":"560" + }, + { + "desc":"The ALTER TABLE ... DROP COLUMN command is used to delete a column.ALTER TABLEtableName DROP COLUMN|COLUMNScolsa.b.c indicates the full path of a nested column. For detai", + "product_code":"", + "title":"Deleting a Column", + "uri":"mrs_01_24500.html", + "doc_type":"", + "p_code":"557", + "code":"561" + }, + { + "desc":"The ALTER TABLE ... RENAME command is used to change the table name.ALTER TABLEtableNameRENAME TOnewTableNameYou can run the SHOW TABLES command to view the new table nam", + "product_code":"", + "title":"Changing a Table Name", + "uri":"mrs_01_24501.html", + "doc_type":"", + "p_code":"557", + "code":"562" + }, + { + "desc":"The ALTER TABLE ... SET|UNSET command is used to modify table properties.ALTER TABLETable nameSET|UNSET tblpropertiesYou can run the DESCRIBE command to view new table pr", + "product_code":"", + "title":"Modifying Table Properties", + "uri":"mrs_01_24502.html", + "doc_type":"", + "p_code":"557", + "code":"563" + }, + { + "desc":"The ALTER TABLE ... RENAME COLUMN command is used to change the column name.ALTER TABLEtableNameRENAME COLUMNold_columnNameTOnew_columnNamea.b.c indicates the full path o", + "product_code":"", + "title":"Changing the Column Name", + "uri":"mrs_01_24503.html", + "doc_type":"", + "p_code":"557", + "code":"564" + }, + { + "desc":"When creating a table, you need to set hoodie.cleaner.policy.failed.writes to LAZY. Otherwise, rollback will be triggered when concurrent submission operations are perfor", + "product_code":"", + "title":"Concurrency for Schema Evolution", + "uri":"mrs_01_24550.html", + "doc_type":"", + "p_code":"554", + "code":"565" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Hudi", "uri":"mrs_01_24065.html", - "doc_type":"cmpntguide-lts", - "p_code":"426", - "code":"475" + "doc_type":"usermanual", + "p_code":"494", + "code":"566" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Data Write", "uri":"mrs_01_24070.html", - "doc_type":"cmpntguide-lts", - "p_code":"475", - "code":"476" + "doc_type":"usermanual", + "p_code":"566", + "code":"567" }, { "desc":"The following error is reported when data is written:You are advised to evolve schemas in backward compatible mode while using Hudi. This error usually occurs when you de", "product_code":"mrs", "title":"Parquet/Avro schema Is Reported When Updated Data Is Written", "uri":"mrs_01_24071.html", - "doc_type":"cmpntguide-lts", - "p_code":"476", - "code":"477" + "doc_type":"usermanual", + "p_code":"567", + "code":"568" }, { "desc":"The following error is reported when data is written:This error will occur again because schema evolutions are in non-backwards compatible mode. Basically, there is some ", "product_code":"mrs", "title":"UnsupportedOperationException Is Reported When Updated Data Is Written", "uri":"mrs_01_24072.html", - "doc_type":"cmpntguide-lts", - "p_code":"476", - "code":"478" + "doc_type":"usermanual", + "p_code":"567", + "code":"569" }, { "desc":"The following error is reported when data is written:This error may occur if a schema contains some non-nullable field whose value is not present or is null.You are advis", "product_code":"mrs", "title":"SchemaCompatabilityException Is Reported When Updated Data Is Written", "uri":"mrs_01_24073.html", - "doc_type":"cmpntguide-lts", - "p_code":"476", - "code":"479" + "doc_type":"usermanual", + "p_code":"567", + "code":"570" }, { "desc":"Hudi consumes much space in a temporary folder during upsert.Hudi will spill part of input data to disk if the maximum memory for merge is reached when much input data is", "product_code":"mrs", "title":"What Should I Do If Hudi Consumes Much Space in a Temporary Folder During Upsert?", "uri":"mrs_01_24074.html", - "doc_type":"cmpntguide-lts", - "p_code":"476", - "code":"480" + "doc_type":"usermanual", + "p_code":"567", + "code":"571" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Data Collection", "uri":"mrs_01_24075.html", - "doc_type":"cmpntguide-lts", - "p_code":"475", - "code":"481" + "doc_type":"usermanual", + "p_code":"566", + "code":"572" }, { "desc":"The error \"org.apache.kafka.common.KafkaException: Failed to construct kafka consumer\" is reported in the main thread, and the following error is reported.This error may ", "product_code":"mrs", "title":"IllegalArgumentException Is Reported When Kafka Is Used to Collect Data", "uri":"mrs_01_24077.html", - "doc_type":"cmpntguide-lts", - "p_code":"481", - "code":"482" + "doc_type":"usermanual", + "p_code":"572", + "code":"573" }, { "desc":"The following error is reported when data is collected:This error usually occurs when a field marked as recordKey or partitionKey is not present in the input record. 
Cros", "product_code":"mrs", "title":"HoodieException Is Reported When Data Is Collected", "uri":"mrs_01_24078.html", - "doc_type":"cmpntguide-lts", - "p_code":"481", - "code":"483" + "doc_type":"usermanual", + "p_code":"572", + "code":"574" }, { "desc":"Is it possible to use a nullable field that contains null records as a primary key when creating a Hudi table?No. HoodieKeyException will be thrown.", "product_code":"mrs", "title":"HoodieKeyException Is Reported When Data Is Collected", "uri":"mrs_01_24079.html", - "doc_type":"cmpntguide-lts", - "p_code":"481", - "code":"484" + "doc_type":"usermanual", + "p_code":"572", + "code":"575" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Hive Synchronization", "uri":"mrs_01_24080.html", - "doc_type":"cmpntguide-lts", - "p_code":"475", - "code":"485" + "doc_type":"usermanual", + "p_code":"566", + "code":"576" }, { "desc":"The following error is reported during Hive data synchronization:This error usually occurs when you try to add a new column to an existing Hive table using the HiveSyncTo", "product_code":"mrs", "title":"SQLException Is Reported During Hive Data Synchronization", "uri":"mrs_01_24081.html", - "doc_type":"cmpntguide-lts", - "p_code":"485", - "code":"486" + "doc_type":"usermanual", + "p_code":"576", + "code":"577" }, { "desc":"The following error is reported during Hive data synchronization:This error occurs because HiveSyncTool currently supports only few compatible data type conversions. The ", "product_code":"mrs", "title":"HoodieHiveSyncException Is Reported During Hive Data Synchronization", "uri":"mrs_01_24082.html", - "doc_type":"cmpntguide-lts", - "p_code":"485", - "code":"487" + "doc_type":"usermanual", + "p_code":"576", + "code":"578" }, { "desc":"The following error is reported during Hive data synchronization:This error usually occurs when Hive synchronization is performed on the Hudi dataset but the configured h", "product_code":"mrs", "title":"SemanticException Is Reported During Hive Data Synchronization", "uri":"mrs_01_24083.html", - "doc_type":"cmpntguide-lts", - "p_code":"485", - "code":"488" + "doc_type":"usermanual", + "p_code":"576", + "code":"579" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Hue", "uri":"mrs_01_0130.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"489" + "code":"580" }, { "desc":"Hue aggregates interfaces which interact with most Apache Hadoop components and enables you to use Hadoop components with ease on a web UI. 
You can operate components suc", "product_code":"mrs", "title":"Using Hue from Scratch", "uri":"mrs_01_0131.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"490" + "doc_type":"usermanual", + "p_code":"580", + "code":"581" }, { "desc":"After Hue is installed in an MRS cluster, users can use Hadoop-related components on the Hue web UI.This section describes how to open the Hue web UI on the MRS cluster.T", "product_code":"mrs", "title":"Accessing the Hue Web UI", "uri":"mrs_01_0132.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"491" + "doc_type":"usermanual", + "p_code":"580", + "code":"582" }, { "desc":"Go to the All Configurations page of the Hue service by referring to Modifying Cluster Service Configuration Parameters.For details about Hue common parameters, see Table", "product_code":"mrs", "title":"Hue Common Parameters", "uri":"mrs_01_0133.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"492" + "doc_type":"usermanual", + "p_code":"580", + "code":"583" }, { "desc":"Users can use the Hue web UI to execute HiveQL statements in an MRS cluster.Hive supports the following functions:Executes and manages HiveQL statements.Views the HiveQL ", "product_code":"mrs", "title":"Using HiveQL Editor on the Hue Web UI", "uri":"mrs_01_0134.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"493" + "doc_type":"usermanual", + "p_code":"580", + "code":"584" }, { "desc":"Users can use the Hue web UI to manage Hive metadata in an MRS cluster.Access the Hue web UI. For details, see Accessing the Hue Web UI.Viewing metadata of Hive tablesCli", "product_code":"mrs", "title":"Using the Metadata Browser on the Hue Web UI", "uri":"mrs_01_0135.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"494" + "doc_type":"usermanual", + "p_code":"580", + "code":"585" }, { "desc":"Users can use the Hue web UI to manage files in HDFS.The Hue page is used to view and analyze data such as files and tables. Do not perform high-risk management operation", "product_code":"mrs", "title":"Using File Browser on the Hue Web UI", "uri":"mrs_01_0136.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"495" + "doc_type":"usermanual", + "p_code":"580", + "code":"586" }, { "desc":"Users can use the Hue web UI to query all jobs in an MRS cluster.View the jobs in the current cluster.The number on Job Browser indicates the total number of jobs in the ", "product_code":"mrs", "title":"Using Job Browser on the Hue Web UI", "uri":"mrs_01_0137.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"496" + "doc_type":"usermanual", + "p_code":"580", + "code":"587" }, { "desc":"You can use Hue to create or query HBase tables in a cluster and run tasks on the Hue web UI.", "product_code":"mrs", "title":"Using HBase on the Hue Web UI", "uri":"mrs_01_2371.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"497" + "doc_type":"usermanual", + "p_code":"580", + "code":"588" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Typical Scenarios", "uri":"mrs_01_0138.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"498" + "doc_type":"usermanual", + "p_code":"580", + "code":"589" }, { "desc":"Hue provides the file browser function for users to use HDFS in GUI mode.The Hue page is used to view and analyze data such as files and tables. Do not perform high-risk ", "product_code":"mrs", "title":"HDFS on Hue", "uri":"mrs_01_0139.html", - "doc_type":"cmpntguide-lts", - "p_code":"498", - "code":"499" + "doc_type":"usermanual", + "p_code":"589", + "code":"590" }, { "desc":"Hue provides the Hive GUI management function so that users can query Hive data in GUI mode.Access the Hue web UI. For details, see Accessing the Hue Web UI.In the naviga", "product_code":"mrs", "title":"Hive on Hue", "uri":"mrs_01_0141.html", - "doc_type":"cmpntguide-lts", - "p_code":"498", - "code":"500" + "doc_type":"usermanual", + "p_code":"589", + "code":"591" }, { "desc":"Hue provides the Oozie job manager function, in this case, you can use Oozie in GUI mode.The Hue page is used to view and analyze data such as files and tables. Do not pe", "product_code":"mrs", "title":"Oozie on Hue", "uri":"mrs_01_0144.html", - "doc_type":"cmpntguide-lts", - "p_code":"498", - "code":"501" + "doc_type":"usermanual", + "p_code":"589", + "code":"592" }, { "desc":"Log paths: The default paths of Hue logs are /var/log/Bigdata/hue (for storing run logs) and /var/log/Bigdata/audit/hue (for storing audit logs).Log archive rules: The au", "product_code":"mrs", "title":"Hue Log Overview", "uri":"mrs_01_0147.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"502" + "doc_type":"usermanual", + "p_code":"580", + "code":"593" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Hue", "uri":"mrs_01_1764.html", - "doc_type":"cmpntguide-lts", - "p_code":"489", - "code":"503" + "doc_type":"usermanual", + "p_code":"580", + "code":"594" }, { "desc":"What do I do if all HQL statements fail to be executed when I use Internet Explorer to access Hive Editor in Hue and the message \"There was an error with your query\" is d", "product_code":"mrs", "title":"How Do I Solve the Problem that HQL Fails to Be Executed in Hue Using Internet Explorer?", "uri":"mrs_01_1765.html", - "doc_type":"cmpntguide-lts", - "p_code":"503", - "code":"504" + "doc_type":"usermanual", + "p_code":"594", + "code":"595" }, { "desc":"When Hive is used, the use database statement is entered in the text box to switch the database, and other statements are also entered, why does the database fail to be s", "product_code":"mrs", "title":"Why Does the use database Statement Become Invalid When Hive Is Used?", "uri":"mrs_01_1766.html", - "doc_type":"cmpntguide-lts", - "p_code":"503", - "code":"505" + "doc_type":"usermanual", + "p_code":"594", + "code":"596" }, { "desc":"What can I do if an error message shown in the following figure is displayed, indicating that the HDFS file cannot be accessed when I use Hue web UI to access the HDFS fi", "product_code":"mrs", "title":"What Can I Do If HDFS Files Fail to Be Accessed Using Hue WebUI?", "uri":"mrs_01_0156.html", - "doc_type":"cmpntguide-lts", - "p_code":"503", - "code":"506" + "doc_type":"usermanual", + "p_code":"594", + "code":"597" }, { "desc":"If the Hive service is not installed in the cluster, the native Hue service page is blank.In the current version, Hue depends on the Hive component. If this occurs, check", "product_code":"mrs", "title":"Hue Page Cannot Be Displayed When the Hive Service Is Not Installed in a Cluster", "uri":"mrs_01_2368.html", - "doc_type":"cmpntguide-lts", - "p_code":"503", - "code":"507" + "doc_type":"usermanual", + "p_code":"594", + "code":"598" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"Using IoTDB", + "uri":"mrs_01_24144.html", + "doc_type":"cmpntguide", + "p_code":"", + "code":"599" + }, + { + "desc":"IoTDB is a data management engine that integrates collection, storage, and analysis of time series data. It features lightweight, high performance, and ease of use. It pe", + "product_code":"mrs", + "title":"Using IoTDB from Scratch", + "uri":"mrs_01_24157.html", + "doc_type":"cmpntguide", + "p_code":"599", + "code":"600" + }, + { + "desc":"This section describes how to use the IoTDB client in the O&M or service scenario.The client has been installed. For example, the installation directory is /opt/client. T", + "product_code":"mrs", + "title":"Using the IoTDB Client", + "uri":"mrs_01_24158.html", + "doc_type":"cmpntguide", + "p_code":"599", + "code":"601" + }, + { + "desc":"IoTDB uses the multi-replica deployment architecture to implement cluster high availability. Each region (DataRegion and SchemaRegion) has three replicas by default. 
You ", + "product_code":"mrs", + "title":"Configuring IoTDB Parameters", + "uri":"mrs_01_24159.html", + "doc_type":"cmpntguide", + "p_code":"599", + "code":"602" + }, + { + "desc":"IoTDB supports the following data types and encodings. For details, see Table 1.", + "product_code":"", + "title":"Data Types and Encodings Supported by IoTDB", + "uri":"mrs_01_24764.html", + "doc_type":"", + "p_code":"599", + "code":"603" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"mrs", + "title":"IoTDB Permission Management", + "uri":"mrs_01_24140.html", + "doc_type":"cmpntguide", + "p_code":"599", + "code":"604" + }, + { + "desc":"MRS supports users, user groups, and roles. Permissions must be assigned to roles and then roles are bound to users or user groups. Users can obtain permissions only by b", + "product_code":"mrs", + "title":"IoTDB Permissions", + "uri":"mrs_01_24141.html", + "doc_type":"cmpntguide", + "p_code":"604", + "code":"605" + }, + { + "desc":"Create and configure an IoTDB role on Manager as an MRS cluster administrator. An IoTDB role can be configured with IoTDB administrator permissions or a common user's per", + "product_code":"mrs", + "title":"Creating an IoTDB Role", + "uri":"mrs_01_24142.html", + "doc_type":"cmpntguide", + "p_code":"604", + "code":"606" + }, + { + "desc":"DescriptionLog paths: The default paths of IoTDB logs are /var/log/Bigdata/iotdb/iotdbserver (for storing run logs) and /var/log/Bigdata/audit/iotdb/iotdbserver (for stor", + "product_code":"mrs", + "title":"IoTDB Log Overview", + "uri":"mrs_01_24161.html", + "doc_type":"cmpntguide", + "p_code":"599", + "code":"607" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"UDFs", + "uri":"mrs_01_24512.html", + "doc_type":"", + "p_code":"599", + "code":"608" + }, + { + "desc":"IoTDB provides multiple built-in functions and user-defined functions (UDFs) to meet users' computing requirements.Table 1 lists the UDF types supported by IoTDB.To write", + "product_code":"", + "title":"UDF Overview", + "uri":"mrs_01_24513.html", + "doc_type":"", + "p_code":"608", + "code":"609" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"", + "title":"IoTDB Data Import and Export", + "uri":"mrs_01_24509.html", + "doc_type":"", + "p_code":"599", + "code":"610" + }, + { + "desc":"This section describes how to use import-csv.sh to import data in CSV format to IoTDB.The client has been installed. For details, see . For example, the installation dire", + "product_code":"", + "title":"Importing IoTDB Data", + "uri":"mrs_01_24510.html", + "doc_type":"", + "p_code":"610", + "code":"611" + }, + { + "desc":"This section describes how to use export-csv.sh to export data from IoTDB to a CSV file.Exporting data to CSV files may cause injection risks. 
Exercise caution when perfo", + "product_code":"", + "title":"Exporting IoTDB Data", + "uri":"mrs_01_24511.html", + "doc_type":"", + "p_code":"610", + "code":"612" + }, + { + "desc":"IoTDB has the multi-replica mechanism. By default, both schema regions and data regions have three replicas. The ConfigNode stores the mapping between regions and the IoT", + "product_code":"", + "title":"Planning IoTDB Capacity", + "uri":"mrs_01_24765.html", + "doc_type":"", + "p_code":"599", + "code":"613" + }, + { + "desc":"You can increase IoTDB memory to improve IoTDB performance because read and write operations are performed in HBase memory.Log in to Manager, choose Cluster > Services > ", + "product_code":"mrs", + "title":"IoTDB Performance Tuning", + "uri":"mrs_01_24162.html", + "doc_type":"cmpntguide", + "p_code":"599", + "code":"614" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Kafka", "uri":"mrs_01_0375.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"508" + "code":"615" }, { "desc":"You can create, query, and delete topics on a cluster client.The client has been installed. For example, the client is installed in the /opt/hadoopclient directory. The c", "product_code":"mrs", "title":"Using Kafka from Scratch", "uri":"mrs_01_1031.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"509" + "doc_type":"usermanual", + "p_code":"615", + "code":"616" }, { "desc":"You can manage Kafka topics on a cluster client based on service requirements. Management permission is required for clusters with Kerberos authentication enabled.You hav", "product_code":"mrs", "title":"Managing Kafka Topics", "uri":"mrs_01_0376.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"510" + "doc_type":"usermanual", + "p_code":"615", + "code":"617" }, { "desc":"You can query existing Kafka topics on MRS.Log in to FusionInsight Manager. For details, see Accessing FusionInsight Manager. Choose Cluster > Name of the desired cluster", "product_code":"mrs", "title":"Querying Kafka Topics", "uri":"mrs_01_0377.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"511" + "doc_type":"usermanual", + "p_code":"615", + "code":"618" }, { "desc":"For clusters with Kerberos authentication enabled, using Kafka requires relevant permissions. MRS clusters can grant the use permission of Kafka to different users.Table ", "product_code":"mrs", "title":"Managing Kafka User Permissions", "uri":"mrs_01_0378.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"512" + "doc_type":"usermanual", + "p_code":"615", + "code":"619" }, { "desc":"You can produce or consume messages in Kafka topics using the MRS cluster client. 
For clusters with Kerberos authentication enabled, you must have the permission to perfo", "product_code":"mrs", "title":"Managing Messages in Kafka Topics", "uri":"mrs_01_0379.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"513" + "doc_type":"usermanual", + "p_code":"615", + "code":"620" }, { "desc":"This section describes how to create and configure a Kafka role.Users can create Kafka roles only in security mode.If the current component uses Ranger for permission con", "product_code":"mrs", "title":"Creating a Kafka Role", "uri":"mrs_01_1032.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"514" + "doc_type":"usermanual", + "p_code":"615", + "code":"621" }, { "desc":"For details about how to set parameters, see Modifying Cluster Service Configuration Parameters.", "product_code":"mrs", "title":"Kafka Common Parameters", "uri":"mrs_01_1033.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"515" + "doc_type":"usermanual", + "p_code":"615", + "code":"622" }, { "desc":"Producer APIIndicates the API defined in org.apache.kafka.clients.producer.KafkaProducer. When kafka-console-producer.sh is used, the API is used by default.Indicates the", "product_code":"mrs", "title":"Safety Instructions on Using Kafka", "uri":"mrs_01_1035.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"516" + "doc_type":"usermanual", + "p_code":"615", + "code":"623" }, { "desc":"The maximum number of topics depends on the number of file handles (mainly used by data and index files on site) opened in the process.Run the ulimit -n command to view t", "product_code":"mrs", "title":"Kafka Specifications", "uri":"mrs_01_1036.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"517" + "doc_type":"usermanual", + "p_code":"615", + "code":"624" }, { "desc":"This section guides users to use a Kafka client in an O&M or service scenario.The client has been installed. For example, the installation directory is /opt/client.Servic", "product_code":"mrs", "title":"Using the Kafka Client", "uri":"mrs_01_1767.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"518" + "doc_type":"usermanual", + "p_code":"615", + "code":"625" }, { "desc":"For the Kafka message transmission assurance mechanism, different parameters are available for meeting different performance and reliability requirements. 
This section de", "product_code":"mrs", "title":"Configuring Kafka HA and High Reliability Parameters", "uri":"mrs_01_1037.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"519" + "doc_type":"usermanual", + "p_code":"615", + "code":"626" }, { "desc":"When a broker storage directory is added, the system administrator needs to change the broker storage directory on FusionInsight Manager, to ensure that the Kafka can wor", "product_code":"mrs", "title":"Changing the Broker Storage Directory", "uri":"mrs_01_1038.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"520" + "doc_type":"usermanual", + "p_code":"615", + "code":"627" }, { "desc":"This section describes how to view the current expenditure on the client based on service requirements.The system administrator has understood service requirements and pr", "product_code":"mrs", "title":"Checking the Consumption Status of Consumer Group", "uri":"mrs_01_1039.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"521" + "doc_type":"usermanual", + "p_code":"615", + "code":"628" }, { "desc":"This section describes how to use the Kafka balancing tool on a client to balance the load of the Kafka cluster based on service requirements in scenarios such as node de", "product_code":"mrs", "title":"Kafka Balancing Tool Instructions", "uri":"mrs_01_1040.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"522" + "doc_type":"usermanual", + "p_code":"615", + "code":"629" }, { "desc":"Operations need to be performed on tokens when the token authentication mechanism is used.The system administrator has understood service requirements and prepared a syst", "product_code":"mrs", "title":"Kafka Token Authentication Mechanism Tool Usage", "uri":"mrs_01_1041.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"523" + "doc_type":"usermanual", + "p_code":"615", + "code":"630" }, { "desc":"Feature description: The function of creating idempotent producers is introduced in Kafka 0.11.0.0. After this function is enabled, producers are automatically upgraded t", "product_code":"mrs", "title":"Kafka Feature Description", "uri":"mrs_01_2312.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"524" + "doc_type":"usermanual", + "p_code":"615", + "code":"631" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Kafka UI", "uri":"mrs_01_24130.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"525" + "doc_type":"usermanual", + "p_code":"615", + "code":"632" }, { "desc":"After the Kafka component is installed in an MRS cluster, you can use Kafka UI to query cluster information, node status, topic partitions, and data production and consum", "product_code":"mrs", "title":"Accessing Kafka UI", "uri":"mrs_01_24134.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"526" + "doc_type":"usermanual", + "p_code":"632", + "code":"633" }, { "desc":"After logging in to Kafka UI, you can view the basic information about the existing topics, brokers, and consumer groups in the current cluster on the home page. 
You can ", "product_code":"mrs", "title":"Kafka UI Overview", "uri":"mrs_01_24135.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"527" + "doc_type":"usermanual", + "p_code":"632", + "code":"634" }, { "desc":"Create a topic on Kafka UI.You can click Advanced Options to set advanced topic parameters based on service requirements. Generally, retain the default values.In a cluste", "product_code":"mrs", "title":"Creating a Topic on Kafka UI", "uri":"mrs_01_24136.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"528" + "doc_type":"usermanual", + "p_code":"632", + "code":"635" }, { "desc":"Migrate a partition on Kafka UI.In security mode, the user who migrates a partition must belong to the kafkaadmin user group. Otherwise, the operation fails due to authen", "product_code":"mrs", "title":"Migrating a Partition on Kafka UI", "uri":"mrs_01_24137.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"529" + "doc_type":"usermanual", + "p_code":"632", + "code":"636" }, { "desc":"On Kafka UI, you can view topic details, modify topic configurations, add topic partitions, delete topics, and view the number of data records produced in different time ", "product_code":"mrs", "title":"Managing Topics on Kafka UI", "uri":"mrs_01_24138.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"530" + "doc_type":"usermanual", + "p_code":"632", + "code":"637" }, { "desc":"On Kafka UI, you can view broker details and JMX metrics of the broker node data traffic.", "product_code":"mrs", "title":"Viewing Brokers on Kafka UI", "uri":"mrs_01_24139.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"531" + "doc_type":"usermanual", + "p_code":"632", + "code":"638" }, { "desc":"On Kafka UI, you can view the basic information about a consumer group and the consumption status of topics in the group.MRS clusters do not support redirection by clicki", "product_code":"mrs", "title":"Viewing a Consumer Group on Kafka UI", "uri":"mrs_01_24133.html", - "doc_type":"cmpntguide-lts", - "p_code":"525", - "code":"532" + "doc_type":"usermanual", + "p_code":"632", + "code":"639" }, { "desc":"Log paths: The default storage path of Kafka logs is /var/log/Bigdata/kafka. The default storage path of audit logs is /var/log/Bigdata/audit/kafka.Broker: /var/log/Bigda", "product_code":"mrs", "title":"Introduction to Kafka Logs", "uri":"mrs_01_1042.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"533" + "doc_type":"usermanual", + "p_code":"615", + "code":"640" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Performance Tuning", "uri":"mrs_01_1043.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"534" + "doc_type":"usermanual", + "p_code":"615", + "code":"641" }, { "desc":"You can modify Kafka server parameters to improve Kafka processing capabilities in specific service scenarios.Modify the service configuration parameters. 
For details, se", "product_code":"mrs", "title":"Kafka Performance Tuning", "uri":"mrs_01_1044.html", - "doc_type":"cmpntguide-lts", - "p_code":"534", - "code":"535" + "doc_type":"usermanual", + "p_code":"641", + "code":"642" + }, + { + "desc":"This section applies to MRS 3.2.0 or later.This section describes how to use Kafka client commands to migrate partition data between disks on a node without stopping the ", + "product_code":"", + "title":"Migrating Data Between Kafka Nodes", + "uri":"mrs_01_24534.html", + "doc_type":"", + "p_code":"615", + "code":"643" + }, + { + "desc":"This section applies to MRS 3.2.0 or later.To access Kafka Broker deployed on a private network from the Kafka client via the Internet, enable the Kafka private and publi", + "product_code":"", + "title":"Configuring Intranet and Extranet Access for Kafka", + "uri":"mrs_01_24576.html", + "doc_type":"", + "p_code":"615", + "code":"644" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Kafka", "uri":"mrs_01_1768.html", - "doc_type":"cmpntguide-lts", - "p_code":"508", - "code":"536" + "doc_type":"usermanual", + "p_code":"615", + "code":"645" }, { "desc":"How do I delete a Kafka topic if it fails to be deleted?Possible cause 1: The delete.topic.enable configuration item is not set to true. The deletion can be performed onl", "product_code":"mrs", "title":"How Do I Solve the Problem that Kafka Topics Cannot Be Deleted?", "uri":"mrs_01_1769.html", - "doc_type":"cmpntguide-lts", - "p_code":"536", - "code":"537" + "doc_type":"usermanual", + "p_code":"645", + "code":"646" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Loader", "uri":"mrs_01_0400.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"538" + "code":"647" }, { "desc":"For details about the how to set parameters, see Modifying Cluster Service Configuration Parameters.Because it needs time to calculate the fault tolerance rate, you are r", "product_code":"mrs", "title":"Common Loader Parameters", "uri":"mrs_01_1784.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"539" + "doc_type":"usermanual", + "p_code":"647", + "code":"648" }, { "desc":"This section describes how to create and configure a Loader role on FusionInsight Manager. The Loader role can set Loader administrator permissions, job connections, job ", "product_code":"mrs", "title":"Creating a Loader Role", "uri":"mrs_01_1085.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"540" + "doc_type":"usermanual", + "p_code":"647", + "code":"649" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Importing Data", "uri":"mrs_01_1086.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"541" + "doc_type":"usermanual", + "p_code":"647", + "code":"650" }, { "desc":"Loader is an ETL tool that enables MRS to exchange data and files with external data sources, such as relational databases, SFTP servers, and FTP servers. It allows data ", "product_code":"mrs", - "title":"Overview", + "title":"Loader Importing Data Overview", "uri":"mrs_01_1087.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"542" + "doc_type":"usermanual", + "p_code":"650", + "code":"651" }, { "desc":"This section describes how to import data from external data sources to MRS.Generally, you can manually manage data import and export jobs on the Loader UI. To use shell ", "product_code":"mrs", "title":"Importing Data Using Loader", "uri":"mrs_01_1088.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"543" + "doc_type":"usermanual", + "p_code":"650", + "code":"652" }, { "desc":"Use Loader to import data from an SFTP server to HDFS or OBS.You have obtained the service username and password for creating a Loader job.You have had the permission to ", "product_code":"mrs", "title":"Typical Scenario: Importing Data from an SFTP Server to HDFS or OBS", "uri":"mrs_01_1089.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"544" + "doc_type":"usermanual", + "p_code":"650", + "code":"653" }, { "desc":"Use Loader to import data from an SFTP server to HBase.You have obtained the service username and password for creating a Loader job.You have had the permission to access", "product_code":"mrs", "title":"Typical Scenario: Importing Data from an SFTP Server to HBase", "uri":"mrs_01_1090.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"545" + "doc_type":"usermanual", + "p_code":"650", + "code":"654" }, { "desc":"Use Loader to import data from an SFTP server to Hive.You have obtained the service username and password for creating a Loader job.You have had the permission to access ", "product_code":"mrs", "title":"Typical Scenario: Importing Data from an SFTP Server to Hive", "uri":"mrs_01_1091.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"546" + "doc_type":"usermanual", + "p_code":"650", + "code":"655" }, { "desc":"Use Loader to import data from an SFTP server to Spark.You have obtained the service username and password for creating a Loader job.You have had the permission to access", "product_code":"mrs", "title":"Typical Scenario: Importing Data from an SFTP Server to Spark", "uri":"mrs_01_1092.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"547" + "doc_type":"usermanual", + "p_code":"650", + "code":"656" }, { "desc":"Use Loader to import data from an FTP server to HBase.You have obtained the service username and password for creating a Loader job.You have obtained the username and pas", "product_code":"mrs", "title":"Typical Scenario: Importing Data from an FTP Server to HBase", "uri":"mrs_01_1093.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"548" + "doc_type":"usermanual", + "p_code":"650", + "code":"657" }, { "desc":"Use Loader to import data from a relational database to HDFS or OBS.You have obtained the service username and password for creating a Loader job.You have had the permiss", "product_code":"mrs", "title":"Typical 
Scenario: Importing Data from a Relational Database to HDFS or OBS", "uri":"mrs_01_1094.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"549" + "doc_type":"usermanual", + "p_code":"650", + "code":"658" }, { "desc":"Use Loader to import data from a relational database to HBase.You have obtained the service username and password for creating a Loader job.You have had the permission to", "product_code":"mrs", "title":"Typical Scenario: Importing Data from a Relational Database to HBase", "uri":"mrs_01_1095.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"550" + "doc_type":"usermanual", + "p_code":"650", + "code":"659" }, { "desc":"Use Loader to import data from a relational database to Hive.You have obtained the service username and password for creating a Loader job.You have had the permission to ", "product_code":"mrs", "title":"Typical Scenario: Importing Data from a Relational Database to Hive", "uri":"mrs_01_1096.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"551" + "doc_type":"usermanual", + "p_code":"650", + "code":"660" }, { "desc":"Use Loader to import data from a relational database to Spark.You have obtained the service username and password for creating a Loader job.You have had the permission to", "product_code":"mrs", "title":"Typical Scenario: Importing Data from a Relational Database to Spark", "uri":"mrs_01_1097.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"552" + "doc_type":"usermanual", + "p_code":"650", + "code":"661" }, { "desc":"Use Loader to import data from HDFS or OBS to HBase.You have obtained the service username and password for creating a Loader job.You have had the permission to access th", "product_code":"mrs", "title":"Typical Scenario: Importing Data from HDFS or OBS to HBase", "uri":"mrs_01_1098.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"553" + "doc_type":"usermanual", + "p_code":"650", + "code":"662" }, { "desc":"This section describes how to use Loader to import data from a relational database to ClickHouse using MySQL as an example.You have obtained the service username and pass", - "product_code":"mrs", + "product_code":"", "title":"Typical Scenario: Importing Data from a Relational Database to ClickHouse", "uri":"mrs_01_24172.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"554" + "doc_type":"", + "p_code":"650", + "code":"663" }, { "desc":"Use Loader to import data from HDFS to ClickHouse.You have obtained the service username and password for creating a Loader job.You have had the permission to access the ", - "product_code":"mrs", + "product_code":"", "title":"Typical Scenario: Importing Data from HDFS to ClickHouse", "uri":"mrs_01_24173.html", - "doc_type":"cmpntguide-lts", - "p_code":"541", - "code":"555" + "doc_type":"", + "p_code":"650", + "code":"664" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Exporting Data", "uri":"mrs_01_1100.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"556" + "doc_type":"usermanual", + "p_code":"647", + "code":"665" }, { "desc":"Loader is an extract, transform, and load (ETL) tool for exchanging data and files between MRS and relational databases and file systems. 
You can use the Loader to export", "product_code":"mrs", - "title":"Overview", + "title":"Loader Exporting Data Overview", "uri":"mrs_01_1101.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"557" + "doc_type":"usermanual", + "p_code":"665", + "code":"666" }, { "desc":"This task enables you to export data from MRS to external data sources.Generally, users can manually manage data import and export jobs on the Loader UI. To use shell scr", "product_code":"mrs", "title":"Using Loader to Export Data", "uri":"mrs_01_1102.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"558" + "doc_type":"usermanual", + "p_code":"665", + "code":"667" }, { "desc":"This section describes how to use Loader to export data from HDFS/OBS to an SFTP server.You have obtained the service username and password for creating a Loader job.You ", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from HDFS/OBS to an SFTP Server", "uri":"mrs_01_1103.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"559" + "doc_type":"usermanual", + "p_code":"665", + "code":"668" }, { "desc":"Use Loader to export data from HBase to an SFTP server.You have obtained the service username and password for creating a Loader job.You have had the permission to access", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from HBase to an SFTP Server", "uri":"mrs_01_1104.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"560" + "doc_type":"usermanual", + "p_code":"665", + "code":"669" }, { "desc":"Use Loader to export data from Hive to an SFTP server.You have obtained the service username and password for creating a Loader job.You have had the permission to access ", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from Hive to an SFTP Server", "uri":"mrs_01_1105.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"561" + "doc_type":"usermanual", + "p_code":"665", + "code":"670" }, { "desc":"This section describes how to use Loader to export data from Spark to an SFTP server.You have obtained the service username and password for creating a Loader job.You hav", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from Spark to an SFTP Server", "uri":"mrs_01_1106.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"562" + "doc_type":"usermanual", + "p_code":"665", + "code":"671" }, { "desc":"This section describes how to use Loader to export data from HDFS/OBS to a relational database.You have obtained the service username and password for creating a Loader j", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from HDFS/OBS to a Relational Database", "uri":"mrs_01_1107.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"563" + "doc_type":"usermanual", + "p_code":"665", + "code":"672" }, { "desc":"Use Loader to export data from HBase to a relational database.You have obtained the service username and password for creating a Loader job.You have had the permission to", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from HBase to a Relational Database", "uri":"mrs_01_1108.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"564" + "doc_type":"usermanual", + "p_code":"665", + "code":"673" }, { "desc":"Use Loader to export data from Hive to a relational database.You have obtained the service username and password for creating a Loader job.You have had the permission to ", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from Hive to a Relational 
Database", "uri":"mrs_01_1109.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"565" + "doc_type":"usermanual", + "p_code":"665", + "code":"674" }, { "desc":"This section describes how to use Loader to export data from Spark to a relational database.You have obtained the service username and password for creating a Loader job.", "product_code":"mrs", "title":"Typical Scenario: Exporting Data from Spark to a Relational Database", "uri":"mrs_01_1110.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"566" + "doc_type":"usermanual", + "p_code":"665", + "code":"675" }, { "desc":"This section describes how to use Loader to export data from HBase to HDFS/OBS.You have obtained the service user name and password for creating a Loader job.You have had", "product_code":"mrs", "title":"Typical Scenario: Importing Data from HBase to HDFS/OBS", "uri":"mrs_01_1111.html", - "doc_type":"cmpntguide-lts", - "p_code":"556", - "code":"567" + "doc_type":"usermanual", + "p_code":"665", + "code":"676" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Job Management", "uri":"mrs_01_1113.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"568" + "doc_type":"usermanual", + "p_code":"647", + "code":"677" }, { "desc":"Loader allows jobs to be migrated in batches from a group (source group) to another group (target group).The source group and target group exist.The current user has the ", "product_code":"mrs", "title":"Migrating Loader Jobs in Batches", "uri":"mrs_01_1114.html", - "doc_type":"cmpntguide-lts", - "p_code":"568", - "code":"569" + "doc_type":"usermanual", + "p_code":"677", + "code":"678" }, { "desc":"Loader allows existing jobs to be deleted in batches.The current user has the Edit permission for the jobs to be deleted or the Jobs Edit permission for the group to whic", "product_code":"mrs", "title":"Deleting Loader Jobs in Batches", "uri":"mrs_01_1115.html", - "doc_type":"cmpntguide-lts", - "p_code":"568", - "code":"570" + "doc_type":"usermanual", + "p_code":"677", + "code":"679" }, { "desc":"Loader allows all jobs of a configuration file to be imported in batches.The current user has the Jobs Edit permission of the group to which the jobs to be imported belon", "product_code":"mrs", "title":"Importing Loader Jobs in Batches", "uri":"mrs_01_1116.html", - "doc_type":"cmpntguide-lts", - "p_code":"568", - "code":"571" + "doc_type":"usermanual", + "p_code":"677", + "code":"680" }, { "desc":"Loader allows existing jobs to be exported in batches.The current user has the Edit permission for the jobs to be exported or the Jobs Edit permission of the group to whi", "product_code":"mrs", "title":"Exporting Loader Jobs in Batches", "uri":"mrs_01_1117.html", - "doc_type":"cmpntguide-lts", - "p_code":"568", - "code":"572" + "doc_type":"usermanual", + "p_code":"677", + "code":"681" }, { "desc":"Query the execution status and execution duration of a Loader job during routine maintenance. 
You can perform the following operations on the job:Dirty Data: Query data t", "product_code":"mrs", "title":"Viewing Historical Job Information", "uri":"mrs_01_1118.html", - "doc_type":"cmpntguide-lts", - "p_code":"568", - "code":"573" + "doc_type":"usermanual", + "p_code":"677", + "code":"682" + }, + { + "desc":"This section applies to MRS 3.2.0 or later.Loader accumulates a large amount of historical data during service running. The historical data may affect job submission, run", + "product_code":"", + "title":"Purging Historical Loader Data", + "uri":"mrs_01_24813.html", + "doc_type":"", + "p_code":"677", + "code":"683" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Operator Help", "uri":"mrs_01_1119.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"574" + "doc_type":"usermanual", + "p_code":"647", + "code":"684" }, { "desc":"Loader reads data at the source end, uses an input operator to convert data into fields by certain rules, use a conversion operator to clean or convert the fields, and fi", "product_code":"mrs", - "title":"Overview", + "title":"Loader Operator Overview", "uri":"mrs_01_1120.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"575" + "doc_type":"usermanual", + "p_code":"684", + "code":"685" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Input Operators", "uri":"mrs_01_1121.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"576" + "doc_type":"usermanual", + "p_code":"684", + "code":"686" }, { "desc":"The CSV File Input operator imports all files that can be opened by using a text editor.Input: test filesOutput: fieldsEach data line is separated into multiple fields by", "product_code":"mrs", "title":"CSV File Input", "uri":"mrs_01_1122.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"577" + "doc_type":"usermanual", + "p_code":"686", + "code":"687" }, { "desc":"The Fixed File Input operator converts each line in a file into multiple fields by character or byte of a configurable length.Input: text fileOutput: fieldsThe source fil", "product_code":"mrs", "title":"Fixed File Input", "uri":"mrs_01_1123.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"578" + "doc_type":"usermanual", + "p_code":"686", + "code":"688" }, { "desc":"Table Input operator converts specified columns in a relational database table into input fields of the same quantity.Input: table columnsOutput: fieldsFields are generat", "product_code":"mrs", "title":"Table Input", "uri":"mrs_01_1124.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"579" + "doc_type":"usermanual", + "p_code":"686", + "code":"689" }, { "desc":"The HBase Input operator converts specified columns in an HBase table into input fields of the same quantity.Input: HBase table columnsOutput: fieldsIf the HBase table na", "product_code":"mrs", "title":"HBase Input", "uri":"mrs_01_1125.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"580" + "doc_type":"usermanual", + "p_code":"686", + "code":"690" }, { "desc":"HTML Input operator 
imports a regular HTML file and converts elements in the HTML file into input fields.Input: HTML fileOutput: multiple fieldsparent tag is configured f", "product_code":"mrs", "title":"HTML Input", "uri":"mrs_01_1126.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"581" + "doc_type":"usermanual", + "p_code":"686", + "code":"691" }, { "desc":"The Hive Input operator converts specified columns in an HBase table into input fields of the same quantity.Input: Hive table columnsOutput: fieldsIf the Hive table name ", "product_code":"mrs", "title":"Hive input", "uri":"mrs_01_1128.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"582" + "doc_type":"usermanual", + "p_code":"686", + "code":"692" }, { "desc":"The Spark Input operator converts specified columns in an SparkSQL table into input fields of the same quantity.Input: SparkSQL table columnOutput: fieldsIf the SparkSQL ", "product_code":"mrs", "title":"Spark Input", "uri":"mrs_01_1129.html", - "doc_type":"cmpntguide-lts", - "p_code":"576", - "code":"583" + "doc_type":"usermanual", + "p_code":"686", + "code":"693" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Conversion Operators", "uri":"mrs_01_1130.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"584" + "doc_type":"usermanual", + "p_code":"684", + "code":"694" }, { "desc":"The Long Date Conversion operator performs long integer and date conversion.Input: fields to be convertedOutput: new fieldsIf the original data includes null values, no c", "product_code":"mrs", "title":"Long Date Conversion", "uri":"mrs_01_1131.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"585" + "doc_type":"usermanual", + "p_code":"694", + "code":"695" }, { "desc":"The null value conversion operator replaces null values with specified values.Input: fields with null valuesOutput: original fields with new valuesWhen field values are e", "product_code":"mrs", "title":"Null Value Conversion", "uri":"mrs_01_1132.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"586" + "doc_type":"usermanual", + "p_code":"694", + "code":"696" }, { "desc":"The Add Constants operator generates constant fields.Input: noneOutput: constant fieldsThis operator generates constant fields of the specified type.Use the CSV File Inpu", "product_code":"mrs", "title":"Constant Field Addition", "uri":"mrs_01_1133.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"587" + "doc_type":"usermanual", + "p_code":"694", + "code":"697" }, { "desc":"Generate Random operator configures new values as random value fields.Input: noneOutput: random value fieldsThe operator generates random value fields of specified type.U", "product_code":"mrs", "title":"Random Value Conversion", "uri":"mrs_01_1134.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"588" + "doc_type":"usermanual", + "p_code":"694", + "code":"698" }, { "desc":"The Concat Fields operator concatenates existing fields by using delimiters to generate new fields.Input: fields to be concatenatedOutput: new fieldsUse delimiters to con", "product_code":"mrs", "title":"Concat Fields", "uri":"mrs_01_1135.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"589" + "doc_type":"usermanual", + "p_code":"694", + "code":"699" }, { "desc":"The 
Extract Fields separates an existing field by using delimiters to generate new fields.Input: field to be separatedOutput: new fieldsThe value of the input field is se", "product_code":"mrs", "title":"Extract Fields", "uri":"mrs_01_1136.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"590" + "doc_type":"usermanual", + "p_code":"694", + "code":"700" }, { "desc":"The Modulo Integer operator performs modulo operations on integer fields to generate new fields.Input: integer fieldsOutput: new fieldsThe operator generates new fields a", "product_code":"mrs", "title":"Modulo Integer", "uri":"mrs_01_1137.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"591" + "doc_type":"usermanual", + "p_code":"694", + "code":"701" }, { "desc":"The String Cut operator cuts existing fields to generate new fields.Input: fields to be cutOutput: new fieldsstart position and end position are used to cut the original ", "product_code":"mrs", "title":"String Cut", "uri":"mrs_01_1138.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"592" + "doc_type":"usermanual", + "p_code":"694", + "code":"702" }, { "desc":"The EL Operation operator calculates field values and generates new fields. The algorithms that are currently supported include md5sum, sha1sum, sha256sum, and sha512sum.", "product_code":"mrs", "title":"EL Operation", "uri":"mrs_01_1139.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"593" + "doc_type":"usermanual", + "p_code":"694", + "code":"703" }, { "desc":"The String Operations operator converts the upper and lower cases of existing fields to generate new fields.Input: fields whose case is to be convertedOutput: new fields ", "product_code":"mrs", "title":"String Operations", "uri":"mrs_01_1140.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"594" + "doc_type":"usermanual", + "p_code":"694", + "code":"704" }, { "desc":"The String Reverse operator reverses existing fields to generate new fields.Input: fields to be reversedOutput: new fieldsValue reversal conversion is performed for field", "product_code":"mrs", "title":"String Reverse", "uri":"mrs_01_1141.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"595" + "doc_type":"usermanual", + "p_code":"694", + "code":"705" }, { "desc":"The String Trim operator clears spaces contained in existing fields to generate new fields.Input: fields whose spaces are to be clearedOutput: new fieldsClearing spaces a", "product_code":"mrs", "title":"String Trim", "uri":"mrs_01_1142.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"596" + "doc_type":"usermanual", + "p_code":"694", + "code":"706" }, { "desc":"This Filter Rows operator filters rows that contain triggering conditions by configuring logic conditions.Input: fields used to create filter conditionsOutput: noneWhen t", "product_code":"mrs", "title":"Filter Rows", "uri":"mrs_01_1143.html", - "doc_type":"cmpntguide-lts", - "p_code":"584", - "code":"597" + "doc_type":"usermanual", + "p_code":"694", + "code":"707" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Output Operators", "uri":"mrs_01_1145.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"598" + "doc_type":"usermanual", + "p_code":"684", + "code":"708" }, { "desc":"The Hive Output operator exports existing fields to specified columns of a Hive table.Input: fields to be exportedOutput: Hive tableThe field values are exported to the H", "product_code":"mrs", "title":"Hive output", "uri":"mrs_01_1146.html", - "doc_type":"cmpntguide-lts", - "p_code":"598", - "code":"599" + "doc_type":"usermanual", + "p_code":"708", + "code":"709" }, { "desc":"The Spark Output operator exports existing fields to specified columns of a Spark SQL table.Input: fields to be exportedOutput: SparkSQL tableThe field values are exporte", "product_code":"mrs", "title":"Spark Output", "uri":"mrs_01_1147.html", - "doc_type":"cmpntguide-lts", - "p_code":"598", - "code":"600" + "doc_type":"usermanual", + "p_code":"708", + "code":"710" }, { "desc":"The Table Output operator exports output fields to specified columns in a relational database table.Input: fields to be exportedOutput: relational database tableThe field", "product_code":"mrs", "title":"Table Output", "uri":"mrs_01_1148.html", - "doc_type":"cmpntguide-lts", - "p_code":"598", - "code":"601" + "doc_type":"usermanual", + "p_code":"708", + "code":"711" }, { "desc":"The File Output operator uses delimiters to concatenate existing fields and exports new fields to a file.Input: fields to be exportedOutput: filesThe field is exported to", "product_code":"mrs", "title":"File Output", "uri":"mrs_01_1149.html", - "doc_type":"cmpntguide-lts", - "p_code":"598", - "code":"602" + "doc_type":"usermanual", + "p_code":"708", + "code":"712" }, { "desc":"The HBase Output operator exports existing fields to specified columns of an HBase Outputtable.Input: fields to be exportedOutput: HBase tableThe field values are exporte", "product_code":"mrs", "title":"HBase Output", "uri":"mrs_01_1150.html", - "doc_type":"cmpntguide-lts", - "p_code":"598", - "code":"603" + "doc_type":"usermanual", + "p_code":"708", + "code":"713" }, { "desc":"The ClickHouse Output operator exports existing fields to specified columns of a ClickHouse table.Input: fields to be exportedOutput: ClickHouse tableThe field values are", - "product_code":"mrs", + "product_code":"", "title":"ClickHouse Output", "uri":"mrs_01_24177.html", - "doc_type":"cmpntguide-lts", - "p_code":"598", - "code":"604" + "doc_type":"", + "p_code":"708", + "code":"714" }, { "desc":"This section describes how to associate, import, or export the field configuration information of an operator when creating or editing a Loader job.Associating the field ", "product_code":"mrs", "title":"Associating, Editing, Importing, or Exporting the Field Configuration of an Operator", "uri":"mrs_01_1152.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"605" + "doc_type":"usermanual", + "p_code":"684", + "code":"715" }, { "desc":"When creating or editing Loader jobs, users can use macro definitions during parameter configuration. 
Then the parameters can be automatically changed to corresponding ma", "product_code":"mrs", "title":"Using Macro Definitions in Configuration Items", "uri":"mrs_01_1153.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"606" + "doc_type":"usermanual", + "p_code":"684", + "code":"716" }, { "desc":"In Loader data import and export tasks, each operator defines different processing rules for null values and empty strings in raw data. Dirty data cannot be imported or e", "product_code":"mrs", "title":"Operator Data Processing Rules", "uri":"mrs_01_1154.html", - "doc_type":"cmpntguide-lts", - "p_code":"574", - "code":"607" + "doc_type":"usermanual", + "p_code":"684", + "code":"717" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Client Tool Description", "uri":"mrs_01_1155.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"608" + "doc_type":"usermanual", + "p_code":"647", + "code":"718" }, { "desc":"loader-tool is a Loader client tool. It consists of three tools: lt-ucc, lt-ucj, lt-ctl.Loader supports two modes, parameter mode and job template mode. Either mode can b", "product_code":"mrs", "title":"loader-tool Usage Guide", "uri":"mrs_01_1157.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"609" + "doc_type":"usermanual", + "p_code":"718", + "code":"719" }, { "desc":"loader-tool can be used to create, update, query, and delete a connector or job by using a job template or setting parameters.This section describes how to use loader-too", "product_code":"mrs", "title":"loader-tool Usage Example", "uri":"mrs_01_1158.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"610" + "doc_type":"usermanual", + "p_code":"718", + "code":"720" }, { "desc":"schedule-tool is used to submit jobs of SFTP data sources. You can modify the input path and file filtering criteria before submitting a job. You can modify the output pa", "product_code":"mrs", "title":"schedule-tool Usage Guide", "uri":"mrs_01_1159.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"611" + "doc_type":"usermanual", + "p_code":"718", + "code":"721" }, { "desc":"After a job is created using the Loader WebUI or Loader-tool, use schedule-tool to execute the job.The Loader client has been installed and configured.cd /opt/hadoopclien", "product_code":"mrs", "title":"schedule-tool Usage Example", "uri":"mrs_01_1160.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"612" + "doc_type":"usermanual", + "p_code":"718", + "code":"722" }, { "desc":"After a job is created using the Loader WebUI or loader-tool, use loader-backup to back up data.Only Loader jobs of data export support data backup.This tool is an intern", "product_code":"mrs", "title":"Using loader-backup to Back Up Job Data", "uri":"mrs_01_1161.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"613" + "doc_type":"usermanual", + "p_code":"718", + "code":"723" }, { "desc":"Sqoop-shell is a shell tool of Loader. 
All its functions are implemented by executing the sqoop2-shell script.The sqoop-shell tool provides the following functions:Creati", "product_code":"mrs", "title":"Open Source sqoop-shell Tool Usage Guide", "uri":"mrs_01_1162.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"614" + "doc_type":"usermanual", + "p_code":"718", + "code":"724" }, { "desc":"Taking importing data from SFTP to HDFS as an example, this section introduces how to use the sqoop-shell tool to create and start Loader jobs in the interaction mode and", "product_code":"mrs", "title":"Example for Using the Open-Source sqoop-shell Tool (SFTP-HDFS)", "uri":"mrs_01_1163.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"615" + "doc_type":"usermanual", + "p_code":"718", + "code":"725" }, { "desc":"Taking Importing Data from Oracle to HBase as an example, this section introduces how to use the sqoop-shell tool to create and start Loader jobs in the interaction mode ", "product_code":"mrs", "title":"Example for Using the Open-Source sqoop-shell Tool (Oracle-HBase)", "uri":"mrs_01_1164.html", - "doc_type":"cmpntguide-lts", - "p_code":"608", - "code":"616" + "doc_type":"usermanual", + "p_code":"718", + "code":"726" }, { "desc":"Log path: The default storage path of Loader log files is /var/log/Bigdata/loader/Log category.runlog: /var/log/Bigdata/loader/runlog (run logs)scriptlog: /var/log/Bigdat", "product_code":"mrs", "title":"Loader Log Overview", "uri":"mrs_01_1165.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"617" + "doc_type":"usermanual", + "p_code":"647", + "code":"727" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Loader", "uri":"mrs_01_1785.html", - "doc_type":"cmpntguide-lts", - "p_code":"538", - "code":"618" + "doc_type":"usermanual", + "p_code":"647", + "code":"728" }, { "desc":"Internet Explorer 11 or Internet Explorer 10 is used to access the web UI of Loader. After data is submitted, an error occurs.SymptomWhen the submitted data is saved, a s", "product_code":"mrs", "title":"How to Resolve the Problem that Failed to Save Data When Using Internet Explorer 10 or Internet Explorer 11 ?", "uri":"mrs_01_1786.html", - "doc_type":"cmpntguide-lts", - "p_code":"618", - "code":"619" + "doc_type":"usermanual", + "p_code":"728", + "code":"729" }, { "desc":"Three types of connectors are available for importing data from the Oracle database to HDFS using Loader. That is, generic-jdbc-connector, oracle-connector, and oracle-pa", "product_code":"mrs", "title":"Differences Among Connectors Used During the Process of Importing Data from the Oracle Database to HDFS", "uri":"mrs_01_1787.html", - "doc_type":"cmpntguide-lts", - "p_code":"618", - "code":"620" + "doc_type":"usermanual", + "p_code":"728", + "code":"730" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using MapReduce", "uri":"mrs_01_0834.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"621" + "code":"731" }, { "desc":"The JobHistoryServer service of MapReduce is a single instance, or the single instance is used to install the MapReduce service during cluster installation. To avoid the ", "product_code":"mrs", "title":"Converting MapReduce from the Single Instance Mode to the HA Mode", "uri":"mrs_01_0835.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"622" + "doc_type":"usermanual", + "p_code":"731", + "code":"732" }, { "desc":"Job and task logs are generated during execution of a MapReduce application.Job logs are generated by the MRApplicationMaster, which record details about the start and ru", "product_code":"mrs", "title":"Configuring the Log Archiving and Clearing Mechanism", "uri":"mrs_01_0836.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"623" + "doc_type":"usermanual", + "p_code":"731", + "code":"733" }, { "desc":"When the network is unstable or the cluster I/O and CPU are overloaded, client applications might encounter running failures.Adjust the following parameters in the mapred", "product_code":"mrs", "title":"Reducing Client Application Failure Rate", "uri":"mrs_01_0837.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"624" + "doc_type":"usermanual", + "p_code":"731", + "code":"734" }, { "desc":"To submit MapReduce tasks from Windows to Linux, set mapreduce.app-submission.cross-platform to true. If this parameter does not exist in the cluster or the value of this", "product_code":"mrs", "title":"Transmitting MapReduce Tasks from Windows to Linux", "uri":"mrs_01_0838.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"625" + "doc_type":"usermanual", + "p_code":"731", + "code":"735" }, { "desc":"Distributed caching is useful in the following scenarios:Rolling UpgradeDuring the upgrade, applications must keep the text content (JAR file or configuration file) uncha", "product_code":"mrs", "title":"Configuring the Distributed Cache", "uri":"mrs_01_0839.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"626" + "doc_type":"usermanual", + "p_code":"731", + "code":"736" }, { "desc":"When the MapReduce shuffle service is started, it attempts to bind an IP address based on local host. If the MapReduce shuffle service is required to connect to a specifi", "product_code":"mrs", "title":"Configuring the MapReduce Shuffle Address", "uri":"mrs_01_0840.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"627" + "doc_type":"usermanual", + "p_code":"731", + "code":"737" }, { "desc":"This function is used to specify the MapReduce cluster administrator.The system administrator list is specified by mapreduce.cluster.administrators. 
The cluster administr", "product_code":"mrs", "title":"Configuring the Cluster Administrator List", "uri":"mrs_01_0841.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"628" + "doc_type":"usermanual", + "p_code":"731", + "code":"738" }, { "desc":"Log paths:JobhistoryServer: /var/log/Bigdata/mapreduce/jobhistory (run log) and /var/log/Bigdata/audit/mapreduce/jobhistory (audit log)Container: /srv/BigData/hadoop/data", "product_code":"mrs", "title":"Introduction to MapReduce Logs", "uri":"mrs_01_0842.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"629" + "doc_type":"usermanual", + "p_code":"731", + "code":"739" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"MapReduce Performance Tuning", "uri":"mrs_01_0843.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"630" + "doc_type":"usermanual", + "p_code":"731", + "code":"740" }, { "desc":"Optimization can be performed when the number of CPU cores is large, for example, the number of CPU cores is three times the number of disks.You can set the following par", "product_code":"mrs", "title":"Optimization Configuration for Multiple CPU Cores", "uri":"mrs_01_0844.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"631" + "doc_type":"usermanual", + "p_code":"740", + "code":"741" }, { "desc":"The performance optimization effect is verified by comparing actual values with the baseline data. Therefore, determining optimal job baseline is critical to performance ", "product_code":"mrs", "title":"Determining the Job Baseline", "uri":"mrs_01_0845.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"632" + "doc_type":"usermanual", + "p_code":"740", + "code":"742" }, { "desc":"During the shuffle procedure of MapReduce, the Map task writes intermediate data into disks, and the Reduce task copies and adds the data to the reduce function. Hadoop p", "product_code":"mrs", "title":"Streamlining Shuffle", "uri":"mrs_01_0846.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"633" + "doc_type":"usermanual", + "p_code":"740", + "code":"743" }, { "desc":"A big job containing 100,000 Map tasks fails. It is found that the failure is triggered by the slow response of ApplicationMaster (AM).When the number of tasks increases,", "product_code":"mrs", "title":"AM Optimization for Big Tasks", "uri":"mrs_01_0847.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"634" + "doc_type":"usermanual", + "p_code":"740", + "code":"744" }, { "desc":"If a cluster has hundreds or thousands of nodes, the hardware or software fault of a node may prolong the execution time of the entire task (as most tasks are already com", "product_code":"mrs", "title":"Speculative Execution", "uri":"mrs_01_0848.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"635" + "doc_type":"usermanual", + "p_code":"740", + "code":"745" }, { "desc":"The Slow Start feature specifies the proportion of Map tasks to be completed before Reduce tasks are started. 
If the Reduce tasks are started too early, resources will be", "product_code":"mrs", "title":"Using Slow Start", "uri":"mrs_01_0849.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"636" + "doc_type":"usermanual", + "p_code":"740", + "code":"746" }, { "desc":"By default, if an MR job generates a large number of output files, it takes a long time for the job to commit the temporary outputs of a task to the final output director", "product_code":"mrs", "title":"Optimizing Performance for Committing MR Jobs", "uri":"mrs_01_0850.html", - "doc_type":"cmpntguide-lts", - "p_code":"630", - "code":"637" + "doc_type":"usermanual", + "p_code":"740", + "code":"747" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About MapReduce", "uri":"mrs_01_1788.html", - "doc_type":"cmpntguide-lts", - "p_code":"621", - "code":"638" + "doc_type":"usermanual", + "p_code":"731", + "code":"748" }, { "desc":"MapReduce job takes a very long time (more than 10minutes) when the ResourceManager switch while the job is running.This is because, ResorceManager HA is enabled but the ", "product_code":"mrs", "title":"Why Does It Take a Long Time to Run a Task Upon ResourceManager Active/Standby Switchover?", "uri":"mrs_01_1789.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"639" + "doc_type":"usermanual", + "p_code":"748", + "code":"749" }, { "desc":"MapReduce job is not progressing for long timeThis is because of less memory. When the memory is less, the time taken by the job to copy the map output increases signific", "product_code":"mrs", "title":"Why Does a MapReduce Task Stay Unchanged for a Long Time?", "uri":"mrs_01_1790.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"640" + "doc_type":"usermanual", + "p_code":"748", + "code":"750" }, { "desc":"Why is the client unavailable when the MR ApplicationMaster or ResourceManager is moved to the D state during job running?When a task is running, the MR ApplicationMaster", "product_code":"mrs", "title":"Why the Client Hangs During Job Running?", "uri":"mrs_01_1791.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"641" + "doc_type":"usermanual", + "p_code":"748", + "code":"751" }, { "desc":"In security mode, why delegation token HDFS_DELEGATION_TOKEN is not found in the cache?In MapReduce, by default HDFS_DELEGATION_TOKEN will be canceled after the job compl", "product_code":"mrs", "title":"Why Cannot HDFS_DELEGATION_TOKEN Be Found in the Cache?", "uri":"mrs_01_1792.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"642" + "doc_type":"usermanual", + "p_code":"748", + "code":"752" }, { "desc":"How do I set the job priority when submitting a MapReduce task?You can add the parameter -Dmapreduce.job.priority= in the command to set task priority when subm", "product_code":"mrs", "title":"How Do I Set the Task Priority When Submitting a MapReduce Task?", "uri":"mrs_01_1793.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"643" + "doc_type":"usermanual", + "p_code":"748", + "code":"753" }, { "desc":"After the address of MapReduce JobHistoryServer is changed, why the wrong page is displayed when I click the tracking URL on the ResourceManager WebUI?JobHistoryServer ad", "product_code":"mrs", "title":"After the Address of 
MapReduce JobHistoryServer Is Changed, Why the Wrong Page is Displayed When I Click the Tracking URL on the ResourceManager WebUI?", "uri":"mrs_01_1797.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"644" + "doc_type":"usermanual", + "p_code":"748", + "code":"754" }, { "desc":"MapReduce or Yarn job fails in multiple nameService environment using viewFS.When using viewFS only the mount directories are accessible, so the most possible cause is th", "product_code":"mrs", "title":"MapReduce Job Failed in Multiple NameService Environment", "uri":"mrs_01_1799.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"645" + "doc_type":"usermanual", + "p_code":"748", + "code":"755" }, { "desc":"MapReduce task fails and the ratio of fault nodes to all nodes is smaller than the blacklist threshold configured by yarn.resourcemanager.am-scheduling.node-blacklisting-", "product_code":"mrs", "title":"Why a Fault MapReduce Node Is Not Blacklisted?", "uri":"mrs_01_1800.html", - "doc_type":"cmpntguide-lts", - "p_code":"638", - "code":"646" + "doc_type":"usermanual", + "p_code":"748", + "code":"756" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Oozie", "uri":"mrs_01_1807.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"647" + "code":"757" }, { "desc":"Oozie is an open-source workflow engine that is used to schedule and coordinate Hadoop jobs.Oozie can be used to submit a wide array of jobs, such as Hive, Spark2x, Loade", "product_code":"mrs", "title":"Using Oozie from Scratch", "uri":"mrs_01_1808.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"648" + "doc_type":"usermanual", + "p_code":"757", + "code":"758" }, { "desc":"This section describes how to use the Oozie client in an O&M scenario or service scenario.The client has been installed. For example, the installation directory is /opt/h", "product_code":"mrs", "title":"Using the Oozie Client", "uri":"mrs_01_1810.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"649" + "doc_type":"usermanual", + "p_code":"757", + "code":"759" }, { "desc":"When multiple Oozie nodes provide services at the same time, you can use ZooKeeper to provide high availability (HA), which helps avoid single points of failure (SPOFs) a", - "product_code":"mrs", + "product_code":"", "title":"Enabling Oozie High Availability (HA)", "uri":"mrs_01_24233.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"650" + "doc_type":"", + "p_code":"757", + "code":"760" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Oozie Client to Submit an Oozie Job", "uri":"mrs_01_1812.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"651" + "doc_type":"usermanual", + "p_code":"757", + "code":"761" }, { "desc":"This section describes how to use the Oozie client to submit a Hive job.Hive jobs are divided into the following types:Hive jobHive job that is connected in JDBC modeHive", "product_code":"mrs", - "title":"Submitting a Hive Job", + "title":"Submitting a Hive Job with Oozie Client", "uri":"mrs_01_1813.html", - "doc_type":"cmpntguide-lts", - "p_code":"651", - "code":"652" + "doc_type":"usermanual", + "p_code":"761", + "code":"762" }, { "desc":"This section describes how to submit a Spark2x job using the Oozie client.You are advised to download the latest client.The Spark2x and Oozie components and clients have ", "product_code":"mrs", - "title":"Submitting a Spark2x Job", + "title":"Submitting a Spark2x Job with Oozie Client", "uri":"mrs_01_1814.html", - "doc_type":"cmpntguide-lts", - "p_code":"651", - "code":"653" + "doc_type":"usermanual", + "p_code":"761", + "code":"763" }, { "desc":"This section describes how to submit a Loader job using the Oozie client.You are advised to download the latest client.The Hive and Oozie components and clients have been", "product_code":"mrs", - "title":"Submitting a Loader Job", + "title":"Submitting a Loader Job with Oozie Client", "uri":"mrs_01_1815.html", - "doc_type":"cmpntguide-lts", - "p_code":"651", - "code":"654" + "doc_type":"usermanual", + "p_code":"761", + "code":"764" }, { "desc":"This section describes how to submit a DistCp job using the Oozie client.You are advised to download the latest client.The HDFS and Oozie components and clients have been", "product_code":"mrs", - "title":"Submitting a DistCp Job", + "title":"Submitting a DistCp Job with Oozie Client", "uri":"mrs_01_2392.html", - "doc_type":"cmpntguide-lts", - "p_code":"651", - "code":"655" + "doc_type":"usermanual", + "p_code":"761", + "code":"765" }, { "desc":"In addition to Hive, Spark2x, and Loader jobs, MapReduce, Java, Shell, HDFS, SSH, SubWorkflow, Streaming, and scheduled jobs can be submitted using the Oozie client.You a", "product_code":"mrs", - "title":"Submitting Other Jobs", + "title":"Submitting Other Jobs with Oozie Client", "uri":"mrs_01_1816.html", - "doc_type":"cmpntguide-lts", - "p_code":"651", - "code":"656" + "doc_type":"usermanual", + "p_code":"761", + "code":"766" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Hue to Submit an Oozie Job", "uri":"mrs_01_1817.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"657" + "doc_type":"usermanual", + "p_code":"757", + "code":"767" }, { "desc":"You can submit an Oozie job on the Hue management page, but a workflow must be created before the job is submitted.Before using Hue to submit an Oozie job, configure the ", "product_code":"mrs", "title":"Creating a Workflow", "uri":"mrs_01_1818.html", - "doc_type":"cmpntguide-lts", - "p_code":"657", - "code":"658" + "doc_type":"usermanual", + "p_code":"767", + "code":"768" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Submitting a Workflow Job", "uri":"mrs_01_1819.html", - "doc_type":"cmpntguide-lts", - "p_code":"657", - "code":"659" + "doc_type":"usermanual", + "p_code":"767", + "code":"769" }, { "desc":"This section describes how to submit an Oozie job of the Hive2 type on the Hue web UI.For example, if the input parameter is INPUT=/user/admin/examples/input-data/table, ", "product_code":"mrs", - "title":"Submitting a Hive2 Job", + "title":"Submitting a Hive2 Job in Hue", "uri":"mrs_01_1820.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"660" + "doc_type":"usermanual", + "p_code":"769", + "code":"770" }, { "desc":"This section describes how to submit an Oozie job of the Spark2x type on Hue.For example, add the following parameters:hdfs://hacluster/user/admin/examples/input-data/tex", "product_code":"mrs", - "title":"Submitting a Spark2x Job", + "title":"Submitting a Spark2x Job in Hue", "uri":"mrs_01_1821.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"661" + "doc_type":"usermanual", + "p_code":"769", + "code":"771" }, { "desc":"This section describes how to submit an Oozie job of the Java type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow)", "product_code":"mrs", - "title":"Submitting a Java Job", + "title":"Submitting a Java Job in Hue", "uri":"mrs_01_1822.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"662" + "doc_type":"usermanual", + "p_code":"769", + "code":"772" }, { "desc":"This section describes how to submit an Oozie job of the Loader type on the Hue web UI.Job id is the ID of the Loader job to be orchestrated and can be obtained from the ", "product_code":"mrs", - "title":"Submitting a Loader Job", + "title":"Submitting a Loader Job in Hue", "uri":"mrs_01_1823.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"663" + "doc_type":"usermanual", + "p_code":"769", + "code":"773" }, { "desc":"This section describes how to submit an Oozie job of the MapReduce type on the Hue web UI.For example, set the value of mapred.input.dir to /user/admin/examples/input-dat", "product_code":"mrs", - "title":"Submitting a MapReduce Job", + "title":"Submitting a MapReduce Job in Hue", "uri":"mrs_01_1824.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"664" + "doc_type":"usermanual", + "p_code":"769", + "code":"774" }, { "desc":"This section describes how to submit an Oozie job of the Sub-workflow type on the Hue web UI.If you need to modify 
the job name before saving the job (default value: My W", "product_code":"mrs", - "title":"Submitting a Sub-workflow Job", + "title":"Submitting a Sub-workflow Job in Hue", "uri":"mrs_01_1825.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"665" + "doc_type":"usermanual", + "p_code":"769", + "code":"775" }, { "desc":"This section describes how to submit an Oozie job of the Shell type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow", "product_code":"mrs", - "title":"Submitting a Shell Job", + "title":"Submitting a Shell Job in Hue", "uri":"mrs_01_1826.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"666" + "doc_type":"usermanual", + "p_code":"769", + "code":"776" }, { "desc":"This section describes how to submit an Oozie job of the HDFS type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow)", "product_code":"mrs", - "title":"Submitting an HDFS Job", + "title":"Submitting an HDFS Job in Hue", "uri":"mrs_01_1827.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"667" + "doc_type":"usermanual", + "p_code":"769", + "code":"777" }, { "desc":"This section describes how to submit an Oozie job of the DistCp type on the Hue web UI.If yes, go to 4.If no, go to 7.source_ip: service address of the HDFS NameNode in t", "product_code":"mrs", - "title":"Submitting a DistCp Job", + "title":"Submitting a DistCp Job in Hue", "uri":"mrs_01_1829.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"668" + "doc_type":"usermanual", + "p_code":"769", + "code":"778" }, { "desc":"This section guides you to enable unidirectional password-free mutual trust when Oozie nodes are used to execute shell scripts of external nodes through SSH jobs.You have", "product_code":"mrs", - "title":"Example of Mutual Trust Operations", + "title":"Example of Mutual Trust Operations in Hue", "uri":"mrs_01_1830.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"669" + "doc_type":"usermanual", + "p_code":"769", + "code":"779" }, { "desc":"This section guides you to submit an Oozie job of the SSH type on the Hue web UI.If you need to modify the job name before saving the job (default value: My Workflow), cl", "product_code":"mrs", - "title":"Submitting an SSH Job", + "title":"Submitting an SSH Job in Hue", "uri":"mrs_01_1831.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"670" + "doc_type":"usermanual", + "p_code":"769", + "code":"780" }, { "desc":"This section describes how to submit a Hive job on the Hue web UI.After the job is submitted, you can view the related contents of the job, such as the detailed informati", "product_code":"mrs", - "title":"Submitting a Hive Script", + "title":"Submitting a Hive Script in Hue", "uri":"mrs_01_2372.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"671" + "doc_type":"usermanual", + "p_code":"769", + "code":"781" }, { "desc":"This section describes how to add an email job on the Hue web UI.To addresses: specifies the recipient email address. 
Separate multiple email addresses with commas (,).Su", "product_code":"mrs", - "title":"Submitting an Email Job", + "title":"Submitting an Email Job in Hue", "uri":"mrs_01_24114.html", - "doc_type":"cmpntguide-lts", - "p_code":"659", - "code":"672" + "doc_type":"usermanual", + "p_code":"769", + "code":"782" }, { "desc":"This section describes how to submit a job of the periodic scheduling type on the Hue web UI.Required workflow jobs have been configured before the coordinator task is su", "product_code":"mrs", "title":"Submitting a Coordinator Periodic Scheduling Job", "uri":"mrs_01_1840.html", - "doc_type":"cmpntguide-lts", - "p_code":"657", - "code":"673" + "doc_type":"usermanual", + "p_code":"767", + "code":"783" }, { "desc":"In the case that multiple scheduled jobs exist at the same time, you can manage the jobs in batches over the Bundle task. This section describes how to submit a job of th", "product_code":"mrs", "title":"Submitting a Bundle Batch Processing Job", "uri":"mrs_01_1841.html", - "doc_type":"cmpntguide-lts", - "p_code":"657", - "code":"674" + "doc_type":"usermanual", + "p_code":"767", + "code":"784" }, { "desc":"After the jobs are submitted, you can view the execution status of a specific job on Hue.", "product_code":"mrs", "title":"Querying the Operation Results", "uri":"mrs_01_1842.html", - "doc_type":"cmpntguide-lts", - "p_code":"657", - "code":"675" + "doc_type":"usermanual", + "p_code":"767", + "code":"785" }, { "desc":"Log path: The default storage paths of Oozie log files are as follows:Run log: /var/log/Bigdata/oozieAudit log: /var/log/Bigdata/audit/oozieLog archiving rule: Oozie logs", "product_code":"mrs", "title":"Oozie Log Overview", "uri":"mrs_01_1843.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"676" + "doc_type":"usermanual", + "p_code":"757", + "code":"786" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Oozie", "uri":"mrs_01_1844.html", - "doc_type":"cmpntguide-lts", - "p_code":"647", - "code":"677" + "doc_type":"usermanual", + "p_code":"757", + "code":"787" }, { "desc":"The Oozie client fails to submit a MapReduce job and a message \"Error: AUTHENTICATION: Could not authenticate, Authentication failed, status: 403, message: Forbidden\" is ", "product_code":"mrs", "title":"How Do I Resolve the Problem that the Oozie Client Fails to Submit a MapReduce Job?", "uri":"mrs_01_1845.html", - "doc_type":"cmpntguide-lts", - "p_code":"677", - "code":"678" + "doc_type":"usermanual", + "p_code":"787", + "code":"788" }, { "desc":"Why are not Coordinator scheduled jobs executed on time on the Hue or Oozie client?Use UTC time. 
For example, set start=2016-12-20T09:00Z in job.properties file.", "product_code":"mrs", "title":"Oozie Scheduled Tasks Are Not Executed on Time", "uri":"mrs_01_1846.html", - "doc_type":"cmpntguide-lts", - "p_code":"677", - "code":"679" + "doc_type":"usermanual", + "p_code":"787", + "code":"789" }, { "desc":"Why cannot a class error be found during task execution after a new JAR file is uploaded to the /user/oozie/share/lib directory on HDFS?Restart Oozie to make the director", "product_code":"mrs", "title":"The Update of the share lib Directory of Oozie Does Not Take Effect", "uri":"mrs_01_1847.html", - "doc_type":"cmpntguide-lts", - "p_code":"677", - "code":"680" + "doc_type":"usermanual", + "p_code":"787", + "code":"790" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Ranger", "uri":"mrs_01_1849.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"681" + "code":"791" }, { "desc":"Ranger provides a centralized permission management framework to implement fine-grained permission control on components such as HDFS, HBase, Hive, and Yarn. In addition,", "product_code":"mrs", "title":"Logging In to the Ranger Web UI", "uri":"mrs_01_1850.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"682" + "doc_type":"usermanual", + "p_code":"791", + "code":"792" }, { "desc":"This section guides you how to enable Ranger authentication. Ranger authentication is enabled by default in security mode and disabled by default in normal mode.If Enable", "product_code":"mrs", "title":"Enabling Ranger Authentication", "uri":"mrs_01_2393.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"683" + "doc_type":"usermanual", + "p_code":"791", + "code":"793" }, { "desc":"In the newly installed MRS cluster, Ranger is installed by default, with the Ranger authentication model enabled. The system administrator can set fine-grained security p", "product_code":"mrs", "title":"Configuring Component Permission Policies", "uri":"mrs_01_1851.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"684" + "doc_type":"usermanual", + "p_code":"791", + "code":"794" }, { "desc":"The system administrator can view audit logs of the Ranger running and the permission control after Ranger authentication is enabled on the Ranger web UI.", "product_code":"mrs", "title":"Viewing Ranger Audit Information", "uri":"mrs_01_1852.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"685" + "doc_type":"usermanual", + "p_code":"791", + "code":"795" }, { "desc":"Security zone can be configured using Ranger. Ranger administrators can divide resources of each component into multiple security zones where administrators set security ", "product_code":"mrs", "title":"Configuring a Security Zone", "uri":"mrs_01_1853.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"686" + "doc_type":"usermanual", + "p_code":"791", + "code":"796" }, { "desc":"By default, the Ranger data source of the security cluster can be accessed by FusionInsight Manager LDAP users. 
By default, the Ranger data source of a common cluster can", "product_code":"mrs", "title":"Changing the Ranger Data Source to LDAP for a Normal Cluster", "uri":"mrs_01_2394.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"687" + "doc_type":"usermanual", + "p_code":"791", + "code":"797" }, { "desc":"You can view Ranger permission settings, such as users, user groups, and roles.Users: displays all user information synchronized from LDAP or OS to Ranger.Groups: display", "product_code":"mrs", "title":"Viewing Ranger Permission Information", "uri":"mrs_01_1854.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"688" + "doc_type":"usermanual", + "p_code":"791", + "code":"798" + }, + { + "desc":"Ranger administrators can use Ranger to configure creation, execution, query, and deletion permissions for CDL users.The Ranger service has been installed and is running ", + "product_code":"mrs", + "title":"Adding a Ranger Access Permission Policy for CDL", + "uri":"mrs_01_24245.html", + "doc_type":"cmpntguide", + "p_code":"791", + "code":"799" }, { "desc":"The Ranger administrator can use Ranger to configure the read, write, and execution permissions on HDFS directories or files for HDFS users.The Ranger service has been in", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for HDFS", "uri":"mrs_01_1856.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"689" + "doc_type":"usermanual", + "p_code":"791", + "code":"800" }, { "desc":"Ranger administrators can use Ranger to configure permissions on HBase tables, column families, and columns for HBase users.The Ranger service has been installed and is r", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for HBase", "uri":"mrs_01_1857.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"690" + "doc_type":"usermanual", + "p_code":"791", + "code":"801" }, { "desc":"The Ranger administrator can use Ranger to set permissions for Hive users. 
The default administrator account of Hive is hive and the initial password is Hive@123.The Rang", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for Hive", "uri":"mrs_01_1858.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"691" + "doc_type":"usermanual", + "p_code":"791", + "code":"802" }, { "desc":"The Ranger administrator can use Ranger to configure Yarn administrator permissions for Yarn users, allowing them to manage Yarn queue resources.The Ranger service has be", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for Yarn", "uri":"mrs_01_1859.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"692" + "doc_type":"usermanual", + "p_code":"791", + "code":"803" }, { "desc":"The Ranger administrator can use Ranger to set permissions for Spark2x users.After Ranger authentication is enabled or disabled on Spark2x, you need to restart Spark2x.Do", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for Spark2x", "uri":"mrs_01_1860.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"693" + "doc_type":"usermanual", + "p_code":"791", + "code":"804" }, { "desc":"The Ranger administrator can use Ranger to configure the read, write, and management permissions of the Kafka topic and the management permission of the cluster for the K", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for Kafka", "uri":"mrs_01_1861.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"694" + "doc_type":"usermanual", + "p_code":"791", + "code":"805" }, { "desc":"Ranger administrators can use Ranger to configure the permission to manage databases, tables, and columns of data sources for HetuEngine users.The Ranger service has been", "product_code":"mrs", "title":"Adding a Ranger Access Permission Policy for HetuEngine", "uri":"mrs_01_1862.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"695" + "doc_type":"usermanual", + "p_code":"791", + "code":"806" + }, + { + "desc":"Ranger provides permission policies for services. When the number of service instances using Ranger increases, you need to adjust the specifications of Ranger.This sectio", + "product_code":"", + "title":"Configuring Ranger Specifications", + "uri":"mrs_01_24767.html", + "doc_type":"", + "p_code":"791", + "code":"807" }, { "desc":"Log path: The default storage path of Ranger logs is /var/log/Bigdata/ranger/Role name.RangerAdmin: /var/log/Bigdata/ranger/rangeradmin (run logs)TagSync: /var/log/Bigdat", "product_code":"mrs", "title":"Ranger Log Overview", "uri":"mrs_01_1865.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"696" + "doc_type":"usermanual", + "p_code":"791", + "code":"808" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Ranger", "uri":"mrs_01_1866.html", - "doc_type":"cmpntguide-lts", - "p_code":"681", - "code":"697" + "doc_type":"usermanual", + "p_code":"791", + "code":"809" }, { "desc":"During cluster installation, Ranger fails to be started, and the error message \"ERROR: cannot drop sequence X_POLICY_REF_ACCESS_TYPE_SEQ \" is displayed in the task list o", "product_code":"mrs", "title":"Why Ranger Startup Fails During the Cluster Installation?", "uri":"mrs_01_1867.html", - "doc_type":"cmpntguide-lts", - "p_code":"697", - "code":"698" + "doc_type":"usermanual", + "p_code":"809", + "code":"810" }, { "desc":"How do I determine whether the Ranger authentication is enabled for a service that supports the authentication?Log in to FusionInsight Manager and choose Cluster > Servic", "product_code":"mrs", "title":"How Do I Determine Whether the Ranger Authentication Is Used for a Service?", "uri":"mrs_01_1868.html", - "doc_type":"cmpntguide-lts", - "p_code":"697", - "code":"699" + "doc_type":"usermanual", + "p_code":"809", + "code":"811" }, { "desc":"When a new user logs in to Ranger, why is the 401 error reported after the password is changed?The UserSync synchronizes user data at an interval of 5 minutes by default.", "product_code":"mrs", "title":"Why Cannot a New User Log In to Ranger After Changing the Password?", "uri":"mrs_01_2300.html", - "doc_type":"cmpntguide-lts", - "p_code":"697", - "code":"700" + "doc_type":"usermanual", + "p_code":"809", + "code":"812" }, { "desc":"When a Ranger access permission policy is added for HBase and wildcard characters are used to search for an existing HBase table in the policy, the table cannot be found.", "product_code":"mrs", "title":"When an HBase Policy Is Added or Modified on Ranger, Wildcard Characters Cannot Be Used to Search for Existing HBase Tables", "uri":"mrs_01_2355.html", - "doc_type":"cmpntguide-lts", - "p_code":"697", - "code":"701" + "doc_type":"usermanual", + "p_code":"809", + "code":"813" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Spark2x", "uri":"mrs_01_1926.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"702" + "code":"814" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Basic Operation", "uri":"mrs_01_1928.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"703" + "doc_type":"usermanual", + "p_code":"814", + "code":"815" }, { "desc":"This section describes how to use Spark2x to submit Spark applications, including Spark Core and Spark SQL. Spark Core is the kernel module of Spark. 
It executes tasks an", "product_code":"mrs", "title":"Getting Started", "uri":"mrs_01_1929.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"704" + "doc_type":"usermanual", + "p_code":"815", + "code":"816" }, { "desc":"This section describes how to quickly configure common parameters and lists parameters that are not recommended to be modified when Spark2x is used.Some parameters have b", "product_code":"mrs", "title":"Configuring Parameters Rapidly", "uri":"mrs_01_1930.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"705" + "doc_type":"usermanual", + "p_code":"815", + "code":"817" }, { "desc":"This section describes common configuration items used in Spark. This section is divided into sub-sections based on features to help you quickly find required configurati", "product_code":"mrs", "title":"Common Parameters", "uri":"mrs_01_1931.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"706" + "doc_type":"usermanual", + "p_code":"815", + "code":"818" }, { "desc":"Spark on HBase allows users to query HBase tables in Spark SQL and to store data for HBase tables by using the Beeline tool. You can use HBase APIs to create, read data f", "product_code":"mrs", "title":"Spark on HBase Overview and Basic Applications", "uri":"mrs_01_1933.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"707" + "doc_type":"usermanual", + "p_code":"815", + "code":"819" }, { "desc":"Spark on HBase V2 allows users to query HBase tables in Spark SQL and to store data for HBase tables by using the Beeline tool. You can use HBase APIs to create, read dat", "product_code":"mrs", "title":"Spark on HBase V2 Overview and Basic Applications", "uri":"mrs_01_1934.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"708" + "doc_type":"usermanual", + "p_code":"815", + "code":"820" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"SparkSQL Permission Management(Security Mode)", "uri":"mrs_01_1935.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"709" + "doc_type":"usermanual", + "p_code":"815", + "code":"821" }, { "desc":"Similar to Hive, Spark SQL is a data warehouse framework built on Hadoop, providing storage of structured data like structured query language (SQL).MRS supports users, us", "product_code":"mrs", "title":"Spark SQL Permissions", "uri":"mrs_01_1936.html", - "doc_type":"cmpntguide-lts", - "p_code":"709", - "code":"710" + "doc_type":"usermanual", + "p_code":"821", + "code":"822" }, { "desc":"This section describes how to create and configure a SparkSQL role on Manager as the system administrator. The Spark SQL role can be configured with the spark dministrato", "product_code":"mrs", "title":"Creating a Spark SQL Role", "uri":"mrs_01_1937.html", - "doc_type":"cmpntguide-lts", - "p_code":"709", - "code":"711" + "doc_type":"usermanual", + "p_code":"821", + "code":"823" }, { "desc":"You can configure related permissions if you need to access tables or databases created by other users. SparkSQL supports column-based permission control. 
If a user needs", "product_code":"mrs", "title":"Configuring Permissions for SparkSQL Tables, Columns, and Databases", "uri":"mrs_01_1938.html", - "doc_type":"cmpntguide-lts", - "p_code":"709", - "code":"712" + "doc_type":"usermanual", + "p_code":"821", + "code":"824" }, { "desc":"SparkSQL may need to be associated with other components. For example, Spark on HBase requires HBase permissions. The following describes how to associate SparkSQL with H", "product_code":"mrs", "title":"Configuring Permissions for SparkSQL to Use Other Components", "uri":"mrs_01_1939.html", - "doc_type":"cmpntguide-lts", - "p_code":"709", - "code":"713" + "doc_type":"usermanual", + "p_code":"821", + "code":"825" }, { "desc":"This section describes how to configure SparkSQL permission management functions (client configuration is similar to server configuration). To enable table permission, ad", "product_code":"mrs", "title":"Configuring the Client and Server", "uri":"mrs_01_1940.html", - "doc_type":"cmpntguide-lts", - "p_code":"709", - "code":"714" + "doc_type":"usermanual", + "p_code":"821", + "code":"826" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Scenario-Specific Configuration", "uri":"mrs_01_1941.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"715" + "doc_type":"usermanual", + "p_code":"815", + "code":"827" }, { "desc":"In this mode, multiple ThriftServers coexist in the cluster and the client can randomly connect any ThriftServer to perform service operations. When one or multiple Thrif", "product_code":"mrs", "title":"Configuring Multi-active Instance Mode", "uri":"mrs_01_1942.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"716" + "doc_type":"usermanual", + "p_code":"827", + "code":"828" }, { "desc":"In multi-tenant mode, JDBCServers are bound with tenants. Each tenant corresponds to one or more JDBCServers, and a JDBCServer provides services for only one tenant. Diff", "product_code":"mrs", "title":"Configuring the Multi-tenant Mode", "uri":"mrs_01_1943.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"717" + "doc_type":"usermanual", + "p_code":"827", + "code":"829" }, { "desc":"When using a cluster, if you want to switch between multi-active instance mode and multi-tenant mode, the following configurations are required.Switch from multi-tenant m", "product_code":"mrs", "title":"Configuring the Switchover Between the Multi-active Instance Mode and the Multi-tenant Mode", "uri":"mrs_01_1944.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"718" + "doc_type":"usermanual", + "p_code":"827", + "code":"830" }, { "desc":"Functions such as UI, EventLog, and dynamic resource scheduling in Spark are implemented through event transfer. Events include SparkListenerJobStart and SparkListenerJob", "product_code":"mrs", "title":"Configuring the Size of the Event Queue", "uri":"mrs_01_1945.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"719" + "doc_type":"usermanual", + "p_code":"827", + "code":"831" }, { "desc":"When the executor off-heap memory is too small, or processes with higher priority preempt resources, the physical memory usage will exceed the maximal value. 
To prevent t", "product_code":"mrs", "title":"Configuring Executor Off-Heap Memory", "uri":"mrs_01_1947.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"720" + "doc_type":"usermanual", + "p_code":"827", + "code":"832" }, { "desc":"A large amount of memory is required when Spark SQL executes a query, especially during Aggregate and Join operations. If the memory is limited, OutOfMemoryError may occu", "product_code":"mrs", "title":"Enhancing Stability in a Limited Memory Condition", "uri":"mrs_01_1948.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"721" + "doc_type":"usermanual", + "p_code":"827", + "code":"833" }, { "desc":"When yarn.log-aggregation-enable of Yarn is set to true, the container log aggregation function is enabled. Log aggregation indicates that after applications are run on Y", "product_code":"mrs", "title":"Viewing Aggregated Container Logs on the Web UI", "uri":"mrs_01_1949.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"722" + "doc_type":"usermanual", + "p_code":"827", + "code":"834" }, { "desc":"SQL statements executed by users may contain sensitive information (such as passwords). Disclosure of such information may incur security risks. You can configure the spa", "product_code":"mrs", "title":"Configuring Whether to Display Spark SQL Statements Containing Sensitive Words", "uri":"mrs_01_1950.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"723" + "doc_type":"usermanual", + "p_code":"827", + "code":"835" }, { "desc":"Values of some configuration parameters of Spark client vary depending on its work mode (YARN-Client or YARN-Cluster). If you switch Spark client between different modes ", "product_code":"mrs", "title":"Configuring Environment Variables in Yarn-Client and Yarn-Cluster Modes", "uri":"mrs_01_1951.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"724" + "doc_type":"usermanual", + "p_code":"827", + "code":"836" }, { "desc":"By default, SparkSQL divides data into 200 data blocks during shuffle. In data-intensive scenarios, each data block may have excessive size. If a single data block of a t", "product_code":"mrs", "title":"Configuring the Default Number of Data Blocks Divided by SparkSQL", "uri":"mrs_01_1952.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"725" + "doc_type":"usermanual", + "p_code":"827", + "code":"837" }, { "desc":"The compression format of a Parquet table can be configured as follows:If the Parquet table is a partitioned one, set the parquet.compression parameter of the Parquet tab", "product_code":"mrs", "title":"Configuring the Compression Format of a Parquet Table", "uri":"mrs_01_1953.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"726" + "doc_type":"usermanual", + "p_code":"827", + "code":"838" }, { "desc":"In Spark WebUI, the Executor page can display information about Lost Executor. Executors are dynamically recycled. 
If the JDBCServer tasks are large, there may be too man", "product_code":"mrs", "title":"Configuring the Number of Lost Executors Displayed in WebUI", "uri":"mrs_01_1954.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"727" + "doc_type":"usermanual", + "p_code":"827", + "code":"839" }, { "desc":"In some scenarios, to locate problems or check information by changing the log level,you can add the -Dlog4j.configuration.watch=true parameter to the JVM parameter of a ", "product_code":"mrs", "title":"Setting the Log Level Dynamically", "uri":"mrs_01_1957.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"728" + "doc_type":"usermanual", + "p_code":"827", + "code":"840" }, { "desc":"When Spark is used to submit tasks, the driver obtains tokens from HBase by default. To access HBase, you need to configure the jaas.conf file for security authentication", "product_code":"mrs", "title":"Configuring Whether Spark Obtains HBase Tokens", "uri":"mrs_01_1958.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"729" + "doc_type":"usermanual", + "p_code":"827", + "code":"841" }, { "desc":"If the Spark Streaming application is connected to Kafka, after the Spark Streaming application is terminated abnormally and restarted from the checkpoint, the system pre", "product_code":"mrs", "title":"Configuring LIFO for Kafka", "uri":"mrs_01_1959.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"730" + "doc_type":"usermanual", + "p_code":"827", + "code":"842" }, { "desc":"When the Spark Streaming application is connected to Kafka and the application is restarted, the application reads data from Kafka based on the last read topic offset and", "product_code":"mrs", "title":"Configuring Reliability for Connected Kafka", "uri":"mrs_01_1960.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"731" + "doc_type":"usermanual", + "p_code":"827", + "code":"843" }, { "desc":"When a query statement is executed, the returned result may be large (containing more than 100,000 records). In this case, JDBCServer out of memory (OOM) may occur. There", "product_code":"mrs", "title":"Configuring Streaming Reading of Driver Execution Results", "uri":"mrs_01_1961.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"732" + "doc_type":"usermanual", + "p_code":"827", + "code":"844" }, { "desc":"When you perform the select query in Hive partitioned tables, the FileNotFoundException exception is displayed if a specified partition path does not exist in HDFS. To av", "product_code":"mrs", "title":"Filtering Partitions without Paths in Partitioned Tables", "uri":"mrs_01_1962.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"733" + "doc_type":"usermanual", + "p_code":"827", + "code":"845" }, { "desc":"Users need to implement security protection for Spark2x web UI when some data on the UI cannot be viewed by other users. Once a user attempts to log in to the UI, Spark2x", "product_code":"mrs", "title":"Configuring Spark2x Web UI ACLs", "uri":"mrs_01_1963.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"734" + "doc_type":"usermanual", + "p_code":"827", + "code":"846" }, { "desc":"ORC is a column-based storage format in the Hadoop ecosystem. 
It originates from Apache Hive and is used to reduce the Hadoop data storage space and accelerate the Hive q", "product_code":"mrs", "title":"Configuring Vector-based ORC Data Reading", "uri":"mrs_01_1964.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"735" + "doc_type":"usermanual", + "p_code":"827", + "code":"847" }, { "desc":"In earlier versions, the predicate for pruning Hive table partitions is pushed down. Only comparison expressions between column names and integers or character strings ca", "product_code":"mrs", "title":"Broaden Support for Hive Partition Pruning Predicate Pushdown", "uri":"mrs_01_1965.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"736" + "doc_type":"usermanual", + "p_code":"827", + "code":"848" }, { "desc":"In earlier versions, when the insert overwrite syntax is used to overwrite partition tables, only partitions with specified expressions are matched, and partitions withou", "product_code":"mrs", "title":"Hive Dynamic Partition Overwriting Syntax", "uri":"mrs_01_1966.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"737" + "doc_type":"usermanual", + "p_code":"827", + "code":"849" }, { "desc":"The execution plan for SQL statements is optimized in Spark. Common optimization rules are heuristic optimization rules. Heuristic optimization rules are provided based o", "product_code":"mrs", "title":"Configuring the Column Statistics Histogram to Enhance the CBO Accuracy", "uri":"mrs_01_1967.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"738" + "doc_type":"usermanual", + "p_code":"827", + "code":"850" }, { "desc":"JobHistory can use local disks to cache the historical data of Spark applications to prevent the JobHistory memory from loading a large amount of application data, reduci", "product_code":"mrs", "title":"Configuring Local Disk Cache for JobHistory", "uri":"mrs_01_1969.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"739" + "doc_type":"usermanual", + "p_code":"827", + "code":"851" }, { "desc":"The Spark SQL adaptive execution feature enables Spark SQL to optimize subsequent execution processes based on intermediate results to improve overall execution efficienc", "product_code":"mrs", "title":"Configuring Spark SQL to Enable the Adaptive Execution Feature", "uri":"mrs_01_1970.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"740" + "doc_type":"usermanual", + "p_code":"827", + "code":"852" }, { "desc":"When the event log mode is enabled for Spark, that is, spark.eventLog.enabled is set to true, events are written to a configured log file to record the program running pr", "product_code":"mrs", "title":"Configuring Event Log Rollover", "uri":"mrs_01_24170.html", - "doc_type":"cmpntguide-lts", - "p_code":"715", - "code":"741" + "doc_type":"usermanual", + "p_code":"827", + "code":"853" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.Currently, the Drop Partition command in Spark supports partition deletion using only the equal sign (=). 
This configurati", + "product_code":"", + "title":"Configuring the Drop Partition Command to Support Batch Deletion", + "uri":"mrs_01_24745.html", + "doc_type":"", + "p_code":"827", + "code":"854" + }, + { + "desc":"This section applies only to MRS 3.2.0 or later.You can configure the following parameters to execute custom code when Executor exits.Configure the following parameters i", + "product_code":"", + "title":"Enabling an Executor to Execute Custom Code When Exiting", + "uri":"mrs_01_24805.html", + "doc_type":"", + "p_code":"827", + "code":"855" }, { "desc":"When Ranger is used as the permission management service of Spark SQL, the certificate in the cluster is required for accessing RangerAdmin. If you use a third-party JDK ", "product_code":"mrs", "title":"Adapting to the Third-party JDK When Ranger Is Used", "uri":"mrs_01_2317.html", - "doc_type":"cmpntguide-lts", - "p_code":"703", - "code":"742" + "doc_type":"usermanual", + "p_code":"815", + "code":"856" }, { "desc":"Log paths:Executor run log: ${BIGDATA_DATA_HOME}/hadoop/data${i}/nm/containerlogs/application_${appid}/container_{$contid}The logs of running tasks are stored in the prec", "product_code":"mrs", "title":"Spark2x Logs", "uri":"mrs_01_1971.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"743" + "doc_type":"usermanual", + "p_code":"814", + "code":"857" }, { "desc":"Container logs of running Spark applications are distributed on multiple nodes. This section describes how to quickly obtain container logs.You can run the yarn logs comm", "product_code":"mrs", "title":"Obtaining Container Logs of a Running Spark Application", "uri":"mrs_01_1972.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"744" + "doc_type":"usermanual", + "p_code":"814", + "code":"858" }, { "desc":"In a large-scale Hadoop production cluster, HDFS metadata is stored in the NameNode memory, and the cluster scale is restricted by the memory limitation of each NameNode.", "product_code":"mrs", "title":"Small File Combination Tools", "uri":"mrs_01_1973.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"745" + "doc_type":"usermanual", + "p_code":"814", + "code":"859" }, { "desc":"The first query of CarbonData is slow, which may cause a delay for nodes that have high requirements on real-time performance.The tool provides the following functions:Pr", "product_code":"mrs", "title":"Using CarbonData for First Query", "uri":"mrs_01_2362.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"746" + "doc_type":"usermanual", + "p_code":"814", + "code":"860" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Spark2x Performance Tuning", "uri":"mrs_01_1974.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"747" + "doc_type":"usermanual", + "p_code":"814", + "code":"861" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Spark Core Tuning", "uri":"mrs_01_1975.html", - "doc_type":"cmpntguide-lts", - "p_code":"747", - "code":"748" + "doc_type":"usermanual", + "p_code":"861", + "code":"862" }, { "desc":"Spark supports the following types of serialization:JavaSerializerKryoSerializerData serialization affects the Spark application performance. In specific data format, Kry", "product_code":"mrs", "title":"Data Serialization", "uri":"mrs_01_1976.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"749" + "doc_type":"usermanual", + "p_code":"862", + "code":"863" }, { "desc":"Spark is a memory-based computing frame. If the memory is insufficient during computing, the Spark execution efficiency will be adversely affected. You can determine whet", "product_code":"mrs", "title":"Optimizing Memory Configuration", "uri":"mrs_01_1977.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"750" + "doc_type":"usermanual", + "p_code":"862", + "code":"864" }, { "desc":"The degree of parallelism (DOP) specifies the number of tasks to be executed concurrently. It determines the number of data blocks after the shuffle operation. Configure ", "product_code":"mrs", "title":"Setting the DOP", "uri":"mrs_01_1978.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"751" + "doc_type":"usermanual", + "p_code":"862", + "code":"865" }, { "desc":"Broadcast distributes data sets to each node. It allows data to be obtained locally when a data set is needed during a Spark task. If broadcast is not used, data serializ", "product_code":"mrs", "title":"Using Broadcast Variables", "uri":"mrs_01_1979.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"752" + "doc_type":"usermanual", + "p_code":"862", + "code":"866" }, { "desc":"When the Spark system runs applications that contain a shuffle process, an executor process also writes shuffle data and provides shuffle data for other executors in addi", "product_code":"mrs", "title":"Using the external shuffle service to improve performance", "uri":"mrs_01_1980.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"753" + "doc_type":"usermanual", + "p_code":"862", + "code":"867" }, { "desc":"Resources are a key factor that affects Spark execution efficiency. When a long-running service (such as the JDBCServer) is allocated with multiple executors without task", "product_code":"mrs", "title":"Configuring Dynamic Resource Scheduling in Yarn Mode", "uri":"mrs_01_1981.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"754" + "doc_type":"usermanual", + "p_code":"862", + "code":"868" }, { "desc":"There are three processes in Spark on Yarn mode: driver, ApplicationMaster, and executor. The Driver and Executor handle the scheduling and running of the task. The Appli", "product_code":"mrs", "title":"Configuring Process Parameters", "uri":"mrs_01_1982.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"755" + "doc_type":"usermanual", + "p_code":"862", + "code":"869" }, { "desc":"Optimal program structure helps increase execution efficiency. 
During application programming, avoid shuffle operations and combine narrow-dependency operations.This topi", "product_code":"mrs", "title":"Designing the Direction Acyclic Graph (DAG)", "uri":"mrs_01_1983.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"756" + "doc_type":"usermanual", + "p_code":"862", + "code":"870" }, { "desc":"If the overhead of each record is high, for example:Use mapPartitions to calculate data by partition.Use mapPartitions to flexibly operate data. For example, to calculate", "product_code":"mrs", "title":"Experience", "uri":"mrs_01_1984.html", - "doc_type":"cmpntguide-lts", - "p_code":"748", - "code":"757" + "doc_type":"usermanual", + "p_code":"862", + "code":"871" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Spark SQL and DataFrame Tuning", "uri":"mrs_01_1985.html", - "doc_type":"cmpntguide-lts", - "p_code":"747", - "code":"758" + "doc_type":"usermanual", + "p_code":"861", + "code":"872" }, { "desc":"When two tables are joined in Spark SQL, the broadcast function (see section \"Using Broadcast Variables\") can be used to broadcast tables to each node. This minimizes shu", "product_code":"mrs", "title":"Optimizing the Spark SQL Join Operation", "uri":"mrs_01_1986.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"759" + "doc_type":"usermanual", + "p_code":"872", + "code":"873" }, { "desc":"When multiple tables are joined in Spark SQL, skew occurs in join keys and the data volume in some Hash buckets is much higher than that in other buckets. As a result, so", "product_code":"mrs", "title":"Improving Spark SQL Calculation Performance Under Data Skew", "uri":"mrs_01_1987.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"760" + "doc_type":"usermanual", + "p_code":"872", + "code":"874" }, { "desc":"A Spark SQL table may have many small files (far smaller than an HDFS block), each of which maps to a partition on the Spark by default. In other words, each small file i", "product_code":"mrs", "title":"Optimizing Spark SQL Performance in the Small File Scenario", "uri":"mrs_01_1988.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"761" + "doc_type":"usermanual", + "p_code":"872", + "code":"875" }, { "desc":"The INSERT...SELECT operation needs to be optimized if any of the following conditions is true:Many small files need to be queried.A few large files need to be queried.Th", "product_code":"mrs", "title":"Optimizing the INSERT...SELECT Operation", "uri":"mrs_01_1989.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"762" + "doc_type":"usermanual", + "p_code":"872", + "code":"876" }, { "desc":"Multiple clients can be connected to JDBCServer at the same time. 
However, if the number of concurrent tasks is too large, the default configuration of JDBCServer must be", "product_code":"mrs", "title":"Multiple JDBC Clients Concurrently Connecting to JDBCServer", "uri":"mrs_01_1990.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"763" + "doc_type":"usermanual", + "p_code":"872", + "code":"877" }, { "desc":"When SparkSQL inserts data to dynamic partitioned tables, the more partitions there are, the more HDFS files a single task generates and the more memory metadata occupies", "product_code":"mrs", "title":"Optimizing Memory when Data Is Inserted into Dynamic Partitioned Tables", "uri":"mrs_01_1992.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"764" + "doc_type":"usermanual", + "p_code":"872", + "code":"878" }, { "desc":"A Spark SQL table may have many small files (far smaller than an HDFS block), each of which maps to a partition on the Spark by default. In other words, each small file i", "product_code":"mrs", "title":"Optimizing Small Files", "uri":"mrs_01_1995.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"765" + "doc_type":"usermanual", + "p_code":"872", + "code":"879" }, { "desc":"Spark SQL supports hash aggregate algorithm. Namely, use fast aggregate hashmap as cache to improve aggregate performance. The hashmap replaces the previous ColumnarBatch", "product_code":"mrs", "title":"Optimizing the Aggregate Algorithms", "uri":"mrs_01_1996.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"766" + "doc_type":"usermanual", + "p_code":"872", + "code":"880" }, { "desc":"Save the partition information about the datasource table to the Metastore and process partition information in the Metastore.Optimize the datasource tables, support synt", "product_code":"mrs", "title":"Optimizing Datasource Tables", "uri":"mrs_01_1997.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"767" + "doc_type":"usermanual", + "p_code":"872", + "code":"881" }, { "desc":"Spark SQL supports rule-based optimization by default. However, the rule-based optimization cannot ensure that Spark selects the optimal query plan. Cost-Based Optimizer ", "product_code":"mrs", "title":"Merging CBO", "uri":"mrs_01_1998.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"768" + "doc_type":"usermanual", + "p_code":"872", + "code":"882" }, { "desc":"This section describes how to enable or disable the query optimization for inter-source complex SQL.(Optional) Prepare for connecting to the MPPDB data source.If the data", "product_code":"mrs", "title":"Optimizing SQL Query of Data of Multiple Sources", "uri":"mrs_01_1999.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"769" + "doc_type":"usermanual", + "p_code":"872", + "code":"883" }, { "desc":"This section describes the optimization suggestions for SQL statements in multi-level nesting and hybrid join scenarios.The following provides an example of complex query", "product_code":"mrs", "title":"SQL Optimization for Multi-level Nesting and Hybrid Join", "uri":"mrs_01_2000.html", - "doc_type":"cmpntguide-lts", - "p_code":"758", - "code":"770" + "doc_type":"usermanual", + "p_code":"872", + "code":"884" }, { "desc":"Streaming is a mini-batch streaming processing framework that features second-level delay and high throughput. 
To optimize Streaming is to improve its throughput while ma", "product_code":"mrs", "title":"Spark Streaming Tuning", "uri":"mrs_01_2001.html", - "doc_type":"cmpntguide-lts", - "p_code":"747", - "code":"771" + "doc_type":"usermanual", + "p_code":"861", + "code":"885" }, { "desc":"In the scenario where a small number of requests are frequently sent from Spark on OBS to OBS, you can disable OBS monitoring to improve performance.Modify the configurat", "product_code":"mrs", "title":"Spark on OBS Tuning", "uri":"mrs_01_24056.html", - "doc_type":"cmpntguide-lts", - "p_code":"747", - "code":"772" + "doc_type":"usermanual", + "p_code":"861", + "code":"886" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Spark2x", "uri":"mrs_01_2002.html", - "doc_type":"cmpntguide-lts", - "p_code":"702", - "code":"773" + "doc_type":"usermanual", + "p_code":"814", + "code":"887" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Spark Core", "uri":"mrs_01_2003.html", - "doc_type":"cmpntguide-lts", - "p_code":"773", - "code":"774" + "doc_type":"usermanual", + "p_code":"887", + "code":"888" }, { "desc":"How do I view the aggregated container logs on the page when the log aggregation function is enabled on YARN?For details, see Viewing Aggregated Container Logs on the Web", "product_code":"mrs", "title":"How Do I View Aggregated Spark Application Logs?", "uri":"mrs_01_2004.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"775" + "doc_type":"usermanual", + "p_code":"888", + "code":"889" }, { "desc":"Communication between ApplicationMaster and ResourceManager remains abnormal for a long time. Why is the driver return code inconsistent with application status on Resour", "product_code":"mrs", "title":"Why Is the Return Code of Driver Inconsistent with Application State Displayed on ResourceManager WebUI?", "uri":"mrs_01_2005.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"776" + "doc_type":"usermanual", + "p_code":"888", + "code":"890" }, { "desc":"Why cannot exit the Driver process after running the yarn application -kill applicationID command to stop the Spark Streaming application?Running the yarn application -ki", "product_code":"mrs", "title":"Why Cannot Exit the Driver Process?", "uri":"mrs_01_2006.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"777" + "doc_type":"usermanual", + "p_code":"888", + "code":"891" }, { "desc":"On a large cluster of 380 nodes, run the ScalaSort test case in the HiBench test that runs the 29T data, and configure Executor as --executor-cores 4. 
The following abnor", "product_code":"mrs", "title":"Why Does FetchFailedException Occur When the Network Connection Is Timed out", "uri":"mrs_01_2007.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"778" + "doc_type":"usermanual", + "p_code":"888", + "code":"892" }, { "desc":"How to configure the event queue size if the following Driver log information is displayed indicating that the event queue overflows?Common applicationsDropping SparkList", "product_code":"mrs", "title":"How to Configure Event Queue Size If Event Queue Overflows?", "uri":"mrs_01_2008.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"779" + "doc_type":"usermanual", + "p_code":"888", + "code":"893" }, { "desc":"During Spark application execution, if the driver fails to connect to ResourceManager, the following error is reported and it does not exit for a long time. What can I do", "product_code":"mrs", "title":"What Can I Do If the getApplicationReport Exception Is Recorded in Logs During Spark Application Execution and the Application Does Not Exit for a Long Time?", "uri":"mrs_01_2009.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"780" + "doc_type":"usermanual", + "p_code":"888", + "code":"894" }, { "desc":"When Spark executes an application, an error similar to the following is reported and the application ends. What can I do?Symptom: The value of spark.rpc.io.connectionTim", "product_code":"mrs", "title":"What Can I Do If \"Connection to ip:port has been quiet for xxx ms while there are outstanding requests\" Is Reported When Spark Executes an Application and the Application Ends?", "uri":"mrs_01_2010.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"781" + "doc_type":"usermanual", + "p_code":"888", + "code":"895" }, { "desc":"If the NodeManager is shut down with the Executor dynamic allocation enabled, the Executors on the node where the NodeManeger is shut down fail to be removed from the dri", "product_code":"mrs", "title":"Why Do Executors Fail to be Removed After the NodeManeger Is Shut Down?", "uri":"mrs_01_2011.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"782" + "doc_type":"usermanual", + "p_code":"888", + "code":"896" }, { "desc":"ExternalShuffle is enabled for the application that runs Spark. Task loss occurs in the application because the message \"java.lang.NullPointerException: Password cannot b", "product_code":"mrs", "title":"What Can I Do If the Message \"Password cannot be null if SASL is enabled\" Is Displayed?", "uri":"mrs_01_2012.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"783" + "doc_type":"usermanual", + "p_code":"888", + "code":"897" }, { "desc":"When inserting data into the dynamic partition table, a large number of shuffle files are damaged due to the disk disconnection, node error, and the like. 
In this case, w", "product_code":"mrs", "title":"What Should I Do If the Message \"Failed to CREATE_FILE\" Is Displayed in the Restarted Tasks When Data Is Inserted Into the Dynamic Partition Table?", "uri":"mrs_01_2013.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"784" + "doc_type":"usermanual", + "p_code":"888", + "code":"898" }, { "desc":"When Hash shuffle is used to run a job that consists of 1000000 map tasks x 100000 reduce tasks, run logs report many message failures and Executor heartbeat timeout, lea", "product_code":"mrs", "title":"Why Tasks Fail When Hash Shuffle Is Used?", "uri":"mrs_01_2014.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"785" + "doc_type":"usermanual", + "p_code":"888", + "code":"899" }, { "desc":"When the http(s)://: mode is used to access the Spark JobHistory page, if the displayed Spark JobHistory page is not the page of FusionInsight Manag", "product_code":"mrs", "title":"What Can I Do If the Error Message \"DNS query failed\" Is Displayed When I Access the Aggregated Logs Page of Spark Applications?", "uri":"mrs_01_2015.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"786" + "doc_type":"usermanual", + "p_code":"888", + "code":"900" }, { "desc":"When I execute a 100 TB TPC-DS test suite in the JDBCServer mode, the \"Timeout waiting for task\" is displayed. As a result, shuffle fetch fails, the stage keeps retrying,", "product_code":"mrs", "title":"What Can I Do If Shuffle Fetch Fails Due to the \"Timeout Waiting for Task\" Exception?", "uri":"mrs_01_2016.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"787" + "doc_type":"usermanual", + "p_code":"888", + "code":"901" }, { "desc":"When I run Spark tasks with a large data volume, for example, 100 TB TPCDS test suite, why does the Stage retry due to Executor loss sometimes? The message \"Executor 532 ", "product_code":"mrs", "title":"Why Does the Stage Retry due to the Crash of the Executor?", "uri":"mrs_01_2017.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"788" + "doc_type":"usermanual", + "p_code":"888", + "code":"902" }, { "desc":"When more than 50 terabytes of data is shuffled, some executors fail to register shuffle services due to timeout. The shuffle tasks then fail. Why? The error log is as fo", "product_code":"mrs", "title":"Why Do the Executors Fail to Register Shuffle Services During the Shuffle of a Large Amount of Data?", "uri":"mrs_01_2018.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"789" + "doc_type":"usermanual", + "p_code":"888", + "code":"903" }, { "desc":"During the execution of Spark applications, if the YARN External Shuffle service is enabled and there are too many shuffle tasks, the java.lang.OutofMemoryError: Direct b", "product_code":"mrs", "title":"Why Does the Out of Memory Error Occur in NodeManager During the Execution of Spark Applications", "uri":"mrs_01_2019.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"790" + "doc_type":"usermanual", + "p_code":"888", + "code":"904" }, { "desc":"Execution of the sparkbench task (for example, Wordcount) of HiBench6 fails. The bench.log indicates that the Yarn task fails to be executed. 
The failure information disp", "product_code":"mrs", "title":"Why Does the Realm Information Fail to Be Obtained When SparkBench is Run on HiBench for the Cluster in Security Mode?", "uri":"mrs_01_2021.html", - "doc_type":"cmpntguide-lts", - "p_code":"774", - "code":"791" + "doc_type":"usermanual", + "p_code":"888", + "code":"905" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Spark SQL and DataFrame", "uri":"mrs_01_2022.html", - "doc_type":"cmpntguide-lts", - "p_code":"773", - "code":"792" + "doc_type":"usermanual", + "p_code":"887", + "code":"906" }, { "desc":"Suppose that there is a table src(d1, d2, m) with the following data:The results for statement \"select d1, sum(d1) from src group by d1, d2 with rollup\" are shown as belo", "product_code":"mrs", "title":"What Do I have to Note When Using Spark SQL ROLLUP and CUBE?", "uri":"mrs_01_2023.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"793" + "doc_type":"usermanual", + "p_code":"906", + "code":"907" }, { "desc":"Why temporary tables of the previous database are displayed after the database is switched?Create a temporary DataSource table, for example:create temporary table ds_parq", "product_code":"mrs", "title":"Why Spark SQL Is Displayed as a Temporary Table in Different Databases?", "uri":"mrs_01_2024.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"794" + "doc_type":"usermanual", + "p_code":"906", + "code":"908" }, { "desc":"Is it possible to assign parameter values through Spark commands, in addition to through a user interface or a configuration file?Spark configuration options can be defin", "product_code":"mrs", "title":"How to Assign a Parameter Value in a Spark Command?", "uri":"mrs_01_2025.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"795" + "doc_type":"usermanual", + "p_code":"906", + "code":"909" }, { "desc":"The following error information is displayed when a new user creates a table using SparkSQL:When you create a table using Spark SQL, the interface of Hive is called by th", "product_code":"mrs", "title":"What Directory Permissions Do I Need to Create a Table Using SparkSQL?", "uri":"mrs_01_2026.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"796" + "doc_type":"usermanual", + "p_code":"906", + "code":"910" }, { "desc":"Why do I fail to delete the UDF using another service, for example, delete the UDF created by Hive using Spark SQL.The UDF can be created using any of the following servi", "product_code":"mrs", "title":"Why Do I Fail to Delete the UDF Using Another Service?", "uri":"mrs_01_2027.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"797" + "doc_type":"usermanual", + "p_code":"906", + "code":"911" }, { "desc":"Why cannot I query newly inserted data in a parquet Hive table using SparkSQL? This problem occurs in the following scenarios:For partitioned tables and non-partitioned t", "product_code":"mrs", "title":"Why Cannot I Query Newly Inserted Data in a Parquet Hive Table Using SparkSQL?", "uri":"mrs_01_2028.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"798" + "doc_type":"usermanual", + "p_code":"906", + "code":"912" }, { "desc":"What is cache table used for? 
Which point should I pay attention to while using cache table?Spark SQL caches tables into memory so that data can be directly read from mem", "product_code":"mrs", "title":"How to Use Cache Table?", "uri":"mrs_01_2029.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"799" + "doc_type":"usermanual", + "p_code":"906", + "code":"913" }, { "desc":"During the repartition operation, the number of blocks (spark.sql.shuffle.partitions) is set to 4,500, and the number of keys used by repartition exceeds 4,000. It is exp", "product_code":"mrs", "title":"Why Are Some Partitions Empty During Repartition?", "uri":"mrs_01_2030.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"800" + "doc_type":"usermanual", + "p_code":"906", + "code":"914" }, { "desc":"When the default configuration is used, 16 terabytes of text data fails to be converted into 4 terabytes of parquet data, and the error information below is displayed. Wh", "product_code":"mrs", "title":"Why Does 16 Terabytes of Text Data Fails to Be Converted into 4 Terabytes of Parquet Data?", "uri":"mrs_01_2031.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"801" + "doc_type":"usermanual", + "p_code":"906", + "code":"915" }, { "desc":"When the table name is set to table, why the error information similar to the following is displayed after the drop table table command or other command is run?The word t", "product_code":"mrs", "title":"Why the Operation Fails When the Table Name Is TABLE?", "uri":"mrs_01_2033.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"802" + "doc_type":"usermanual", + "p_code":"906", + "code":"916" }, { "desc":"When the analyze table statement is executed using spark-sql, the task is suspended and the information below is displayed. 
Why?When the statement is executed, the SQL st", "product_code":"mrs", "title":"Why Is a Task Suspended When the ANALYZE TABLE Statement Is Executed and Resources Are Insufficient?", "uri":"mrs_01_2034.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"803" + "doc_type":"usermanual", + "p_code":"906", + "code":"917" }, { "desc":"If I access a parquet table on which I do not have permission, why a job is run before \"Missing Privileges\" is displayed?The execution sequence of Spark SQL statement par", "product_code":"mrs", "title":"If I Access a parquet Table on Which I Do not Have Permission, Why a Job Is Run Before \"Missing Privileges\" Is Displayed?", "uri":"mrs_01_2035.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"804" + "doc_type":"usermanual", + "p_code":"906", + "code":"918" }, { "desc":"When do I fail to modify the metadata in the datasource and Spark on HBase table by running the Hive command?The current Spark version does not support modifying the meta", "product_code":"mrs", "title":"Why Do I Fail to Modify MetaData by Running the Hive Command?", "uri":"mrs_01_2036.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"805" + "doc_type":"usermanual", + "p_code":"906", + "code":"919" }, { "desc":"After successfully running Spark tasks with large data volume, for example, 2-TB TPCDS test suite, why is the abnormal stack information \"RejectedExecutionException\" disp", "product_code":"mrs", "title":"Why Is \"RejectedExecutionException\" Displayed When I Exit Spark SQL?", "uri":"mrs_01_2037.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"806" + "doc_type":"usermanual", + "p_code":"906", + "code":"920" }, { "desc":"During a health check, if the concurrent statements exceed the threshold of the thread pool, the health check statements fail to be executed, the health check program tim", "product_code":"mrs", "title":"What Should I Do If the JDBCServer Process is Mistakenly Killed During a Health Check?", "uri":"mrs_01_2038.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"807" + "doc_type":"usermanual", + "p_code":"906", + "code":"921" }, { "desc":"Why no result is found when 2016-6-30 is set in the date field as the filter condition?As shown in the following figure, trx_dte_par in the select count (*) from trxfintr", "product_code":"mrs", "title":"Why No Result Is found When 2016-6-30 Is Set in the Date Field as the Filter Condition?", "uri":"mrs_01_2039.html", - "doc_type":"cmpntguide-lts", - "p_code":"792", - "code":"808" + "doc_type":"usermanual", + "p_code":"906", + "code":"922" }, { "desc":"Why does the --hivevaroption I specified in the command for starting spark-beeline fail to take effect?In the V100R002C60 version, if I use the --hivevar = Service > Tez > Configuration > All Configurations. Enter a parameter name in the search box.", "product_code":"mrs", "title":"Common Tez Parameters", "uri":"mrs_01_2069.html", - "doc_type":"cmpntguide-lts", - "p_code":"833", - "code":"834" + "doc_type":"usermanual", + "p_code":"948", + "code":"949" }, { "desc":"Tez displays the Tez task execution process on a GUI. 
You can view the task execution details on the GUI.The TimelineServer instance of the Yarn service has been installe", "product_code":"mrs", "title":"Accessing TezUI", "uri":"mrs_01_2070.html", - "doc_type":"cmpntguide-lts", - "p_code":"833", - "code":"835" + "doc_type":"usermanual", + "p_code":"948", + "code":"950" }, { "desc":"Log path: The default save path of Tez logs is /var/log/Bigdata/tez/role name.TezUI: /var/log/Bigdata/tez/tezui (run logs) and /var/log/Bigdata/audit/tez/tezui (audit log", "product_code":"mrs", "title":"Log Overview", "uri":"mrs_01_2071.html", - "doc_type":"cmpntguide-lts", - "p_code":"833", - "code":"836" + "doc_type":"usermanual", + "p_code":"948", + "code":"951" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues", "uri":"mrs_01_2072.html", - "doc_type":"cmpntguide-lts", - "p_code":"833", - "code":"837" + "doc_type":"usermanual", + "p_code":"948", + "code":"952" }, { "desc":"After a user logs in to Manager and switches to the Tez web UI, the submitted Tez tasks are not displayed.The Tez task data displayed on the Tez WebUI requires the suppor", "product_code":"mrs", "title":"TezUI Cannot Display Tez Task Execution Details", "uri":"mrs_01_2073.html", - "doc_type":"cmpntguide-lts", - "p_code":"837", - "code":"838" + "doc_type":"usermanual", + "p_code":"952", + "code":"953" }, { "desc":"When a user logs in to Manager and switches to the Tez web UI, error 404 or 503 is displayed.The Tez web UI depends on the TimelineServer instance of Yarn. Therefore, Tim", "product_code":"mrs", "title":"Error Occurs When a User Switches to the Tez Web UI", "uri":"mrs_01_2074.html", - "doc_type":"cmpntguide-lts", - "p_code":"837", - "code":"839" + "doc_type":"usermanual", + "p_code":"952", + "code":"954" }, { "desc":"A user logs in to the Tez web UI and clicks Logs, but the Yarn log page fails to be displayed and data cannot be loaded.Currently, the hostname is used for the access to ", "product_code":"mrs", "title":"Yarn Logs Cannot Be Viewed on the TezUI Page", "uri":"mrs_01_2075.html", - "doc_type":"cmpntguide-lts", - "p_code":"837", - "code":"840" + "doc_type":"usermanual", + "p_code":"952", + "code":"955" }, { "desc":"A user logs in to Manager and switches to the Tez web UI page, but no data for the submitted task is displayed on the Hive Queries page.To display Hive Queries task data ", "product_code":"mrs", "title":"Table Data Is Empty on the TezUI HiveQueries Page", "uri":"mrs_01_2076.html", - "doc_type":"cmpntguide-lts", - "p_code":"837", - "code":"841" + "doc_type":"usermanual", + "p_code":"952", + "code":"956" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using Yarn", "uri":"mrs_01_0851.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"842" + "code":"957" }, { "desc":"The Yarn service provides one queue (default) for users. Users allocate system resources to each queue. 
After the configuration is complete, you can click Refresh Queue o", "product_code":"mrs", "title":"Common Yarn Parameters", "uri":"mrs_01_0852.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"843" + "doc_type":"usermanual", + "p_code":"957", + "code":"958" }, { "desc":"This section describes how to create and configure a Yarn role. The Yarn role can be assigned with Yarn administrator permission and manage Yarn queue resources.If the cu", "product_code":"mrs", "title":"Creating Yarn Roles", "uri":"mrs_01_0853.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"844" + "doc_type":"usermanual", + "p_code":"957", + "code":"959" }, { "desc":"This section guides users to use a Yarn client in an O&M or service scenario.The client has been installed.For example, the installation directory is /opt/hadoopclient. T", "product_code":"mrs", "title":"Using the Yarn Client", "uri":"mrs_01_0854.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"845" + "doc_type":"usermanual", + "p_code":"957", + "code":"960" }, { "desc":"If the hardware resources (such as the number of CPU cores and memory size) of the nodes for deploying NodeManagers are different but the NodeManager available hardware r", "product_code":"mrs", "title":"Configuring Resources for a NodeManager Role Instance", "uri":"mrs_01_0855.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"846" + "doc_type":"usermanual", + "p_code":"957", + "code":"961" }, { "desc":"If the storage directories defined by the Yarn NodeManager are incorrect or the Yarn storage plan changes, the system administrator needs to modify the NodeManager storag", "product_code":"mrs", "title":"Changing NodeManager Storage Directories", "uri":"mrs_01_0856.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"847" + "doc_type":"usermanual", + "p_code":"957", + "code":"962" }, { "desc":"In the multi-tenant scenario in security mode, a cluster can be used by multiple users, and tasks of multiple users can be submitted and executed. Users are invisible to ", "product_code":"mrs", "title":"Configuring Strict Permission Control for Yarn", "uri":"mrs_01_0857.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"848" + "doc_type":"usermanual", + "p_code":"957", + "code":"963" }, { "desc":"Yarn provides the container log aggregation function to collect logs generated by containers on each node to HDFS to release local disk space. You can collect logs in eit", "product_code":"mrs", "title":"Configuring Container Log Aggregation", "uri":"mrs_01_0858.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"849" + "doc_type":"usermanual", + "p_code":"957", + "code":"964" }, { "desc":"CGroups is a Linux kernel feature. In YARN this feature allows containers to be limited in their resource usage (example, CPU usage). 
Without CGroups, it is hard to limit", "product_code":"mrs", "title":"Using CGroups with YARN", "uri":"mrs_01_0859.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"850" + "doc_type":"usermanual", + "p_code":"957", + "code":"965" }, { "desc":"When resources are insufficient or ApplicationMaster fails to start, a client probably encounters running errors.Go to the All Configurations page of Yarn and enter a par", "product_code":"mrs", "title":"Configuring the Number of ApplicationMaster Retries", "uri":"mrs_01_0860.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"851" + "doc_type":"usermanual", + "p_code":"957", + "code":"966" }, { "desc":"During the process of starting the configuration, when the ApplicationMaster creates a container, the allocated memory is automatically adjusted according to the total nu", "product_code":"mrs", "title":"Configure the ApplicationMaster to Automatically Adjust the Allocated Memory", "uri":"mrs_01_0861.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"852" + "doc_type":"usermanual", + "p_code":"957", + "code":"967" }, { "desc":"The value of the yarn.http.policy parameter must be consistent on both the server and clients. Web UIs on clients will be garbled if an inconsistency exists, for example,", "product_code":"mrs", "title":"Configuring the Access Channel Protocol", "uri":"mrs_01_0862.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"853" + "doc_type":"usermanual", + "p_code":"957", + "code":"968" }, { "desc":"If memory usage of the submitted application cannot be estimated, you can modify the configuration on the server to determine whether to check the memory usage.If the mem", "product_code":"mrs", "title":"Configuring Memory Usage Detection", "uri":"mrs_01_0863.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"854" + "doc_type":"usermanual", + "p_code":"957", + "code":"969" }, { "desc":"If the custom scheduler is set in ResourceManager, you can set the corresponding web page and other Web applications for the custom scheduler.Go to the All Configurations", "product_code":"mrs", "title":"Configuring the Additional Scheduler WebUI", "uri":"mrs_01_0864.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"855" + "doc_type":"usermanual", + "p_code":"957", + "code":"970" }, { "desc":"The Yarn Restart feature includes ResourceManager Restart and NodeManager Restart.When ResourceManager Restart is enabled, the new active ResourceManager node loads the i", "product_code":"mrs", "title":"Configuring Yarn Restart", "uri":"mrs_01_0865.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"856" + "doc_type":"usermanual", + "p_code":"957", + "code":"971" }, { "desc":"In YARN, ApplicationMasters run on NodeManagers just like every other container (ignoring unmanaged ApplicationMasters in this context). ApplicationMasters may break down", "product_code":"mrs", "title":"Configuring ApplicationMaster Work Preserving", "uri":"mrs_01_0866.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"857" + "doc_type":"usermanual", + "p_code":"957", + "code":"972" }, { "desc":"The default log level of localized container is INFO. 
You can change the log level by configuring yarn.nodemanager.container-localizer.java.opts.On Manager, choose Cluste", "product_code":"mrs", "title":"Configuring the Localized Log Levels", "uri":"mrs_01_0867.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"858" + "doc_type":"usermanual", + "p_code":"957", + "code":"973" }, { "desc":"Currently, YARN allows the user that starts the NodeManager to run the task submitted by all other users, or the users to run the task submitted by themselves.On Manager,", "product_code":"mrs", "title":"Configuring Users That Run Tasks", "uri":"mrs_01_0868.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"859" + "doc_type":"usermanual", + "p_code":"957", + "code":"974" + }, + { + "desc":"As a role of the Yarn service, TimelineServer supports the HA mode since the current version. To prevent a single point of failure of TimelineServer, you can enable Timel", + "product_code":"", + "title":"Configuring HA for TimelineServer", + "uri":"mrs_01_24814.html", + "doc_type":"", + "p_code":"957", + "code":"975" }, { "desc":"The default paths for saving Yarn logs are as follows:ResourceManager: /var/log/Bigdata/yarn/rm (run logs) and /var/log/Bigdata/audit/yarn/rm (audit logs)NodeManager: /va", "product_code":"mrs", "title":"Yarn Log Overview", "uri":"mrs_01_0870.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"860" + "doc_type":"usermanual", + "p_code":"957", + "code":"976" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Yarn Performance Tuning", "uri":"mrs_01_0871.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"861" + "doc_type":"usermanual", + "p_code":"957", + "code":"977" }, { "desc":"The capacity scheduler of ResourceManager implements job preemption to simplify job running in queues and improve resource utilization. The process is as follows:Assume t", "product_code":"mrs", "title":"Preempting a Task", "uri":"mrs_01_0872.html", - "doc_type":"cmpntguide-lts", - "p_code":"861", - "code":"862" + "doc_type":"usermanual", + "p_code":"977", + "code":"978" }, { "desc":"The resource contention scenarios of a cluster are as follows:Submit two jobs (Job 1 and Job 2) with lower priorities.Some tasks of running Job 1 and Job 2 are in the run", "product_code":"mrs", "title":"Setting the Task Priority", "uri":"mrs_01_0873.html", - "doc_type":"cmpntguide-lts", - "p_code":"861", - "code":"863" + "doc_type":"usermanual", + "p_code":"977", + "code":"979" }, { "desc":"After the scheduler of a big data cluster is properly configured, you can adjust the available memory, CPU resources, and local disk of each node to optimize the performa", "product_code":"mrs", "title":"Optimizing Node Configuration", "uri":"mrs_01_0874.html", - "doc_type":"cmpntguide-lts", - "p_code":"861", - "code":"864" + "doc_type":"usermanual", + "p_code":"977", + "code":"980" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About Yarn", "uri":"mrs_01_2077.html", - "doc_type":"cmpntguide-lts", - "p_code":"842", - "code":"865" + "doc_type":"usermanual", + "p_code":"957", + "code":"981" }, { "desc":"Why mounted directory for Container is not cleared after the completion of the job while using CGroups?The mounted path for the Container should be cleared even if job is", "product_code":"mrs", "title":"Why Mounted Directory for Container is Not Cleared After the Completion of the Job While Using CGroups?", "uri":"mrs_01_2078.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"866" + "doc_type":"usermanual", + "p_code":"981", + "code":"982" }, { "desc":"Why is the HDFS_DELEGATION_TOKEN expired exception reported when a job fails in security mode?HDFS_DELEGATION_TOKEN expires because the token is not updated or it is acce", "product_code":"mrs", "title":"Why the Job Fails with HDFS_DELEGATION_TOKEN Expired Exception?", "uri":"mrs_01_2079.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"867" + "doc_type":"usermanual", + "p_code":"981", + "code":"983" }, { "desc":"If Yarn is restarted in either of the following scenarios, local logs will not be deleted as scheduled and will be retained permanently:When Yarn is restarted during task", "product_code":"mrs", "title":"Why Are Local Logs Not Deleted After YARN Is Restarted?", "uri":"mrs_01_2080.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"868" + "doc_type":"usermanual", + "p_code":"981", + "code":"984" }, { "desc":"Why the task does not fail even though AppAttempts restarts due to failure for more than two times?During the task execution process, if the ContainerExitStatus returns v", "product_code":"mrs", "title":"Why the Task Does Not Fail Even Though AppAttempts Restarts for More Than Two Times?", "uri":"mrs_01_2081.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"869" + "doc_type":"usermanual", + "p_code":"981", + "code":"985" }, { "desc":"After I moved an application from one queue to another, why is it moved back to the original queue after ResourceManager restarts?This problem is caused by the constraint", "product_code":"mrs", "title":"Why Is an Application Moved Back to the Original Queue After ResourceManager Restarts?", "uri":"mrs_01_2082.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"870" + "doc_type":"usermanual", + "p_code":"981", + "code":"986" }, { "desc":"Why does Yarn not release the blacklist even all nodes are added to the blacklist?In Yarn, when the number of application nodes added to the blacklist by ApplicationMaste", "product_code":"mrs", "title":"Why Does Yarn Not Release the Blacklist Even All Nodes Are Added to the Blacklist?", "uri":"mrs_01_2083.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"871" + "doc_type":"usermanual", + "p_code":"981", + "code":"987" }, { "desc":"The switchover of ResourceManager occurs continuously when multiple, for example 2,000, tasks are running concurrently, causing the Yarn service unavailable.The cause is ", "product_code":"mrs", "title":"Why Does the Switchover of ResourceManager Occur Continuously?", "uri":"mrs_01_2084.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"872" + "doc_type":"usermanual", + "p_code":"981", + "code":"988" }, { "desc":"Why does a new application fail if a NodeManager has been in 
unhealthy status for 10 minutes?When nodeSelectPolicy is set to SEQUENCE and the first NodeManager connected ", "product_code":"mrs", "title":"Why Does a New Application Fail If a NodeManager Has Been in Unhealthy Status for 10 Minutes?", "uri":"mrs_01_2085.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"873" + "doc_type":"usermanual", + "p_code":"981", + "code":"989" }, { "desc":"If a user belongs to multiple user groups with different default queue configurations, which queue will be selected as the default queue when an application is submitted?", "product_code":"mrs", "title":"What Is the Queue Replacement Policy?", "uri":"mrs_01_2086.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"874" + "doc_type":"usermanual", + "p_code":"981", + "code":"990" }, { "desc":"Why does an error occur when I query the applicationID of a completed or non-existing application using the RESTful APIs?The Superior scheduler only stores the applicatio", "product_code":"mrs", "title":"Why Does an Error Occur When I Query the ApplicationID of a Completed or Non-existing Application Using the RESTful APIs?", "uri":"mrs_01_2087.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"875" + "doc_type":"usermanual", + "p_code":"981", + "code":"991" }, { "desc":"In Superior scheduling mode, if a single NodeManager is faulty, why may the MapReduce tasks fail?In normal cases, when the attempt of a single task of an application fail", "product_code":"mrs", "title":"Why May A Single NodeManager Fault Cause MapReduce Task Failures in the Superior Scheduling Mode?", "uri":"mrs_01_2088.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"876" + "doc_type":"usermanual", + "p_code":"981", + "code":"992" }, { "desc":"When a queue is deleted when there are applications running in it, these applications are moved to the \"lost_and_found\" queue. When these applications are moved back to a", "product_code":"mrs", "title":"Why Are Applications Suspended After They Are Moved From Lost_and_Found Queue to Another Queue?", "uri":"mrs_01_2089.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"877" + "doc_type":"usermanual", + "p_code":"981", + "code":"993" }, { "desc":"How do I limit the size of application diagnostic messages stored in the ZKstore?In some cases, it has been observed that diagnostic messages may grow infinitely. 
Because", "product_code":"mrs", "title":"How Do I Limit the Size of Application Diagnostic Messages Stored in the ZKstore?", "uri":"mrs_01_2090.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"878" + "doc_type":"usermanual", + "p_code":"981", + "code":"994" }, { "desc":"Why does a MapReduce job fail to run when a non-ViewFS file system is configured as ViewFS?When a non-ViewFS file system is configured as a ViewFS using cluster, the user", "product_code":"mrs", "title":"Why Does a MapReduce Job Fail to Run When a Non-ViewFS File System Is Configured as ViewFS?", "uri":"mrs_01_2091.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"879" + "doc_type":"usermanual", + "p_code":"981", + "code":"995" }, { "desc":"After the Native Task feature is enabled, Reduce tasks fail to run in some OSs.When -Dmapreduce.job.map.output.collector.class=org.apache.hadoop.mapred.nativetask.NativeM", "product_code":"mrs", "title":"Why Do Reduce Tasks Fail to Run in Some OSs After the Native Task Feature is Enabled?", "uri":"mrs_01_24051.html", - "doc_type":"cmpntguide-lts", - "p_code":"865", - "code":"880" + "doc_type":"usermanual", + "p_code":"981", + "code":"996" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using ZooKeeper", "uri":"mrs_01_2092.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"881" + "code":"997" }, { "desc":"ZooKeeper is an open-source, highly reliable, and distributed consistency coordination service. ZooKeeper is designed to solve the problem that data consistency cannot be", "product_code":"mrs", "title":"Using ZooKeeper from Scratch", "uri":"mrs_01_2093.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"882" + "doc_type":"usermanual", + "p_code":"997", + "code":"998" }, { "desc":"Navigation path for setting parameters:Go to the All Configurations page of ZooKeeper by referring to Modifying Cluster Service Configuration Parameters. Enter a paramete", "product_code":"mrs", "title":"Common ZooKeeper Parameters", "uri":"mrs_01_2094.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"883" + "doc_type":"usermanual", + "p_code":"997", + "code":"999" }, { "desc":"Use a ZooKeeper client in an O&M scenario or service scenario.You have installed the client. For example, the installation directory is /opt/client. The client directory ", "product_code":"mrs", "title":"Using a ZooKeeper Client", "uri":"mrs_01_2095.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"884" + "doc_type":"usermanual", + "p_code":"997", + "code":"1000" }, { "desc":"Configure znode permission of ZooKeeper.ZooKeeper uses an access control list (ACL) to implement znode access control. 
The ZooKeeper client specifies a znode ACL, and the", "product_code":"mrs", "title":"Configuring the ZooKeeper Permissions", "uri":"mrs_01_2097.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"885" + "doc_type":"usermanual", + "p_code":"997", + "code":"1001" }, { "desc":"When the defined storage directory of ZooKeeper is incorrect, or when the storage plan of ZooKeeper is changed, log in to FusionInsight Manager to change the storage dire", "product_code":"mrs", "title":"Changing the ZooKeeper Storage Directory", "uri":"mrs_01_2096.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"886" + "doc_type":"usermanual", + "p_code":"997", + "code":"1002" }, { "desc":"ZooKeeper has maxClientCnxn configuration at the server side, and this configuration will verify the connections from each client IP address. But many clients can create ", "product_code":"mrs", "title":"Configuring the ZooKeeper Connection", "uri":"mrs_01_2098.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"887" + "doc_type":"usermanual", + "p_code":"997", + "code":"1003" }, { "desc":"The ZooKeeper client uses the FIFO queue to send a request to the server and waits for a response from the server. The client maintains the FIFO queue until it acknowledg", "product_code":"mrs", "title":"Configuring ZooKeeper Response Timeout Interval", "uri":"mrs_01_2099.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"888" + "doc_type":"usermanual", + "p_code":"997", + "code":"1004" }, { "desc":"To prevent multiple IP nodes, bind the current ZooKeeper client to any available IP address. The data flow layer, management layer, and other network layers in the produc", "product_code":"mrs", "title":"Binding the Client to an IP Address", "uri":"mrs_01_2100.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"889" + "doc_type":"usermanual", + "p_code":"997", + "code":"1005" }, { "desc":"When the ZooKeeper client is started, it is bound to a random port. In most cases, you want to bind the ZooKeeper client to a specific port. For example, for the client c", "product_code":"mrs", "title":"Configuring the Port Range Bound to the Client", "uri":"mrs_01_2101.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"890" + "doc_type":"usermanual", + "p_code":"997", + "code":"1006" }, { "desc":"Currently, ZooKeeper client properties can be configured only through Java system properties. Therefore, all clients in the same JVM have the same configuration. 
In some ", "product_code":"mrs", "title":"Performing Special Configuration on ZooKeeper Clients in the Same JVM", "uri":"mrs_01_2102.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"891" + "doc_type":"usermanual", + "p_code":"997", + "code":"1007" }, { "desc":"Set a quota for Znodes in ZooKeeper of a security cluster in O&M scenarios or service scenarios to restrict the quantity and byte space of Znodes and subnodes.Two modes a", "product_code":"mrs", "title":"Configuring a Quota for a Znode", "uri":"mrs_01_2104.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"892" + "doc_type":"usermanual", + "p_code":"997", + "code":"1008" }, { "desc":"Log path: /var/log/Bigdata/zookeeper/quorumpeer (Run log), /var/log/Bigdata/audit/zookeeper/quorumpeer (Audit log)Log archive rule: The automatic ZooKeeper log compressio", "product_code":"mrs", "title":"ZooKeeper Log Overview", "uri":"mrs_01_2106.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"893" + "doc_type":"usermanual", + "p_code":"997", + "code":"1009" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Common Issues About ZooKeeper", "uri":"mrs_01_2107.html", - "doc_type":"cmpntguide-lts", - "p_code":"881", - "code":"894" + "doc_type":"usermanual", + "p_code":"997", + "code":"1010" }, { "desc":"After a large number of znodes are created, ZooKeeper servers in the ZooKeeper cluster become faulty and cannot be automatically recovered or restarted.Logs of followers:", "product_code":"mrs", "title":"Why Do ZooKeeper Servers Fail to Start After Many znodes Are Created?", "uri":"mrs_01_2108.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"895" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1011" }, { "desc":"After a large number of znodes are created in a parent directory, the ZooKeeper client will fail to fetch all child nodes of this parent directory in a single request.Log", "product_code":"mrs", "title":"Why Does the ZooKeeper Server Display the java.io.IOException: Len Error Log?", "uri":"mrs_01_2109.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"896" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1012" }, { "desc":"Why four letter commands do not work with linux netcat command when secure netty configurations are enabled at Zookeeper server?For example,echo stat |netcat host portLin", "product_code":"mrs", "title":"Why Four Letter Commands Don't Work With Linux netcat Command When Secure Netty Configurations Are Enabled at Zookeeper Server?", "uri":"mrs_01_2110.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"897" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1013" }, { "desc":"How to check whether the role of a ZooKeeper instance is a leader or follower.Log in to Manager and choose Cluster > Name of the desired cluster > Service > ZooKeeper > I", "product_code":"mrs", "title":"How Do I Check Which ZooKeeper Instance Is a Leader?", "uri":"mrs_01_2111.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"898" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1014" }, { "desc":"When the IBM JDK is used, the client fails to connect to ZooKeeper.The possible cause is that the jaas.conf file format of the IBM JDK is different from that of the commo", 
"product_code":"mrs", "title":"Why Cannot the Client Connect to ZooKeeper using the IBM JDK?", "uri":"mrs_01_2112.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"899" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1015" }, { "desc":"The ZooKeeper client fails to refresh a TGT and therefore ZooKeeper cannot be accessed. The error message is as follows:ZooKeeper uses the system command kinit – R to ref", "product_code":"mrs", "title":"What Should I Do When the ZooKeeper Client Fails to Refresh a TGT?", "uri":"mrs_01_2113.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"900" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1016" }, { "desc":"When the client connects to a non-leader instance, run the deleteall command to delete a large number of znodes, the error message \"Node does not exist\" is displayed, but", "product_code":"mrs", "title":"Why Is Message \"Node does not exist\" Displayed when A Large Number of Znodes Are Deleted Using the deleteallCommand", "uri":"mrs_01_2114.html", - "doc_type":"cmpntguide-lts", - "p_code":"894", - "code":"901" + "doc_type":"usermanual", + "p_code":"1010", + "code":"1017" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Appendix", "uri":"mrs_01_2122.html", - "doc_type":"cmpntguide-lts", + "doc_type":"usermanual", "p_code":"", - "code":"902" + "code":"1018" }, { "desc":"Modify the configuration parameters of each service on FusionInsight Manager.The Basic Configuration tab page is displayed by default. To modify more parameters, click th", "product_code":"mrs", "title":"Modifying Cluster Service Configuration Parameters", "uri":"mrs_01_1293.html", - "doc_type":"cmpntguide-lts", - "p_code":"902", - "code":"903" + "doc_type":"usermanual", + "p_code":"1018", + "code":"1019" }, { "desc":"FusionInsight Manager is used to monitor, configure, and manage clusters. After the cluster is installed, you can use the account to log in to FusionInsight Manager.Curre", "product_code":"mrs", "title":"Accessing FusionInsight Manager", "uri":"mrs_01_2124.html", - "doc_type":"cmpntguide-lts", - "p_code":"902", - "code":"904" + "doc_type":"usermanual", + "p_code":"1018", + "code":"1020" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"mrs", "title":"Using an MRS Client", "uri":"mrs_01_0787.html", - "doc_type":"cmpntguide-lts", - "p_code":"902", - "code":"905" + "doc_type":"usermanual", + "p_code":"1018", + "code":"1021" }, { "desc":"Before using the client, you need to install the client. For example, the installation directory is /opt/hadoopclient.cd /opt/hadoopclientsource bigdata_envkinit MRS clus", "product_code":"mrs", "title":"Using an MRS Client on Nodes Inside a MRS Cluster", "uri":"mrs_01_0788.html", - "doc_type":"cmpntguide-lts", - "p_code":"905", - "code":"906" + "doc_type":"usermanual", + "p_code":"1021", + "code":"1022" }, { "desc":"After a client is installed, you can use the client on a node outside an MRS cluster.A Linux ECS has been prepared. 
For details about the OS and its version of the ECS, s", "product_code":"mrs", "title":"Using an MRS Client on Nodes Outside a MRS Cluster", "uri":"mrs_01_0800.html", - "doc_type":"cmpntguide-lts", - "p_code":"905", - "code":"907" + "doc_type":"usermanual", + "p_code":"1021", + "code":"1023" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"mrs", + "product_code":"", "title":"Change History", - "uri":"en-us_topic_0000001298722056.html", - "doc_type":"cmpntguide-lts", + "uri":"mrs_01_17512.html", + "doc_type":"", "p_code":"", - "code":"908" + "code":"1024" } ] \ No newline at end of file diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295739896.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001295739896.png deleted file mode 100644 index a7386606..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295739896.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295739900.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001295739900.png deleted file mode 100644 index bcc25640..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295739900.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295899860.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001295899860.png deleted file mode 100644 index e1ba5ae7..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295899860.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295899864.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001295899864.png deleted file mode 100644 index 1b7e7555..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001295899864.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001296059704.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001296059704.png deleted file mode 100644 index f1197f6e..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001296059704.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001296219332.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001296219332.png deleted file mode 100644 index 3b28ad5c..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001296219332.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001296219336.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001296219336.png deleted file mode 100644 index 49f804cc..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001296219336.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001348739725.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001348739725.png deleted file mode 100644 index 284cb796..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001348739725.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001348739857.gif b/docs/mrs/component-operation-guide-lts/en-us_image_0000001348739857.gif deleted file mode 100644 
index 1470c6be..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001348739857.gif and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001348740045.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001348740045.png index ae7f920b..c7456c89 100644 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001348740045.png and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001348740045.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349059549.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349059549.png deleted file mode 100644 index 0d135b40..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349059549.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349059937.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349059937.png deleted file mode 100644 index 06737e13..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349059937.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349139413.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349139413.png deleted file mode 100644 index c84e94e0..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349139413.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349139417.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349139417.png deleted file mode 100644 index 9632c842..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349139417.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259001.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259001.png deleted file mode 100644 index 6a06cad0..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259001.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259005.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259005.png deleted file mode 100644 index 934505b5..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259005.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259429.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259429.png deleted file mode 100644 index d40da22f..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001349259429.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001387905484.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001387905484.png deleted file mode 100644 index 72f53601..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001387905484.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001438431645.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001438431645.png deleted file mode 100644 index 1a7780b8..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001438431645.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001441091233.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001441091233.png deleted file mode 100644 index 
7af9c595..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001441091233.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001441208981.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001441208981.png deleted file mode 100644 index 62de1804..00000000 Binary files a/docs/mrs/component-operation-guide-lts/en-us_image_0000001441208981.png and /dev/null differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001446755301.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001446755301.png new file mode 100644 index 00000000..527a0f21 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001446755301.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001446835121.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001446835121.png new file mode 100644 index 00000000..30e52c7a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001446835121.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472704.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472704.png new file mode 100644 index 00000000..cf4cf0f5 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472704.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472712.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472712.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472712.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472724.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472724.png new file mode 100644 index 00000000..702ac834 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472724.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472728.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472728.png new file mode 100644 index 00000000..0364571f Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472728.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472732.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472732.png new file mode 100644 index 00000000..019e2694 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472732.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472784.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472784.png new file mode 100644 index 00000000..99bd42ae Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532472784.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532503042.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532503042.png new file mode 100644 index 00000000..5b7b4014 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532503042.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532516862.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532516862.png new file mode 100644 index 00000000..29c9d740 Binary files /dev/null and 
b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532516862.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532549720.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532549720.png new file mode 100644 index 00000000..c3f42839 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532549720.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632184.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632184.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632184.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632196.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632196.png new file mode 100644 index 00000000..b2f45086 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632196.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632200.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632200.png new file mode 100644 index 00000000..03413129 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632200.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632204.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632204.png new file mode 100644 index 00000000..274d9038 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632204.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632208.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632208.png new file mode 100644 index 00000000..61c3cf32 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632208.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632212.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632212.png new file mode 100644 index 00000000..00c0937a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532632212.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532676350.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532676350.png new file mode 100644 index 00000000..e3c61e74 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532676350.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532676354.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532676354.png new file mode 100644 index 00000000..1d240edb Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532676354.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532677010.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532677010.png new file mode 100644 index 00000000..451fad43 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532677010.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532709204.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532709204.png new file mode 100644 index 00000000..2a9177d3 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532709204.png differ diff --git 
a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791924.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791924.png new file mode 100644 index 00000000..99ca361f Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791924.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791932.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791932.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791932.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791944.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791944.png new file mode 100644 index 00000000..a354bf3b Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791944.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791948.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791948.png new file mode 100644 index 00000000..340b198a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791948.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791952.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791952.png new file mode 100644 index 00000000..43499c8d Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791952.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791956.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791956.png new file mode 100644 index 00000000..fc1933db Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791956.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791960.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791960.png new file mode 100644 index 00000000..038267da Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532791960.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532792008.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532792008.png new file mode 100644 index 00000000..6b5650c4 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532792008.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532836094.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532836094.png new file mode 100644 index 00000000..6a9c4cf0 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532836094.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532836098.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532836098.png new file mode 100644 index 00000000..60cc25a6 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532836098.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951860.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951860.png new file mode 100644 index 00000000..a354bf3b Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951860.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951868.png 
b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951868.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951868.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951876.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951876.png new file mode 100644 index 00000000..cb825bbb Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951876.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951880.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951880.png new file mode 100644 index 00000000..2ad46159 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951880.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951884.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951884.png new file mode 100644 index 00000000..138accf0 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951884.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951888.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951888.png new file mode 100644 index 00000000..d0163131 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951888.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951892.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951892.png new file mode 100644 index 00000000..46e9010d Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951892.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951928.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951928.png new file mode 100644 index 00000000..038267da Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951928.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951944.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951944.png new file mode 100644 index 00000000..23fd6103 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532951944.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001532996022.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532996022.png new file mode 100644 index 00000000..cb54d2d5 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001532996022.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533162146.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533162146.png new file mode 100644 index 00000000..88c4617d Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533162146.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533198872.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533198872.png new file mode 100644 index 00000000..44d0fa66 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533198872.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533358396.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533358396.png new file mode 100644 index 
00000000..7f20a76d Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533358396.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533359808.jpg b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533359808.jpg new file mode 100644 index 00000000..ded48132 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533359808.jpg differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533481354.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533481354.png new file mode 100644 index 00000000..8506e84e Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533481354.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533544798.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533544798.png new file mode 100644 index 00000000..53611082 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533544798.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533639950.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533639950.png new file mode 100644 index 00000000..b267f515 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533639950.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533641294.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533641294.png new file mode 100644 index 00000000..e4334068 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533641294.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001533678044.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533678044.png new file mode 100644 index 00000000..23905ee2 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001533678044.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001536916934.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001536916934.png new file mode 100644 index 00000000..30e52c7a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001536916934.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001537076386.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537076386.png new file mode 100644 index 00000000..527a0f21 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537076386.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001537090654.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537090654.png new file mode 100644 index 00000000..8c2c79e4 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537090654.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001537269552.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537269552.png new file mode 100644 index 00000000..a0607273 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537269552.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001537413022.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537413022.png new file mode 100644 index 00000000..f8eebba8 Binary files /dev/null and 
b/docs/mrs/component-operation-guide-lts/en-us_image_0000001537413022.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952073.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952073.png new file mode 100644 index 00000000..2931dd6a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952073.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952089.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952089.png new file mode 100644 index 00000000..17e58579 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952089.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952097.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952097.png new file mode 100644 index 00000000..8bb092e5 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952097.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952105.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952105.png new file mode 100644 index 00000000..bfe756b9 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952105.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952133.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952133.png new file mode 100644 index 00000000..00c0937a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952133.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952137.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952137.png new file mode 100644 index 00000000..0b1a69f4 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001582952137.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151841.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151841.png new file mode 100644 index 00000000..2ad46159 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151841.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151845.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151845.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151845.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151849.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151849.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151849.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151865.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151865.png new file mode 100644 index 00000000..67e41cac Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151865.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151869.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151869.png new file mode 100644 index 00000000..3606e4aa Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151869.png differ diff --git 
a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151877.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151877.png new file mode 100644 index 00000000..bfe756b9 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151877.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151917.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151917.png new file mode 100644 index 00000000..fc5870c1 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583151917.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583182157.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583182157.png new file mode 100644 index 00000000..620b8ec9 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583182157.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583195981.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583195981.png new file mode 100644 index 00000000..287d3d81 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583195981.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583195985.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583195985.png new file mode 100644 index 00000000..2f2c70c4 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583195985.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272137.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272137.png new file mode 100644 index 00000000..27e3f4cf Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272137.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272145.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272145.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272145.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272157.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272157.png new file mode 100644 index 00000000..801fe095 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272157.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272169.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272169.png new file mode 100644 index 00000000..12405541 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272169.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272185.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272185.png new file mode 100644 index 00000000..f37aa660 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272185.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272201.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272201.png new file mode 100644 index 00000000..bfe756b9 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583272201.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316317.png 
b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316317.png new file mode 100644 index 00000000..11e5cde5 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316317.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316321.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316321.png new file mode 100644 index 00000000..3924b9cc Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316321.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316949.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316949.png new file mode 100644 index 00000000..77fddbfc Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583316949.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583349121.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583349121.png new file mode 100644 index 00000000..eb375e8f Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583349121.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391837.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391837.png new file mode 100644 index 00000000..dcb40c16 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391837.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391841.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391841.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391841.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391845.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391845.png new file mode 100644 index 00000000..8c9e423c Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391845.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391853.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391853.png new file mode 100644 index 00000000..ac917e8f Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391853.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391861.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391861.png new file mode 100644 index 00000000..3805721e Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391861.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391865.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391865.png new file mode 100644 index 00000000..52db820a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391865.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391869.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391869.png new file mode 100644 index 00000000..038267da Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391869.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391873.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391873.png new file mode 100644 index 
00000000..00c0937a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391873.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391913.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391913.png new file mode 100644 index 00000000..60abc9fc Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583391913.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583435997.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583435997.png new file mode 100644 index 00000000..f63e57c3 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583435997.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583436657.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583436657.png new file mode 100644 index 00000000..1ff4bff9 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583436657.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583468825.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583468825.png new file mode 100644 index 00000000..defaffcc Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583468825.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583504773.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583504773.png new file mode 100644 index 00000000..315a5f98 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583504773.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583757997.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583757997.png new file mode 100644 index 00000000..25985e08 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583757997.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583881265.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583881265.png new file mode 100644 index 00000000..51f1f160 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583881265.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583957937.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583957937.png new file mode 100644 index 00000000..f8374612 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583957937.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001583961513.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583961513.png new file mode 100644 index 00000000..8506e84e Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001583961513.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001584077717.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001584077717.png new file mode 100644 index 00000000..03bef956 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001584077717.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001584081289.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001584081289.png new file mode 100644 index 00000000..51f1f160 Binary files /dev/null and 
b/docs/mrs/component-operation-guide-lts/en-us_image_0000001584081289.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001584317997.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001584317997.png new file mode 100644 index 00000000..f05133bb Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001584317997.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001587755985.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001587755985.png new file mode 100644 index 00000000..527a0f21 Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001587755985.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001587840761.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001587840761.png new file mode 100644 index 00000000..da4b81cf Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001587840761.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001587875989.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001587875989.png new file mode 100644 index 00000000..30e52c7a Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001587875989.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_image_0000001829035105.png b/docs/mrs/component-operation-guide-lts/en-us_image_0000001829035105.png new file mode 100644 index 00000000..9052683e Binary files /dev/null and b/docs/mrs/component-operation-guide-lts/en-us_image_0000001829035105.png differ diff --git a/docs/mrs/component-operation-guide-lts/en-us_topic_0000001295898904.html b/docs/mrs/component-operation-guide-lts/en-us_topic_0000001295898904.html deleted file mode 100644 index e737b213..00000000 --- a/docs/mrs/component-operation-guide-lts/en-us_topic_0000001295898904.html +++ /dev/null @@ -1,50 +0,0 @@ - - -

Configuring HBase Data Compression and Encoding

-

Scenario

HBase encodes data blocks in HFiles to reduce duplicate keys in KeyValues, which reduces the storage space used. Currently, the following data block encoding modes are supported: NONE, PREFIX, DIFF, FAST_DIFF, and ROW_INDEX_V1. NONE indicates that data blocks are not encoded. HBase also supports compression algorithms to compress HFiles. The following algorithms are supported by default: NONE, GZ, SNAPPY, and ZSTD. NONE indicates that HFiles are not compressed.

-

Both data block encoding and compression are configured at the HBase column family level, and they can be used together or separately.

-
-

Prerequisites

  • You have installed an HBase client. For example, the client is installed in /opt/client.
  • If authentication has been enabled for HBase, you must have the corresponding operation permissions. For example, you must have the creation (C) or administration (A) permission on the corresponding namespace or higher-level items to create a table, and the creation (C) or administration (A) permission on the created table or higher-level items to modify a table. For details about how to grant permissions, see Creating HBase Roles.
-
-

Procedure

Setting data block encoding and compression algorithms during creation

-
  • Method 1: Using hbase shell
    1. Log in to the node where the client is installed as the client installation user.
    2. Run the following command to go to the client directory:

      cd /opt/client

      -
    3. Run the following command to configure environment variables:

      source bigdata_env

      -
    4. If Kerberos authentication is enabled for the current cluster, run the following command to authenticate the user. If Kerberos authentication is disabled, skip this step:

      kinit Component service user

      -

      For example, kinit hbaseuser.

      -
    5. Run the following HBase client command:

      hbase shell

      -
    6. Create a table.
      create 't1', {NAME => 'f1', COMPRESSION => 'SNAPPY', DATA_BLOCK_ENCODING => 'FAST_DIFF'}
      • t1: indicates the table name.
      • f1: indicates the column family name.
      • SNAPPY: indicates the column family uses the SNAPPY compression algorithm.
      • FAST_DIFF: indicates FAST_DIFF is used for encoding.
      • The parameters in each pair of braces apply to one column family. To configure multiple column families, specify multiple brace groups separated by commas (,). For details about table creation statements, run help 'create' in the HBase shell.
      -
      -
      -
    -
  • Method 2: Using Java APIs
    The following code snippet shows only how to set the encoding and compression modes of a column family when creating a table; a fuller runnable sketch is shown after this list.
    TableDescriptorBuilder htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"));   // Create a builder for the descriptor of table t1.
    ColumnFamilyDescriptorBuilder hcd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"));   // Create a builder for column family f1.
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);   // Set the encoding mode of column family f1 to FAST_DIFF.
    hcd.setCompressionType(Compression.Algorithm.SNAPPY);   // Set the compression algorithm of column family f1 to SNAPPY.
    htd.setColumnFamily(hcd.build());   // Add column family f1 to the descriptor of table t1.
    -
-
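    For reference, the following is a minimal, self-contained sketch of the same operation submitted through the Admin API. It assumes an hbase-site.xml on the classpath, that any required security authentication has already been completed, and that the table and column family names (t1, f1) are the illustrative names used above; it is a sketch, not the complete MRS sample code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateEncodedCompressedTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // Loads hbase-site.xml from the classpath.
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Column family f1 with FAST_DIFF data block encoding and SNAPPY compression.
                ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
                        .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
                        .setCompressionType(Compression.Algorithm.SNAPPY)
                        .build();
                // Descriptor of table t1 containing the column family defined above.
                TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
                        .setColumnFamily(cf)
                        .build();
                admin.createTable(table);   // Submit the table creation request to HBase.
            }
        }
    }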

Setting or modifying the data block encoding mode and compression algorithm for an existing table

-
  • Method 1: Using hbase shell
    1. Log in to the node where the client is installed as the client installation user.
    2. Run the following command to go to the client directory:

      cd /opt/client

      -
    3. Run the following command to configure environment variables:

      source bigdata_env

      -
    4. If Kerberos authentication is enabled for the current cluster, run the following command to authenticate the user. If Kerberos authentication is disabled, skip this step:

      kinit Component service user

      -

      For example, kinit hbaseuser.

      -
    5. Run the following HBase client command:

      hbase shell

      -
    6. Run the following command to modify the table:

      alter 't1', {NAME => 'f1', COMPRESSION => 'SNAPPY', DATA_BLOCK_ENCODING => 'FAST_DIFF'}

      -
    -
  • Method 2: Using Java APIs

    The following code snippet shows only how to modify the encoding and compression modes of a column family in an existing table. For the complete table modification code and instructions on using it, see the HBase Development Guide.

    -
    TableDescriptor htd = admin.getDescriptor(TableName.valueOf("t1"));   // Obtain the descriptor of table t1.
    ColumnFamilyDescriptor originCF = htd.getColumnFamily(Bytes.toBytes("f1"));   // Obtain the descriptor of column family f1.
    ColumnFamilyDescriptorBuilder hcd = ColumnFamilyDescriptorBuilder.newBuilder(originCF);   // Create a builder based on the existing column family attributes.
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);   // Change the encoding mode of the column family to FAST_DIFF.
    hcd.setCompressionType(Compression.Algorithm.SNAPPY);   // Change the compression algorithm of the column family to SNAPPY.
    admin.modifyColumnFamily(TableName.valueOf("t1"), hcd.build());   // Submit the change to the server to modify the attributes of column family f1.

    After the modification, the new encoding and compression settings apply to existing HFiles only after the next compaction rewrites them.
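    If you do not want to wait for the next scheduled compaction, you can trigger one manually, for example by running major_compact 't1' in the HBase shell or by calling the Admin API. The following minimal Java sketch assumes an hbase-site.xml on the classpath and that any required authentication has already been completed; the table name t1 is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TriggerMajorCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // Loads hbase-site.xml from the classpath.
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asynchronously request a major compaction of table t1 so that existing
                // HFiles are rewritten with the new encoding and compression settings.
                admin.majorCompact(TableName.valueOf("t1"));
            }
        }
    }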

    -
-
-
-
- -
- diff --git a/docs/mrs/component-operation-guide-lts/en-us_topic_0000001298722056.html b/docs/mrs/component-operation-guide-lts/en-us_topic_0000001298722056.html deleted file mode 100644 index b5658189..00000000 --- a/docs/mrs/component-operation-guide-lts/en-us_topic_0000001298722056.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Change History

-
-
Released On          What's New
2022-11-01           This issue is the first official release.
-
-
- diff --git a/docs/mrs/component-operation-guide-lts/mrs_01_0375.html b/docs/mrs/component-operation-guide-lts/mrs_01_0375.html index 373d0dfd..7f3cceb8 100644 --- a/docs/mrs/component-operation-guide-lts/mrs_01_0375.html +++ b/docs/mrs/component-operation-guide-lts/mrs_01_0375.html @@ -42,6 +42,10 @@ + + diff --git a/docs/mrs/component-operation-guide-lts/mrs_01_0390.html b/docs/mrs/component-operation-guide-lts/mrs_01_0390.html index f6c10b1b..047a6095 100644 --- a/docs/mrs/component-operation-guide-lts/mrs_01_0390.html +++ b/docs/mrs/component-operation-guide-lts/mrs_01_0390.html @@ -6,7 +6,7 @@ diff --git a/docs/mrs/component-operation-guide-lts/mrs_01_0790.html b/docs/mrs/component-operation-guide-lts/mrs_01_0790.html index f24f2e38..6c32f7b6 100644 --- a/docs/mrs/component-operation-guide-lts/mrs_01_0790.html +++ b/docs/mrs/component-operation-guide-lts/mrs_01_0790.html @@ -56,6 +56,8 @@ + + +

Manual Compaction Procedure

If you do not want the system to automatically decide when to compact a table, set the table property NO_AUTO_COMPACTION to disable automatic compaction. After automatic compaction is disabled, you can still run the ALTER TABLE ... COMPACT statement (optionally with a PARTITION clause) to compact a table or partition manually.

+

This operation applies only to MRS 8.2.0 and later versions.

+
+
  1. Log in to the Hive client by referring to Using a Hive Client and run the following statement to disable automatic compaction when creating a table:

    CREATE TABLE table_name (
      id int, name string
    )
    CLUSTERED BY (id) INTO 2 BUCKETS STORED AS ORC
    TBLPROPERTIES ("transactional"="true",
      "NO_AUTO_COMPACTION"="true"
    );

    You can also run the following statement to disable automatic compaction after a table is created:

    +

    ALTER TABLE table_name set TBLPROPERTIES ("NO_AUTO_COMPACTION"="true");

    +
    +

  2. Run the following statement to compact the table. compaction_type indicates the compaction type, which can be minor or major. A JDBC sketch that issues these statements from an application is provided after this procedure.

    ALTER TABLE table_name COMPACT 'compaction_type';

    +

+
+
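The statements above can also be issued from an application through Hive JDBC. The following is a minimal sketch for illustration only: the JDBC URL, table name, and class name are placeholders, the Hive JDBC driver is assumed to be on the classpath, and cluster authentication is assumed to have been handled separately.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ManualCompactionExample {
        public static void main(String[] args) throws Exception {
            // Placeholder URL; replace it with the HiveServer2 address of your cluster.
            String url = "jdbc:hive2://hiveserver2-host:10000/default";
            try (Connection conn = DriverManager.getConnection(url);
                 Statement stmt = conn.createStatement()) {
                // Disable automatic compaction on an existing transactional table.
                stmt.execute("ALTER TABLE table_name SET TBLPROPERTIES ('NO_AUTO_COMPACTION'='true')");
                // Manually request a major compaction; use 'minor' for a minor compaction.
                stmt.execute("ALTER TABLE table_name COMPACT 'major'");
            }
        }
    }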

Procedure for Specifying a Queue for Running a Compaction Task

This operation applies only to MRS 8.2.0 and later versions.

+
+
  1. Create a queue.
  2. Log in to FusionInsight Manager and choose Cluster > Services > Hive. Click Configuration and then All Configurations, click MetaStore(Role), and select Transaction.
  3. Set the following parameters as required:

    +

    Table 2 Parameter description

    Parameter: hive.compactor.job.queue
    Description: Name of the Hadoop queue to which compaction jobs are submitted, that is, the queue created in 1.

    Parameter: hive.compactor.check.interval
    Description: Interval, in seconds, at which the compaction thread checks whether compaction is needed. The default value is 300.

    Parameter: hive.compactor.cleaner.run.interval
    Description: Interval, in milliseconds, at which the cleaner thread runs. The default value is 5000.

    Parameter: hive.compactor.delta.num.threshold
    Description: Number of delta (incremental) files that triggers minor compaction. The default value is 10.

    Parameter: hive.compactor.delta.pct.threshold
    Description: Ratio of the total size of the delta files to the size of the base files that triggers major compaction. The value 0.1 indicates that major compaction is triggered when the total delta size reaches 10% of the base size. The default value is 0.1.

    Parameter: hive.compactor.max.num.delta
    Description: Maximum number of delta files that the compactor attempts to process in a single job. The default value is 500.

    Parameter: metastore.compactor.initiator.on
    Description: Whether to run the initiator and cleaner threads on this MetaStore instance. To use transactions, set this parameter to true. The default value is false.

    Parameter: metastore.compactor.worker.threads
    Description: Number of compaction worker threads running on this MetaStore instance. If this parameter is set to 0, no compaction is performed. To use transactions, set this parameter to a positive number on one or more MetaStore instances. The default value is 0.

  4. Log in to the Hive client and perform compaction. For details, see Using a Hive Client.

    CREATE TABLE table_name (
      id int, name string
    )
    CLUSTERED BY (id) INTO 2 BUCKETS STORED AS ORC
    TBLPROPERTIES ("transactional"="true",
      "compactor.mapreduce.map.memory.mb"="2048",                    -- Specify a property of the compaction MapReduce job.
      "compactorthreshold.hive.compactor.delta.num.threshold"="4",  -- Trigger minor compaction when there are more than four delta directories.
      "compactorthreshold.hive.compactor.delta.pct.threshold"="0.5" -- Trigger major compaction when the ratio of the delta file size to the base file size exceeds 50%.
    );

    or

    ALTER TABLE table_name COMPACT 'minor' WITH OVERWRITE TBLPROPERTIES ("compactor.mapreduce.map.memory.mb"="3072");  -- Specify a property of the compaction MapReduce job.
    ALTER TABLE table_name COMPACT 'major' WITH OVERWRITE TBLPROPERTIES ("tblprops.orc.compress.size"="8192");         -- Modify another Hive table property as well.

    After compaction, small files are not deleted immediately; the cleaner thread deletes them in batches. You can check the compaction and cleanup status as shown in the sketch below.

    +
    +
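To check whether a compaction request has been picked up and whether the cleaner has finished, you can run the SHOW COMPACTIONS statement on the Hive client. The following JDBC sketch is illustrative only: the URL and class name are placeholders, the Hive JDBC driver is assumed to be on the classpath, and authentication is assumed to have been handled separately.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.Statement;

    public class ShowCompactionsExample {
        public static void main(String[] args) throws Exception {
            // Placeholder URL; replace it with the HiveServer2 address of your cluster.
            String url = "jdbc:hive2://hiveserver2-host:10000/default";
            try (Connection conn = DriverManager.getConnection(url);
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("SHOW COMPACTIONS")) {
                ResultSetMetaData meta = rs.getMetaData();
                while (rs.next()) {
                    // Print every column of each compaction record, such as the database,
                    // table, partition, compaction type, and current state.
                    StringBuilder row = new StringBuilder();
                    for (int i = 1; i <= meta.getColumnCount(); i++) {
                        row.append(meta.getColumnLabel(i)).append('=').append(rs.getString(i)).append("  ");
                    }
                    System.out.println(row.toString().trim());
                }
            }
        }
    }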