From 12dd64efc74356614a202207426fb626b5bb554f Mon Sep 17 00:00:00 2001 From: "Su, Xiaomeng" Date: Wed, 15 May 2024 11:56:22 +0000 Subject: [PATCH] dli_umn_20240430 Reviewed-by: Pruthi, Vineet Co-authored-by: Su, Xiaomeng Co-committed-by: Su, Xiaomeng --- docs/dli/umn/ALL_META.TXT.json | 1562 +++++++++++++---- docs/dli/umn/CLASS.TXT.json | 1271 +++++++++----- docs/dli/umn/dli_01_00006.html | 8 +- docs/dli/umn/dli_01_0002.html | 6 +- docs/dli/umn/dli_01_0005.html | 18 +- docs/dli/umn/dli_01_0006.html | 4 +- docs/dli/umn/dli_01_0007.html | 2 +- docs/dli/umn/dli_01_0008.html | 2 +- docs/dli/umn/dli_01_0010.html | 2 +- docs/dli/umn/dli_01_0011.html | 2 +- docs/dli/umn/dli_01_0012.html | 4 + docs/dli/umn/dli_01_0013.html | 2 +- docs/dli/umn/dli_01_0014.html | 1 + docs/dli/umn/dli_01_0017.html | 16 +- docs/dli/umn/dli_01_0019.html | 2 +- docs/dli/umn/dli_01_0022.html | 2 +- docs/dli/umn/dli_01_0253.html | 8 +- docs/dli/umn/dli_01_0320.html | 16 +- docs/dli/umn/dli_01_0363.html | 30 +- docs/dli/umn/dli_01_0367.html | 2 +- docs/dli/umn/dli_01_0376.html | 2 +- docs/dli/umn/dli_01_0378.html | 51 +- docs/dli/umn/dli_01_0384.html | 5 +- docs/dli/umn/dli_01_0403.html | 54 +- docs/dli/umn/dli_01_0410.html | 2 +- docs/dli/umn/dli_01_0418.html | 4 +- docs/dli/umn/dli_01_0440.html | 8 +- docs/dli/umn/dli_01_0441.html | 552 +++--- docs/dli/umn/dli_01_0447.html | 2 +- docs/dli/umn/dli_01_0448.html | 20 +- docs/dli/umn/dli_01_0451.html | 6 +- docs/dli/umn/dli_01_0455.html | 4 +- docs/dli/umn/dli_01_0457.html | 4 +- docs/dli/umn/dli_01_0461.html | 4 +- docs/dli/umn/dli_01_0462.html | 4 +- docs/dli/umn/dli_01_0463.html | 2 +- docs/dli/umn/dli_01_0464.html | 54 +- docs/dli/umn/dli_01_0485.html | 2 - docs/dli/umn/dli_01_0486.html | 57 - docs/dli/umn/dli_01_0489.html | 2 +- docs/dli/umn/dli_01_0498.html | 4 +- docs/dli/umn/dli_01_0504.html | 113 ++ docs/dli/umn/dli_01_0505.html | 83 + docs/dli/umn/dli_01_0506.html | 177 ++ docs/dli/umn/dli_01_0507.html | 26 + docs/dli/umn/dli_01_0508.html | 
13 + docs/dli/umn/dli_01_0509.html | 104 ++ docs/dli/umn/dli_01_0513.html | 2 +- docs/dli/umn/dli_01_0514.html | 2 +- docs/dli/umn/dli_01_0515.html | 94 + docs/dli/umn/dli_01_0516.html | 121 ++ docs/dli/umn/dli_01_0524.html | 23 + docs/dli/umn/dli_01_0525.html | 48 + docs/dli/umn/dli_01_0526.html | 45 + docs/dli/umn/dli_01_0528.html | 19 + docs/dli/umn/dli_01_0529.html | 33 + docs/dli/umn/dli_01_0530.html | 21 + docs/dli/umn/dli_01_0531.html | 16 +- docs/dli/umn/dli_01_0532.html | 20 + docs/dli/umn/dli_01_0550.html | 20 +- docs/dli/umn/dli_01_0552.html | 6 +- docs/dli/umn/dli_01_0563.html | 51 + docs/dli/umn/dli_01_0565.html | 26 + docs/dli/umn/dli_01_0566.html | 27 + docs/dli/umn/dli_03_0001.html | 24 +- docs/dli/umn/dli_03_0002.html | 2 +- docs/dli/umn/dli_03_0008.html | 2 +- docs/dli/umn/dli_03_0009.html | 2 +- docs/dli/umn/dli_03_0010.html | 2 +- docs/dli/umn/dli_03_0011.html | 2 +- docs/dli/umn/dli_03_0013.html | 2 +- docs/dli/umn/dli_03_0014.html | 2 +- docs/dli/umn/dli_03_0017.html | 2 +- docs/dli/umn/dli_03_0020.html | 78 +- docs/dli/umn/dli_03_0021.html | 32 +- docs/dli/umn/dli_03_0022.html | 38 +- docs/dli/umn/dli_03_0025.html | 2 +- docs/dli/umn/dli_03_0028.html | 2 +- docs/dli/umn/dli_03_0029.html | 2 +- docs/dli/umn/dli_03_0036.html | 2 +- docs/dli/umn/dli_03_0037.html | 76 +- docs/dli/umn/dli_03_0038.html | 2 +- docs/dli/umn/dli_03_0040.html | 2 +- docs/dli/umn/dli_03_0043.html | 2 +- docs/dli/umn/dli_03_0044.html | 2 +- docs/dli/umn/dli_03_0045.html | 2 +- docs/dli/umn/dli_03_0046.html | 2 +- docs/dli/umn/dli_03_0047.html | 2 +- docs/dli/umn/dli_03_0048.html | 2 +- docs/dli/umn/dli_03_0049.html | 22 +- docs/dli/umn/dli_03_0054.html | 16 +- docs/dli/umn/dli_03_0057.html | 2 +- docs/dli/umn/dli_03_0061.html | 2 +- docs/dli/umn/dli_03_0064.html | 2 +- docs/dli/umn/dli_03_0065.html | 2 +- docs/dli/umn/dli_03_0066.html | 2 +- docs/dli/umn/dli_03_0067.html | 2 +- docs/dli/umn/dli_03_0068.html | 2 +- docs/dli/umn/dli_03_0069.html | 2 +- 
docs/dli/umn/dli_03_0071.html | 2 +- docs/dli/umn/dli_03_0072.html | 2 +- docs/dli/umn/dli_03_0075.html | 2 +- docs/dli/umn/dli_03_0076.html | 2 +- docs/dli/umn/dli_03_0077.html | 2 +- docs/dli/umn/dli_03_0080.html | 2 +- docs/dli/umn/dli_03_0082.html | 2 +- docs/dli/umn/dli_03_0083.html | 2 +- docs/dli/umn/dli_03_0085.html | 2 +- docs/dli/umn/dli_03_0086.html | 2 +- docs/dli/umn/dli_03_0087.html | 2 +- docs/dli/umn/dli_03_0088.html | 2 +- docs/dli/umn/dli_03_0089.html | 4 +- docs/dli/umn/dli_03_0090.html | 2 +- docs/dli/umn/dli_03_0091.html | 2 +- docs/dli/umn/dli_03_0092.html | 2 +- docs/dli/umn/dli_03_0093.html | 2 +- docs/dli/umn/dli_03_0095.html | 2 +- docs/dli/umn/dli_03_0096.html | 2 +- docs/dli/umn/dli_03_0098.html | 2 +- docs/dli/umn/dli_03_0099.html | 2 +- docs/dli/umn/dli_03_0100.html | 2 +- docs/dli/umn/dli_03_0102.html | 2 +- docs/dli/umn/dli_03_0103.html | 2 +- docs/dli/umn/dli_03_0105.html | 2 +- docs/dli/umn/dli_03_0106.html | 2 +- docs/dli/umn/dli_03_0107.html | 2 +- docs/dli/umn/dli_03_0108.html | 2 +- docs/dli/umn/dli_03_0109.html | 2 +- docs/dli/umn/dli_03_0110.html | 28 + docs/dli/umn/dli_03_0111.html | 2 +- docs/dli/umn/dli_03_0112.html | 20 + docs/dli/umn/dli_03_0115.html | 2 +- docs/dli/umn/dli_03_0116.html | 2 +- docs/dli/umn/dli_03_0117.html | 2 +- docs/dli/umn/dli_03_0118.html | 2 +- docs/dli/umn/dli_03_0119.html | 2 +- docs/dli/umn/dli_03_0126.html | 2 +- docs/dli/umn/dli_03_0127.html | 2 +- docs/dli/umn/dli_03_0128.html | 2 +- docs/dli/umn/dli_03_0129.html | 2 +- docs/dli/umn/dli_03_0130.html | 2 +- docs/dli/umn/dli_03_0131.html | 33 + docs/dli/umn/dli_03_0132.html | 29 + docs/dli/umn/dli_03_0133.html | 20 + docs/dli/umn/dli_03_0135.html | 32 + docs/dli/umn/dli_03_0136.html | 2 +- docs/dli/umn/dli_03_0137.html | 30 + docs/dli/umn/dli_03_0138.html | 2 +- docs/dli/umn/dli_03_0139.html | 2 +- docs/dli/umn/dli_03_0140.html | 2 +- docs/dli/umn/dli_03_0145.html | 2 +- docs/dli/umn/dli_03_0156.html | 2 +- docs/dli/umn/dli_03_0157.html | 2 +- 
docs/dli/umn/dli_03_0159.html | 2 +- docs/dli/umn/dli_03_0160.html | 2 +- docs/dli/umn/dli_03_0161.html | 2 +- docs/dli/umn/dli_03_0162.html | 2 +- docs/dli/umn/dli_03_0163.html | 16 + docs/dli/umn/dli_03_0164.html | 2 +- docs/dli/umn/dli_03_0165.html | 2 +- docs/dli/umn/dli_03_0166.html | 2 +- docs/dli/umn/dli_03_0167.html | 2 +- docs/dli/umn/dli_03_0168.html | 2 +- docs/dli/umn/dli_03_0169.html | 2 +- docs/dli/umn/dli_03_0170.html | 2 +- docs/dli/umn/dli_03_0171.html | 2 +- docs/dli/umn/dli_03_0172.html | 2 +- docs/dli/umn/dli_03_0173.html | 2 +- docs/dli/umn/dli_03_0174.html | 2 +- docs/dli/umn/dli_03_0175.html | 2 +- docs/dli/umn/dli_03_0176.html | 2 +- docs/dli/umn/dli_03_0177.html | 2 +- docs/dli/umn/dli_03_0179.html | 9 +- docs/dli/umn/dli_03_0180.html | 2 +- docs/dli/umn/dli_03_0181.html | 2 +- docs/dli/umn/dli_03_0182.html | 2 +- docs/dli/umn/dli_03_0183.html | 2 +- docs/dli/umn/dli_03_0184.html | 2 +- docs/dli/umn/dli_03_0186.html | 2 +- docs/dli/umn/dli_03_0187.html | 2 +- docs/dli/umn/dli_03_0188.html | 2 +- docs/dli/umn/dli_03_0189.html | 2 +- docs/dli/umn/dli_03_0190.html | 2 +- docs/dli/umn/dli_03_0191.html | 2 +- docs/dli/umn/dli_03_0192.html | 2 +- docs/dli/umn/dli_03_0193.html | 2 +- docs/dli/umn/dli_03_0195.html | 2 +- docs/dli/umn/dli_03_0196.html | 2 +- docs/dli/umn/dli_03_0200.html | 2 +- docs/dli/umn/dli_03_0201.html | 2 +- docs/dli/umn/dli_03_0204.html | 28 + docs/dli/umn/dli_03_0206.html | 40 + docs/dli/umn/dli_03_0207.html | 2 +- docs/dli/umn/dli_03_0208.html | 2 +- docs/dli/umn/dli_03_0209.html | 2 +- docs/dli/umn/dli_03_0210.html | 2 +- docs/dli/umn/dli_03_0211.html | 50 + docs/dli/umn/dli_03_0212.html | 2 +- docs/dli/umn/dli_03_0213.html | 2 +- docs/dli/umn/dli_03_0214.html | 2 +- docs/dli/umn/dli_03_0215.html | 2 +- docs/dli/umn/dli_03_0216.html | 16 + docs/dli/umn/dli_03_0217.html | 30 + docs/dli/umn/dli_03_0218.html | 24 + docs/dli/umn/dli_03_0219.html | 18 + docs/dli/umn/dli_03_0220.html | 2 +- docs/dli/umn/dli_03_0221.html | 32 + 
docs/dli/umn/dli_03_0222.html | 18 + docs/dli/umn/dli_03_0223.html | 17 + docs/dli/umn/dli_03_0226.html | 26 + docs/dli/umn/dli_03_0227.html | 2 +- docs/dli/umn/dli_03_0228.html | 2 +- docs/dli/umn/dli_03_0229.html | 22 + docs/dli/umn/dli_03_0230.html | 30 + docs/dli/umn/dli_03_0231.html | 2 +- docs/dli/umn/dli_03_0232.html | 2 +- docs/dli/umn/dli_03_0233.html | 2 +- docs/dli/umn/dli_03_0234.html | 2 +- docs/dli/umn/dli_03_0235.html | 2 +- docs/dli/umn/dli_03_0236.html | 2 +- docs/dli/umn/dli_03_0237.html | 2 +- docs/dli/umn/dli_03_0238.html | 2 +- docs/dli/umn/dli_03_0239.html | 2 +- docs/dli/umn/dli_03_0250.html | 2 +- docs/dli/umn/dli_03_0251.html | 2 +- docs/dli/umn/dli_03_0252.html | 2 +- docs/dli/umn/dli_03_0253.html | 2 +- docs/dli/umn/dli_03_0254.html | 2 +- docs/dli/umn/dli_03_0256.html | 32 + docs/dli/umn/dli_03_0257.html | 2 +- docs/dli/umn/dli_03_0259.html | 2 +- docs/dli/umn/dli_03_0260.html | 2 +- docs/dli/umn/dli_03_0261.html | 16 + docs/dli/umn/dli_03_0263.html | 2 +- docs/dli/umn/dli_03_0264.html | 4 +- docs/dli/umn/dli_03_0265.html | 2 +- docs/dli/umn/dli_03_0266.html | 2 +- docs/dli/umn/dli_03_0276.html | 32 + docs/dli/umn/dli_07_0002.html | 2 +- docs/dli/umn/dli_07_0003.html | 2 - docs/dli/umn/dli_07_0005.html | 5 + docs/dli/umn/dli_07_0006.html | 16 + docs/dli/umn/dli_07_0009.html | 2 +- docs/dli/umn/en-us_image_0000001262007480.png | Bin 0 -> 305 bytes docs/dli/umn/en-us_image_0000001309687485.png | Bin 0 -> 68213 bytes docs/dli/umn/en-us_image_0000001309807469.png | Bin 0 -> 17950 bytes docs/dli/umn/en-us_image_0000001309847545.png | Bin 0 -> 21398 bytes docs/dli/umn/en-us_image_0000001309847549.png | Bin 0 -> 305 bytes docs/dli/umn/en-us_image_0000001323141682.png | Bin 0 -> 350 bytes docs/dli/umn/en-us_image_0000001372847466.png | Bin 221 -> 0 bytes docs/dli/umn/en-us_image_0000001373007370.png | Bin 1004 -> 0 bytes docs/dli/umn/en-us_image_0000001377545298.png | Bin 24201 -> 0 bytes docs/dli/umn/en-us_image_0000001427744557.png | Bin 35291 
-> 0 bytes 253 files changed, 4402 insertions(+), 1740 deletions(-) delete mode 100644 docs/dli/umn/dli_01_0486.html create mode 100644 docs/dli/umn/dli_01_0504.html create mode 100644 docs/dli/umn/dli_01_0505.html create mode 100644 docs/dli/umn/dli_01_0506.html create mode 100644 docs/dli/umn/dli_01_0507.html create mode 100644 docs/dli/umn/dli_01_0508.html create mode 100644 docs/dli/umn/dli_01_0509.html create mode 100644 docs/dli/umn/dli_01_0515.html create mode 100644 docs/dli/umn/dli_01_0516.html create mode 100644 docs/dli/umn/dli_01_0524.html create mode 100644 docs/dli/umn/dli_01_0525.html create mode 100644 docs/dli/umn/dli_01_0526.html create mode 100644 docs/dli/umn/dli_01_0528.html create mode 100644 docs/dli/umn/dli_01_0529.html create mode 100644 docs/dli/umn/dli_01_0530.html create mode 100644 docs/dli/umn/dli_01_0532.html create mode 100644 docs/dli/umn/dli_01_0563.html create mode 100644 docs/dli/umn/dli_01_0565.html create mode 100644 docs/dli/umn/dli_01_0566.html create mode 100644 docs/dli/umn/dli_03_0110.html create mode 100644 docs/dli/umn/dli_03_0112.html create mode 100644 docs/dli/umn/dli_03_0131.html create mode 100644 docs/dli/umn/dli_03_0132.html create mode 100644 docs/dli/umn/dli_03_0133.html create mode 100644 docs/dli/umn/dli_03_0135.html create mode 100644 docs/dli/umn/dli_03_0137.html create mode 100644 docs/dli/umn/dli_03_0163.html create mode 100644 docs/dli/umn/dli_03_0204.html create mode 100644 docs/dli/umn/dli_03_0206.html create mode 100644 docs/dli/umn/dli_03_0211.html create mode 100644 docs/dli/umn/dli_03_0216.html create mode 100644 docs/dli/umn/dli_03_0217.html create mode 100644 docs/dli/umn/dli_03_0218.html create mode 100644 docs/dli/umn/dli_03_0219.html create mode 100644 docs/dli/umn/dli_03_0221.html create mode 100644 docs/dli/umn/dli_03_0222.html create mode 100644 docs/dli/umn/dli_03_0223.html create mode 100644 docs/dli/umn/dli_03_0226.html create mode 100644 docs/dli/umn/dli_03_0229.html create mode 100644 
docs/dli/umn/dli_03_0230.html create mode 100644 docs/dli/umn/dli_03_0256.html create mode 100644 docs/dli/umn/dli_03_0261.html create mode 100644 docs/dli/umn/dli_03_0276.html create mode 100644 docs/dli/umn/en-us_image_0000001262007480.png create mode 100644 docs/dli/umn/en-us_image_0000001309687485.png create mode 100644 docs/dli/umn/en-us_image_0000001309807469.png create mode 100644 docs/dli/umn/en-us_image_0000001309847545.png create mode 100644 docs/dli/umn/en-us_image_0000001309847549.png create mode 100644 docs/dli/umn/en-us_image_0000001323141682.png delete mode 100644 docs/dli/umn/en-us_image_0000001372847466.png delete mode 100644 docs/dli/umn/en-us_image_0000001373007370.png delete mode 100644 docs/dli/umn/en-us_image_0000001377545298.png delete mode 100644 docs/dli/umn/en-us_image_0000001427744557.png diff --git a/docs/dli/umn/ALL_META.TXT.json b/docs/dli/umn/ALL_META.TXT.json index 2b5b0b17..8f4d0d36 100644 --- a/docs/dli/umn/ALL_META.TXT.json +++ b/docs/dli/umn/ALL_META.TXT.json @@ -72,9 +72,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -347,7 +346,7 @@ "node_id":"dli_01_0017.xml", "product_code":"dli", "code":"18", - "des":"SQL jobs allow you to execute SQL statements entered in the SQL job editing window, import data, and export data.SQL job management provides the following functions:Searc", + "des":"SQL jobs allow you to execute SQL statements in the SQL job editing window, import data, and export data.SQL job management provides the following functions:Searching for", "doc_type":"usermanual", "kw":"SQL Job Management,Job Management,User Guide", "search_title":"", @@ -530,7 +529,7 @@ "node_id":"dli_01_0462.xml", "product_code":"dli", "code":"27", - "des":"After creating a job, you can view the job details to learn about the following information:Viewing Job DetailsChecking the Job Monitoring InformationViewing the Task Lis", + 
"des":"After creating a job, you can view the job details to learn about the following information:Viewing Job DetailsChecking Job Monitoring InformationViewing the Task List of", "doc_type":"usermanual", "kw":"Flink Job Details,Flink Job Management,User Guide", "search_title":"", @@ -769,11 +768,29 @@ "title":"Deleting a Queue", "githuburl":"" }, + { + "uri":"dli_01_0565.html", + "node_id":"dli_01_0565.xml", + "product_code":"dli", + "code":"39", + "des":"You can create enterprise projects matching the organizational structure of your enterprises to centrally manage cloud resources across regions by project. Then you can c", + "doc_type":"usermanual", + "kw":"Allocating a Queue to an Enterprise Project,Queue Management,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "documenttype":"usermanual" + } + ], + "title":"Allocating a Queue to an Enterprise Project", + "githuburl":"" + }, { "uri":"dli_01_0443.html", "node_id":"dli_01_0443.xml", "product_code":"dli", - "code":"39", + "code":"40", "des":"If the CIDR block of the DLI queue conflicts with that of the user data source, you can change the CIDR block of the queue.If the queue whose CIDR block is to be modified", "doc_type":"usermanual", "kw":"Modifying the CIDR Block,Queue Management,User Guide", @@ -794,7 +811,7 @@ "uri":"dli_01_0487.html", "node_id":"dli_01_0487.xml", "product_code":"dli", - "code":"40", + "code":"41", "des":"Elastic scaling can be performed for a newly created queue only when there were jobs running in this queue.Queues with 16 CUs do not support scale-out or scale-in.Queues ", "doc_type":"usermanual", "kw":"Elastic Queue Scaling,Queue Management,User Guide", @@ -814,7 +831,7 @@ "uri":"dli_01_0488.html", "node_id":"dli_01_0488.xml", "product_code":"dli", - "code":"41", + "code":"42", "des":"When services are busy, you might need to use more compute resources to process services in a period. After this period, you do not require the same amount of resources. 
", "doc_type":"usermanual", "kw":"Scheduling CU Changes,Queue Management,User Guide", @@ -834,7 +851,7 @@ "uri":"dli_01_0489.html", "node_id":"dli_01_0489.xml", "product_code":"dli", - "code":"42", + "code":"43", "des":"It can be used to test the connectivity between the DLI queue and the peer IP address specified by the user in common scenarios, or the connectivity between the DLI queue", "doc_type":"usermanual", "kw":"Testing Address Connectivity,Queue Management,User Guide", @@ -842,9 +859,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -855,7 +871,7 @@ "uri":"dli_01_0421.html", "node_id":"dli_01_0421.xml", "product_code":"dli", - "code":"43", + "code":"44", "des":"Once you have created an SMN topic, you can easily subscribe to it by going to the Topic Management > Topics page of the SMN console. You can choose to receive notificati", "doc_type":"usermanual", "kw":"Creating an SMN Topic,Queue Management,User Guide", @@ -875,7 +891,7 @@ "uri":"dli_01_0022.html", "node_id":"dli_01_0022.xml", "product_code":"dli", - "code":"44", + "code":"45", "des":"A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. 
A tag consists of a tag key and a t", "doc_type":"usermanual", "kw":"Managing Queue Tags,Queue Management,User Guide", @@ -889,11 +905,350 @@ "title":"Managing Queue Tags", "githuburl":"" }, + { + "uri":"dli_01_0563.html", + "node_id":"dli_01_0563.xml", + "product_code":"dli", + "code":"46", + "des":"DLI allows you to set properties for queues.You can set Spark driver parameters to improve the scheduling efficiency of queues.This section describes how to set queue pro", + "doc_type":"usermanual", + "kw":"Setting Queue Properties,Queue Management,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "documenttype":"usermanual" + } + ], + "title":"Setting Queue Properties", + "githuburl":"" + }, + { + "uri":"dli_01_0508.html", + "node_id":"dli_01_0508.xml", + "product_code":"dli", + "code":"47", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Elastic Resource Pool", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Elastic Resource Pool", + "githuburl":"" + }, + { + "uri":"dli_01_0528.html", + "node_id":"dli_01_0528.xml", + "product_code":"dli", + "code":"48", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Before You Start", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Before You Start", + "githuburl":"" + }, + { + "uri":"dli_01_0504.html", + "node_id":"dli_01_0504.xml", + "product_code":"dli", + "code":"49", + "des":"An elastic resource pool provides compute resources (CPU and memory) for running DLI jobs. The unit is CU. One CU contains one CPU and 4 GB memory.You can create multiple", + "doc_type":"usermanual", + "kw":"Overview,Before You Start,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dli_01_0515.html", + "node_id":"dli_01_0515.xml", + "product_code":"dli", + "code":"50", + "des":"This section walks you through the procedure of adding a queue to an elastic resource pool and binding an enhanced datasource connection to the elastic resource pool.Proc", + "doc_type":"usermanual", + "kw":"Creating an Elastic Resource Pool and Running a Job,Before You Start,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Creating an Elastic Resource Pool and Running a Job", + "githuburl":"" + }, + { + "uri":"dli_01_0516.html", + "node_id":"dli_01_0516.xml", + "product_code":"dli", + "code":"51", + "des":"A company has multiple departments that perform data analysis in different periods during a day.Department A requires a large number of compute resources from 00:00 a.m. 
", + "doc_type":"usermanual", + "kw":"Configuring Scaling Policies for Queues,Before You Start,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Configuring Scaling Policies for Queues", + "githuburl":"" + }, + { + "uri":"dli_01_0529.html", + "node_id":"dli_01_0529.xml", + "product_code":"dli", + "code":"52", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Regular Operations", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Regular Operations", + "githuburl":"" + }, + { + "uri":"dli_01_0505.html", + "node_id":"dli_01_0505.xml", + "product_code":"dli", + "code":"53", + "des":"For details about the application scenarios of elastic resource pools, see the Overview. 
This section describes how to create an elastic resource pool.If you use an enhan", + "doc_type":"usermanual", + "kw":"Creating an Elastic Resource Pool,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Creating an Elastic Resource Pool", + "githuburl":"" + }, + { + "uri":"dli_01_0526.html", + "node_id":"dli_01_0526.xml", + "product_code":"dli", + "code":"54", + "des":"Administrators can assign permissions of different operation scopes to users for each elastic resource pool.The administrator and elastic resource pool owner have all per", + "doc_type":"usermanual", + "kw":"Managing Permissions,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Managing Permissions", + "githuburl":"" + }, + { + "uri":"dli_01_0509.html", + "node_id":"dli_01_0509.xml", + "product_code":"dli", + "code":"55", + "des":"You can add one or more queues to an elastic resource pool to run jobs. 
This section describes how to add a queue to an elastic resource pool.Automatic scaling of an elas", + "doc_type":"usermanual", + "kw":"Adding a Queue,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Adding a Queue", + "githuburl":"" + }, + { + "uri":"dli_01_0530.html", + "node_id":"dli_01_0530.xml", + "product_code":"dli", + "code":"56", + "des":"If you want a queue to use resources in an elastic resource pool, bind the queue to the pool.You can click Associate Queue on the Resource Pool page to bind a queue to an", + "doc_type":"usermanual", + "kw":"Binding a Queue,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Binding a Queue", + "githuburl":"" + }, + { + "uri":"dli_01_0506.html", + "node_id":"dli_01_0506.xml", + "product_code":"dli", + "code":"57", + "des":"Multiple queues can be added to an elastic resource pool. For details about how to add a queue, see Adding a Queue. 
You can configure the number of CUs you want based on ", + "doc_type":"usermanual", + "kw":"Managing Queues,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Managing Queues", + "githuburl":"" + }, + { + "uri":"dli_01_0507.html", + "node_id":"dli_01_0507.xml", + "product_code":"dli", + "code":"58", + "des":"CU settings are used to control the maximum and minimum CU ranges for elastic resource pools to avoid unlimited resource scaling.For example, an elastic resource pool has", + "doc_type":"usermanual", + "kw":"Setting CUs,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Setting CUs", + "githuburl":"" + }, + { + "uri":"dli_01_0524.html", + "node_id":"dli_01_0524.xml", + "product_code":"dli", + "code":"59", + "des":"If CUs of a yearly/monthly elastic resource pool cannot meet your service requirements, you can modify the CUs. In this case, you will be charged based on the number of C", + "doc_type":"usermanual", + "kw":"Modifying Specifications,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Modifying Specifications", + "githuburl":"" + }, + { + "uri":"dli_01_0525.html", + "node_id":"dli_01_0525.xml", + "product_code":"dli", + "code":"60", + "des":"A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. 
A tag consists of a tag key and a t", + "doc_type":"usermanual", + "kw":"Managing Tags,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Managing Tags", + "githuburl":"" + }, + { + "uri":"dli_01_0532.html", + "node_id":"dli_01_0532.xml", + "product_code":"dli", + "code":"61", + "des":"If you added a queue to or deleted one from an elastic resource pool, or you scaled an added queue, the CU quantity of the elastic resource pool may be changed. You can v", + "doc_type":"usermanual", + "kw":"Viewing Scaling History,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "IsMulti":"No", + "documenttype":"usermanual", + "IsBot":"Yes" + } + ], + "title":"Viewing Scaling History", + "githuburl":"" + }, + { + "uri":"dli_01_0566.html", + "node_id":"dli_01_0566.xml", + "product_code":"dli", + "code":"62", + "des":"You can create enterprise projects matching the organizational structure of your enterprises to centrally manage cloud resources across regions by project. Then you can c", + "doc_type":"usermanual", + "kw":"Allocating to an Enterprise Project,Regular Operations,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "documenttype":"usermanual" + } + ], + "title":"Allocating to an Enterprise Project", + "githuburl":"" + }, { "uri":"dli_01_0004.html", "node_id":"dli_01_0004.xml", "product_code":"dli", - "code":"45", + "code":"63", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Data Management", @@ -914,7 +1269,7 @@ "uri":"dli_01_0390.html", "node_id":"dli_01_0390.xml", "product_code":"dli", - "code":"46", + "code":"64", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Databases and Tables", @@ -935,7 +1290,7 @@ "uri":"dli_01_0228.html", "node_id":"dli_01_0228.xml", "product_code":"dli", - "code":"47", + "code":"65", "des":"DLI database and table management provide the following functions:Database Permission ManagementTable Permission ManagementCreating a Database or a TableDeleting a Databa", "doc_type":"usermanual", "kw":"Overview,Databases and Tables,User Guide", @@ -955,7 +1310,7 @@ "uri":"dli_01_0447.html", "node_id":"dli_01_0447.xml", "product_code":"dli", - "code":"48", + "code":"66", "des":"By setting permissions, you can assign varying database permissions to different users.The administrator and database owner have all permissions, which cannot be set or m", "doc_type":"usermanual", "kw":"Managing Database Permissions,Databases and Tables,User Guide", @@ -975,7 +1330,7 @@ "uri":"dli_01_0448.html", "node_id":"dli_01_0448.xml", "product_code":"dli", - "code":"49", + "code":"67", "des":"By setting permissions, you can assign varying table permissions to different users.The administrator and table owner have all permissions, which cannot be set or modifie", "doc_type":"usermanual", "kw":"Managing Table Permissions,Databases and Tables,User Guide", @@ -995,7 +1350,7 @@ "uri":"dli_01_0005.html", "node_id":"dli_01_0005.xml", "product_code":"dli", - "code":"50", + "code":"68", "des":"A database, built on the computer 
storage device, is a data warehouse where data is organized, stored, and managed based on its structure.The table is an important part o", "doc_type":"usermanual", "kw":"Creating a Database or a Table,Databases and Tables,User Guide", @@ -1015,7 +1370,7 @@ "uri":"dli_01_0011.html", "node_id":"dli_01_0011.xml", "product_code":"dli", - "code":"51", + "code":"69", "des":"You can delete unnecessary databases and tables based on actual conditions.You are not allowed to delete databases or tables that are being used for running jobs.The admi", "doc_type":"usermanual", "kw":"Deleting a Database or a Table,Databases and Tables,User Guide", @@ -1023,9 +1378,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1036,7 +1390,7 @@ "uri":"dli_01_0376.html", "node_id":"dli_01_0376.xml", "product_code":"dli", - "code":"52", + "code":"70", "des":"During actual use, developers create databases and tables and submit them to test personnel for testing. After the test is complete, the databases and tables are transfer", "doc_type":"usermanual", "kw":"Modifying the Owners of Databases and Tables,Databases and Tables,User Guide", @@ -1044,9 +1398,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1057,7 +1410,7 @@ "uri":"dli_01_0253.html", "node_id":"dli_01_0253.xml", "product_code":"dli", - "code":"53", + "code":"71", "des":"You can import data from OBS to a table created in DLI.Only one path can be specified during data import. 
The path cannot contain commas (,).To import data in CSV format ", "doc_type":"usermanual", "kw":"Importing Data to the Table,Databases and Tables,User Guide", @@ -1065,9 +1418,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1078,7 +1430,7 @@ "uri":"dli_01_0010.html", "node_id":"dli_01_0010.xml", "product_code":"dli", - "code":"54", + "code":"72", "des":"You can export data from a DLI table to OBS. During the export, a folder is created in OBS or the content in the existing folder is overwritten.The exported file can be i", "doc_type":"usermanual", "kw":"Exporting Data from DLI to OBS,Databases and Tables,User Guide", @@ -1086,9 +1438,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1099,7 +1450,7 @@ "uri":"dli_01_0008.html", "node_id":"dli_01_0008.xml", "product_code":"dli", - "code":"55", + "code":"73", "des":"Metadata is used to define data types. It describes information about the data, including the source, size, format, and other data features. 
In database fields, metadata ", "doc_type":"usermanual", "kw":"Viewing Metadata,Databases and Tables,User Guide", @@ -1107,9 +1458,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1120,7 +1470,7 @@ "uri":"dli_01_0007.html", "node_id":"dli_01_0007.xml", "product_code":"dli", - "code":"56", + "code":"74", "des":"The Preview page displays the first 10 records in the table.You can preview data on either the Data Management page or the SQL Editor page.To preview data on the Data Man", "doc_type":"usermanual", "kw":"Previewing Data,Databases and Tables,User Guide", @@ -1128,9 +1478,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1141,7 +1490,7 @@ "uri":"dli_01_0552.html", "node_id":"dli_01_0552.xml", "product_code":"dli", - "code":"57", + "code":"75", "des":"A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. A tag consists of a tag key and a t", "doc_type":"usermanual", "kw":"Managing Tags,Databases and Tables,User Guide", @@ -1161,7 +1510,7 @@ "uri":"dli_01_0366.html", "node_id":"dli_01_0366.xml", "product_code":"dli", - "code":"58", + "code":"76", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Package Management", @@ -1182,7 +1531,7 @@ "uri":"dli_01_0407.html", "node_id":"dli_01_0407.xml", "product_code":"dli", - "code":"59", + "code":"77", "des":"Package management provides the following functions:Managing Package PermissionsCreating a PackageDeleting a PackageYou can delete program packages in batches.You can del", "doc_type":"usermanual", "kw":"Overview,Package Management,User Guide", @@ -1202,7 +1551,7 @@ "uri":"dli_01_0477.html", "node_id":"dli_01_0477.xml", "product_code":"dli", - "code":"60", + "code":"78", "des":"You can isolate package groups or packages allocated to different users by setting permissions to ensure data query performance.The administrator and the owner of a packa", "doc_type":"usermanual", "kw":"Managing Permissions on Packages and Package Groups,Package Management,User Guide", @@ -1222,7 +1571,7 @@ "uri":"dli_01_0367.html", "node_id":"dli_01_0367.xml", "product_code":"dli", - "code":"61", + "code":"79", "des":"DLI allows you to submit program packages in batches to the general-use queue for running.If you need to update a package, you can use the same package or file to upload ", "doc_type":"usermanual", "kw":"Creating a Package,Package Management,User Guide", @@ -1242,7 +1591,7 @@ "uri":"dli_01_0369.html", "node_id":"dli_01_0369.xml", "product_code":"dli", - "code":"62", + "code":"80", "des":"You can delete a package based on actual conditions.On the left of the management console, choose Data Management > Package Management.Click Delete in the Operation colum", "doc_type":"usermanual", "kw":"Deleting a Package,Package Management,User Guide", @@ -1263,7 +1612,7 @@ "uri":"dli_01_0478.html", "node_id":"dli_01_0478.xml", "product_code":"dli", - "code":"63", + "code":"81", "des":"To change the owner of a package, click More > Modify Owner in the Operation 
column of a package on the Package Management page.If the package has been grouped, you can m", "doc_type":"usermanual", "kw":"Modifying the Owner,Package Management,User Guide", @@ -1284,7 +1633,7 @@ "uri":"dli_01_0397.html", "node_id":"dli_01_0397.xml", "product_code":"dli", - "code":"64", + "code":"82", "des":"DLI built-in dependencies are provided by the platform by default. In case of conflicts, you do not need to upload them when packaging JAR packages of Spark or Flink Jar ", "doc_type":"usermanual", "kw":"Built-in Dependencies,Package Management,User Guide", @@ -1304,7 +1653,7 @@ "uri":"dli_01_0379.html", "node_id":"dli_01_0379.xml", "product_code":"dli", - "code":"65", + "code":"83", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Job Templates", @@ -1325,7 +1674,7 @@ "uri":"dli_01_0021.html", "node_id":"dli_01_0021.xml", "product_code":"dli", - "code":"66", + "code":"84", "des":"To facilitate SQL operation execution, DLI allows you to customize query templates or save the SQL statements in use as templates. After templates are saved, you do not n", "doc_type":"usermanual", "kw":"Managing SQL Templates,Job Templates,User Guide", @@ -1346,7 +1695,7 @@ "uri":"dli_01_0464.html", "node_id":"dli_01_0464.xml", "product_code":"dli", - "code":"67", + "code":"85", "des":"Flink templates include sample templates and custom templates. 
You can modify an existing sample template to meet the actual job logic requirements and save time for edit", "doc_type":"usermanual", "kw":"Managing Flink Templates,Job Templates,User Guide", @@ -1366,7 +1715,7 @@ "uri":"dli_01_0551.html", "node_id":"dli_01_0551.xml", "product_code":"dli", - "code":"68", + "code":"86", "des":"You can modify a sample template to meet the Spark job requirements, saving time for editing SQL statements.Currently, the cloud platform does not provide preset Spark te", "doc_type":"usermanual", "kw":"Managing Spark SQL Templates,Job Templates,User Guide", @@ -1387,7 +1736,7 @@ "uri":"dli_01_05110.html", "node_id":"dli_01_05110.xml", "product_code":"dli", - "code":"69", + "code":"87", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Appendix", @@ -1408,7 +1757,7 @@ "uri":"dli_01_05111.html", "node_id":"dli_01_05111.xml", "product_code":"dli", - "code":"70", + "code":"88", "des":"TPC-H is a test set developed by the Transaction Processing Performance Council (TPC) to simulate decision-making support applications. It is widely used in academia and ", "doc_type":"usermanual", "kw":"TPC-H Sample Data in the SQL Template,Appendix,User Guide", @@ -1428,7 +1777,7 @@ "uri":"dli_01_0426.html", "node_id":"dli_01_0426.xml", "product_code":"dli", - "code":"71", + "code":"89", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Enhanced Datasource Connections", @@ -1449,7 +1798,7 @@ "uri":"dli_01_0003.html", "node_id":"dli_01_0003.xml", "product_code":"dli", - "code":"72", + "code":"90", "des":"In cross-source data analysis scenarios, DLI needs to connect to external data sources. However, due to the different VPCs between the data source and DLI, the network ca", "doc_type":"usermanual", "kw":"Overview,Enhanced Datasource Connections,User Guide", @@ -1470,7 +1819,7 @@ "uri":"dli_01_0410.html", "node_id":"dli_01_0410.xml", "product_code":"dli", - "code":"73", + "code":"91", "des":"If DLI needs to access external data sources, you need to establish enhanced datasource connections to enable the network between DLI and the data sources, and then devel", "doc_type":"usermanual", "kw":"Cross-Source Analysis Development Methods,Enhanced Datasource Connections,User Guide", @@ -1491,7 +1840,7 @@ "uri":"dli_01_0006.html", "node_id":"dli_01_0006.xml", "product_code":"dli", - "code":"74", + "code":"92", "des":"Create an enhanced datasource connection for DLI to access, import, query, and analyze data of other data sources.For example, to connect DLI to the MRS, RDS, CSS, Kafka,", "doc_type":"usermanual", "kw":"Creating an Enhanced Datasource Connection,Enhanced Datasource Connections,User Guide", @@ -1512,7 +1861,7 @@ "uri":"dli_01_0553.html", "node_id":"dli_01_0553.xml", "product_code":"dli", - "code":"75", + "code":"93", "des":"Delete an enhanced datasource connection that is no longer used on the console.Log in to the DLI management console.In the left navigation pane, choose Datasource Connect", "doc_type":"usermanual", "kw":"Deleting an Enhanced Datasource Connection,Enhanced Datasource Connections,User Guide", @@ -1533,7 +1882,7 @@ "uri":"dli_01_0013.html", "node_id":"dli_01_0013.xml", "product_code":"dli", - "code":"76", 
+ "code":"94", "des":"Host information is the mapping between host IP addresses and domain names. After you configure host information, jobs can only use the configured domain names to access ", "doc_type":"usermanual", "kw":"Modifying Host Information,Enhanced Datasource Connections,User Guide", @@ -1554,7 +1903,7 @@ "uri":"dli_01_0514.html", "node_id":"dli_01_0514.xml", "product_code":"dli", - "code":"77", + "code":"95", "des":"The CIDR block of the DLI queue that is bound with a datasource connection cannot overlap with that of the data source.The default queue cannot be bound with a connection", "doc_type":"usermanual", "kw":"Binding and Unbinding a Queue,Enhanced Datasource Connections,User Guide", @@ -1562,9 +1911,9 @@ "metedata":[ { "prodname":"dli", + "IsMulti":"Yes", "opensource":"true", "documenttype":"usermanual", - "IsMulti":"No", "IsBot":"Yes" } ], @@ -1575,7 +1924,7 @@ "uri":"dli_01_0014.html", "node_id":"dli_01_0014.xml", "product_code":"dli", - "code":"78", + "code":"96", "des":"A route is configured with the destination, next hop type, and next hop to determine where the network traffic is directed. Routes are classified into system routes and c", "doc_type":"usermanual", "kw":"Adding a Route,Enhanced Datasource Connections,User Guide", @@ -1596,7 +1945,7 @@ "uri":"dli_01_0556.html", "node_id":"dli_01_0556.xml", "product_code":"dli", - "code":"79", + "code":"97", "des":"Delete a route that is no longer used.A custom route table cannot be deleted if it is associated with a subnet.Log in to the DLI management console.In the left navigation", "doc_type":"usermanual", "kw":"Deleting a Route,Enhanced Datasource Connections,User Guide", @@ -1617,7 +1966,7 @@ "uri":"dli_01_0018.html", "node_id":"dli_01_0018.xml", "product_code":"dli", - "code":"80", + "code":"98", "des":"Enhanced connections support user authorization by project. 
After authorization, users in the project have the permission to perform operations on the enhanced connection", "doc_type":"usermanual", "kw":"Enhanced Connection Permission Management,Enhanced Datasource Connections,User Guide", @@ -1625,9 +1974,9 @@ "metedata":[ { "prodname":"dli", + "IsMulti":"Yes", "opensource":"true", "documenttype":"usermanual", - "IsMulti":"No", "IsBot":"Yes" } ], @@ -1638,7 +1987,7 @@ "uri":"dli_01_0019.html", "node_id":"dli_01_0019.xml", "product_code":"dli", - "code":"81", + "code":"99", "des":"A tag is a key-value pair customized by users and used to identify cloud resources. It helps users to classify and search for cloud resources. A tag consists of a tag key", "doc_type":"usermanual", "kw":"Enhanced Datasource Connection Tag Management,Enhanced Datasource Connections,User Guide", @@ -1659,7 +2008,7 @@ "uri":"dli_01_0422.html", "node_id":"dli_01_0422.xml", "product_code":"dli", - "code":"82", + "code":"100", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Datasource Authentication", @@ -1680,7 +2029,7 @@ "uri":"dli_01_0561.html", "node_id":"dli_01_0561.xml", "product_code":"dli", - "code":"83", + "code":"101", "des":"When analyzing across multiple sources, it is not recommended to configure authentication information directly in a job as it can lead to password leakage. Instead, you a", "doc_type":"usermanual", "kw":"Overview,Datasource Authentication,User Guide", @@ -1701,7 +2050,7 @@ "uri":"dli_01_0427.html", "node_id":"dli_01_0427.xml", "product_code":"dli", - "code":"84", + "code":"102", "des":"Create a CSS datasource authentication on the DLI console to store the authentication information of the CSS security cluster to DLI. 
This will allow you to access to the", "doc_type":"usermanual", "kw":"Creating a CSS Datasource Authentication,Datasource Authentication,User Guide", @@ -1722,7 +2071,7 @@ "uri":"dli_01_0558.html", "node_id":"dli_01_0558.xml", "product_code":"dli", - "code":"85", + "code":"103", "des":"Create a Kerberos datasource authentication on the DLI console to store the authentication information of the data source to DLI. This will allow you to access to the dat", "doc_type":"usermanual", "kw":"Creating a Kerberos Datasource Authentication,Datasource Authentication,User Guide", @@ -1743,7 +2092,7 @@ "uri":"dli_01_0560.html", "node_id":"dli_01_0560.xml", "product_code":"dli", - "code":"86", + "code":"104", "des":"Create a Kafka_SSL datasource authentication on the DLI console to store the Kafka authentication information to DLI. This will allow you to access to Kafka instances wit", "doc_type":"usermanual", "kw":"Creating a Kafka_SSL Datasource Authentication,Datasource Authentication,User Guide", @@ -1764,7 +2113,7 @@ "uri":"dli_01_0559.html", "node_id":"dli_01_0559.xml", "product_code":"dli", - "code":"87", + "code":"105", "des":"Create a password datasource authentication on the DLI console to store passwords of the GaussDB(DWS), RDS, DCS, and DDS data sources to DLI. 
This will allow you to acces", "doc_type":"usermanual", "kw":"Creating a Password Datasource Authentication,Datasource Authentication,User Guide", @@ -1785,7 +2134,7 @@ "uri":"dli_01_0480.html", "node_id":"dli_01_0480.xml", "product_code":"dli", - "code":"88", + "code":"106", "des":"Grant permissions on a datasource authentication to users so multiple user jobs can use the datasource authentication without affecting each other.The administrator and t", "doc_type":"usermanual", "kw":"Datasource Authentication Permission Management,Datasource Authentication,User Guide", @@ -1806,7 +2155,7 @@ "uri":"dli_01_0485.html", "node_id":"dli_01_0485.xml", "product_code":"dli", - "code":"89", + "code":"107", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Global Configuration", @@ -1827,7 +2176,7 @@ "uri":"dli_01_0476.html", "node_id":"dli_01_0476.xml", "product_code":"dli", - "code":"90", + "code":"108", "des":"DLI allows you to set variables that are frequently used during job development as global variables on the DLI management console. This avoids repeated definitions during", "doc_type":"usermanual", "kw":"Global Variables,Global Configuration,User Guide", @@ -1847,7 +2196,7 @@ "uri":"dli_01_0533.html", "node_id":"dli_01_0533.xml", "product_code":"dli", - "code":"91", + "code":"109", "des":"You can grant permissions on a global variable to users.The administrator and the global variable owner have all permissions. 
You do not need to set permissions for them,", "doc_type":"usermanual", "kw":"Permission Management for Global Variables,Global Configuration,User Guide", @@ -1863,32 +2212,11 @@ "title":"Permission Management for Global Variables", "githuburl":"" }, - { - "uri":"dli_01_0486.html", - "node_id":"dli_01_0486.xml", - "product_code":"dli", - "code":"92", - "des":"Only the tenant account or a subaccount of user group admin can authorize access.After entering the DLI management console, you are advised to set agency permissions to e", - "doc_type":"usermanual", - "kw":"Service Authorization,Global Configuration,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", - "IsMulti":"No", - "IsBot":"Yes" - } - ], - "title":"Service Authorization", - "githuburl":"" - }, { "uri":"dli_01_0408.html", "node_id":"dli_01_0408.xml", "product_code":"dli", - "code":"93", + "code":"110", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Permissions Management", @@ -1906,7 +2234,7 @@ "uri":"dli_01_0440.html", "node_id":"dli_01_0440.xml", "product_code":"dli", - "code":"94", + "code":"111", "des":"DLI has a comprehensive permission control mechanism and supports fine-grained authentication through Identity and Access Management (IAM). You can create policies in IAM", "doc_type":"usermanual", "kw":"Overview,Permissions Management,User Guide", @@ -1926,7 +2254,7 @@ "uri":"dli_01_0418.html", "node_id":"dli_01_0418.xml", "product_code":"dli", - "code":"95", + "code":"112", "des":"You can use Identity and Access Management (IAM) to implement fine-grained permissions control on DLI resources. 
For details, see Overview.If your cloud account does not ", "doc_type":"usermanual", "kw":"Creating an IAM User and Granting Permissions,Permissions Management,User Guide", @@ -1934,9 +2262,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -1947,7 +2274,7 @@ "uri":"dli_01_0451.html", "node_id":"dli_01_0451.xml", "product_code":"dli", - "code":"96", + "code":"113", "des":"Custom policies can be created as a supplement to the system policies of DLI. You can add actions to custom policies. For the actions supported for custom policies, see \"", "doc_type":"usermanual", "kw":"Creating a Custom Policy,Permissions Management,User Guide", @@ -1967,7 +2294,7 @@ "uri":"dli_01_0417.html", "node_id":"dli_01_0417.xml", "product_code":"dli", - "code":"97", + "code":"114", "des":"A resource is an object that exists within a service. You can select DLI resources by specifying their paths.", "doc_type":"usermanual", "kw":"DLI Resources,Permissions Management,User Guide", @@ -1987,7 +2314,7 @@ "uri":"dli_01_0475.html", "node_id":"dli_01_0475.xml", "product_code":"dli", - "code":"98", + "code":"115", "des":"Request conditions are useful in determining when a custom policy takes effect. A request condition consists of a condition key and operator. Condition keys are either gl", "doc_type":"usermanual", "kw":"DLI Request Conditions,Permissions Management,User Guide", @@ -2007,7 +2334,7 @@ "uri":"dli_01_0441.html", "node_id":"dli_01_0441.xml", "product_code":"dli", - "code":"99", + "code":"116", "des":"Table 1 lists the common operations supported by each system policy of DLI. Choose proper system policies according to this table. 
For details about the SQL statement per", "doc_type":"usermanual", "kw":"Common Operations Supported by DLI System Policy,Permissions Management,User Guide", @@ -2015,9 +2342,8 @@ "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], @@ -2028,7 +2354,7 @@ "uri":"dli_01_0513.html", "node_id":"dli_01_0513.xml", "product_code":"dli", - "code":"100", + "code":"117", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Other Common Operations", @@ -2049,7 +2375,7 @@ "uri":"dli_01_0420.html", "node_id":"dli_01_0420.xml", "product_code":"dli", - "code":"101", + "code":"118", "des":"On the DLI management console, you can import data stored on OBS to DLI tables from Data Management > Databases and Tables > Table Management and SQL Editor pages. For de", "doc_type":"usermanual", "kw":"Importing Data to a DLI Table,Other Common Operations,User Guide", @@ -2070,7 +2396,7 @@ "uri":"dli_01_0445.html", "node_id":"dli_01_0445.xml", "product_code":"dli", - "code":"102", + "code":"119", "des":"This section describes metrics reported by DLI to Cloud Eye as well as their namespaces and dimensions. 
You can use the management console or APIs provided by Cloud Eye t", "doc_type":"usermanual", "kw":"Viewing Monitoring Metrics,Other Common Operations,User Guide", @@ -2090,7 +2416,7 @@ "uri":"dli_01_0318.html", "node_id":"dli_01_0318.xml", "product_code":"dli", - "code":"103", + "code":"120", "des":"With CTS, you can record operations associated with DLI for later query, audit, and backtrack operations.", "doc_type":"usermanual", "kw":"DLI Operations That Can Be Recorded by CTS,Other Common Operations,User Guide", @@ -2110,28 +2436,27 @@ "uri":"dli_01_0550.html", "node_id":"dli_01_0550.xml", "product_code":"dli", - "code":"104", + "code":"121", "des":"A quota limits the quantity of a resource available to users, thereby preventing spikes in the usage of the resource.You can also request for an increased quota if your e", "doc_type":"usermanual", - "kw":"Quotas,Other Common Operations,User Guide", + "kw":"Quota Management,Other Common Operations,User Guide", "search_title":"", "metedata":[ { "prodname":"dli", - "opensource":"true", - "documenttype":"usermanual", "IsMulti":"No", + "documenttype":"usermanual", "IsBot":"Yes" } ], - "title":"Quotas", + "title":"Quota Management", "githuburl":"" }, { "uri":"dli_01_0539.html", "node_id":"dli_01_0539.xml", "product_code":"dli", - "code":"105", + "code":"122", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"FAQ", @@ -2152,7 +2477,7 @@ "uri":"dli_03_0037.html", "node_id":"dli_03_0037.xml", "product_code":"dli", - "code":"106", + "code":"123", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Flink Jobs", @@ -2169,14 +2494,35 @@ "title":"Flink Jobs", "githuburl":"" }, + { + "uri":"dli_03_0137.html", + "node_id":"dli_03_0137.xml", + "product_code":"dli", + "code":"124", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Usage", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Usage", + "githuburl":"" + }, { "uri":"dli_03_0083.html", "node_id":"dli_03_0083.xml", "product_code":"dli", - "code":"107", + "code":"125", "des":"DLI Flink jobs support the following data formats:Avro, Avro_merge, BLOB, CSV, EMAIL, JSON, ORC, Parquet, and XML.DLI Flink jobs support data from the following data sour", "doc_type":"usermanual", - "kw":"What Data Formats and Data Sources Are Supported by DLI Flink Jobs?,Flink Jobs,User Guide", + "kw":"What Data Formats and Data Sources Are Supported by DLI Flink Jobs?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -2194,10 +2540,10 @@ "uri":"dli_03_0139.html", "node_id":"dli_03_0139.xml", "product_code":"dli", - "code":"108", + "code":"126", "des":"A sub-user can view queues but cannot view Flink jobs. 
You can authorize the sub-user using DLI or IAM.Authorization on DLILog in to the DLI console using a tenant accoun", "doc_type":"usermanual", - "kw":"How Do I Authorize a Subuser to View Flink Jobs?,Flink Jobs,User Guide", + "kw":"How Do I Authorize a Subuser to View Flink Jobs?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -2215,10 +2561,10 @@ "uri":"dli_03_0090.html", "node_id":"dli_03_0090.xml", "product_code":"dli", - "code":"109", + "code":"127", "des":"DLI Flink jobs are highly available. You can enable the automatic restart function to automatically restart your jobs after short-time faults of peripheral services are r", "doc_type":"usermanual", - "kw":"How Do I Set Auto Restart upon Exception for a Flink Job?,Flink Jobs,User Guide", + "kw":"How Do I Set Auto Restart upon Exception for a Flink Job?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -2236,10 +2582,10 @@ "uri":"dli_03_0099.html", "node_id":"dli_03_0099.xml", "product_code":"dli", - "code":"110", + "code":"128", "des":"When you create a Flink SQL job or Flink Jar job, you can select Save Job Log on the job editing page to save job running logs to OBS.To set the OBS bucket for storing th", "doc_type":"usermanual", - "kw":"How Do I Save Flink Job Logs?,Flink Jobs,User Guide", + "kw":"How Do I Save Flink Job Logs?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -2257,10 +2603,10 @@ "uri":"dli_03_0043.html", "node_id":"dli_03_0043.xml", "product_code":"dli", - "code":"111", + "code":"129", "des":"DLI can output Flink job results to DIS. You can view the results in DIS. 
For details, see \"Obtaining Data from DIS\" in Data Ingestion Service User Guide.DLI can output F", "doc_type":"usermanual", - "kw":"How Can I Check Flink Job Results?,Flink Jobs,User Guide", + "kw":"How Can I Check Flink Job Results?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -2278,7 +2624,7 @@ "uri":"dli_03_0160.html", "node_id":"dli_03_0160.xml", "product_code":"dli", - "code":"112", + "code":"130", "des":"Choose Job Management > Flink Jobs. In the Operation column of the target job, choose More > Permissions. When a new user is authorized, No such user. userName:xxxx. is d", "doc_type":"usermanual", "kw":"Why Is Error \"No such user. userName:xxxx.\" Reported on the Flink Job Management Page When I Grant P", @@ -2299,7 +2645,7 @@ "uri":"dli_03_0180.html", "node_id":"dli_03_0180.xml", "product_code":"dli", - "code":"113", + "code":"131", "des":"Checkpoint was enabled when a Flink job is created, and the OBS bucket for storing checkpoints was specified. After a Flink job is manually stopped, no message is display", "doc_type":"usermanual", "kw":"How Do I Know Which Checkpoint the Flink Job I Stopped Will Be Restored to When I Start the Job Agai", @@ -2320,7 +2666,7 @@ "uri":"dli_03_0036.html", "node_id":"dli_03_0036.xml", "product_code":"dli", - "code":"114", + "code":"132", "des":"When you set running parameters of a DLI Flink job, you can enable Alarm Generation upon Job Exception to receive alarms when the job runs abnormally or is in arrears.If ", "doc_type":"usermanual", "kw":"Why Is a Message Displayed Indicating That the SMN Topic Does Not Exist When I Use the SMN Topic in ", @@ -2337,14 +2683,35 @@ "title":"Why Is a Message Displayed Indicating That the SMN Topic Does Not Exist When I Use the SMN Topic in DLI?", "githuburl":"" }, + { + "uri":"dli_03_0131.html", + "node_id":"dli_03_0131.xml", + "product_code":"dli", + "code":"133", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD 
services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Flink SQL", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Flink SQL", + "githuburl":"" + }, { "uri":"dli_03_0130.html", "node_id":"dli_03_0130.xml", "product_code":"dli", - "code":"115", + "code":"134", "des":"The consumption capability of a Flink SQL job depends on the data source transmission, queue size, and job parameter settings. The peak consumption is 10 Mbit/s.", "doc_type":"usermanual", - "kw":"How Much Data Can Be Processed in a Day by a Flink SQL Job?,Flink Jobs,User Guide", + "kw":"How Much Data Can Be Processed in a Day by a Flink SQL Job?,Flink SQL,User Guide", "search_title":"", "metedata":[ { @@ -2362,7 +2729,7 @@ "uri":"dli_03_0061.html", "node_id":"dli_03_0061.xml", "product_code":"dli", - "code":"116", + "code":"135", "des":"The temp stream in Flink SQL is similar to a subquery. It is a logical stream used to simplify the SQL logic and does not generate data storage. Therefore, there is no ne", "doc_type":"usermanual", "kw":"Does Data in the Temporary Stream of Flink SQL Need to Be Cleared Periodically? How Do I Clear the D", @@ -2383,7 +2750,7 @@ "uri":"dli_03_0138.html", "node_id":"dli_03_0138.xml", "product_code":"dli", - "code":"117", + "code":"136", "des":"SymptomWhen you create a Flink SQL job and configure the parameters, you select an OBS bucket you have created. 
The system displays a message indicating that the OBS buck", "doc_type":"usermanual", "kw":"Why Is a Message Displayed Indicating That the OBS Bucket Is Not Authorized When I Select an OBS Buc", @@ -2404,10 +2771,10 @@ "uri":"dli_03_0089.html", "node_id":"dli_03_0089.xml", "product_code":"dli", - "code":"118", + "code":"137", "des":"When using a Flink SQL job, you need to create an OBS partition table for subsequent batch processing.In the following example, the day field is used as the partition fie", "doc_type":"usermanual", - "kw":"How Do I Create an OBS Partitioned Table for a Flink SQL Job?,Flink Jobs,User Guide", + "kw":"How Do I Create an OBS Partitioned Table for a Flink SQL Job?,Flink SQL,User Guide", "search_title":"", "metedata":[ { @@ -2425,10 +2792,10 @@ "uri":"dli_03_0075.html", "node_id":"dli_03_0075.xml", "product_code":"dli", - "code":"119", + "code":"138", "des":"In this example, the day field is used as the partition field with the parquet encoding format (only the parquet format is supported currently) to dump car_info data to O", "doc_type":"usermanual", - "kw":"How Do I Dump Data to OBS and Create an OBS Partitioned Table?,Flink Jobs,User Guide", + "kw":"How Do I Dump Data to OBS and Create an OBS Partitioned Table?,Flink SQL,User Guide", "search_title":"", "metedata":[ { @@ -2446,7 +2813,7 @@ "uri":"dli_03_0167.html", "node_id":"dli_03_0167.xml", "product_code":"dli", - "code":"120", + "code":"139", "des":"When I run the creation statement with an EL expression in the table name in a Flink SQL job, the following error message is displayed:DLI.0005: AnalysisException: t_user", "doc_type":"usermanual", "kw":"Why Is Error Message \"DLI.0005\" Displayed When I Use an EL Expression to Create a Table in a Flink S", @@ -2467,7 +2834,7 @@ "uri":"dli_03_0168.html", "node_id":"dli_03_0168.xml", "product_code":"dli", - "code":"121", + "code":"140", "des":"After data is written to OBS through the Flink job output stream, data cannot be queried from the 
DLI table created in the OBS file path.For example, use the following Fl", "doc_type":"usermanual", "kw":"Why Is No Data Queried in the DLI Table Created Using the OBS File Path When Data Is Written to OBS ", @@ -2488,7 +2855,7 @@ "uri":"dli_03_0174.html", "node_id":"dli_03_0174.xml", "product_code":"dli", - "code":"122", + "code":"141", "des":"After a Flink SQL job is submitted on DLI, the job fails to be executed. The following error information is displayed in the job log:connect to DIS failed java.lang.Illeg", "doc_type":"usermanual", "kw":"Why Does a Flink SQL Job Fails to Be Executed, and Is \"connect to DIS failed java.lang.IllegalArgume", @@ -2509,10 +2876,10 @@ "uri":"dli_03_0176.html", "node_id":"dli_03_0176.xml", "product_code":"dli", - "code":"123", + "code":"142", "des":"Semantic verification for a Flink SQL job (reading DIS data) fails. The following information is displayed when the job fails:Get dis channel xxxinfo failed. error info: ", "doc_type":"usermanual", - "kw":"Why Is Error \"Not authorized\" Reported When a Flink SQL Job Reads DIS Data?,Flink Jobs,User Guide", + "kw":"Why Is Error \"Not authorized\" Reported When a Flink SQL Job Reads DIS Data?,Flink SQL,User Guide", "search_title":"", "metedata":[ { @@ -2530,7 +2897,7 @@ "uri":"dli_03_0232.html", "node_id":"dli_03_0232.xml", "product_code":"dli", - "code":"124", + "code":"143", "des":"After a Flink SQL job consumed Kafka and sent data to the Elasticsearch cluster, the job was successfully executed, but no data is available.Possible causes are as follow", "doc_type":"usermanual", "kw":"Data Writing Fails After a Flink SQL Job Consumed Kafka and Sank Data to the Elasticsearch Cluster,F", @@ -2547,14 +2914,35 @@ "title":"Data Writing Fails After a Flink SQL Job Consumed Kafka and Sank Data to the Elasticsearch Cluster", "githuburl":"" }, + { + "uri":"dli_03_0132.html", + "node_id":"dli_03_0132.xml", + "product_code":"dli", + "code":"144", + "des":"HUAWEI CLOUD Help Center presents technical 
documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Flink Jar Jobs", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Flink Jar Jobs", + "githuburl":"" + }, { "uri":"dli_03_0038.html", "node_id":"dli_03_0038.xml", "product_code":"dli", - "code":"125", + "code":"145", "des":"The procedure is as follows:Add the following code to the JAR file code of the Flink Jar job:// Configure the pom file on which the StreamExecutionEnvironment depends.\nSt", "doc_type":"usermanual", - "kw":"How Do I Configure Checkpoints for Flink Jar Jobs and Save the Checkpoints to OBS?,Flink Jobs,User G", + "kw":"How Do I Configure Checkpoints for Flink Jar Jobs and Save the Checkpoints to OBS?,Flink Jar Jobs,Us", "search_title":"", "metedata":[ { @@ -2572,7 +2960,7 @@ "uri":"dli_03_0044.html", "node_id":"dli_03_0044.xml", "product_code":"dli", - "code":"126", + "code":"146", "des":"Configuration files can be uploaded for user-defined jobs (JAR).Upload the configuration file to DLI through Package Management.In the Other Dependencies area of the Flin", "doc_type":"usermanual", "kw":"Does a Flink JAR Job Support Configuration File Upload? How Do I Upload a Configuration File?,Flink ", @@ -2593,10 +2981,10 @@ "uri":"dli_03_0119.html", "node_id":"dli_03_0119.xml", "product_code":"dli", - "code":"127", + "code":"147", "des":"The dependency of your Flink job conflicts with a built-in dependency of the DLI Flink platform. 
As a result, the job submission fails.Delete your JAR file that is the sa", "doc_type":"usermanual", - "kw":"Why Does the Submission Fail Due to Flink JAR File Conflict?,Flink Jobs,User Guide", + "kw":"Why Does the Submission Fail Due to Flink JAR File Conflict?,Flink Jar Jobs,User Guide", "search_title":"", "metedata":[ { @@ -2614,7 +3002,7 @@ "uri":"dli_03_0161.html", "node_id":"dli_03_0161.xml", "product_code":"dli", - "code":"128", + "code":"148", "des":"When a Flink Jar job is submitted to access GaussDB(DWS), an error message is displayed indicating that the job fails to be started. The job log contains the following er", "doc_type":"usermanual", "kw":"Why Does a Flink Jar Job Fail to Access GaussDB(DWS) and a Message Is Displayed Indicating Too Many ", @@ -2635,10 +3023,10 @@ "uri":"dli_03_0165.html", "node_id":"dli_03_0165.xml", "product_code":"dli", - "code":"129", + "code":"149", "des":"An exception occurred when a Flink Jar job is running. The following error information is displayed in the job log:org.apache.flink.shaded.curator.org.apache.curator.Conn", "doc_type":"usermanual", - "kw":"Why Is Error Message \"Authentication failed\" Displayed During Flink Jar Job Running?,Flink Jobs,User", + "kw":"Why Is Error Message \"Authentication failed\" Displayed During Flink Jar Job Running?,Flink Jar Jobs,", "search_title":"", "metedata":[ { @@ -2656,10 +3044,10 @@ "uri":"dli_03_0233.html", "node_id":"dli_03_0233.xml", "product_code":"dli", - "code":"130", + "code":"150", "des":"The storage path of the Flink Jar job checkpoints was set to an OBS bucket. 
The job failed to be submitted, and an error message indicating an invalid OBS bucket name was", "doc_type":"usermanual", - "kw":"Why Is Error Invalid OBS Bucket Name Reported After a Flink Job Submission Failed?,Flink Jobs,User G", + "kw":"Why Is Error Invalid OBS Bucket Name Reported After a Flink Job Submission Failed?,Flink Jar Jobs,Us", "search_title":"", "metedata":[ { @@ -2677,10 +3065,10 @@ "uri":"dli_03_0234.html", "node_id":"dli_03_0234.xml", "product_code":"dli", - "code":"131", + "code":"151", "des":"Flink Job submission failed. The exception information is as follows:Flink JAR files conflicted. The submitted Flink JAR file conflicted with the HDFS JAR file of the DLI", "doc_type":"usermanual", - "kw":"Why Does the Flink Submission Fail Due to Hadoop JAR File Conflict?,Flink Jobs,User Guide", + "kw":"Why Does the Flink Submission Fail Due to Hadoop JAR File Conflict?,Flink Jar Jobs,User Guide", "search_title":"", "metedata":[ { @@ -2698,10 +3086,10 @@ "uri":"dli_03_0266.html", "node_id":"dli_03_0266.xml", "product_code":"dli", - "code":"132", + "code":"152", "des":"You can use Flink Jar to connect to Kafka with SASL SSL authentication enabled.", "doc_type":"usermanual", - "kw":"How Do I Connect a Flink jar Job to SASL_SSL?,Flink Jobs,User Guide", + "kw":"How Do I Connect a Flink jar Job to SASL_SSL?,Flink Jar Jobs,User Guide", "search_title":"", "metedata":[ { @@ -2715,14 +3103,35 @@ "title":"How Do I Connect a Flink jar Job to SASL_SSL?", "githuburl":"" }, + { + "uri":"dli_03_0133.html", + "node_id":"dli_03_0133.xml", + "product_code":"dli", + "code":"153", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Performance Tuning", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Performance Tuning", + "githuburl":"" + }, { "uri":"dli_03_0106.html", "node_id":"dli_03_0106.xml", "product_code":"dli", - "code":"133", + "code":"154", "des":"Data Stacking in a Consumer GroupThe accumulated data of a consumer group can be calculated by the following formula: Total amount of data to be consumed by the consumer ", "doc_type":"usermanual", - "kw":"How Do I Optimize Performance of a Flink Job?,Flink Jobs,User Guide", + "kw":"How Do I Optimize Performance of a Flink Job?,Performance Tuning,User Guide", "search_title":"", "metedata":[ { @@ -2740,10 +3149,10 @@ "uri":"dli_03_0048.html", "node_id":"dli_03_0048.xml", "product_code":"dli", - "code":"134", + "code":"155", "des":"Add the following SQL statements to the Flink job:", "doc_type":"usermanual", - "kw":"How Do I Write Data to Different Elasticsearch Clusters in a Flink Job?,Flink Jobs,User Guide", + "kw":"How Do I Write Data to Different Elasticsearch Clusters in a Flink Job?,Performance Tuning,User Guid", "search_title":"", "metedata":[ { @@ -2761,10 +3170,10 @@ "uri":"dli_03_0096.html", "node_id":"dli_03_0096.xml", "product_code":"dli", - "code":"135", + "code":"156", "des":"The DLI Flink checkpoint/savepoint mechanism is complete and reliable. 
You can use this mechanism to prevent data loss when a job is manually restarted or restarted due t", "doc_type":"usermanual", - "kw":"How Do I Prevent Data Loss After Flink Job Restart?,Flink Jobs,User Guide", + "kw":"How Do I Prevent Data Loss After Flink Job Restart?,Performance Tuning,User Guide", "search_title":"", "metedata":[ { @@ -2778,14 +3187,35 @@ "title":"How Do I Prevent Data Loss After Flink Job Restart?", "githuburl":"" }, + { + "uri":"dli_03_0135.html", + "node_id":"dli_03_0135.xml", + "product_code":"dli", + "code":"157", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"O&M Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"O&M Guide", + "githuburl":"" + }, { "uri":"dli_03_0103.html", "node_id":"dli_03_0103.xml", "product_code":"dli", - "code":"136", + "code":"158", "des":"On the Flink job management page, hover the cursor on the status of the job that fails to be submitted to view the brief information about the failure.The possible causes", "doc_type":"usermanual", - "kw":"How Do I Locate a Flink Job Submission Error?,Flink Jobs,User Guide", + "kw":"How Do I Locate a Flink Job Submission Error?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -2803,10 +3233,10 @@ "uri":"dli_03_0105.html", "node_id":"dli_03_0105.xml", "product_code":"dli", - "code":"137", + "code":"159", "des":"On the Flink job management, click Edit in the Operation column of the target job. 
On the displayed page, check whether Save Job Log in the Running Parameters tab is enab", "doc_type":"usermanual", - "kw":"How Do I Locate a Flink Job Running Error?,Flink Jobs,User Guide", + "kw":"How Do I Locate a Flink Job Running Error?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -2824,10 +3254,10 @@ "uri":"dli_03_0136.html", "node_id":"dli_03_0136.xml", "product_code":"dli", - "code":"138", + "code":"160", "des":"Flink's checkpointing is a fault tolerance and recovery mechanism. This mechanism ensures that real-time programs can self-recover in case of exceptions or machine issues", "doc_type":"usermanual", - "kw":"How Can I Check if a Flink Job Can Be Restored From a Checkpoint After Restarting It?,Flink Jobs,Use", + "kw":"How Can I Check if a Flink Job Can Be Restored From a Checkpoint After Restarting It?,O&M Guide,User", "search_title":"", "metedata":[ { @@ -2845,10 +3275,10 @@ "uri":"dli_03_0040.html", "node_id":"dli_03_0040.xml", "product_code":"dli", - "code":"139", + "code":"161", "des":"To rectify this fault, perform the following steps:Log in to the DIS management console. In the navigation pane, choose Stream Management. 
View the Flink job SQL statemen", "doc_type":"usermanual", - "kw":"Why Does DIS Stream Not Exist During Job Semantic Check?,Flink Jobs,User Guide", + "kw":"Why Does DIS Stream Not Exist During Job Semantic Check?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -2866,10 +3296,10 @@ "uri":"dli_03_0045.html", "node_id":"dli_03_0045.xml", "product_code":"dli", - "code":"140", + "code":"162", "des":"If the OBS bucket selected for a job is not authorized, perform the following steps:Select Enable Checkpointing or Save Job Log.Specify OBS Bucket.Select Authorize OBS.", "doc_type":"usermanual", - "kw":"Why Is the OBS Bucket Selected for Job Not Authorized?,Flink Jobs,User Guide", + "kw":"Why Is the OBS Bucket Selected for Job Not Authorized?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -2887,7 +3317,7 @@ "uri":"dli_03_0064.html", "node_id":"dli_03_0064.xml", "product_code":"dli", - "code":"141", + "code":"163", "des":"Mode for storing generated job logs when a DLI Flink job fails to be submitted or executed. 
The options are as follows:If the submission fails, a submission log is genera", "doc_type":"usermanual", "kw":"Why Are Logs Not Written to the OBS Bucket After a DLI Flink Job Fails to Be Submitted for Running?,", @@ -2908,10 +3338,10 @@ "uri":"dli_03_0235.html", "node_id":"dli_03_0235.xml", "product_code":"dli", - "code":"142", + "code":"164", "des":"The Flink/Spark UI was displayed with incomplete information.When the queue is used to run a job, the system releases the cluster and takes about 10 minutes to create a n", "doc_type":"usermanual", - "kw":"Why Is Information Displayed on the FlinkUI/Spark UI Page Incomplete?,Flink Jobs,User Guide", + "kw":"Why Is Information Displayed on the FlinkUI/Spark UI Page Incomplete?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -2929,10 +3359,10 @@ "uri":"dli_03_0236.html", "node_id":"dli_03_0236.xml", "product_code":"dli", - "code":"143", + "code":"165", "des":"JobManager and TaskManager heartbeats timed out. As a result, the Flink job is abnormal.Check whether the network is intermittently disconnected and whether the cluster l", "doc_type":"usermanual", - "kw":"Why Is the Flink Job Abnormal Due to Heartbeat Timeout Between JobManager and TaskManager?,Flink Job", + "kw":"Why Is the Flink Job Abnormal Due to Heartbeat Timeout Between JobManager and TaskManager?,O&M Guide", "search_title":"", "metedata":[ { @@ -2950,7 +3380,7 @@ "uri":"dli_03_0265.html", "node_id":"dli_03_0265.xml", "product_code":"dli", - "code":"144", + "code":"166", "des":"Test address connectivity.If the network is unreachable, rectify the network connection first. 
Ensure that the network connection between the DLI queue and the external d", "doc_type":"usermanual", "kw":"Why Is Error \"Timeout expired while fetching topic metadata\" Repeatedly Reported in Flink JobManager", @@ -2971,7 +3401,7 @@ "uri":"dli_03_0020.html", "node_id":"dli_03_0020.xml", "product_code":"dli", - "code":"145", + "code":"167", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Problems Related to SQL Jobs", @@ -2988,14 +3418,35 @@ "title":"Problems Related to SQL Jobs", "githuburl":"" }, + { + "uri":"dli_03_0216.html", + "node_id":"dli_03_0216.xml", + "product_code":"dli", + "code":"168", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Usage", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Usage", + "githuburl":"" + }, { "uri":"dli_03_0200.html", "node_id":"dli_03_0200.xml", "product_code":"dli", - "code":"146", + "code":"169", "des":"A temporary table is used to store intermediate results. When a transaction or session ends, the data in the temporary table can be automatically deleted. 
For example, in", "doc_type":"usermanual", - "kw":"SQL Jobs,Problems Related to SQL Jobs,User Guide", + "kw":"SQL Jobs,Usage,User Guide", "search_title":"", "metedata":[ { @@ -3009,14 +3460,35 @@ "title":"SQL Jobs", "githuburl":"" }, + { + "uri":"dli_03_0204.html", + "node_id":"dli_03_0204.xml", + "product_code":"dli", + "code":"170", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Job Development", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Job Development", + "githuburl":"" + }, { "uri":"dli_03_0086.html", "node_id":"dli_03_0086.xml", "product_code":"dli", - "code":"147", + "code":"171", "des":"If a large number of small files are generated during SQL execution, job execution and table query will take a long time. In this case, you should merge small files.Set t", "doc_type":"usermanual", - "kw":"How Do I Merge Small Files?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I Merge Small Files?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3034,10 +3506,10 @@ "uri":"dli_03_0092.html", "node_id":"dli_03_0092.xml", "product_code":"dli", - "code":"148", + "code":"172", "des":"When creating an OBS table, you must specify a table path in the database. 
The path format is as follows: obs://xxx/database name/table name.If the specified path is akdc", "doc_type":"usermanual", - "kw":"How Do I Specify an OBS Path When Creating an OBS Table?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I Specify an OBS Path When Creating an OBS Table?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3055,10 +3527,10 @@ "uri":"dli_03_0108.html", "node_id":"dli_03_0108.xml", "product_code":"dli", - "code":"149", + "code":"173", "des":"DLI allows you to associate JSON data in an OBS bucket to create tables in asynchronous mode.The statement for creating the table is as follows:", "doc_type":"usermanual", - "kw":"How Do I Create a Table Using JSON Data in an OBS Bucket?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I Create a Table Using JSON Data in an OBS Bucket?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3076,10 +3548,10 @@ "uri":"dli_03_0087.html", "node_id":"dli_03_0087.xml", "product_code":"dli", - "code":"150", + "code":"174", "des":"You can use the where condition statement in the select statement to filter data. 
For example:", "doc_type":"usermanual", - "kw":"How Do I Set Local Variables in SQL Statements?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I Set Local Variables in SQL Statements?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3097,10 +3569,10 @@ "uri":"dli_03_0069.html", "node_id":"dli_03_0069.xml", "product_code":"dli", - "code":"151", + "code":"175", "des":"The correct method for using the count function to perform aggregation is as follows:OrIf an incorrect method is used, an error will be reported.", "doc_type":"usermanual", - "kw":"How Can I Use the count Function to Perform Aggregation?,Problems Related to SQL Jobs,User Guide", + "kw":"How Can I Use the count Function to Perform Aggregation?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3118,10 +3590,10 @@ "uri":"dli_03_0072.html", "node_id":"dli_03_0072.xml", "product_code":"dli", - "code":"152", + "code":"176", "des":"You can use the cross-region replication function of OBS. The procedure is as follows:Export the DLI table data in region 1 to the user-defined OBS bucket.Use the OBS cro", "doc_type":"usermanual", - "kw":"How Do I Synchronize DLI Table Data from One Region to Another?,Problems Related to SQL Jobs,User Gu", + "kw":"How Do I Synchronize DLI Table Data from One Region to Another?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3139,10 +3611,10 @@ "uri":"dli_03_0191.html", "node_id":"dli_03_0191.xml", "product_code":"dli", - "code":"153", + "code":"177", "des":"Currently, DLI does not allow you to insert table data into specific fields. 
To insert table data, you must insert data of all table fields at a time.", "doc_type":"usermanual", - "kw":"How Do I Insert Table Data into Specific Fields of a Table Using a SQL Job?,Problems Related to SQL ", + "kw":"How Do I Insert Table Data into Specific Fields of a Table Using a SQL Job?,Job Development,User Gui", "search_title":"", "metedata":[ { @@ -3156,14 +3628,35 @@ "title":"How Do I Insert Table Data into Specific Fields of a Table Using a SQL Job?", "githuburl":"" }, + { + "uri":"dli_03_0206.html", + "node_id":"dli_03_0206.xml", + "product_code":"dli", + "code":"178", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Job O&M Errors", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Job O&M Errors", + "githuburl":"" + }, { "uri":"dli_03_0014.html", "node_id":"dli_03_0014.xml", "product_code":"dli", - "code":"154", + "code":"179", "des":"Create an OBS directory with a unique name. Alternatively, you can manually delete the existing OBS directory and submit the job again. 
However, exercise caution when del", "doc_type":"usermanual", - "kw":"Why Is Error \"path obs://xxx already exists\" Reported When Data Is Exported to OBS?,Problems Related", + "kw":"Why Is Error \"path obs://xxx already exists\" Reported When Data Is Exported to OBS?,Job O&M Errors,U", "search_title":"", "metedata":[ { @@ -3181,7 +3674,7 @@ "uri":"dli_03_0066.html", "node_id":"dli_03_0066.xml", "product_code":"dli", - "code":"155", + "code":"180", "des":"This message indicates that the two tables to be joined contain the same column, but the owner of the column is not specified when the command is executed.For example, ta", "doc_type":"usermanual", "kw":"Why Is Error \"SQL_ANALYSIS_ERROR: Reference 't.id' is ambiguous, could be: t.id, t.id.;\" Displayed W", @@ -3202,7 +3695,7 @@ "uri":"dli_03_0071.html", "node_id":"dli_03_0071.xml", "product_code":"dli", - "code":"156", + "code":"181", "des":"Check if your account is in arrears and top it up if necessary.If the same error message persists after the top-up, log out of your account and log back in.", "doc_type":"usermanual", "kw":"Why Is Error \"The current account does not have permission to perform this operation,the current acc", @@ -3223,7 +3716,7 @@ "uri":"dli_03_0145.html", "node_id":"dli_03_0145.xml", "product_code":"dli", - "code":"157", + "code":"182", "des":"Cause AnalysisWhen you query the partitioned table XX.YYY, the partition column is not specified in the search criteria.A partitioned table can be queried only when the q", "doc_type":"usermanual", "kw":"Why Is Error \"There should be at least one partition pruning predicate on partitioned table XX.YYY\" ", @@ -3244,7 +3737,7 @@ "uri":"dli_03_0169.html", "node_id":"dli_03_0169.xml", "product_code":"dli", - "code":"158", + "code":"183", "des":"The following error message is displayed when the LOAD DATA command is executed by a Spark SQL job to import data to a DLI table:In some cases ,the following error messag", "doc_type":"usermanual", "kw":"Why Is 
Error \"IllegalArgumentException: Buffer size too small. size\" Reported When Data Is Loaded to", @@ -3265,10 +3758,10 @@ "uri":"dli_03_0189.html", "node_id":"dli_03_0189.xml", "product_code":"dli", - "code":"159", + "code":"184", "des":"An error is reported during SQL job execution:Please contact DLI service. DLI.0002: FileNotFoundException: getFileStatus on obs://xxx: status [404]Check whether there is ", "doc_type":"usermanual", - "kw":"Why Is Error \"DLI.0002 FileNotFoundException\" Reported During SQL Job Running?,Problems Related to S", + "kw":"Why Is Error \"DLI.0002 FileNotFoundException\" Reported During SQL Job Running?,Job O&M Errors,User G", "search_title":"", "metedata":[ { @@ -3286,10 +3779,10 @@ "uri":"dli_03_0046.html", "node_id":"dli_03_0046.xml", "product_code":"dli", - "code":"160", + "code":"185", "des":"Currently, DLI supports the Hive syntax for creating tables of the TEXTFILE, SEQUENCEFILE, RCFILE, ORC, AVRO, and PARQUET file types. If the file format specified for cre", "doc_type":"usermanual", - "kw":"Why Is a Schema Parsing Error Reported When I Create a Hive Table Using CTAS?,Problems Related to SQ", + "kw":"Why Is a Schema Parsing Error Reported When I Create a Hive Table Using CTAS?,Job O&M Errors,User Gu", "search_title":"", "metedata":[ { @@ -3307,7 +3800,7 @@ "uri":"dli_03_0173.html", "node_id":"dli_03_0173.xml", "product_code":"dli", - "code":"161", + "code":"186", "des":"When you run a DLI SQL script on DataArts Studio, the log shows that the statements fail to be executed. 
The error information is as follows:DLI.0999: RuntimeException: o", "doc_type":"usermanual", "kw":"Why Is Error \"org.apache.hadoop.fs.obs.OBSIOException\" Reported When I Run DLI SQL Scripts on DataAr", @@ -3328,7 +3821,7 @@ "uri":"dli_03_0172.html", "node_id":"dli_03_0172.xml", "product_code":"dli", - "code":"162", + "code":"187", "des":"After the migration job is submitted, the following error information is displayed in the log:org.apache.sqoop.common.SqoopException:UQUERY_CONNECTOR_0001:Invoke DLI serv", "doc_type":"usermanual", "kw":"Why Is Error \"UQUERY_CONNECTOR_0001:Invoke DLI service api failed\" Reported in the Job Log When I Us", @@ -3349,10 +3842,10 @@ "uri":"dli_03_0207.html", "node_id":"dli_03_0207.xml", "product_code":"dli", - "code":"163", + "code":"188", "des":"Error message \"File not Found\" is displayed when a SQL job is accessed.Generally, the file cannot be found due to a read/write conflict. Check whether a job is overwritin", "doc_type":"usermanual", - "kw":"Why Is Error \"File not Found\" Reported When I Access a SQL Job?,Problems Related to SQL Jobs,User Gu", + "kw":"Why Is Error \"File not Found\" Reported When I Access a SQL Job?,Job O&M Errors,User Guide", "search_title":"", "metedata":[ { @@ -3370,10 +3863,10 @@ "uri":"dli_03_0208.html", "node_id":"dli_03_0208.xml", "product_code":"dli", - "code":"164", + "code":"189", "des":"Error message \"DLI.0003: AccessControlException XXX\" is reported when a SQL job is accessed.Check the OBS bucket written in the AccessControlException to confirm if your ", "doc_type":"usermanual", - "kw":"Why Is Error \"DLI.0003: AccessControlException XXX\" Reported When I Access a SQL Job?,Problems Relat", + "kw":"Why Is Error \"DLI.0003: AccessControlException XXX\" Reported When I Access a SQL Job?,Job O&M Errors", "search_title":"", "metedata":[ { @@ -3391,7 +3884,7 @@ "uri":"dli_03_0209.html", "node_id":"dli_03_0209.xml", "product_code":"dli", - "code":"165", + "code":"190", "des":"Error message 
\"DLI.0001: org.apache.hadoop.security.AccessControlException: verifyBucketExists on {{bucket name}}: status [403]\" is reported when a SQL job is Accessed.Yo", "doc_type":"usermanual", "kw":"Why Is Error \"DLI.0001: org.apache.hadoop.security.AccessControlException: verifyBucketExists on {{b", @@ -3412,7 +3905,7 @@ "uri":"dli_03_0210.html", "node_id":"dli_03_0210.xml", "product_code":"dli", - "code":"166", + "code":"191", "des":"Error message \"The current account does not have permission to perform this operation,the current account was restricted.\" is reported during SQL statement execution.Chec", "doc_type":"usermanual", "kw":"Why Is Error \"The current account does not have permission to perform this operation,the current acc", @@ -3429,14 +3922,35 @@ "title":"Why Is Error \"The current account does not have permission to perform this operation,the current account was restricted. Restricted for no budget\" Reported During SQL Statement Execution? Restricted for no budget.", "githuburl":"" }, + { + "uri":"dli_03_0211.html", + "node_id":"dli_03_0211.xml", + "product_code":"dli", + "code":"192", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"O&M Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"O&M Guide", + "githuburl":"" + }, { "uri":"dli_03_0196.html", "node_id":"dli_03_0196.xml", "product_code":"dli", - "code":"167", + "code":"193", "des":"If the job runs slowly, perform the following steps to find the causes and rectify the fault:Check whether the problem is caused by FullGC.Log in to the DLI console. 
In t", "doc_type":"usermanual", - "kw":"How Do I Troubleshoot Slow SQL Jobs?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I Troubleshoot Slow SQL Jobs?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3454,10 +3968,10 @@ "uri":"dli_03_0091.html", "node_id":"dli_03_0091.xml", "product_code":"dli", - "code":"168", + "code":"194", "des":"You can view SQL job logs for routine O&M.Obtain the ID of the DLI job executed on the DataArts Studio console.Job IDOn the DLI console, choose Job Management > SQL Jobs.", "doc_type":"usermanual", - "kw":"How Do I View DLI SQL Logs?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I View DLI SQL Logs?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3475,10 +3989,10 @@ "uri":"dli_03_0116.html", "node_id":"dli_03_0116.xml", "product_code":"dli", - "code":"169", + "code":"195", "des":"You can view the job execution records when a job is running.Log in to the DLI management console.In the navigation pane on the left, choose Job Management > SQL Jobs.Ent", "doc_type":"usermanual", - "kw":"How Do I View SQL Execution Records?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I View SQL Execution Records?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3496,10 +4010,10 @@ "uri":"dli_03_0093.html", "node_id":"dli_03_0093.xml", "product_code":"dli", - "code":"170", + "code":"196", "des":"If the execution of an SQL statement takes a long time, you need to access the Spark UI to check the execution status.If data skew occurs, the running time of a stage exc", "doc_type":"usermanual", - "kw":"How Do I Eliminate Data Skew by Configuring AE Parameters?,Problems Related to SQL Jobs,User Guide", + "kw":"How Do I Eliminate Data Skew by Configuring AE Parameters?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3517,10 +4031,10 @@ "uri":"dli_03_0184.html", "node_id":"dli_03_0184.xml", "product_code":"dli", - "code":"171", + "code":"197", "des":"A DLI table exists but cannot 
be queried on the DLI console.If a table exists but cannot be queried, there is a high probability that the current user does not have the p", "doc_type":"usermanual", - "kw":"What Can I Do If a Table Cannot Be Queried on the DLI Console?,Problems Related to SQL Jobs,User Gui", + "kw":"What Can I Do If a Table Cannot Be Queried on the DLI Console?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3538,10 +4052,10 @@ "uri":"dli_03_0013.html", "node_id":"dli_03_0013.xml", "product_code":"dli", - "code":"172", + "code":"198", "des":"A high compression ratio of OBS tables in the Parquet or ORC format (for example, a compression ratio of 5 or higher compared with text compression) will lead to large da", "doc_type":"usermanual", - "kw":"The Compression Ratio of OBS Tables Is Too High,Problems Related to SQL Jobs,User Guide", + "kw":"The Compression Ratio of OBS Tables Is Too High,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3559,10 +4073,10 @@ "uri":"dli_03_0009.html", "node_id":"dli_03_0009.xml", "product_code":"dli", - "code":"173", + "code":"199", "des":"DLI supports only UTF-8-encoded texts. Ensure that data is encoded using UTF-8 during table creation and import.", "doc_type":"usermanual", - "kw":"How Can I Avoid Garbled Characters Caused by Inconsistent Character Codes?,Problems Related to SQL J", + "kw":"How Can I Avoid Garbled Characters Caused by Inconsistent Character Codes?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3580,7 +4094,7 @@ "uri":"dli_03_0175.html", "node_id":"dli_03_0175.xml", "product_code":"dli", - "code":"174", + "code":"200", "des":"User A created the testTable table in a database through a SQL job and granted user B the permission to insert and delete table data. 
User A deleted the testTable table a", "doc_type":"usermanual", "kw":"Do I Need to Grant Table Permissions to a User and Project After I Delete a Table and Create One wit", @@ -3601,7 +4115,7 @@ "uri":"dli_03_0177.html", "node_id":"dli_03_0177.xml", "product_code":"dli", - "code":"175", + "code":"201", "des":"A CSV file is imported to a DLI partitioned table, but the imported file data does not contain the data in the partitioning column. The partitioning column needs to be sp", "doc_type":"usermanual", "kw":"Why Can't I Query Table Data After Data Is Imported to a DLI Partitioned Table Because the File to B", @@ -3622,7 +4136,7 @@ "uri":"dli_03_0181.html", "node_id":"dli_03_0181.xml", "product_code":"dli", - "code":"176", + "code":"202", "des":"When an OBS foreign table is created, a field in the specified OBS file contains a carriage return line feed (CRLF) character. As a result, the data is incorrect.The stat", "doc_type":"usermanual", "kw":"How Do I Fix the Data Error Caused by CRLF Characters in a Field of the OBS File Used to Create an E", @@ -3643,10 +4157,10 @@ "uri":"dli_03_0182.html", "node_id":"dli_03_0182.xml", "product_code":"dli", - "code":"177", + "code":"203", "des":"A SQL job contains join operations. After the job is submitted, it is stuck in the Running state and no result is returned.When a Spark SQL job has join operations on sma", "doc_type":"usermanual", - "kw":"Why Does a SQL Job That Has Join Operations Stay in the Running State?,Problems Related to SQL Jobs,", + "kw":"Why Does a SQL Job That Has Join Operations Stay in the Running State?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3664,7 +4178,7 @@ "uri":"dli_03_0187.html", "node_id":"dli_03_0187.xml", "product_code":"dli", - "code":"178", + "code":"204", "des":"The on clause was not added to the SQL statement for joining tables. 
As a result, the Cartesian product query occurs due to multi-table association, and the queue resourc", "doc_type":"usermanual", "kw":"The on Clause Is Not Added When Tables Are Joined. Cartesian Product Query Causes High Resource Usag", @@ -3685,10 +4199,10 @@ "uri":"dli_03_0190.html", "node_id":"dli_03_0190.xml", "product_code":"dli", - "code":"179", + "code":"205", "des":"Partition data is manually uploaded to a partition of an OBS table. However, the data cannot be queried using DLI SQL editor.After manually adding partition data, you nee", "doc_type":"usermanual", - "kw":"Why Can't I Query Data After I Manually Add Data to the Partition Directory of an OBS Table?,Problem", + "kw":"Why Can't I Query Data After I Manually Add Data to the Partition Directory of an OBS Table?,O&M Gui", "search_title":"", "metedata":[ { @@ -3706,10 +4220,10 @@ "uri":"dli_03_0212.html", "node_id":"dli_03_0212.xml", "product_code":"dli", - "code":"180", + "code":"206", "des":"To dynamically overwrite the specified partitioned data in the DataSource table, set dli.sql.dynamicPartitionOverwrite.enabled to true and then run the insert overwrite s", "doc_type":"usermanual", - "kw":"Why Is All Data Overwritten When insert overwrite Is Used to Overwrite Partitioned Table?,Problems R", + "kw":"Why Is All Data Overwritten When insert overwrite Is Used to Overwrite Partitioned Table?,O&M Guide,", "search_title":"", "metedata":[ { @@ -3727,10 +4241,10 @@ "uri":"dli_03_0213.html", "node_id":"dli_03_0213.xml", "product_code":"dli", - "code":"181", + "code":"207", "des":"The possible causes and solutions are as follows:After you purchase a DLI queue and submit a SQL job for the first time, wait for 5 to 10 minutes. 
After the cluster is st", "doc_type":"usermanual", - "kw":"Why Is a SQL Job Stuck in the Submitting State?,Problems Related to SQL Jobs,User Guide", + "kw":"Why Is a SQL Job Stuck in the Submitting State?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -3748,10 +4262,10 @@ "uri":"dli_03_0214.html", "node_id":"dli_03_0214.xml", "product_code":"dli", - "code":"182", + "code":"208", "des":"Spark does not have the datetime type and uses the TIMESTAMP type instead.You can use a function to convert data types.The following is an example.select cast(create_date", "doc_type":"usermanual", - "kw":"Why Is the create_date Field in the RDS Table Is a Timestamp in the DLI query result?,Problems Relat", + "kw":"Why Is the create_date Field in the RDS Table Is a Timestamp in the DLI query result?,O&M Guide,User", "search_title":"", "metedata":[ { @@ -3769,10 +4283,10 @@ "uri":"dli_03_0215.html", "node_id":"dli_03_0215.xml", "product_code":"dli", - "code":"183", + "code":"209", "des":"If the table name is changed immediately after SQL statements are executed, the data size of the table may be incorrect.If you need to change the table name, change it 5 ", "doc_type":"usermanual", - "kw":"What Can I Do If datasize Cannot Be Changed After the Table Name Is Changed in a Finished SQL Job?,P", + "kw":"What Can I Do If datasize Cannot Be Changed After the Table Name Is Changed in a Finished SQL Job?,O", "search_title":"", "metedata":[ { @@ -3790,10 +4304,10 @@ "uri":"dli_03_0231.html", "node_id":"dli_03_0231.xml", "product_code":"dli", - "code":"184", + "code":"210", "des":"When DLI is used to insert data into an OBS temporary table, only part of data is imported.Possible causes are as follows:The amount of data read during job execution is ", "doc_type":"usermanual", - "kw":"Why Is the Data Volume Changes When Data Is Imported from DLI to OBS?,Problems Related to SQL Jobs,U", + "kw":"Why Is the Data Volume Changes When Data Is Imported from DLI to OBS?,O&M Guide,User Guide", 
"search_title":"", "metedata":[ { @@ -3811,7 +4325,7 @@ "uri":"dli_03_0021.html", "node_id":"dli_03_0021.xml", "product_code":"dli", - "code":"185", + "code":"211", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Problems Related to Spark Jobs", @@ -3828,14 +4342,35 @@ "title":"Problems Related to Spark Jobs", "githuburl":"" }, + { + "uri":"dli_03_0163.html", + "node_id":"dli_03_0163.xml", + "product_code":"dli", + "code":"212", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Usage", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Usage", + "githuburl":"" + }, { "uri":"dli_03_0201.html", "node_id":"dli_03_0201.xml", "product_code":"dli", - "code":"186", + "code":"213", "des":"DLI Spark does not support job scheduling. You can use other services, such as DataArts Studio, or use APIs or SDKs to customize job schedule.The Spark SQL syntax does no", "doc_type":"usermanual", - "kw":"Spark Jobs,Problems Related to Spark Jobs,User Guide", + "kw":"Spark Jobs,Usage,User Guide", "search_title":"", "metedata":[ { @@ -3849,14 +4384,35 @@ "title":"Spark Jobs", "githuburl":"" }, + { + "uri":"dli_03_0217.html", + "node_id":"dli_03_0217.xml", + "product_code":"dli", + "code":"214", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Job Development", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Job Development", + "githuburl":"" + }, { "uri":"dli_03_0107.html", "node_id":"dli_03_0107.xml", "product_code":"dli", - "code":"187", + "code":"215", "des":"To use Spark to write data into a DLI table, configure the following parameters:fs.obs.access.keyfs.obs.secret.keyfs.obs.implfs.obs.endpointThe following is an example:", "doc_type":"usermanual", - "kw":"How Do I Use Spark to Write Data into a DLI Table?,Problems Related to Spark Jobs,User Guide", + "kw":"How Do I Use Spark to Write Data into a DLI Table?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3874,10 +4430,10 @@ "uri":"dli_03_0017.html", "node_id":"dli_03_0017.xml", "product_code":"dli", - "code":"188", + "code":"216", "des":"Hard-coded or plaintext AK and SK pose significant security risks. To ensure security, encrypt your AK and SK, store them in configuration files or environment variables,", "doc_type":"usermanual", - "kw":"How Do I Set the AK/SK for a Queue to Operate an OBS Table?,Problems Related to Spark Jobs,User Guid", + "kw":"How Do I Set the AK/SK for a Queue to Operate an OBS Table?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3895,10 +4451,10 @@ "uri":"dli_03_0102.html", "node_id":"dli_03_0102.xml", "product_code":"dli", - "code":"189", + "code":"217", "des":"Log in to the DLI console. In the navigation pane, choose Job Management > Spark Jobs. 
In the job list, locate the target job and click next to Job ID to view the parame", "doc_type":"usermanual", - "kw":"How Do I View the Resource Usage of DLI Spark Jobs?,Problems Related to Spark Jobs,User Guide", + "kw":"How Do I View the Resource Usage of DLI Spark Jobs?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3916,7 +4472,7 @@ "uri":"dli_03_0076.html", "node_id":"dli_03_0076.xml", "product_code":"dli", - "code":"190", + "code":"218", "des":"If the pymysql module is missing, check whether the corresponding EGG package exists. If the package does not exist, upload the pyFile package on the Package Management p", "doc_type":"usermanual", "kw":"How Do I Use Python Scripts to Access the MySQL Database If the pymysql Module Is Missing from the S", @@ -3937,10 +4493,10 @@ "uri":"dli_03_0082.html", "node_id":"dli_03_0082.xml", "product_code":"dli", - "code":"191", + "code":"219", "des":"DLI natively supports PySpark.For most cases, Python is preferred for data analysis, and PySpark is the best choice for big data analysis. 
Generally, JVM programs are pac", "doc_type":"usermanual", - "kw":"How Do I Run a Complex PySpark Program in DLI?,Problems Related to Spark Jobs,User Guide", + "kw":"How Do I Run a Complex PySpark Program in DLI?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3958,10 +4514,10 @@ "uri":"dli_03_0127.html", "node_id":"dli_03_0127.xml", "product_code":"dli", - "code":"192", + "code":"220", "des":"You can use DLI Spark jobs to access data in the MySQL database using either of the following methods:Solution 1: Buy a queue, create an enhanced datasource connection, a", "doc_type":"usermanual", - "kw":"How Does a Spark Job Access a MySQL Database?,Problems Related to Spark Jobs,User Guide", + "kw":"How Does a Spark Job Access a MySQL Database?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -3979,7 +4535,7 @@ "uri":"dli_03_0068.html", "node_id":"dli_03_0068.xml", "product_code":"dli", - "code":"193", + "code":"221", "des":"When shuffle statements, such as GROUP BY and JOIN, are executed in Spark jobs, data skew occurs, which slows down the job execution.To solve this problem, you can config", "doc_type":"usermanual", "kw":"How Do I Use JDBC to Set the spark.sql.shuffle.partitions Parameter to Improve the Task Concurrency?", @@ -4000,10 +4556,10 @@ "uri":"dli_03_0118.html", "node_id":"dli_03_0118.xml", "product_code":"dli", - "code":"194", + "code":"222", "des":"You can use SparkFiles to read the file submitted using –-file form a local path: SparkFiles.get(\"Name of the uploaded file\").The file path in the Driver is different fro", "doc_type":"usermanual", - "kw":"How Do I Read Uploaded Files for a Spark Jar Job?,Problems Related to Spark Jobs,User Guide", + "kw":"How Do I Read Uploaded Files for a Spark Jar Job?,Job Development,User Guide", "search_title":"", "metedata":[ { @@ -4017,11 +4573,32 @@ "title":"How Do I Read Uploaded Files for a Spark Jar Job?", "githuburl":"" }, + { + "uri":"dli_03_0218.html", + 
"node_id":"dli_03_0218.xml", + "product_code":"dli", + "code":"223", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Job O&M Errors", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Job O&M Errors", + "githuburl":"" + }, { "uri":"dli_03_0156.html", "node_id":"dli_03_0156.xml", "product_code":"dli", - "code":"195", + "code":"224", "des":"The following error is reported when a Spark job accesses OBS data:Set the AK/SK to enable Spark jobs to access OBS data. For details, see How Do I Set the AK/SK for a Qu", "doc_type":"usermanual", "kw":"Why Are Errors \"ResponseCode: 403\" and \"ResponseStatus: Forbidden\" Reported When a Spark Job Accesse", @@ -4042,7 +4619,7 @@ "uri":"dli_03_0164.html", "node_id":"dli_03_0164.xml", "product_code":"dli", - "code":"196", + "code":"225", "des":"Check whether the OBS bucket is used to store DLI logs on the Global Configuration > Job Configurations page. 
The job log bucket cannot be used for other purpose.", "doc_type":"usermanual", "kw":"Why Is Error \"verifyBucketExists on XXXX: status [403]\" Reported When I Use a Spark Job to Access an", @@ -4063,10 +4640,10 @@ "uri":"dli_03_0157.html", "node_id":"dli_03_0157.xml", "product_code":"dli", - "code":"197", + "code":"226", "des":"When a Spark job accesses a large amount of data, for example, accessing data in a GaussDB(DWS) database, you are advised to set the number of concurrent tasks and enable", "doc_type":"usermanual", - "kw":"Why Is a Job Running Timeout Reported When a Spark Job Runs a Large Amount of Data?,Problems Related", + "kw":"Why Is a Job Running Timeout Reported When a Spark Job Runs a Large Amount of Data?,Job O&M Errors,U", "search_title":"", "metedata":[ { @@ -4084,7 +4661,7 @@ "uri":"dli_03_0188.html", "node_id":"dli_03_0188.xml", "product_code":"dli", - "code":"198", + "code":"227", "des":"Spark jobs cannot access SFTP. Upload the files you want to access to OBS and then you can analyze the data using Spark jobs.", "doc_type":"usermanual", "kw":"Why Does the Job Fail to Be Executed and the Log Shows that the File Directory Is Abnormal When I Us", @@ -4105,10 +4682,10 @@ "uri":"dli_03_0192.html", "node_id":"dli_03_0192.xml", "product_code":"dli", - "code":"199", + "code":"228", "des":"When a Spark job is running, an error message is displayed, indicating that the user does not have the database permission. 
The error information is as follows:org.apache", "doc_type":"usermanual", - "kw":"Why Does the Job Fail to Be Executed Due to Insufficient Database and Table Permissions?,Problems Re", + "kw":"Why Does the Job Fail to Be Executed Due to Insufficient Database and Table Permissions?,Job O&M Err", "search_title":"", "metedata":[ { @@ -4122,14 +4699,35 @@ "title":"Why Does the Job Fail to Be Executed Due to Insufficient Database and Table Permissions?", "githuburl":"" }, + { + "uri":"dli_03_0219.html", + "node_id":"dli_03_0219.xml", + "product_code":"dli", + "code":"229", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"O&M Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"O&M Guide", + "githuburl":"" + }, { "uri":"dli_03_0077.html", "node_id":"dli_03_0077.xml", "product_code":"dli", - "code":"200", + "code":"230", "des":"I cannot find the specified Python environment after adding the Python 3 package.Set spark.yarn.appMasterEnv.PYSPARK_PYTHON to python3 in the conf file to specify the Pyt", "doc_type":"usermanual", - "kw":"Why Can't I Find the Specified Python Environment After Adding the Python Package?,Problems Related ", + "kw":"Why Can't I Find the Specified Python Environment After Adding the Python Package?,O&M Guide,User Gu", "search_title":"", "metedata":[ { @@ -4147,10 +4745,10 @@ "uri":"dli_03_0220.html", "node_id":"dli_03_0220.xml", "product_code":"dli", - "code":"201", + "code":"231", "des":"The remaining CUs in the queue may be insufficient. 
As a result, the job cannot be submitted.To view the remaining CUs of a queue, perform the following steps:Check the C", "doc_type":"usermanual", - "kw":"Why Is a Spark Jar Job Stuck in the Submitting State?,Problems Related to Spark Jobs,User Guide", + "kw":"Why Is a Spark Jar Job Stuck in the Submitting State?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4168,7 +4766,7 @@ "uri":"dli_03_0001.html", "node_id":"dli_03_0001.xml", "product_code":"dli", - "code":"202", + "code":"232", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Product Consultation", @@ -4185,14 +4783,35 @@ "title":"Product Consultation", "githuburl":"" }, + { + "uri":"dli_03_0221.html", + "node_id":"dli_03_0221.xml", + "product_code":"dli", + "code":"233", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Usage", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Usage", + "githuburl":"" + }, { "uri":"dli_03_0002.html", "node_id":"dli_03_0002.xml", "product_code":"dli", - "code":"203", + "code":"234", "des":"Data Lake Insight (DLI) is a serverless data processing and analysis service fully compatible with Apache Spark and Apache Flink ecosystems. 
It frees you from managing an", "doc_type":"usermanual", - "kw":"What Is DLI?,Product Consultation,User Guide", + "kw":"What Is DLI?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4210,10 +4829,10 @@ "uri":"dli_03_0025.html", "node_id":"dli_03_0025.xml", "product_code":"dli", - "code":"204", + "code":"235", "des":"DLI supports the following data formats:ParquetCSVORCJsonAvro", "doc_type":"usermanual", - "kw":"Which Data Formats Does DLI Support?,Product Consultation,User Guide", + "kw":"Which Data Formats Does DLI Support?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4231,10 +4850,10 @@ "uri":"dli_03_0115.html", "node_id":"dli_03_0115.xml", "product_code":"dli", - "code":"205", + "code":"236", "des":"The Spark component of DLI is a fully managed service. You can only use the DLI Spark through its APIs. .The Spark component of MRS is built on the VM in an MRS cluster. ", "doc_type":"usermanual", - "kw":"What Are the Differences Between MRS Spark and DLI Spark?,Product Consultation,User Guide", + "kw":"What Are the Differences Between MRS Spark and DLI Spark?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4252,10 +4871,10 @@ "uri":"dli_03_0029.html", "node_id":"dli_03_0029.xml", "product_code":"dli", - "code":"206", + "code":"237", "des":"DLI data can be stored in either of the following:OBS: Data used by SQL jobs, Spark jobs, and Flink jobs can be stored in OBS, reducing storage costs.DLI: The column-base", "doc_type":"usermanual", - "kw":"Where Can DLI Data Be Stored?,Product Consultation,User Guide", + "kw":"Where Can DLI Data Be Stored?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4273,10 +4892,10 @@ "uri":"dli_03_0117.html", "node_id":"dli_03_0117.xml", "product_code":"dli", - "code":"207", + "code":"238", "des":"DLI tables store data within the DLI service, and you do not need to know the data storage path.OBS tables store data in your OBS buckets, and you need to manage the sour", "doc_type":"usermanual", - 
"kw":"What Are the Differences Between DLI Tables and OBS Tables?,Product Consultation,User Guide", + "kw":"What Are the Differences Between DLI Tables and OBS Tables?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4294,10 +4913,10 @@ "uri":"dli_03_0010.html", "node_id":"dli_03_0010.xml", "product_code":"dli", - "code":"208", + "code":"239", "des":"Currently, DLI supports analysis only on the data uploaded to the cloud. In scenarios where regular (for example, on a per day basis) one-off analysis on incremental data", "doc_type":"usermanual", - "kw":"How Can I Use DLI If Data Is Not Uploaded to OBS?,Product Consultation,User Guide", + "kw":"How Can I Use DLI If Data Is Not Uploaded to OBS?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4315,10 +4934,10 @@ "uri":"dli_03_0129.html", "node_id":"dli_03_0129.xml", "product_code":"dli", - "code":"209", + "code":"240", "des":"Data in the OBS bucket shared by IAM users under the same account can be imported. You cannot import data in the OBS bucket shared with other IAM account.", "doc_type":"usermanual", - "kw":"Can I Import OBS Bucket Data Shared by Other Tenants into DLI?,Product Consultation,User Guide", + "kw":"Can I Import OBS Bucket Data Shared by Other Tenants into DLI?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4336,7 +4955,7 @@ "uri":"dli_03_0264.html", "node_id":"dli_03_0264.xml", "product_code":"dli", - "code":"210", + "code":"241", "des":"Log in to the management console.Click in the upper left corner and select a region and a project.Click the My Quota icon in the upper right corner of the page.The Serv", "doc_type":"usermanual", "kw":"Why Is Error \"Failed to create the database. {\"error_code\":\"DLI.1028\";\"error_msg\":\"Already reached t", @@ -4357,10 +4976,10 @@ "uri":"dli_03_0263.html", "node_id":"dli_03_0263.xml", "product_code":"dli", - "code":"211", + "code":"242", "des":"No, a global variable can only be used by the user who created it. 
Global variables can be used to simplify complex parameters. For example, long and difficult variables ", "doc_type":"usermanual", - "kw":"Can a Member Account Use Global Variables Created by Other Member Accounts?,Product Consultation,Use", + "kw":"Can a Member Account Use Global Variables Created by Other Member Accounts?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4374,14 +4993,35 @@ "title":"Can a Member Account Use Global Variables Created by Other Member Accounts?", "githuburl":"" }, + { + "uri":"dli_03_0222.html", + "node_id":"dli_03_0222.xml", + "product_code":"dli", + "code":"243", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Job Management", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Job Management", + "githuburl":"" + }, { "uri":"dli_03_0126.html", "node_id":"dli_03_0126.xml", "product_code":"dli", - "code":"212", + "code":"244", "des":"If you are suggested to perform following operations to run a large number of DLI jobs:Group the DLI jobs by type, and run each group on a queue.Alternatively, create IAM", "doc_type":"usermanual", - "kw":"How Do I Manage Tens of Thousands of Jobs Running on DLI?,Product Consultation,User Guide", + "kw":"How Do I Manage Tens of Thousands of Jobs Running on DLI?,Job Management,User Guide", "search_title":"", "metedata":[ { @@ -4399,10 +5039,10 @@ "uri":"dli_03_0162.html", "node_id":"dli_03_0162.xml", "product_code":"dli", - "code":"213", + "code":"245", "des":"The field names of tables that have been created cannot be changed.You can create a table, define new table fields, and migrate data from the old table to the new one.", 
"doc_type":"usermanual", - "kw":"How Do I Change the Name of a Field in a Created Table?,Product Consultation,User Guide", + "kw":"How Do I Change the Name of a Field in a Created Table?,Job Management,User Guide", "search_title":"", "metedata":[ { @@ -4416,14 +5056,35 @@ "title":"How Do I Change the Name of a Field in a Created Table?", "githuburl":"" }, + { + "uri":"dli_03_0261.html", + "node_id":"dli_03_0261.xml", + "product_code":"dli", + "code":"246", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Privacy and Security", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Privacy and Security", + "githuburl":"" + }, { "uri":"dli_03_0260.html", "node_id":"dli_03_0260.xml", "product_code":"dli", - "code":"214", + "code":"247", "des":"No. The spark.acls.enable configuration item is not used in DLI. The Apache Spark command injection vulnerability (CVE-2022-33891) does not exist in DLI.", "doc_type":"usermanual", - "kw":"Does DLI Have the Apache Spark Command Injection Vulnerability (CVE-2022-33891)?,Product Consultatio", + "kw":"Does DLI Have the Apache Spark Command Injection Vulnerability (CVE-2022-33891)?,Privacy and Securit", "search_title":"", "metedata":[ { @@ -4441,7 +5102,7 @@ "uri":"dli_03_0053.html", "node_id":"dli_03_0053.xml", "product_code":"dli", - "code":"215", + "code":"248", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Quota", @@ -4462,7 +5123,7 @@ "uri":"dli_03_0031.html", "node_id":"dli_03_0031.xml", "product_code":"dli", - "code":"216", + "code":"249", "des":"Log in to the management console.Click in the upper left corner and select Region and Project.Click (the My Quotas icon) in the upper right corner.The Service Quota pag", "doc_type":"usermanual", "kw":"How Do I View My Quotas?,Quota,User Guide", @@ -4483,7 +5144,7 @@ "uri":"dli_03_0032.html", "node_id":"dli_03_0032.xml", "product_code":"dli", - "code":"217", + "code":"250", "des":"The system does not support online quota adjustment. To increase a resource quota, dial the hotline or send an email to the customer service. We will process your applica", "doc_type":"usermanual", "kw":"How Do I Increase a Quota?,Quota,User Guide", @@ -4504,7 +5165,7 @@ "uri":"dli_03_0054.html", "node_id":"dli_03_0054.xml", "product_code":"dli", - "code":"218", + "code":"251", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Permission", @@ -4521,14 +5182,35 @@ "title":"Permission", "githuburl":"" }, + { + "uri":"dli_03_0223.html", + "node_id":"dli_03_0223.xml", + "product_code":"dli", + "code":"252", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Usage", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Usage", + "githuburl":"" + }, { "uri":"dli_03_0100.html", "node_id":"dli_03_0100.xml", "product_code":"dli", - "code":"219", + "code":"253", "des":"DLI has a comprehensive permission control mechanism and supports fine-grained authentication through Identity and Access Management (IAM). You can create policies in IAM", "doc_type":"usermanual", - "kw":"How Do I Manage Fine-Grained DLI Permissions?,Permission,User Guide", + "kw":"How Do I Manage Fine-Grained DLI Permissions?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4546,10 +5228,10 @@ "uri":"dli_03_0008.html", "node_id":"dli_03_0008.xml", "product_code":"dli", - "code":"220", + "code":"254", "des":"You cannot perform permission-related operations on the partition column of a partitioned table.However, when you grant the permission of any non-partition column in a pa", "doc_type":"usermanual", - "kw":"What Is Column Permission Granting of a DLI Partition Table?,Permission,User Guide", + "kw":"What Is Column Permission Granting of a DLI Partition Table?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4563,14 +5245,35 @@ "title":"What Is Column Permission Granting of a DLI Partition Table?", "githuburl":"" }, + { + "uri":"dli_03_0226.html", + "node_id":"dli_03_0226.xml", + "product_code":"dli", + "code":"255", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"O&M Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"O&M Guide", + "githuburl":"" + }, { "uri":"dli_03_0140.html", "node_id":"dli_03_0140.xml", "product_code":"dli", - "code":"221", + "code":"256", "des":"When you submit a job, a message is displayed indicating that the job fails to be submitted due to insufficient permission caused by arrears. In this case, you need to ch", "doc_type":"usermanual", - "kw":"Why Does My Account Have Insufficient Permissions Due to Arrears?,Permission,User Guide", + "kw":"Why Does My Account Have Insufficient Permissions Due to Arrears?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4588,7 +5291,7 @@ "uri":"dli_03_0195.html", "node_id":"dli_03_0195.xml", "product_code":"dli", - "code":"222", + "code":"257", "des":"When the user update an existing program package, the following error information is displayed:\"error_code\"*DLI.0003\",\"error_msg\":\"Permission denied for resource 'resourc", "doc_type":"usermanual", "kw":"Why Does the System Display a Message Indicating Insufficient Permissions When I Update a Program Pa", @@ -4609,10 +5312,10 @@ "uri":"dli_03_0227.html", "node_id":"dli_03_0227.xml", "product_code":"dli", - "code":"223", + "code":"258", "des":"When the SQL query statement is executed, the system displays a message indicating that the user does not have the permission to query resources.Error information: DLI.00", "doc_type":"usermanual", - "kw":"Why Is Error \"DLI.0003: Permission denied for resource...\" Reported When I Run a SQL Statement?,Perm", + "kw":"Why Is Error \"DLI.0003: Permission denied for resource...\" Reported When I Run a SQL Statement?,O&M ", "search_title":"", "metedata":[ { @@ -4630,10 
+5333,10 @@ "uri":"dli_03_0228.html", "node_id":"dli_03_0228.xml", "product_code":"dli", - "code":"224", + "code":"259", "des":"The table permission has been granted and verified. However, after a period of time, an error is reported indicating that the table query fails.There are two possible rea", "doc_type":"usermanual", - "kw":"Why Can't I Query Table Data After I've Been Granted Table Permissions?,Permission,User Guide", + "kw":"Why Can't I Query Table Data After I've Been Granted Table Permissions?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4651,7 +5354,7 @@ "uri":"dli_03_0057.html", "node_id":"dli_03_0057.xml", "product_code":"dli", - "code":"225", + "code":"260", "des":"If a table inherits database permissions, you do not need to regrant the inherited permissions to the table.When you grant permissions on a table on the console:If you se", "doc_type":"usermanual", "kw":"Will an Error Be Reported if the Inherited Permissions Are Regranted to a Table That Inherits Databa", @@ -4672,10 +5375,10 @@ "uri":"dli_03_0067.html", "node_id":"dli_03_0067.xml", "product_code":"dli", - "code":"226", + "code":"261", "des":"User A created Table1.User B created View1 based on Table1.After the Select Table permission on Table1 is granted to user C, user C fails to query View1.User B does not h", "doc_type":"usermanual", - "kw":"Why Can't I Query a View After I'm Granted the Select Table Permission on the View?,Permission,User ", + "kw":"Why Can't I Query a View After I'm Granted the Select Table Permission on the View?,O&M Guide,User G", "search_title":"", "metedata":[ { @@ -4693,7 +5396,7 @@ "uri":"dli_03_0049.html", "node_id":"dli_03_0049.xml", "product_code":"dli", - "code":"227", + "code":"262", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Queue", @@ -4710,14 +5413,35 @@ "title":"Queue", "githuburl":"" }, + { + "uri":"dli_03_0229.html", + "node_id":"dli_03_0229.xml", + "product_code":"dli", + "code":"263", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Usage", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Usage", + "githuburl":"" + }, { "uri":"dli_03_0109.html", "node_id":"dli_03_0109.xml", "product_code":"dli", - "code":"228", + "code":"264", "des":"Currently, you are not allowed to modify the description of a created queue. You can add the description when purchasing the queue.", "doc_type":"usermanual", - "kw":"Does the Description of a DLI Queue Can Be Modified?,Queue,User Guide", + "kw":"Does the Description of a DLI Queue Can Be Modified?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4735,10 +5459,10 @@ "uri":"dli_03_0166.html", "node_id":"dli_03_0166.xml", "product_code":"dli", - "code":"229", + "code":"265", "des":"Deleting a queue does not cause table data loss in your database.", "doc_type":"usermanual", - "kw":"Will Table Data in My Database Be Lost If I Delete a Queue?,Queue,User Guide", + "kw":"Will Table Data in My Database Be Lost If I Delete a Queue?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4756,10 +5480,10 @@ "uri":"dli_03_0170.html", "node_id":"dli_03_0170.xml", "product_code":"dli", - "code":"230", + "code":"266", "des":"You need to develop a mechanism to retry failed jobs. 
When a faulty queue is recovered, your application tries to submit the failed jobs to the queue again.", "doc_type":"usermanual", - "kw":"How Does DLI Ensure the Reliability of Spark Jobs When a Queue Is Abnormal?,Queue,User Guide", + "kw":"How Does DLI Ensure the Reliability of Spark Jobs When a Queue Is Abnormal?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4777,10 +5501,10 @@ "uri":"dli_03_0098.html", "node_id":"dli_03_0098.xml", "product_code":"dli", - "code":"231", + "code":"267", "des":"DLI allows you to subscribe to an SMN topic for failed jobs.Log in to the DLI console.In the navigation pane on the left, choose Queue Management.On the Queue Management ", "doc_type":"usermanual", - "kw":"How Do I Monitor Queue Exceptions?,Queue,User Guide", + "kw":"How Do I Monitor Queue Exceptions?,Usage,User Guide", "search_title":"", "metedata":[ { @@ -4794,14 +5518,35 @@ "title":"How Do I Monitor Queue Exceptions?", "githuburl":"" }, + { + "uri":"dli_03_0230.html", + "node_id":"dli_03_0230.xml", + "product_code":"dli", + "code":"268", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"O&M Guide", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"O&M Guide", + "githuburl":"" + }, { "uri":"dli_03_0095.html", "node_id":"dli_03_0095.xml", "product_code":"dli", - "code":"232", + "code":"269", "des":"To check the running status of the DLI queue and determine whether to run more jobs on that queue, you need to check the queue load.Search for Cloud Eye on the console.In", "doc_type":"usermanual", - "kw":"How Do I View DLI Queue Load?,Queue,User Guide", + "kw":"How Do I View DLI Queue Load?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4819,10 +5564,10 @@ "uri":"dli_03_0183.html", "node_id":"dli_03_0183.xml", "product_code":"dli", - "code":"233", + "code":"270", "des":"You need to check the large number of jobs in the Submitting and Running states on the queue.Use Cloud Eye to view jobs in different states on the queue. The procedure is", "doc_type":"usermanual", - "kw":"How Do I Determine Whether There Are Too Many Jobs in the Current Queue?,Queue,User Guide", + "kw":"How Do I Determine Whether There Are Too Many Jobs in the Current Queue?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4840,10 +5585,10 @@ "uri":"dli_03_0065.html", "node_id":"dli_03_0065.xml", "product_code":"dli", - "code":"234", + "code":"271", "des":"Currently, DLI provides two types of queues, For SQL and For general use. SQL queues are used to run SQL jobs. 
General-use queues are compatible with Spark queues of earl", "doc_type":"usermanual", - "kw":"How Do I Switch an Earlier-Version Spark Queue to a General-Purpose Queue?,Queue,User Guide", + "kw":"How Do I Switch an Earlier-Version Spark Queue to a General-Purpose Queue?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4861,10 +5606,10 @@ "uri":"dli_03_0193.html", "node_id":"dli_03_0193.xml", "product_code":"dli", - "code":"235", + "code":"272", "des":"DLI queues do not use resources or bandwidth when no job is running. In this case, the running status of DLI queues is not displayed on CES.", "doc_type":"usermanual", - "kw":"Why Cannot I View the Resource Running Status of DLI Queues on Cloud Eye?,Queue,User Guide", + "kw":"Why Cannot I View the Resource Running Status of DLI Queues on Cloud Eye?,O&M Guide,User Guide", "search_title":"", "metedata":[ { @@ -4882,10 +5627,10 @@ "uri":"dli_03_0088.html", "node_id":"dli_03_0088.xml", "product_code":"dli", - "code":"236", + "code":"273", "des":"In DLI, 64 CU = 64 cores and 256 GB memory.In a Spark job, if the driver occupies 4 cores and 16 GB memory, the executor can occupy 60 cores and 240 GB memory.", "doc_type":"usermanual", - "kw":"How Do I Allocate Queue Resources for Running Spark Jobs If I Have Purchased 64 CUs?,Queue,User Guid", + "kw":"How Do I Allocate Queue Resources for Running Spark Jobs If I Have Purchased 64 CUs?,O&M Guide,User ", "search_title":"", "metedata":[ { @@ -4903,7 +5648,7 @@ "uri":"dli_03_0159.html", "node_id":"dli_03_0159.xml", "product_code":"dli", - "code":"237", + "code":"274", "des":"Queue plans create failed. The plan xxx target cu is out of quota is displayed when you create a scheduled scaling task.The CU quota of the current account is insufficien", "doc_type":"usermanual", "kw":"Why Is Error \"Queue plans create failed. 
The plan xxx target cu is out of quota\" Reported When I Sch", @@ -4924,7 +5669,7 @@ "uri":"dli_03_0171.html", "node_id":"dli_03_0171.xml", "product_code":"dli", - "code":"238", + "code":"275", "des":"After a SQL job was submitted to the default queue, the job runs abnormally. The job log reported that the execution timed out. The exception logs are as follows:[ERROR] ", "doc_type":"usermanual", "kw":"Why Is a Timeout Exception Reported When a DLI SQL Statement Fails to Be Executed on the Default Que", @@ -4941,11 +5686,50 @@ "title":"Why Is a Timeout Exception Reported When a DLI SQL Statement Fails to Be Executed on the Default Queue?", "githuburl":"" }, + { + "uri":"dli_03_0276.html", + "node_id":"dli_03_0276.xml", + "product_code":"dli", + "code":"276", + "des":"In daily big data analysis work, it is important to allocate and manage compute resources properly to provide a good job execution environment.You can allocate resources ", + "doc_type":"usermanual", + "kw":"How Can I Check the Actual and Used CUs for an Elastic Resource Pool as Well as the Required CUs for", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "documenttype":"usermanual" + } + ], + "title":"How Can I Check the Actual and Used CUs for an Elastic Resource Pool as Well as the Required CUs for a Job?", + "githuburl":"" + }, { "uri":"dli_03_0022.html", "node_id":"dli_03_0022.xml", "product_code":"dli", - "code":"239", + "code":"277", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Datasource Connections", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Datasource Connections", + "githuburl":"" + }, + { + "uri":"dli_03_0110.html", + "node_id":"dli_03_0110.xml", + "product_code":"dli", + "code":"278", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Datasource Connections", @@ -4966,7 +5750,7 @@ "uri":"dli_03_0128.html", "node_id":"dli_03_0128.xml", "product_code":"dli", - "code":"240", + "code":"279", "des":"You need to create a VPC peering connection to enable network connectivity. Take MRS as an example. If DLI and MRS clusters are in the same VPC, and the security group is", "doc_type":"usermanual", "kw":"Why Do I Need to Create a VPC Peering Connection for an Enhanced Datasource Connection?,Datasource C", @@ -4987,7 +5771,7 @@ "uri":"dli_03_0237.html", "node_id":"dli_03_0237.xml", "product_code":"dli", - "code":"241", + "code":"280", "des":"An enhanced datasource connection failed to pass the network connectivity test. Datasource connection cannot be bound to a queue. The following error information is displ", "doc_type":"usermanual", "kw":"Failed to Bind a Queue to an Enhanced Datasource Connection,Datasource Connections,User Guide", @@ -5008,7 +5792,7 @@ "uri":"dli_03_0238.html", "node_id":"dli_03_0238.xml", "product_code":"dli", - "code":"242", + "code":"281", "des":"The outbound rule had been configured for the security group of the queue associated with the enhanced datasource connection. 
The datasource authentication used a passwor", "doc_type":"usermanual", "kw":"DLI Failed to Connect to GaussDB(DWS) Through an Enhanced Datasource Connection,Datasource Connectio", @@ -5029,7 +5813,7 @@ "uri":"dli_03_0179.html", "node_id":"dli_03_0179.xml", "product_code":"dli", - "code":"243", + "code":"282", "des":"A datasource connection is created and bound to a queue. The connectivity test fails and the following error information is displayed:failed to connect to specified addre", "doc_type":"usermanual", "kw":"How Do I Do if the Datasource Connection Is Created But the Network Connectivity Test Fails?,Datasou", @@ -5050,7 +5834,7 @@ "uri":"dli_03_0186.html", "node_id":"dli_03_0186.xml", "product_code":"dli", - "code":"244", + "code":"283", "des":"Configuring the Connection Between a DLI Queue and a Data Source in a Private NetworkIf your DLI job needs to connect to a data source, for example, MRS, RDS, CSS, Kafka,", "doc_type":"usermanual", "kw":"How Do I Configure the Network Between a DLI Queue and a Data Source?,Datasource Connections,User Gu", @@ -5071,7 +5855,7 @@ "uri":"dli_03_0257.html", "node_id":"dli_03_0257.xml", "product_code":"dli", - "code":"245", + "code":"284", "des":"The possible causes and solutions are as follows:If you have created a queue, do not bind it to a datasource connection immediately. Wait for 5 to 10 minutes. 
After the c", "doc_type":"usermanual", "kw":"What Can I Do If a Datasource Connection Is Stuck in Creating State When I Try to Bind a Queue to It", @@ -5092,7 +5876,7 @@ "uri":"dli_03_0259.html", "node_id":"dli_03_0259.xml", "product_code":"dli", - "code":"246", + "code":"285", "des":"DLI enhanced datasource connection uses VPC peering to directly connect the VPC networks of the desired data sources for point-to-point data exchanges.", "doc_type":"usermanual", "kw":"How Do I Connect DLI to Data Sources?,Datasource Connections,User Guide", @@ -5109,14 +5893,35 @@ "title":"How Do I Connect DLI to Data Sources?", "githuburl":"" }, + { + "uri":"dli_03_0112.html", + "node_id":"dli_03_0112.xml", + "product_code":"dli", + "code":"286", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Cross-Source Analysis", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Cross-Source Analysis", + "githuburl":"" + }, { "uri":"dli_03_0011.html", "node_id":"dli_03_0011.xml", "product_code":"dli", - "code":"247", + "code":"287", "des":"To perform query on data stored on services rather than DLI, perform the following steps:Assume that the data to be queried is stored on multiple services (for example, O", "doc_type":"usermanual", - "kw":"How Can I Perform Query on Data Stored on Services Rather Than DLI?,Datasource Connections,User Guid", + "kw":"How Can I Perform Query on Data Stored on Services Rather Than DLI?,Cross-Source Analysis,User Guide", "search_title":"", "metedata":[ { @@ -5134,10 +5939,10 @@ "uri":"dli_03_0085.html", "node_id":"dli_03_0085.xml", "product_code":"dli", - "code":"248", + "code":"288", "des":"Connect 
VPCs in different regions.Create an enhanced datasource connection on DLI and bind it to a queue.Add a DLI route.", "doc_type":"usermanual", - "kw":"How Can I Access Data Across Regions?,Datasource Connections,User Guide", + "kw":"How Can I Access Data Across Regions?,Cross-Source Analysis,User Guide", "search_title":"", "metedata":[ { @@ -5155,7 +5960,7 @@ "uri":"dli_03_0028.html", "node_id":"dli_03_0028.xml", "product_code":"dli", - "code":"249", + "code":"289", "des":"When data is inserted into DLI, set the ID field to NULL.", "doc_type":"usermanual", "kw":"How Do I Set the Auto-increment Primary Key or Other Fields That Are Automatically Filled in the RDS", @@ -5172,11 +5977,32 @@ "title":"How Do I Set the Auto-increment Primary Key or Other Fields That Are Automatically Filled in the RDS Table When Creating a DLI and Associating It with the RDS Table?", "githuburl":"" }, + { + "uri":"dli_03_0256.html", + "node_id":"dli_03_0256.xml", + "product_code":"dli", + "code":"290", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual", + "kw":"Datasource Connection O&M", + "search_title":"", + "metedata":[ + { + "prodname":"dli", + "opensource":"true", + "documenttype":"usermanual", + "IsMulti":"No", + "IsBot":"Yes" + } + ], + "title":"Datasource Connection O&M", + "githuburl":"" + }, { "uri":"dli_03_0047.html", "node_id":"dli_03_0047.xml", "product_code":"dli", - "code":"250", + "code":"291", "des":"Possible CausesThe network connectivity is abnormal. 
Check whether the security group is correctly selected and whether the VPC is correctly configured.The network connec", "doc_type":"usermanual", "kw":"Why Is the Error Message \"communication link failure\" Displayed When I Use a Newly Activated Datasou", @@ -5197,7 +6023,7 @@ "uri":"dli_03_0080.html", "node_id":"dli_03_0080.xml", "product_code":"dli", - "code":"251", + "code":"292", "des":"The cluster host information is not added to the datasource connection. As a result, the KRB authentication fails, the connection times out, and no error is recorded in l", "doc_type":"usermanual", "kw":"Connection Times Out During MRS HBase Datasource Connection, and No Error Is Recorded in Logs,Dataso", @@ -5218,10 +6044,10 @@ "uri":"dli_03_0111.html", "node_id":"dli_03_0111.xml", "product_code":"dli", - "code":"252", + "code":"293", "des":"When you create a VPC peering connection for the datasource connection, the following error information is displayed:Before you create a datasource connection, check whet", "doc_type":"usermanual", - "kw":"Why Can't I Find the Subnet When Creating a DLI Datasource Connection?,Datasource Connections,User G", + "kw":"Why Can't I Find the Subnet When Creating a DLI Datasource Connection?,Datasource Connection O&M,Use", "search_title":"", "metedata":[ { @@ -5239,7 +6065,7 @@ "uri":"dli_03_0239.html", "node_id":"dli_03_0239.xml", "product_code":"dli", - "code":"253", + "code":"294", "des":"A datasource RDS table was created in the DataArts Studio, and the insert overwrite statement was executed to write data into RDS. 
DLI.0999: BatchUpdateException: Incorre", "doc_type":"usermanual", "kw":"Error Message \"Incorrect string value\" Is Displayed When insert overwrite Is Executed on a Datasourc", @@ -5260,7 +6086,7 @@ "uri":"dli_03_0250.html", "node_id":"dli_03_0250.xml", "product_code":"dli", - "code":"254", + "code":"295", "des":"The system failed to create a datasource RDS table, and null pointer error was reported.The following table creation statement was used:The RDS database is in a PostGre c", "doc_type":"usermanual", "kw":"Null Pointer Error Is Displayed When the System Creates a Datasource RDS Table,Datasource Connection", @@ -5281,7 +6107,7 @@ "uri":"dli_03_0251.html", "node_id":"dli_03_0251.xml", "product_code":"dli", - "code":"255", + "code":"296", "des":"The system failed to execute insert overwrite on the datasource GaussDB(DWS) table, and org.postgresql.util.PSQLException: ERROR: tuple concurrently updated was displayed", "doc_type":"usermanual", "kw":"Error Message \"org.postgresql.util.PSQLException: ERROR: tuple concurrently updated\" Is Displayed Wh", @@ -5302,7 +6128,7 @@ "uri":"dli_03_0252.html", "node_id":"dli_03_0252.xml", "product_code":"dli", - "code":"256", + "code":"297", "des":"A datasource table was used to import data to a CloudTable HBase table. This HBase table contains a column family and a rowkey for 100 million simulating data records. Th", "doc_type":"usermanual", "kw":"RegionTooBusyException Is Reported When Data Is Imported to a CloudTable HBase Table Through a Datas", @@ -5323,7 +6149,7 @@ "uri":"dli_03_0253.html", "node_id":"dli_03_0253.xml", "product_code":"dli", - "code":"257", + "code":"298", "des":"A table was created on GaussDB(DWS) and then a datasource connection was created on DLI to read and write data. 
An error message was displayed during data writing, indica", "doc_type":"usermanual", "kw":"A Null Value Is Written Into a Non-Null Field When a DLI Datasource Connection Is Used to Connect to", @@ -5344,7 +6170,7 @@ "uri":"dli_03_0254.html", "node_id":"dli_03_0254.xml", "product_code":"dli", - "code":"258", + "code":"299", "des":"A datasource GaussDB(DWS) table and the datasource connection were created in DLI, and the schema of the source table in GaussDB(DWS) were updated. During the job executi", "doc_type":"usermanual", "kw":"An Insert Operation Failed After the Schema of the GaussDB(DWS) Source Table Is Updated,Datasource C", @@ -5365,7 +6191,7 @@ "uri":"dli_03_0056.html", "node_id":"dli_03_0056.xml", "product_code":"dli", - "code":"259", + "code":"300", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"APIs", @@ -5386,7 +6212,7 @@ "uri":"dli_03_0060.html", "node_id":"dli_03_0060.xml", "product_code":"dli", - "code":"260", + "code":"301", "des":"In the REST API provided by DLI, the request header can be added to the request URI, for example, Content-Type.Content-Type indicates the request body type or format. 
The", "doc_type":"usermanual", "kw":"Why Is Error \"unsupported media Type\" Reported When I Subimt a SQL Job?,APIs,User Guide", @@ -5407,7 +6233,7 @@ "uri":"dli_03_0125.html", "node_id":"dli_03_0125.xml", "product_code":"dli", - "code":"261", + "code":"302", "des":"When different IAM users call an API under the same enterprise project in the same region, the project ID is the same.", "doc_type":"usermanual", "kw":"Is the Project ID Fixed when Different IAM Users Call an API?,APIs,User Guide", @@ -5428,7 +6254,7 @@ "uri":"dli_03_0178.html", "node_id":"dli_03_0178.xml", "product_code":"dli", - "code":"262", + "code":"303", "des":"When the API call for submitting a SQL job times out, and the following error information is displayed:There are currently no resources tracked in the state, so there is ", "doc_type":"usermanual", "kw":"What Can I Do If an Error Is Reported When the Execution of the API for Creating a SQL Job Times Out", @@ -5449,7 +6275,7 @@ "uri":"dli_03_0058.html", "node_id":"dli_03_0058.xml", "product_code":"dli", - "code":"263", + "code":"304", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"SDKs", @@ -5470,7 +6296,7 @@ "uri":"dli_03_0073.html", "node_id":"dli_03_0073.xml", "product_code":"dli", - "code":"264", + "code":"305", "des":"When you query the SQL job results using SDK, the system checks the job status when the job is submitted. The timeout interval set in the system is 300s. 
If the job is no", "doc_type":"usermanual", "kw":"How Do I Set the Timeout Duration for Querying SQL Job Results Using SDK?,SDKs,User Guide", @@ -5491,7 +6317,7 @@ "uri":"dli_03_0255.html", "node_id":"dli_03_0255.xml", "product_code":"dli", - "code":"265", + "code":"306", "des":"Run the ping command to check whether dli.xxx can be accessed.If dli.xxx can be accessed, check whether DNS resolution is correctly configured.If dli.xxx can be accessed,", "doc_type":"usermanual", "kw":"How Do I Handle the dli.xxx,unable to resolve host address Error?,SDKs,User Guide", @@ -5512,7 +6338,7 @@ "uri":"dli_01_00006.html", "node_id":"dli_01_00006.xml", "product_code":"dli", - "code":"266", + "code":"307", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual", "kw":"Change History,User Guide", diff --git a/docs/dli/umn/CLASS.TXT.json b/docs/dli/umn/CLASS.TXT.json index fb44d47c..082ba8e3 100644 --- a/docs/dli/umn/CLASS.TXT.json +++ b/docs/dli/umn/CLASS.TXT.json @@ -153,7 +153,7 @@ "code":"17" }, { - "desc":"SQL jobs allow you to execute SQL statements entered in the SQL job editing window, import data, and export data.SQL job management provides the following functions:Searc", + "desc":"SQL jobs allow you to execute SQL statements in the SQL job editing window, import data, and export data.SQL job management provides the following functions:Searching for", "product_code":"dli", "title":"SQL Job Management", "uri":"dli_01_0017.html", @@ -234,7 +234,7 @@ "code":"26" }, { - "desc":"After creating a job, you can view the job details to learn about the following information:Viewing Job DetailsChecking the Job Monitoring InformationViewing the Task Lis", + "desc":"After creating a job, you can view the job details to learn about the following 
information:Viewing Job DetailsChecking Job Monitoring InformationViewing the Task List of", "product_code":"dli", "title":"Flink Job Details", "uri":"dli_01_0462.html", @@ -341,6 +341,15 @@ "p_code":"34", "code":"38" }, + { + "desc":"You can create enterprise projects matching the organizational structure of your enterprises to centrally manage cloud resources across regions by project. Then you can c", + "product_code":"dli", + "title":"Allocating a Queue to an Enterprise Project", + "uri":"dli_01_0565.html", + "doc_type":"usermanual", + "p_code":"34", + "code":"39" + }, { "desc":"If the CIDR block of the DLI queue conflicts with that of the user data source, you can change the CIDR block of the queue.If the queue whose CIDR block is to be modified", "product_code":"dli", @@ -348,7 +357,7 @@ "uri":"dli_01_0443.html", "doc_type":"usermanual", "p_code":"34", - "code":"39" + "code":"40" }, { "desc":"Elastic scaling can be performed for a newly created queue only when there were jobs running in this queue.Queues with 16 CUs do not support scale-out or scale-in.Queues ", @@ -357,7 +366,7 @@ "uri":"dli_01_0487.html", "doc_type":"usermanual", "p_code":"34", - "code":"40" + "code":"41" }, { "desc":"When services are busy, you might need to use more compute resources to process services in a period. After this period, you do not require the same amount of resources. ", @@ -366,7 +375,7 @@ "uri":"dli_01_0488.html", "doc_type":"usermanual", "p_code":"34", - "code":"41" + "code":"42" }, { "desc":"It can be used to test the connectivity between the DLI queue and the peer IP address specified by the user in common scenarios, or the connectivity between the DLI queue", @@ -375,7 +384,7 @@ "uri":"dli_01_0489.html", "doc_type":"usermanual", "p_code":"34", - "code":"42" + "code":"43" }, { "desc":"Once you have created an SMN topic, you can easily subscribe to it by going to the Topic Management > Topics page of the SMN console. 
You can choose to receive notificati", @@ -384,7 +393,7 @@ "uri":"dli_01_0421.html", "doc_type":"usermanual", "p_code":"34", - "code":"43" + "code":"44" }, { "desc":"A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. A tag consists of a tag key and a t", @@ -393,7 +402,160 @@ "uri":"dli_01_0022.html", "doc_type":"usermanual", "p_code":"34", - "code":"44" + "code":"45" + }, + { + "desc":"DLI allows you to set properties for queues.You can set Spark driver parameters to improve the scheduling efficiency of queues.This section describes how to set queue pro", + "product_code":"dli", + "title":"Setting Queue Properties", + "uri":"dli_01_0563.html", + "doc_type":"usermanual", + "p_code":"34", + "code":"46" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Elastic Resource Pool", + "uri":"dli_01_0508.html", + "doc_type":"usermanual", + "p_code":"", + "code":"47" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Before You Start", + "uri":"dli_01_0528.html", + "doc_type":"usermanual", + "p_code":"47", + "code":"48" + }, + { + "desc":"An elastic resource pool provides compute resources (CPU and memory) for running DLI jobs. The unit is CU. 
One CU contains one CPU and 4 GB memory.You can create multiple", + "product_code":"dli", + "title":"Overview", + "uri":"dli_01_0504.html", + "doc_type":"usermanual", + "p_code":"48", + "code":"49" + }, + { + "desc":"This section walks you through the procedure of adding a queue to an elastic resource pool and binding an enhanced datasource connection to the elastic resource pool.Proc", + "product_code":"dli", + "title":"Creating an Elastic Resource Pool and Running a Job", + "uri":"dli_01_0515.html", + "doc_type":"usermanual", + "p_code":"48", + "code":"50" + }, + { + "desc":"A company has multiple departments that perform data analysis in different periods during a day.Department A requires a large number of compute resources from 00:00 a.m. ", + "product_code":"dli", + "title":"Configuring Scaling Policies for Queues", + "uri":"dli_01_0516.html", + "doc_type":"usermanual", + "p_code":"48", + "code":"51" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Regular Operations", + "uri":"dli_01_0529.html", + "doc_type":"usermanual", + "p_code":"47", + "code":"52" + }, + { + "desc":"For details about the application scenarios of elastic resource pools, see the Overview. 
This section describes how to create an elastic resource pool.If you use an enhan", + "product_code":"dli", + "title":"Creating an Elastic Resource Pool", + "uri":"dli_01_0505.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"53" + }, + { + "desc":"Administrators can assign permissions of different operation scopes to users for each elastic resource pool.The administrator and elastic resource pool owner have all per", + "product_code":"dli", + "title":"Managing Permissions", + "uri":"dli_01_0526.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"54" + }, + { + "desc":"You can add one or more queues to an elastic resource pool to run jobs. This section describes how to add a queue to an elastic resource pool.Automatic scaling of an elas", + "product_code":"dli", + "title":"Adding a Queue", + "uri":"dli_01_0509.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"55" + }, + { + "desc":"If you want a queue to use resources in an elastic resource pool, bind the queue to the pool.You can click Associate Queue on the Resource Pool page to bind a queue to an", + "product_code":"dli", + "title":"Binding a Queue", + "uri":"dli_01_0530.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"56" + }, + { + "desc":"Multiple queues can be added to an elastic resource pool. For details about how to add a queue, see Adding a Queue. 
You can configure the number of CUs you want based on ", + "product_code":"dli", + "title":"Managing Queues", + "uri":"dli_01_0506.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"57" + }, + { + "desc":"CU settings are used to control the maximum and minimum CU ranges for elastic resource pools to avoid unlimited resource scaling.For example, an elastic resource pool has", + "product_code":"dli", + "title":"Setting CUs", + "uri":"dli_01_0507.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"58" + }, + { + "desc":"If CUs of a yearly/monthly elastic resource pool cannot meet your service requirements, you can modify the CUs. In this case, you will be charged based on the number of C", + "product_code":"dli", + "title":"Modifying Specifications", + "uri":"dli_01_0524.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"59" + }, + { + "desc":"A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. A tag consists of a tag key and a t", + "product_code":"dli", + "title":"Managing Tags", + "uri":"dli_01_0525.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"60" + }, + { + "desc":"If you added a queue to or deleted one from an elastic resource pool, or you scaled an added queue, the CU quantity of the elastic resource pool may be changed. You can v", + "product_code":"dli", + "title":"Viewing Scaling History", + "uri":"dli_01_0532.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"61" + }, + { + "desc":"You can create enterprise projects matching the organizational structure of your enterprises to centrally manage cloud resources across regions by project. 
Then you can c", + "product_code":"dli", + "title":"Allocating to an Enterprise Project", + "uri":"dli_01_0566.html", + "doc_type":"usermanual", + "p_code":"52", + "code":"62" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -402,7 +564,7 @@ "uri":"dli_01_0004.html", "doc_type":"usermanual", "p_code":"", - "code":"45" + "code":"63" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -410,8 +572,8 @@ "title":"Databases and Tables", "uri":"dli_01_0390.html", "doc_type":"usermanual", - "p_code":"45", - "code":"46" + "p_code":"63", + "code":"64" }, { "desc":"DLI database and table management provide the following functions:Database Permission ManagementTable Permission ManagementCreating a Database or a TableDeleting a Databa", @@ -419,8 +581,8 @@ "title":"Overview", "uri":"dli_01_0228.html", "doc_type":"usermanual", - "p_code":"46", - "code":"47" + "p_code":"64", + "code":"65" }, { "desc":"By setting permissions, you can assign varying database permissions to different users.The administrator and database owner have all permissions, which cannot be set or m", @@ -428,8 +590,8 @@ "title":"Managing Database Permissions", "uri":"dli_01_0447.html", "doc_type":"usermanual", - "p_code":"46", - "code":"48" + "p_code":"64", + "code":"66" }, { "desc":"By setting permissions, you can assign varying table permissions to different users.The administrator and table owner have all permissions, which cannot be set or modifie", @@ -437,8 +599,8 @@ "title":"Managing Table Permissions", "uri":"dli_01_0448.html", "doc_type":"usermanual", - 
"p_code":"46", - "code":"49" + "p_code":"64", + "code":"67" }, { "desc":"A database, built on the computer storage device, is a data warehouse where data is organized, stored, and managed based on its structure.The table is an important part o", @@ -446,8 +608,8 @@ "title":"Creating a Database or a Table", "uri":"dli_01_0005.html", "doc_type":"usermanual", - "p_code":"46", - "code":"50" + "p_code":"64", + "code":"68" }, { "desc":"You can delete unnecessary databases and tables based on actual conditions.You are not allowed to delete databases or tables that are being used for running jobs.The admi", @@ -455,8 +617,8 @@ "title":"Deleting a Database or a Table", "uri":"dli_01_0011.html", "doc_type":"usermanual", - "p_code":"46", - "code":"51" + "p_code":"64", + "code":"69" }, { "desc":"During actual use, developers create databases and tables and submit them to test personnel for testing. After the test is complete, the databases and tables are transfer", @@ -464,8 +626,8 @@ "title":"Modifying the Owners of Databases and Tables", "uri":"dli_01_0376.html", "doc_type":"usermanual", - "p_code":"46", - "code":"52" + "p_code":"64", + "code":"70" }, { "desc":"You can import data from OBS to a table created in DLI.Only one path can be specified during data import. The path cannot contain commas (,).To import data in CSV format ", @@ -473,8 +635,8 @@ "title":"Importing Data to the Table", "uri":"dli_01_0253.html", "doc_type":"usermanual", - "p_code":"46", - "code":"53" + "p_code":"64", + "code":"71" }, { "desc":"You can export data from a DLI table to OBS. During the export, a folder is created in OBS or the content in the existing folder is overwritten.The exported file can be i", @@ -482,8 +644,8 @@ "title":"Exporting Data from DLI to OBS", "uri":"dli_01_0010.html", "doc_type":"usermanual", - "p_code":"46", - "code":"54" + "p_code":"64", + "code":"72" }, { "desc":"Metadata is used to define data types. 
It describes information about the data, including the source, size, format, and other data features. In database fields, metadata ", @@ -491,8 +653,8 @@ "title":"Viewing Metadata", "uri":"dli_01_0008.html", "doc_type":"usermanual", - "p_code":"46", - "code":"55" + "p_code":"64", + "code":"73" }, { "desc":"The Preview page displays the first 10 records in the table.You can preview data on either the Data Management page or the SQL Editor page.To preview data on the Data Man", @@ -500,8 +662,8 @@ "title":"Previewing Data", "uri":"dli_01_0007.html", "doc_type":"usermanual", - "p_code":"46", - "code":"56" + "p_code":"64", + "code":"74" }, { "desc":"A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. A tag consists of a tag key and a t", @@ -509,8 +671,8 @@ "title":"Managing Tags", "uri":"dli_01_0552.html", "doc_type":"usermanual", - "p_code":"46", - "code":"57" + "p_code":"64", + "code":"75" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -518,8 +680,8 @@ "title":"Package Management", "uri":"dli_01_0366.html", "doc_type":"usermanual", - "p_code":"45", - "code":"58" + "p_code":"63", + "code":"76" }, { "desc":"Package management provides the following functions:Managing Package PermissionsCreating a PackageDeleting a PackageYou can delete program packages in batches.You can del", @@ -527,8 +689,8 @@ "title":"Overview", "uri":"dli_01_0407.html", "doc_type":"usermanual", - "p_code":"58", - "code":"59" + "p_code":"76", + "code":"77" }, { "desc":"You can isolate package groups or packages allocated to different users by setting permissions to ensure data query performance.The administrator and the owner of a packa", @@ -536,8 +698,8 @@ "title":"Managing Permissions on Packages and Package Groups", "uri":"dli_01_0477.html", "doc_type":"usermanual", - "p_code":"58", - "code":"60" + "p_code":"76", + "code":"78" }, { "desc":"DLI allows you to submit program packages in batches to the general-use queue for running.If you need to update a package, you can use the same package or file to upload ", @@ -545,8 +707,8 @@ "title":"Creating a Package", "uri":"dli_01_0367.html", "doc_type":"usermanual", - "p_code":"58", - "code":"61" + "p_code":"76", + "code":"79" }, { "desc":"You can delete a package based on actual conditions.On the left of the management console, choose Data Management > Package Management.Click Delete in the Operation colum", @@ -554,8 +716,8 @@ "title":"Deleting a Package", "uri":"dli_01_0369.html", "doc_type":"usermanual", - "p_code":"58", - "code":"62" + "p_code":"76", + "code":"80" }, { "desc":"To change the owner of a package, click More > Modify Owner in the Operation column of a package on the Package Management page.If the package has been grouped, you can m", @@ -563,8 +725,8 @@ "title":"Modifying the Owner", "uri":"dli_01_0478.html", 
"doc_type":"usermanual", - "p_code":"58", - "code":"63" + "p_code":"76", + "code":"81" }, { "desc":"DLI built-in dependencies are provided by the platform by default. In case of conflicts, you do not need to upload them when packaging JAR packages of Spark or Flink Jar ", @@ -572,8 +734,8 @@ "title":"Built-in Dependencies", "uri":"dli_01_0397.html", "doc_type":"usermanual", - "p_code":"58", - "code":"64" + "p_code":"76", + "code":"82" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -582,7 +744,7 @@ "uri":"dli_01_0379.html", "doc_type":"usermanual", "p_code":"", - "code":"65" + "code":"83" }, { "desc":"To facilitate SQL operation execution, DLI allows you to customize query templates or save the SQL statements in use as templates. After templates are saved, you do not n", @@ -590,8 +752,8 @@ "title":"Managing SQL Templates", "uri":"dli_01_0021.html", "doc_type":"usermanual", - "p_code":"65", - "code":"66" + "p_code":"83", + "code":"84" }, { "desc":"Flink templates include sample templates and custom templates. 
You can modify an existing sample template to meet the actual job logic requirements and save time for edit", @@ -599,8 +761,8 @@ "title":"Managing Flink Templates", "uri":"dli_01_0464.html", "doc_type":"usermanual", - "p_code":"65", - "code":"67" + "p_code":"83", + "code":"85" }, { "desc":"You can modify a sample template to meet the Spark job requirements, saving time for editing SQL statements.Currently, the cloud platform does not provide preset Spark te", @@ -608,8 +770,8 @@ "title":"Managing Spark SQL Templates", "uri":"dli_01_0551.html", "doc_type":"usermanual", - "p_code":"65", - "code":"68" + "p_code":"83", + "code":"86" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -617,8 +779,8 @@ "title":"Appendix", "uri":"dli_01_05110.html", "doc_type":"usermanual", - "p_code":"65", - "code":"69" + "p_code":"83", + "code":"87" }, { "desc":"TPC-H is a test set developed by the Transaction Processing Performance Council (TPC) to simulate decision-making support applications. It is widely used in academia and ", @@ -626,8 +788,8 @@ "title":"TPC-H Sample Data in the SQL Template", "uri":"dli_01_05111.html", "doc_type":"usermanual", - "p_code":"69", - "code":"70" + "p_code":"87", + "code":"88" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -636,7 +798,7 @@ "uri":"dli_01_0426.html", "doc_type":"usermanual", "p_code":"", - "code":"71" + "code":"89" }, { "desc":"In cross-source data analysis scenarios, DLI needs to connect to external data sources. 
However, due to the different VPCs between the data source and DLI, the network ca", @@ -644,8 +806,8 @@ "title":"Overview", "uri":"dli_01_0003.html", "doc_type":"usermanual", - "p_code":"71", - "code":"72" + "p_code":"89", + "code":"90" }, { "desc":"If DLI needs to access external data sources, you need to establish enhanced datasource connections to enable the network between DLI and the data sources, and then devel", @@ -653,8 +815,8 @@ "title":"Cross-Source Analysis Development Methods", "uri":"dli_01_0410.html", "doc_type":"usermanual", - "p_code":"71", - "code":"73" + "p_code":"89", + "code":"91" }, { "desc":"Create an enhanced datasource connection for DLI to access, import, query, and analyze data of other data sources.For example, to connect DLI to the MRS, RDS, CSS, Kafka,", @@ -662,8 +824,8 @@ "title":"Creating an Enhanced Datasource Connection", "uri":"dli_01_0006.html", "doc_type":"usermanual", - "p_code":"71", - "code":"74" + "p_code":"89", + "code":"92" }, { "desc":"Delete an enhanced datasource connection that is no longer used on the console.Log in to the DLI management console.In the left navigation pane, choose Datasource Connect", @@ -671,8 +833,8 @@ "title":"Deleting an Enhanced Datasource Connection", "uri":"dli_01_0553.html", "doc_type":"usermanual", - "p_code":"71", - "code":"75" + "p_code":"89", + "code":"93" }, { "desc":"Host information is the mapping between host IP addresses and domain names. 
After you configure host information, jobs can only use the configured domain names to access ", @@ -680,8 +842,8 @@ "title":"Modifying Host Information", "uri":"dli_01_0013.html", "doc_type":"usermanual", - "p_code":"71", - "code":"76" + "p_code":"89", + "code":"94" }, { "desc":"The CIDR block of the DLI queue that is bound with a datasource connection cannot overlap with that of the data source.The default queue cannot be bound with a connection", @@ -689,8 +851,8 @@ "title":"Binding and Unbinding a Queue", "uri":"dli_01_0514.html", "doc_type":"usermanual", - "p_code":"71", - "code":"77" + "p_code":"89", + "code":"95" }, { "desc":"A route is configured with the destination, next hop type, and next hop to determine where the network traffic is directed. Routes are classified into system routes and c", @@ -698,8 +860,8 @@ "title":"Adding a Route", "uri":"dli_01_0014.html", "doc_type":"usermanual", - "p_code":"71", - "code":"78" + "p_code":"89", + "code":"96" }, { "desc":"Delete a route that is no longer used.A custom route table cannot be deleted if it is associated with a subnet.Log in to the DLI management console.In the left navigation", @@ -707,8 +869,8 @@ "title":"Deleting a Route", "uri":"dli_01_0556.html", "doc_type":"usermanual", - "p_code":"71", - "code":"79" + "p_code":"89", + "code":"97" }, { "desc":"Enhanced connections support user authorization by project. After authorization, users in the project have the permission to perform operations on the enhanced connection", @@ -716,8 +878,8 @@ "title":"Enhanced Connection Permission Management", "uri":"dli_01_0018.html", "doc_type":"usermanual", - "p_code":"71", - "code":"80" + "p_code":"89", + "code":"98" }, { "desc":"A tag is a key-value pair customized by users and used to identify cloud resources. It helps users to classify and search for cloud resources. 
A tag consists of a tag key", @@ -725,8 +887,8 @@ "title":"Enhanced Datasource Connection Tag Management", "uri":"dli_01_0019.html", "doc_type":"usermanual", - "p_code":"71", - "code":"81" + "p_code":"89", + "code":"99" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -735,7 +897,7 @@ "uri":"dli_01_0422.html", "doc_type":"usermanual", "p_code":"", - "code":"82" + "code":"100" }, { "desc":"When analyzing across multiple sources, it is not recommended to configure authentication information directly in a job as it can lead to password leakage. Instead, you a", @@ -743,8 +905,8 @@ "title":"Overview", "uri":"dli_01_0561.html", "doc_type":"usermanual", - "p_code":"82", - "code":"83" + "p_code":"100", + "code":"101" }, { "desc":"Create a CSS datasource authentication on the DLI console to store the authentication information of the CSS security cluster to DLI. This will allow you to access to the", @@ -752,8 +914,8 @@ "title":"Creating a CSS Datasource Authentication", "uri":"dli_01_0427.html", "doc_type":"usermanual", - "p_code":"82", - "code":"84" + "p_code":"100", + "code":"102" }, { "desc":"Create a Kerberos datasource authentication on the DLI console to store the authentication information of the data source to DLI. This will allow you to access to the dat", @@ -761,8 +923,8 @@ "title":"Creating a Kerberos Datasource Authentication", "uri":"dli_01_0558.html", "doc_type":"usermanual", - "p_code":"82", - "code":"85" + "p_code":"100", + "code":"103" }, { "desc":"Create a Kafka_SSL datasource authentication on the DLI console to store the Kafka authentication information to DLI. 
This will allow you to access to Kafka instances wit", @@ -770,8 +932,8 @@ "title":"Creating a Kafka_SSL Datasource Authentication", "uri":"dli_01_0560.html", "doc_type":"usermanual", - "p_code":"82", - "code":"86" + "p_code":"100", + "code":"104" }, { "desc":"Create a password datasource authentication on the DLI console to store passwords of the GaussDB(DWS), RDS, DCS, and DDS data sources to DLI. This will allow you to acces", @@ -779,8 +941,8 @@ "title":"Creating a Password Datasource Authentication", "uri":"dli_01_0559.html", "doc_type":"usermanual", - "p_code":"82", - "code":"87" + "p_code":"100", + "code":"105" }, { "desc":"Grant permissions on a datasource authentication to users so multiple user jobs can use the datasource authentication without affecting each other.The administrator and t", @@ -788,8 +950,8 @@ "title":"Datasource Authentication Permission Management", "uri":"dli_01_0480.html", "doc_type":"usermanual", - "p_code":"82", - "code":"88" + "p_code":"100", + "code":"106" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -798,7 +960,7 @@ "uri":"dli_01_0485.html", "doc_type":"usermanual", "p_code":"", - "code":"89" + "code":"107" }, { "desc":"DLI allows you to set variables that are frequently used during job development as global variables on the DLI management console. This avoids repeated definitions during", @@ -806,8 +968,8 @@ "title":"Global Variables", "uri":"dli_01_0476.html", "doc_type":"usermanual", - "p_code":"89", - "code":"90" + "p_code":"107", + "code":"108" }, { "desc":"You can grant permissions on a global variable to users.The administrator and the global variable owner have all permissions. 
You do not need to set permissions for them,", @@ -815,17 +977,8 @@ "title":"Permission Management for Global Variables", "uri":"dli_01_0533.html", "doc_type":"usermanual", - "p_code":"89", - "code":"91" - }, - { - "desc":"Only the tenant account or a subaccount of user group admin can authorize access.After entering the DLI management console, you are advised to set agency permissions to e", - "product_code":"dli", - "title":"Service Authorization", - "uri":"dli_01_0486.html", - "doc_type":"usermanual", - "p_code":"89", - "code":"92" + "p_code":"107", + "code":"109" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -834,7 +987,7 @@ "uri":"dli_01_0408.html", "doc_type":"usermanual", "p_code":"", - "code":"93" + "code":"110" }, { "desc":"DLI has a comprehensive permission control mechanism and supports fine-grained authentication through Identity and Access Management (IAM). You can create policies in IAM", @@ -842,8 +995,8 @@ "title":"Overview", "uri":"dli_01_0440.html", "doc_type":"usermanual", - "p_code":"93", - "code":"94" + "p_code":"110", + "code":"111" }, { "desc":"You can use Identity and Access Management (IAM) to implement fine-grained permissions control on DLI resources. For details, see Overview.If your cloud account does not ", @@ -851,8 +1004,8 @@ "title":"Creating an IAM User and Granting Permissions", "uri":"dli_01_0418.html", "doc_type":"usermanual", - "p_code":"93", - "code":"95" + "p_code":"110", + "code":"112" }, { "desc":"Custom policies can be created as a supplement to the system policies of DLI. You can add actions to custom policies. 
For the actions supported for custom policies, see \"", @@ -860,8 +1013,8 @@ "title":"Creating a Custom Policy", "uri":"dli_01_0451.html", "doc_type":"usermanual", - "p_code":"93", - "code":"96" + "p_code":"110", + "code":"113" }, { "desc":"A resource is an object that exists within a service. You can select DLI resources by specifying their paths.", @@ -869,8 +1022,8 @@ "title":"DLI Resources", "uri":"dli_01_0417.html", "doc_type":"usermanual", - "p_code":"93", - "code":"97" + "p_code":"110", + "code":"114" }, { "desc":"Request conditions are useful in determining when a custom policy takes effect. A request condition consists of a condition key and operator. Condition keys are either gl", @@ -878,8 +1031,8 @@ "title":"DLI Request Conditions", "uri":"dli_01_0475.html", "doc_type":"usermanual", - "p_code":"93", - "code":"98" + "p_code":"110", + "code":"115" }, { "desc":"Table 1 lists the common operations supported by each system policy of DLI. Choose proper system policies according to this table. For details about the SQL statement per", @@ -887,8 +1040,8 @@ "title":"Common Operations Supported by DLI System Policy", "uri":"dli_01_0441.html", "doc_type":"usermanual", - "p_code":"93", - "code":"99" + "p_code":"110", + "code":"116" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -897,7 +1050,7 @@ "uri":"dli_01_0513.html", "doc_type":"usermanual", "p_code":"", - "code":"100" + "code":"117" }, { "desc":"On the DLI management console, you can import data stored on OBS to DLI tables from Data Management > Databases and Tables > Table Management and SQL Editor pages. 
For de", @@ -905,8 +1058,8 @@ "title":"Importing Data to a DLI Table", "uri":"dli_01_0420.html", "doc_type":"usermanual", - "p_code":"100", - "code":"101" + "p_code":"117", + "code":"118" }, { "desc":"This section describes metrics reported by DLI to Cloud Eye as well as their namespaces and dimensions. You can use the management console or APIs provided by Cloud Eye t", @@ -914,8 +1067,8 @@ "title":"Viewing Monitoring Metrics", "uri":"dli_01_0445.html", "doc_type":"usermanual", - "p_code":"100", - "code":"102" + "p_code":"117", + "code":"119" }, { "desc":"With CTS, you can record operations associated with DLI for later query, audit, and backtrack operations.", @@ -923,17 +1076,17 @@ "title":"DLI Operations That Can Be Recorded by CTS", "uri":"dli_01_0318.html", "doc_type":"usermanual", - "p_code":"100", - "code":"103" + "p_code":"117", + "code":"120" }, { "desc":"A quota limits the quantity of a resource available to users, thereby preventing spikes in the usage of the resource.You can also request for an increased quota if your e", "product_code":"dli", - "title":"Quotas", + "title":"Quota Management", "uri":"dli_01_0550.html", "doc_type":"usermanual", - "p_code":"100", - "code":"104" + "p_code":"117", + "code":"121" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -942,7 +1095,7 @@ "uri":"dli_01_0539.html", "doc_type":"usermanual", "p_code":"", - "code":"105" + "code":"122" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -950,8 +1103,17 @@ "title":"Flink Jobs", "uri":"dli_03_0037.html", "doc_type":"usermanual", - "p_code":"105", - "code":"106" + "p_code":"122", + "code":"123" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Usage", + "uri":"dli_03_0137.html", + "doc_type":"usermanual", + "p_code":"123", + "code":"124" }, { "desc":"DLI Flink jobs support the following data formats:Avro, Avro_merge, BLOB, CSV, EMAIL, JSON, ORC, Parquet, and XML.DLI Flink jobs support data from the following data sour", @@ -959,8 +1121,8 @@ "title":"What Data Formats and Data Sources Are Supported by DLI Flink Jobs?", "uri":"dli_03_0083.html", "doc_type":"usermanual", - "p_code":"106", - "code":"107" + "p_code":"124", + "code":"125" }, { "desc":"A sub-user can view queues but cannot view Flink jobs. You can authorize the sub-user using DLI or IAM.Authorization on DLILog in to the DLI console using a tenant accoun", @@ -968,8 +1130,8 @@ "title":"How Do I Authorize a Subuser to View Flink Jobs?", "uri":"dli_03_0139.html", "doc_type":"usermanual", - "p_code":"106", - "code":"108" + "p_code":"124", + "code":"126" }, { "desc":"DLI Flink jobs are highly available. 
You can enable the automatic restart function to automatically restart your jobs after short-time faults of peripheral services are r", @@ -977,8 +1139,8 @@ "title":"How Do I Set Auto Restart upon Exception for a Flink Job?", "uri":"dli_03_0090.html", "doc_type":"usermanual", - "p_code":"106", - "code":"109" + "p_code":"124", + "code":"127" }, { "desc":"When you create a Flink SQL job or Flink Jar job, you can select Save Job Log on the job editing page to save job running logs to OBS.To set the OBS bucket for storing th", @@ -986,8 +1148,8 @@ "title":"How Do I Save Flink Job Logs?", "uri":"dli_03_0099.html", "doc_type":"usermanual", - "p_code":"106", - "code":"110" + "p_code":"124", + "code":"128" }, { "desc":"DLI can output Flink job results to DIS. You can view the results in DIS. For details, see \"Obtaining Data from DIS\" in Data Ingestion Service User Guide.DLI can output F", @@ -995,8 +1157,8 @@ "title":"How Can I Check Flink Job Results?", "uri":"dli_03_0043.html", "doc_type":"usermanual", - "p_code":"106", - "code":"111" + "p_code":"124", + "code":"129" }, { "desc":"Choose Job Management > Flink Jobs. In the Operation column of the target job, choose More > Permissions. When a new user is authorized, No such user. userName:xxxx. is d", @@ -1004,8 +1166,8 @@ "title":"Why Is Error \"No such user. userName:xxxx.\" Reported on the Flink Job Management Page When I Grant Permission to a User?", "uri":"dli_03_0160.html", "doc_type":"usermanual", - "p_code":"106", - "code":"112" + "p_code":"124", + "code":"130" }, { "desc":"Checkpoint was enabled when a Flink job is created, and the OBS bucket for storing checkpoints was specified. 
After a Flink job is manually stopped, no message is display", @@ -1013,8 +1175,8 @@ "title":"How Do I Know Which Checkpoint the Flink Job I Stopped Will Be Restored to When I Start the Job Again?", "uri":"dli_03_0180.html", "doc_type":"usermanual", - "p_code":"106", - "code":"113" + "p_code":"124", + "code":"131" }, { "desc":"When you set running parameters of a DLI Flink job, you can enable Alarm Generation upon Job Exception to receive alarms when the job runs abnormally or is in arrears.If ", @@ -1022,8 +1184,17 @@ "title":"Why Is a Message Displayed Indicating That the SMN Topic Does Not Exist When I Use the SMN Topic in DLI?", "uri":"dli_03_0036.html", "doc_type":"usermanual", - "p_code":"106", - "code":"114" + "p_code":"124", + "code":"132" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Flink SQL", + "uri":"dli_03_0131.html", + "doc_type":"usermanual", + "p_code":"123", + "code":"133" }, { "desc":"The consumption capability of a Flink SQL job depends on the data source transmission, queue size, and job parameter settings. The peak consumption is 10 Mbit/s.", @@ -1031,8 +1202,8 @@ "title":"How Much Data Can Be Processed in a Day by a Flink SQL Job?", "uri":"dli_03_0130.html", "doc_type":"usermanual", - "p_code":"106", - "code":"115" + "p_code":"133", + "code":"134" }, { "desc":"The temp stream in Flink SQL is similar to a subquery. It is a logical stream used to simplify the SQL logic and does not generate data storage. Therefore, there is no ne", @@ -1040,8 +1211,8 @@ "title":"Does Data in the Temporary Stream of Flink SQL Need to Be Cleared Periodically? 
How Do I Clear the Data?", "uri":"dli_03_0061.html", "doc_type":"usermanual", - "p_code":"106", - "code":"116" + "p_code":"133", + "code":"135" }, { "desc":"SymptomWhen you create a Flink SQL job and configure the parameters, you select an OBS bucket you have created. The system displays a message indicating that the OBS buck", @@ -1049,8 +1220,8 @@ "title":"Why Is a Message Displayed Indicating That the OBS Bucket Is Not Authorized When I Select an OBS Bucket for a Flink SQL Job?", "uri":"dli_03_0138.html", "doc_type":"usermanual", - "p_code":"106", - "code":"117" + "p_code":"133", + "code":"136" }, { "desc":"When using a Flink SQL job, you need to create an OBS partition table for subsequent batch processing.In the following example, the day field is used as the partition fie", @@ -1058,8 +1229,8 @@ "title":"How Do I Create an OBS Partitioned Table for a Flink SQL Job?", "uri":"dli_03_0089.html", "doc_type":"usermanual", - "p_code":"106", - "code":"118" + "p_code":"133", + "code":"137" }, { "desc":"In this example, the day field is used as the partition field with the parquet encoding format (only the parquet format is supported currently) to dump car_info data to O", @@ -1067,8 +1238,8 @@ "title":"How Do I Dump Data to OBS and Create an OBS Partitioned Table?", "uri":"dli_03_0075.html", "doc_type":"usermanual", - "p_code":"106", - "code":"119" + "p_code":"133", + "code":"138" }, { "desc":"When I run the creation statement with an EL expression in the table name in a Flink SQL job, the following error message is displayed:DLI.0005: AnalysisException: t_user", @@ -1076,8 +1247,8 @@ "title":"Why Is Error Message \"DLI.0005\" Displayed When I Use an EL Expression to Create a Table in a Flink SQL Job?", "uri":"dli_03_0167.html", "doc_type":"usermanual", - "p_code":"106", - "code":"120" + "p_code":"133", + "code":"139" }, { "desc":"After data is written to OBS through the Flink job output stream, data cannot be queried from the DLI table created in the OBS file 
path.For example, use the following Fl", @@ -1085,8 +1256,8 @@ "title":"Why Is No Data Queried in the DLI Table Created Using the OBS File Path When Data Is Written to OBS by a Flink Job Output Stream?", "uri":"dli_03_0168.html", "doc_type":"usermanual", - "p_code":"106", - "code":"121" + "p_code":"133", + "code":"140" }, { "desc":"After a Flink SQL job is submitted on DLI, the job fails to be executed. The following error information is displayed in the job log:connect to DIS failed java.lang.Illeg", @@ -1094,8 +1265,8 @@ "title":"Why Does a Flink SQL Job Fails to Be Executed, and Is \"connect to DIS failed java.lang.IllegalArgumentException: Access key cannot be null\" Displayed in the Log?", "uri":"dli_03_0174.html", "doc_type":"usermanual", - "p_code":"106", - "code":"122" + "p_code":"133", + "code":"141" }, { "desc":"Semantic verification for a Flink SQL job (reading DIS data) fails. The following information is displayed when the job fails:Get dis channel xxxinfo failed. error info: ", @@ -1103,8 +1274,8 @@ "title":"Why Is Error \"Not authorized\" Reported When a Flink SQL Job Reads DIS Data?", "uri":"dli_03_0176.html", "doc_type":"usermanual", - "p_code":"106", - "code":"123" + "p_code":"133", + "code":"142" }, { "desc":"After a Flink SQL job consumed Kafka and sent data to the Elasticsearch cluster, the job was successfully executed, but no data is available.Possible causes are as follow", @@ -1112,8 +1283,17 @@ "title":"Data Writing Fails After a Flink SQL Job Consumed Kafka and Sank Data to the Elasticsearch Cluster", "uri":"dli_03_0232.html", "doc_type":"usermanual", - "p_code":"106", - "code":"124" + "p_code":"133", + "code":"143" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Flink Jar Jobs", + "uri":"dli_03_0132.html", + "doc_type":"usermanual", + "p_code":"123", + "code":"144" }, { "desc":"The procedure is as follows:Add the following code to the JAR file code of the Flink Jar job:// Configure the pom file on which the StreamExecutionEnvironment depends.\nSt", @@ -1121,8 +1301,8 @@ "title":"How Do I Configure Checkpoints for Flink Jar Jobs and Save the Checkpoints to OBS?", "uri":"dli_03_0038.html", "doc_type":"usermanual", - "p_code":"106", - "code":"125" + "p_code":"144", + "code":"145" }, { "desc":"Configuration files can be uploaded for user-defined jobs (JAR).Upload the configuration file to DLI through Package Management.In the Other Dependencies area of the Flin", @@ -1130,8 +1310,8 @@ "title":"Does a Flink JAR Job Support Configuration File Upload? How Do I Upload a Configuration File?", "uri":"dli_03_0044.html", "doc_type":"usermanual", - "p_code":"106", - "code":"126" + "p_code":"144", + "code":"146" }, { "desc":"The dependency of your Flink job conflicts with a built-in dependency of the DLI Flink platform. As a result, the job submission fails.Delete your JAR file that is the sa", @@ -1139,8 +1319,8 @@ "title":"Why Does the Submission Fail Due to Flink JAR File Conflict?", "uri":"dli_03_0119.html", "doc_type":"usermanual", - "p_code":"106", - "code":"127" + "p_code":"144", + "code":"147" }, { "desc":"When a Flink Jar job is submitted to access GaussDB(DWS), an error message is displayed indicating that the job fails to be started. 
The job log contains the following er", @@ -1148,8 +1328,8 @@ "title":"Why Does a Flink Jar Job Fail to Access GaussDB(DWS) and a Message Is Displayed Indicating Too Many Client Connections?", "uri":"dli_03_0161.html", "doc_type":"usermanual", - "p_code":"106", - "code":"128" + "p_code":"144", + "code":"148" }, { "desc":"An exception occurred when a Flink Jar job is running. The following error information is displayed in the job log:org.apache.flink.shaded.curator.org.apache.curator.Conn", @@ -1157,8 +1337,8 @@ "title":"Why Is Error Message \"Authentication failed\" Displayed During Flink Jar Job Running?", "uri":"dli_03_0165.html", "doc_type":"usermanual", - "p_code":"106", - "code":"129" + "p_code":"144", + "code":"149" }, { "desc":"The storage path of the Flink Jar job checkpoints was set to an OBS bucket. The job failed to be submitted, and an error message indicating an invalid OBS bucket name was", @@ -1166,8 +1346,8 @@ "title":"Why Is Error Invalid OBS Bucket Name Reported After a Flink Job Submission Failed?", "uri":"dli_03_0233.html", "doc_type":"usermanual", - "p_code":"106", - "code":"130" + "p_code":"144", + "code":"150" }, { "desc":"Flink Job submission failed. The exception information is as follows:Flink JAR files conflicted. The submitted Flink JAR file conflicted with the HDFS JAR file of the DLI", @@ -1175,8 +1355,8 @@ "title":"Why Does the Flink Submission Fail Due to Hadoop JAR File Conflict?", "uri":"dli_03_0234.html", "doc_type":"usermanual", - "p_code":"106", - "code":"131" + "p_code":"144", + "code":"151" }, { "desc":"You can use Flink Jar to connect to Kafka with SASL SSL authentication enabled.", @@ -1184,8 +1364,17 @@ "title":"How Do I Connect a Flink jar Job to SASL_SSL?", "uri":"dli_03_0266.html", "doc_type":"usermanual", - "p_code":"106", - "code":"132" + "p_code":"144", + "code":"152" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Performance Tuning", + "uri":"dli_03_0133.html", + "doc_type":"usermanual", + "p_code":"123", + "code":"153" }, { "desc":"Data Stacking in a Consumer GroupThe accumulated data of a consumer group can be calculated by the following formula: Total amount of data to be consumed by the consumer ", @@ -1193,8 +1382,8 @@ "title":"How Do I Optimize Performance of a Flink Job?", "uri":"dli_03_0106.html", "doc_type":"usermanual", - "p_code":"106", - "code":"133" + "p_code":"153", + "code":"154" }, { "desc":"Add the following SQL statements to the Flink job:", @@ -1202,8 +1391,8 @@ "title":"How Do I Write Data to Different Elasticsearch Clusters in a Flink Job?", "uri":"dli_03_0048.html", "doc_type":"usermanual", - "p_code":"106", - "code":"134" + "p_code":"153", + "code":"155" }, { "desc":"The DLI Flink checkpoint/savepoint mechanism is complete and reliable. You can use this mechanism to prevent data loss when a job is manually restarted or restarted due t", @@ -1211,8 +1400,17 @@ "title":"How Do I Prevent Data Loss After Flink Job Restart?", "uri":"dli_03_0096.html", "doc_type":"usermanual", - "p_code":"106", - "code":"135" + "p_code":"153", + "code":"156" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"O&M Guide", + "uri":"dli_03_0135.html", + "doc_type":"usermanual", + "p_code":"123", + "code":"157" }, { "desc":"On the Flink job management page, hover the cursor on the status of the job that fails to be submitted to view the brief information about the failure.The possible causes", @@ -1220,8 +1418,8 @@ "title":"How Do I Locate a Flink Job Submission Error?", "uri":"dli_03_0103.html", "doc_type":"usermanual", - "p_code":"106", - "code":"136" + "p_code":"157", + "code":"158" }, { "desc":"On the Flink job management, click Edit in the Operation column of the target job. On the displayed page, check whether Save Job Log in the Running Parameters tab is enab", @@ -1229,8 +1427,8 @@ "title":"How Do I Locate a Flink Job Running Error?", "uri":"dli_03_0105.html", "doc_type":"usermanual", - "p_code":"106", - "code":"137" + "p_code":"157", + "code":"159" }, { "desc":"Flink's checkpointing is a fault tolerance and recovery mechanism. This mechanism ensures that real-time programs can self-recover in case of exceptions or machine issues", @@ -1238,8 +1436,8 @@ "title":"How Can I Check if a Flink Job Can Be Restored From a Checkpoint After Restarting It?", "uri":"dli_03_0136.html", "doc_type":"usermanual", - "p_code":"106", - "code":"138" + "p_code":"157", + "code":"160" }, { "desc":"To rectify this fault, perform the following steps:Log in to the DIS management console. In the navigation pane, choose Stream Management. 
View the Flink job SQL statemen", @@ -1247,8 +1445,8 @@ "title":"Why Does DIS Stream Not Exist During Job Semantic Check?", "uri":"dli_03_0040.html", "doc_type":"usermanual", - "p_code":"106", - "code":"139" + "p_code":"157", + "code":"161" }, { "desc":"If the OBS bucket selected for a job is not authorized, perform the following steps:Select Enable Checkpointing or Save Job Log.Specify OBS Bucket.Select Authorize OBS.", @@ -1256,8 +1454,8 @@ "title":"Why Is the OBS Bucket Selected for Job Not Authorized?", "uri":"dli_03_0045.html", "doc_type":"usermanual", - "p_code":"106", - "code":"140" + "p_code":"157", + "code":"162" }, { "desc":"Mode for storing generated job logs when a DLI Flink job fails to be submitted or executed. The options are as follows:If the submission fails, a submission log is genera", @@ -1265,8 +1463,8 @@ "title":"Why Are Logs Not Written to the OBS Bucket After a DLI Flink Job Fails to Be Submitted for Running?", "uri":"dli_03_0064.html", "doc_type":"usermanual", - "p_code":"106", - "code":"141" + "p_code":"157", + "code":"163" }, { "desc":"The Flink/Spark UI was displayed with incomplete information.When the queue is used to run a job, the system releases the cluster and takes about 10 minutes to create a n", @@ -1274,8 +1472,8 @@ "title":"Why Is Information Displayed on the FlinkUI/Spark UI Page Incomplete?", "uri":"dli_03_0235.html", "doc_type":"usermanual", - "p_code":"106", - "code":"142" + "p_code":"157", + "code":"164" }, { "desc":"JobManager and TaskManager heartbeats timed out. 
As a result, the Flink job is abnormal.Check whether the network is intermittently disconnected and whether the cluster l", @@ -1283,8 +1481,8 @@ "title":"Why Is the Flink Job Abnormal Due to Heartbeat Timeout Between JobManager and TaskManager?", "uri":"dli_03_0236.html", "doc_type":"usermanual", - "p_code":"106", - "code":"143" + "p_code":"157", + "code":"165" }, { "desc":"Test address connectivity.If the network is unreachable, rectify the network connection first. Ensure that the network connection between the DLI queue and the external d", @@ -1292,8 +1490,8 @@ "title":"Why Is Error \"Timeout expired while fetching topic metadata\" Repeatedly Reported in Flink JobManager Logs?", "uri":"dli_03_0265.html", "doc_type":"usermanual", - "p_code":"106", - "code":"144" + "p_code":"157", + "code":"166" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1301,8 +1499,17 @@ "title":"Problems Related to SQL Jobs", "uri":"dli_03_0020.html", "doc_type":"usermanual", - "p_code":"105", - "code":"145" + "p_code":"122", + "code":"167" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Usage", + "uri":"dli_03_0216.html", + "doc_type":"usermanual", + "p_code":"167", + "code":"168" }, { "desc":"A temporary table is used to store intermediate results. When a transaction or session ends, the data in the temporary table can be automatically deleted. 
For example, in", @@ -1310,8 +1517,17 @@ "title":"SQL Jobs", "uri":"dli_03_0200.html", "doc_type":"usermanual", - "p_code":"145", - "code":"146" + "p_code":"168", + "code":"169" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Job Development", + "uri":"dli_03_0204.html", + "doc_type":"usermanual", + "p_code":"167", + "code":"170" }, { "desc":"If a large number of small files are generated during SQL execution, job execution and table query will take a long time. In this case, you should merge small files.Set t", @@ -1319,8 +1535,8 @@ "title":"How Do I Merge Small Files?", "uri":"dli_03_0086.html", "doc_type":"usermanual", - "p_code":"145", - "code":"147" + "p_code":"170", + "code":"171" }, { "desc":"When creating an OBS table, you must specify a table path in the database. The path format is as follows: obs://xxx/database name/table name.If the specified path is akdc", @@ -1328,8 +1544,8 @@ "title":"How Do I Specify an OBS Path When Creating an OBS Table?", "uri":"dli_03_0092.html", "doc_type":"usermanual", - "p_code":"145", - "code":"148" + "p_code":"170", + "code":"172" }, { "desc":"DLI allows you to associate JSON data in an OBS bucket to create tables in asynchronous mode.The statement for creating the table is as follows:", @@ -1337,8 +1553,8 @@ "title":"How Do I Create a Table Using JSON Data in an OBS Bucket?", "uri":"dli_03_0108.html", "doc_type":"usermanual", - "p_code":"145", - "code":"149" + "p_code":"170", + "code":"173" }, { "desc":"You can use the where condition statement in the select statement to filter data. 
For example:", @@ -1346,8 +1562,8 @@ "title":"How Do I Set Local Variables in SQL Statements?", "uri":"dli_03_0087.html", "doc_type":"usermanual", - "p_code":"145", - "code":"150" + "p_code":"170", + "code":"174" }, { "desc":"The correct method for using the count function to perform aggregation is as follows:OrIf an incorrect method is used, an error will be reported.", @@ -1355,8 +1571,8 @@ "title":"How Can I Use the count Function to Perform Aggregation?", "uri":"dli_03_0069.html", "doc_type":"usermanual", - "p_code":"145", - "code":"151" + "p_code":"170", + "code":"175" }, { "desc":"You can use the cross-region replication function of OBS. The procedure is as follows:Export the DLI table data in region 1 to the user-defined OBS bucket.Use the OBS cro", @@ -1364,8 +1580,8 @@ "title":"How Do I Synchronize DLI Table Data from One Region to Another?", "uri":"dli_03_0072.html", "doc_type":"usermanual", - "p_code":"145", - "code":"152" + "p_code":"170", + "code":"176" }, { "desc":"Currently, DLI does not allow you to insert table data into specific fields. To insert table data, you must insert data of all table fields at a time.", @@ -1373,8 +1589,17 @@ "title":"How Do I Insert Table Data into Specific Fields of a Table Using a SQL Job?", "uri":"dli_03_0191.html", "doc_type":"usermanual", - "p_code":"145", - "code":"153" + "p_code":"170", + "code":"177" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Job O&M Errors", + "uri":"dli_03_0206.html", + "doc_type":"usermanual", + "p_code":"167", + "code":"178" }, { "desc":"Create an OBS directory with a unique name. Alternatively, you can manually delete the existing OBS directory and submit the job again. 
However, exercise caution when del", @@ -1382,8 +1607,8 @@ "title":"Why Is Error \"path obs://xxx already exists\" Reported When Data Is Exported to OBS?", "uri":"dli_03_0014.html", "doc_type":"usermanual", - "p_code":"145", - "code":"154" + "p_code":"178", + "code":"179" }, { "desc":"This message indicates that the two tables to be joined contain the same column, but the owner of the column is not specified when the command is executed.For example, ta", @@ -1391,8 +1616,8 @@ "title":"Why Is Error \"SQL_ANALYSIS_ERROR: Reference 't.id' is ambiguous, could be: t.id, t.id.;\" Displayed When Two Tables Are Joined?", "uri":"dli_03_0066.html", "doc_type":"usermanual", - "p_code":"145", - "code":"155" + "p_code":"178", + "code":"180" }, { "desc":"Check if your account is in arrears and top it up if necessary.If the same error message persists after the top-up, log out of your account and log back in.", @@ -1400,8 +1625,8 @@ "title":"Why Is Error \"The current account does not have permission to perform this operation,the current account was restricted. 
Restricted for no budget.\" Reported when a SQL Statement Is Executed?", "uri":"dli_03_0071.html", "doc_type":"usermanual", - "p_code":"145", - "code":"156" + "p_code":"178", + "code":"181" }, { "desc":"Cause AnalysisWhen you query the partitioned table XX.YYY, the partition column is not specified in the search criteria.A partitioned table can be queried only when the q", @@ -1409,8 +1634,8 @@ "title":"Why Is Error \"There should be at least one partition pruning predicate on partitioned table XX.YYY\" Reported When a Query Statement Is Executed?", "uri":"dli_03_0145.html", "doc_type":"usermanual", - "p_code":"145", - "code":"157" + "p_code":"178", + "code":"182" }, { "desc":"The following error message is displayed when the LOAD DATA command is executed by a Spark SQL job to import data to a DLI table:In some cases ,the following error messag", @@ -1418,8 +1643,8 @@ "title":"Why Is Error \"IllegalArgumentException: Buffer size too small. size\" Reported When Data Is Loaded to an OBS Foreign Table?", "uri":"dli_03_0169.html", "doc_type":"usermanual", - "p_code":"145", - "code":"158" + "p_code":"178", + "code":"183" }, { "desc":"An error is reported during SQL job execution:Please contact DLI service. DLI.0002: FileNotFoundException: getFileStatus on obs://xxx: status [404]Check whether there is ", @@ -1427,8 +1652,8 @@ "title":"Why Is Error \"DLI.0002 FileNotFoundException\" Reported During SQL Job Running?", "uri":"dli_03_0189.html", "doc_type":"usermanual", - "p_code":"145", - "code":"159" + "p_code":"178", + "code":"184" }, { "desc":"Currently, DLI supports the Hive syntax for creating tables of the TEXTFILE, SEQUENCEFILE, RCFILE, ORC, AVRO, and PARQUET file types. 
If the file format specified for cre", @@ -1436,8 +1661,8 @@ "title":"Why Is a Schema Parsing Error Reported When I Create a Hive Table Using CTAS?", "uri":"dli_03_0046.html", "doc_type":"usermanual", - "p_code":"145", - "code":"160" + "p_code":"178", + "code":"185" }, { "desc":"When you run a DLI SQL script on DataArts Studio, the log shows that the statements fail to be executed. The error information is as follows:DLI.0999: RuntimeException: o", @@ -1445,8 +1670,8 @@ "title":"Why Is Error \"org.apache.hadoop.fs.obs.OBSIOException\" Reported When I Run DLI SQL Scripts on DataArts Studio?", "uri":"dli_03_0173.html", "doc_type":"usermanual", - "p_code":"145", - "code":"161" + "p_code":"178", + "code":"186" }, { "desc":"After the migration job is submitted, the following error information is displayed in the log:org.apache.sqoop.common.SqoopException:UQUERY_CONNECTOR_0001:Invoke DLI serv", @@ -1454,8 +1679,8 @@ "title":"Why Is Error \"UQUERY_CONNECTOR_0001:Invoke DLI service api failed\" Reported in the Job Log When I Use CDM to Migrate Data to DLI?", "uri":"dli_03_0172.html", "doc_type":"usermanual", - "p_code":"145", - "code":"162" + "p_code":"178", + "code":"187" }, { "desc":"Error message \"File not Found\" is displayed when a SQL job is accessed.Generally, the file cannot be found due to a read/write conflict. 
Check whether a job is overwritin", @@ -1463,8 +1688,8 @@ "title":"Why Is Error \"File not Found\" Reported When I Access a SQL Job?", "uri":"dli_03_0207.html", "doc_type":"usermanual", - "p_code":"145", - "code":"163" + "p_code":"178", + "code":"188" }, { "desc":"Error message \"DLI.0003: AccessControlException XXX\" is reported when a SQL job is accessed.Check the OBS bucket written in the AccessControlException to confirm if your ", @@ -1472,8 +1697,8 @@ "title":"Why Is Error \"DLI.0003: AccessControlException XXX\" Reported When I Access a SQL Job?", "uri":"dli_03_0208.html", "doc_type":"usermanual", - "p_code":"145", - "code":"164" + "p_code":"178", + "code":"189" }, { "desc":"Error message \"DLI.0001: org.apache.hadoop.security.AccessControlException: verifyBucketExists on {{bucket name}}: status [403]\" is reported when a SQL job is Accessed.Yo", @@ -1481,8 +1706,8 @@ "title":"Why Is Error \"DLI.0001: org.apache.hadoop.security.AccessControlException: verifyBucketExists on {{bucket name}}: status [403]\" Reported When I Access a SQL Job?", "uri":"dli_03_0209.html", "doc_type":"usermanual", - "p_code":"145", - "code":"165" + "p_code":"178", + "code":"190" }, { "desc":"Error message \"The current account does not have permission to perform this operation,the current account was restricted.\" is reported during SQL statement execution.Chec", @@ -1490,8 +1715,17 @@ "title":"Why Is Error \"The current account does not have permission to perform this operation,the current account was restricted. Restricted for no budget\" Reported During SQL Statement Execution? Restricted for no budget.", "uri":"dli_03_0210.html", "doc_type":"usermanual", - "p_code":"145", - "code":"166" + "p_code":"178", + "code":"191" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"O&M Guide", + "uri":"dli_03_0211.html", + "doc_type":"usermanual", + "p_code":"167", + "code":"192" }, { "desc":"If the job runs slowly, perform the following steps to find the causes and rectify the fault:Check whether the problem is caused by FullGC.Log in to the DLI console. In t", @@ -1499,8 +1733,8 @@ "title":"How Do I Troubleshoot Slow SQL Jobs?", "uri":"dli_03_0196.html", "doc_type":"usermanual", - "p_code":"145", - "code":"167" + "p_code":"192", + "code":"193" }, { "desc":"You can view SQL job logs for routine O&M.Obtain the ID of the DLI job executed on the DataArts Studio console.Job IDOn the DLI console, choose Job Management > SQL Jobs.", @@ -1508,8 +1742,8 @@ "title":"How Do I View DLI SQL Logs?", "uri":"dli_03_0091.html", "doc_type":"usermanual", - "p_code":"145", - "code":"168" + "p_code":"192", + "code":"194" }, { "desc":"You can view the job execution records when a job is running.Log in to the DLI management console.In the navigation pane on the left, choose Job Management > SQL Jobs.Ent", @@ -1517,8 +1751,8 @@ "title":"How Do I View SQL Execution Records?", "uri":"dli_03_0116.html", "doc_type":"usermanual", - "p_code":"145", - "code":"169" + "p_code":"192", + "code":"195" }, { "desc":"If the execution of an SQL statement takes a long time, you need to access the Spark UI to check the execution status.If data skew occurs, the running time of a stage exc", @@ -1526,8 +1760,8 @@ "title":"How Do I Eliminate Data Skew by Configuring AE Parameters?", "uri":"dli_03_0093.html", "doc_type":"usermanual", - "p_code":"145", - "code":"170" + "p_code":"192", + "code":"196" }, { "desc":"A DLI table exists but cannot be queried on the DLI console.If a table exists but cannot be queried, there is a high probability that the current user does not have the p", @@ -1535,8 +1769,8 @@ 
"title":"What Can I Do If a Table Cannot Be Queried on the DLI Console?", "uri":"dli_03_0184.html", "doc_type":"usermanual", - "p_code":"145", - "code":"171" + "p_code":"192", + "code":"197" }, { "desc":"A high compression ratio of OBS tables in the Parquet or ORC format (for example, a compression ratio of 5 or higher compared with text compression) will lead to large da", @@ -1544,8 +1778,8 @@ "title":"The Compression Ratio of OBS Tables Is Too High", "uri":"dli_03_0013.html", "doc_type":"usermanual", - "p_code":"145", - "code":"172" + "p_code":"192", + "code":"198" }, { "desc":"DLI supports only UTF-8-encoded texts. Ensure that data is encoded using UTF-8 during table creation and import.", @@ -1553,8 +1787,8 @@ "title":"How Can I Avoid Garbled Characters Caused by Inconsistent Character Codes?", "uri":"dli_03_0009.html", "doc_type":"usermanual", - "p_code":"145", - "code":"173" + "p_code":"192", + "code":"199" }, { "desc":"User A created the testTable table in a database through a SQL job and granted user B the permission to insert and delete table data. User A deleted the testTable table a", @@ -1562,8 +1796,8 @@ "title":"Do I Need to Grant Table Permissions to a User and Project After I Delete a Table and Create One with the Same Name?", "uri":"dli_03_0175.html", "doc_type":"usermanual", - "p_code":"145", - "code":"174" + "p_code":"192", + "code":"200" }, { "desc":"A CSV file is imported to a DLI partitioned table, but the imported file data does not contain the data in the partitioning column. 
The partitioning column needs to be sp", @@ -1571,8 +1805,8 @@ "title":"Why Can't I Query Table Data After Data Is Imported to a DLI Partitioned Table Because the File to Be Imported Does Not Contain Data in the Partitioning Column?", "uri":"dli_03_0177.html", "doc_type":"usermanual", - "p_code":"145", - "code":"175" + "p_code":"192", + "code":"201" }, { "desc":"When an OBS foreign table is created, a field in the specified OBS file contains a carriage return line feed (CRLF) character. As a result, the data is incorrect.The stat", @@ -1580,8 +1814,8 @@ "title":"How Do I Fix the Data Error Caused by CRLF Characters in a Field of the OBS File Used to Create an External OBS Table?", "uri":"dli_03_0181.html", "doc_type":"usermanual", - "p_code":"145", - "code":"176" + "p_code":"192", + "code":"202" }, { "desc":"A SQL job contains join operations. After the job is submitted, it is stuck in the Running state and no result is returned.When a Spark SQL job has join operations on sma", @@ -1589,8 +1823,8 @@ "title":"Why Does a SQL Job That Has Join Operations Stay in the Running State?", "uri":"dli_03_0182.html", "doc_type":"usermanual", - "p_code":"145", - "code":"177" + "p_code":"192", + "code":"203" }, { "desc":"The on clause was not added to the SQL statement for joining tables. As a result, the Cartesian product query occurs due to multi-table association, and the queue resourc", @@ -1598,8 +1832,8 @@ "title":"The on Clause Is Not Added When Tables Are Joined. Cartesian Product Query Causes High Resource Usage of the Queue, and the Job Fails to Be Executed", "uri":"dli_03_0187.html", "doc_type":"usermanual", - "p_code":"145", - "code":"178" + "p_code":"192", + "code":"204" }, { "desc":"Partition data is manually uploaded to a partition of an OBS table. 
However, the data cannot be queried using DLI SQL editor.After manually adding partition data, you nee", @@ -1607,8 +1841,8 @@ "title":"Why Can't I Query Data After I Manually Add Data to the Partition Directory of an OBS Table?", "uri":"dli_03_0190.html", "doc_type":"usermanual", - "p_code":"145", - "code":"179" + "p_code":"192", + "code":"205" }, { "desc":"To dynamically overwrite the specified partitioned data in the DataSource table, set dli.sql.dynamicPartitionOverwrite.enabled to true and then run the insert overwrite s", @@ -1616,8 +1850,8 @@ "title":"Why Is All Data Overwritten When insert overwrite Is Used to Overwrite Partitioned Table?", "uri":"dli_03_0212.html", "doc_type":"usermanual", - "p_code":"145", - "code":"180" + "p_code":"192", + "code":"206" }, { "desc":"The possible causes and solutions are as follows:After you purchase a DLI queue and submit a SQL job for the first time, wait for 5 to 10 minutes. After the cluster is st", @@ -1625,8 +1859,8 @@ "title":"Why Is a SQL Job Stuck in the Submitting State?", "uri":"dli_03_0213.html", "doc_type":"usermanual", - "p_code":"145", - "code":"181" + "p_code":"192", + "code":"207" }, { "desc":"Spark does not have the datetime type and uses the TIMESTAMP type instead.You can use a function to convert data types.The following is an example.select cast(create_date", @@ -1634,8 +1868,8 @@ "title":"Why Is the create_date Field in the RDS Table Is a Timestamp in the DLI query result?", "uri":"dli_03_0214.html", "doc_type":"usermanual", - "p_code":"145", - "code":"182" + "p_code":"192", + "code":"208" }, { "desc":"If the table name is changed immediately after SQL statements are executed, the data size of the table may be incorrect.If you need to change the table name, change it 5 ", @@ -1643,8 +1877,8 @@ "title":"What Can I Do If datasize Cannot Be Changed After the Table Name Is Changed in a Finished SQL Job?", "uri":"dli_03_0215.html", "doc_type":"usermanual", - "p_code":"145", - "code":"183" + "p_code":"192", 
+ "code":"209" }, { "desc":"When DLI is used to insert data into an OBS temporary table, only part of data is imported.Possible causes are as follows:The amount of data read during job execution is ", @@ -1652,8 +1886,8 @@ "title":"Why Is the Data Volume Changes When Data Is Imported from DLI to OBS?", "uri":"dli_03_0231.html", "doc_type":"usermanual", - "p_code":"145", - "code":"184" + "p_code":"192", + "code":"210" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1661,8 +1895,17 @@ "title":"Problems Related to Spark Jobs", "uri":"dli_03_0021.html", "doc_type":"usermanual", - "p_code":"105", - "code":"185" + "p_code":"122", + "code":"211" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Usage", + "uri":"dli_03_0163.html", + "doc_type":"usermanual", + "p_code":"211", + "code":"212" }, { "desc":"DLI Spark does not support job scheduling. You can use other services, such as DataArts Studio, or use APIs or SDKs to customize job schedule.The Spark SQL syntax does no", @@ -1670,8 +1913,17 @@ "title":"Spark Jobs", "uri":"dli_03_0201.html", "doc_type":"usermanual", - "p_code":"185", - "code":"186" + "p_code":"212", + "code":"213" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Job Development", + "uri":"dli_03_0217.html", + "doc_type":"usermanual", + "p_code":"211", + "code":"214" }, { "desc":"To use Spark to write data into a DLI table, configure the following parameters:fs.obs.access.keyfs.obs.secret.keyfs.obs.implfs.obs.endpointThe following is an example:", @@ -1679,8 +1931,8 @@ "title":"How Do I Use Spark to Write Data into a DLI Table?", "uri":"dli_03_0107.html", "doc_type":"usermanual", - "p_code":"185", - "code":"187" + "p_code":"214", + "code":"215" }, { "desc":"Hard-coded or plaintext AK and SK pose significant security risks. To ensure security, encrypt your AK and SK, store them in configuration files or environment variables,", @@ -1688,8 +1940,8 @@ "title":"How Do I Set the AK/SK for a Queue to Operate an OBS Table?", "uri":"dli_03_0017.html", "doc_type":"usermanual", - "p_code":"185", - "code":"188" + "p_code":"214", + "code":"216" }, { "desc":"Log in to the DLI console. In the navigation pane, choose Job Management > Spark Jobs. In the job list, locate the target job and click next to Job ID to view the parame", @@ -1697,8 +1949,8 @@ "title":"How Do I View the Resource Usage of DLI Spark Jobs?", "uri":"dli_03_0102.html", "doc_type":"usermanual", - "p_code":"185", - "code":"189" + "p_code":"214", + "code":"217" }, { "desc":"If the pymysql module is missing, check whether the corresponding EGG package exists. 
If the package does not exist, upload the pyFile package on the Package Management p", @@ -1706,8 +1958,8 @@ "title":"How Do I Use Python Scripts to Access the MySQL Database If the pymysql Module Is Missing from the Spark Job Results Stored in MySQL?", "uri":"dli_03_0076.html", "doc_type":"usermanual", - "p_code":"185", - "code":"190" + "p_code":"214", + "code":"218" }, { "desc":"DLI natively supports PySpark.For most cases, Python is preferred for data analysis, and PySpark is the best choice for big data analysis. Generally, JVM programs are pac", @@ -1715,8 +1967,8 @@ "title":"How Do I Run a Complex PySpark Program in DLI?", "uri":"dli_03_0082.html", "doc_type":"usermanual", - "p_code":"185", - "code":"191" + "p_code":"214", + "code":"219" }, { "desc":"You can use DLI Spark jobs to access data in the MySQL database using either of the following methods:Solution 1: Buy a queue, create an enhanced datasource connection, a", @@ -1724,8 +1976,8 @@ "title":"How Does a Spark Job Access a MySQL Database?", "uri":"dli_03_0127.html", "doc_type":"usermanual", - "p_code":"185", - "code":"192" + "p_code":"214", + "code":"220" }, { "desc":"When shuffle statements, such as GROUP BY and JOIN, are executed in Spark jobs, data skew occurs, which slows down the job execution.To solve this problem, you can config", @@ -1733,8 +1985,8 @@ "title":"How Do I Use JDBC to Set the spark.sql.shuffle.partitions Parameter to Improve the Task Concurrency?", "uri":"dli_03_0068.html", "doc_type":"usermanual", - "p_code":"185", - "code":"193" + "p_code":"214", + "code":"221" }, { "desc":"You can use SparkFiles to read the file submitted using –-file form a local path: SparkFiles.get(\"Name of the uploaded file\").The file path in the Driver is different fro", @@ -1742,8 +1994,17 @@ "title":"How Do I Read Uploaded Files for a Spark Jar Job?", "uri":"dli_03_0118.html", "doc_type":"usermanual", - "p_code":"185", - "code":"194" + "p_code":"214", + "code":"222" + }, + { + "desc":"HUAWEI CLOUD Help 
Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Job O&M Errors", + "uri":"dli_03_0218.html", + "doc_type":"usermanual", + "p_code":"211", + "code":"223" }, { "desc":"The following error is reported when a Spark job accesses OBS data:Set the AK/SK to enable Spark jobs to access OBS data. For details, see How Do I Set the AK/SK for a Qu", @@ -1751,8 +2012,8 @@ "title":"Why Are Errors \"ResponseCode: 403\" and \"ResponseStatus: Forbidden\" Reported When a Spark Job Accesses OBS Data?", "uri":"dli_03_0156.html", "doc_type":"usermanual", - "p_code":"185", - "code":"195" + "p_code":"223", + "code":"224" }, { "desc":"Check whether the OBS bucket is used to store DLI logs on the Global Configuration > Job Configurations page. The job log bucket cannot be used for other purpose.", @@ -1760,8 +2021,8 @@ "title":"Why Is Error \"verifyBucketExists on XXXX: status [403]\" Reported When I Use a Spark Job to Access an OBS Bucket That I Have Access Permission?", "uri":"dli_03_0164.html", "doc_type":"usermanual", - "p_code":"185", - "code":"196" + "p_code":"223", + "code":"225" }, { "desc":"When a Spark job accesses a large amount of data, for example, accessing data in a GaussDB(DWS) database, you are advised to set the number of concurrent tasks and enable", @@ -1769,8 +2030,8 @@ "title":"Why Is a Job Running Timeout Reported When a Spark Job Runs a Large Amount of Data?", "uri":"dli_03_0157.html", "doc_type":"usermanual", - "p_code":"185", - "code":"197" + "p_code":"223", + "code":"226" }, { "desc":"Spark jobs cannot access SFTP. 
Upload the files you want to access to OBS and then you can analyze the data using Spark jobs.", @@ -1778,8 +2039,8 @@ "title":"Why Does the Job Fail to Be Executed and the Log Shows that the File Directory Is Abnormal When I Use a Spark Job to Access Files in SFTP?", "uri":"dli_03_0188.html", "doc_type":"usermanual", - "p_code":"185", - "code":"198" + "p_code":"223", + "code":"227" }, { "desc":"When a Spark job is running, an error message is displayed, indicating that the user does not have the database permission. The error information is as follows:org.apache", @@ -1787,8 +2048,17 @@ "title":"Why Does the Job Fail to Be Executed Due to Insufficient Database and Table Permissions?", "uri":"dli_03_0192.html", "doc_type":"usermanual", - "p_code":"185", - "code":"199" + "p_code":"223", + "code":"228" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"O&M Guide", + "uri":"dli_03_0219.html", + "doc_type":"usermanual", + "p_code":"211", + "code":"229" }, { "desc":"I cannot find the specified Python environment after adding the Python 3 package.Set spark.yarn.appMasterEnv.PYSPARK_PYTHON to python3 in the conf file to specify the Pyt", @@ -1796,8 +2066,8 @@ "title":"Why Can't I Find the Specified Python Environment After Adding the Python Package?", "uri":"dli_03_0077.html", "doc_type":"usermanual", - "p_code":"185", - "code":"200" + "p_code":"229", + "code":"230" }, { "desc":"The remaining CUs in the queue may be insufficient. 
As a result, the job cannot be submitted.To view the remaining CUs of a queue, perform the following steps:Check the C", @@ -1805,8 +2075,8 @@ "title":"Why Is a Spark Jar Job Stuck in the Submitting State?", "uri":"dli_03_0220.html", "doc_type":"usermanual", - "p_code":"185", - "code":"201" + "p_code":"229", + "code":"231" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1814,8 +2084,17 @@ "title":"Product Consultation", "uri":"dli_03_0001.html", "doc_type":"usermanual", - "p_code":"105", - "code":"202" + "p_code":"122", + "code":"232" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Usage", + "uri":"dli_03_0221.html", + "doc_type":"usermanual", + "p_code":"232", + "code":"233" }, { "desc":"Data Lake Insight (DLI) is a serverless data processing and analysis service fully compatible with Apache Spark and Apache Flink ecosystems. It frees you from managing an", @@ -1823,8 +2102,8 @@ "title":"What Is DLI?", "uri":"dli_03_0002.html", "doc_type":"usermanual", - "p_code":"202", - "code":"203" + "p_code":"233", + "code":"234" }, { "desc":"DLI supports the following data formats:ParquetCSVORCJsonAvro", @@ -1832,8 +2111,8 @@ "title":"Which Data Formats Does DLI Support?", "uri":"dli_03_0025.html", "doc_type":"usermanual", - "p_code":"202", - "code":"204" + "p_code":"233", + "code":"235" }, { "desc":"The Spark component of DLI is a fully managed service. You can only use the DLI Spark through its APIs. .The Spark component of MRS is built on the VM in an MRS cluster. 
", @@ -1841,8 +2120,8 @@ "title":"What Are the Differences Between MRS Spark and DLI Spark?", "uri":"dli_03_0115.html", "doc_type":"usermanual", - "p_code":"202", - "code":"205" + "p_code":"233", + "code":"236" }, { "desc":"DLI data can be stored in either of the following:OBS: Data used by SQL jobs, Spark jobs, and Flink jobs can be stored in OBS, reducing storage costs.DLI: The column-base", @@ -1850,8 +2129,8 @@ "title":"Where Can DLI Data Be Stored?", "uri":"dli_03_0029.html", "doc_type":"usermanual", - "p_code":"202", - "code":"206" + "p_code":"233", + "code":"237" }, { "desc":"DLI tables store data within the DLI service, and you do not need to know the data storage path.OBS tables store data in your OBS buckets, and you need to manage the sour", @@ -1859,8 +2138,8 @@ "title":"What Are the Differences Between DLI Tables and OBS Tables?", "uri":"dli_03_0117.html", "doc_type":"usermanual", - "p_code":"202", - "code":"207" + "p_code":"233", + "code":"238" }, { "desc":"Currently, DLI supports analysis only on the data uploaded to the cloud. In scenarios where regular (for example, on a per day basis) one-off analysis on incremental data", @@ -1868,8 +2147,8 @@ "title":"How Can I Use DLI If Data Is Not Uploaded to OBS?", "uri":"dli_03_0010.html", "doc_type":"usermanual", - "p_code":"202", - "code":"208" + "p_code":"233", + "code":"239" }, { "desc":"Data in the OBS bucket shared by IAM users under the same account can be imported. 
You cannot import data in the OBS bucket shared with other IAM account.", @@ -1877,8 +2156,8 @@ "title":"Can I Import OBS Bucket Data Shared by Other Tenants into DLI?", "uri":"dli_03_0129.html", "doc_type":"usermanual", - "p_code":"202", - "code":"209" + "p_code":"233", + "code":"240" }, { "desc":"Log in to the management console.Click in the upper left corner and select a region and a project.Click the My Quota icon in the upper right corner of the page.The Serv", @@ -1886,8 +2165,8 @@ "title":"Why Is Error \"Failed to create the database. {\"error_code\":\"DLI.1028\";\"error_msg\":\"Already reached the maximum quota of databases:XXX\".\" Reported?", "uri":"dli_03_0264.html", "doc_type":"usermanual", - "p_code":"202", - "code":"210" + "p_code":"233", + "code":"241" }, { "desc":"No, a global variable can only be used by the user who created it. Global variables can be used to simplify complex parameters. For example, long and difficult variables ", @@ -1895,8 +2174,17 @@ "title":"Can a Member Account Use Global Variables Created by Other Member Accounts?", "uri":"dli_03_0263.html", "doc_type":"usermanual", - "p_code":"202", - "code":"211" + "p_code":"233", + "code":"242" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Job Management", + "uri":"dli_03_0222.html", + "doc_type":"usermanual", + "p_code":"232", + "code":"243" }, { "desc":"If you are suggested to perform following operations to run a large number of DLI jobs:Group the DLI jobs by type, and run each group on a queue.Alternatively, create IAM", @@ -1904,8 +2192,8 @@ "title":"How Do I Manage Tens of Thousands of Jobs Running on DLI?", "uri":"dli_03_0126.html", "doc_type":"usermanual", - "p_code":"202", - "code":"212" + "p_code":"243", + "code":"244" }, { "desc":"The field names of tables that have been created cannot be changed.You can create a table, define new table fields, and migrate data from the old table to the new one.", @@ -1913,8 +2201,17 @@ "title":"How Do I Change the Name of a Field in a Created Table?", "uri":"dli_03_0162.html", "doc_type":"usermanual", - "p_code":"202", - "code":"213" + "p_code":"243", + "code":"245" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Privacy and Security", + "uri":"dli_03_0261.html", + "doc_type":"usermanual", + "p_code":"232", + "code":"246" }, { "desc":"No. The spark.acls.enable configuration item is not used in DLI. 
The Apache Spark command injection vulnerability (CVE-2022-33891) does not exist in DLI.", @@ -1922,8 +2219,8 @@ "title":"Does DLI Have the Apache Spark Command Injection Vulnerability (CVE-2022-33891)?", "uri":"dli_03_0260.html", "doc_type":"usermanual", - "p_code":"202", - "code":"214" + "p_code":"246", + "code":"247" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1931,8 +2228,8 @@ "title":"Quota", "uri":"dli_03_0053.html", "doc_type":"usermanual", - "p_code":"105", - "code":"215" + "p_code":"122", + "code":"248" }, { "desc":"Log in to the management console.Click in the upper left corner and select Region and Project.Click (the My Quotas icon) in the upper right corner.The Service Quota pag", @@ -1940,8 +2237,8 @@ "title":"How Do I View My Quotas?", "uri":"dli_03_0031.html", "doc_type":"usermanual", - "p_code":"215", - "code":"216" + "p_code":"248", + "code":"249" }, { "desc":"The system does not support online quota adjustment. To increase a resource quota, dial the hotline or send an email to the customer service. We will process your applica", @@ -1949,8 +2246,8 @@ "title":"How Do I Increase a Quota?", "uri":"dli_03_0032.html", "doc_type":"usermanual", - "p_code":"215", - "code":"217" + "p_code":"248", + "code":"250" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1958,8 +2255,17 @@ "title":"Permission", "uri":"dli_03_0054.html", "doc_type":"usermanual", - "p_code":"105", - "code":"218" + "p_code":"122", + "code":"251" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Usage", + "uri":"dli_03_0223.html", + "doc_type":"usermanual", + "p_code":"251", + "code":"252" }, { "desc":"DLI has a comprehensive permission control mechanism and supports fine-grained authentication through Identity and Access Management (IAM). You can create policies in IAM", @@ -1967,8 +2273,8 @@ "title":"How Do I Manage Fine-Grained DLI Permissions?", "uri":"dli_03_0100.html", "doc_type":"usermanual", - "p_code":"218", - "code":"219" + "p_code":"252", + "code":"253" }, { "desc":"You cannot perform permission-related operations on the partition column of a partitioned table.However, when you grant the permission of any non-partition column in a pa", @@ -1976,8 +2282,17 @@ "title":"What Is Column Permission Granting of a DLI Partition Table?", "uri":"dli_03_0008.html", "doc_type":"usermanual", - "p_code":"218", - "code":"220" + "p_code":"252", + "code":"254" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"O&M Guide", + "uri":"dli_03_0226.html", + "doc_type":"usermanual", + "p_code":"251", + "code":"255" }, { "desc":"When you submit a job, a message is displayed indicating that the job fails to be submitted due to insufficient permission caused by arrears. In this case, you need to ch", @@ -1985,8 +2300,8 @@ "title":"Why Does My Account Have Insufficient Permissions Due to Arrears?", "uri":"dli_03_0140.html", "doc_type":"usermanual", - "p_code":"218", - "code":"221" + "p_code":"255", + "code":"256" }, { "desc":"When the user update an existing program package, the following error information is displayed:\"error_code\"*DLI.0003\",\"error_msg\":\"Permission denied for resource 'resourc", @@ -1994,8 +2309,8 @@ "title":"Why Does the System Display a Message Indicating Insufficient Permissions When I Update a Program Package?", "uri":"dli_03_0195.html", "doc_type":"usermanual", - "p_code":"218", - "code":"222" + "p_code":"255", + "code":"257" }, { "desc":"When the SQL query statement is executed, the system displays a message indicating that the user does not have the permission to query resources.Error information: DLI.00", @@ -2003,8 +2318,8 @@ "title":"Why Is Error \"DLI.0003: Permission denied for resource...\" Reported When I Run a SQL Statement?", "uri":"dli_03_0227.html", "doc_type":"usermanual", - "p_code":"218", - "code":"223" + "p_code":"255", + "code":"258" }, { "desc":"The table permission has been granted and verified. 
However, after a period of time, an error is reported indicating that the table query fails.There are two possible rea", @@ -2012,8 +2327,8 @@ "title":"Why Can't I Query Table Data After I've Been Granted Table Permissions?", "uri":"dli_03_0228.html", "doc_type":"usermanual", - "p_code":"218", - "code":"224" + "p_code":"255", + "code":"259" }, { "desc":"If a table inherits database permissions, you do not need to regrant the inherited permissions to the table.When you grant permissions on a table on the console:If you se", @@ -2021,8 +2336,8 @@ "title":"Will an Error Be Reported if the Inherited Permissions Are Regranted to a Table That Inherits Database Permissions?", "uri":"dli_03_0057.html", "doc_type":"usermanual", - "p_code":"218", - "code":"225" + "p_code":"255", + "code":"260" }, { "desc":"User A created Table1.User B created View1 based on Table1.After the Select Table permission on Table1 is granted to user C, user C fails to query View1.User B does not h", @@ -2030,8 +2345,8 @@ "title":"Why Can't I Query a View After I'm Granted the Select Table Permission on the View?", "uri":"dli_03_0067.html", "doc_type":"usermanual", - "p_code":"218", - "code":"226" + "p_code":"255", + "code":"261" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2039,8 +2354,17 @@ "title":"Queue", "uri":"dli_03_0049.html", "doc_type":"usermanual", - "p_code":"105", - "code":"227" + "p_code":"122", + "code":"262" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Usage", + "uri":"dli_03_0229.html", + "doc_type":"usermanual", + "p_code":"262", + "code":"263" }, { "desc":"Currently, you are not allowed to modify the description of a created queue. You can add the description when purchasing the queue.", @@ -2048,8 +2372,8 @@ "title":"Does the Description of a DLI Queue Can Be Modified?", "uri":"dli_03_0109.html", "doc_type":"usermanual", - "p_code":"227", - "code":"228" + "p_code":"263", + "code":"264" }, { "desc":"Deleting a queue does not cause table data loss in your database.", @@ -2057,8 +2381,8 @@ "title":"Will Table Data in My Database Be Lost If I Delete a Queue?", "uri":"dli_03_0166.html", "doc_type":"usermanual", - "p_code":"227", - "code":"229" + "p_code":"263", + "code":"265" }, { "desc":"You need to develop a mechanism to retry failed jobs. When a faulty queue is recovered, your application tries to submit the failed jobs to the queue again.", @@ -2066,8 +2390,8 @@ "title":"How Does DLI Ensure the Reliability of Spark Jobs When a Queue Is Abnormal?", "uri":"dli_03_0170.html", "doc_type":"usermanual", - "p_code":"227", - "code":"230" + "p_code":"263", + "code":"266" }, { "desc":"DLI allows you to subscribe to an SMN topic for failed jobs.Log in to the DLI console.In the navigation pane on the left, choose Queue Management.On the Queue Management ", @@ -2075,8 +2399,17 @@ "title":"How Do I Monitor Queue Exceptions?", "uri":"dli_03_0098.html", "doc_type":"usermanual", - "p_code":"227", - "code":"231" + "p_code":"263", + "code":"267" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"O&M Guide", + "uri":"dli_03_0230.html", + "doc_type":"usermanual", + "p_code":"262", + "code":"268" }, { "desc":"To check the running status of the DLI queue and determine whether to run more jobs on that queue, you need to check the queue load.Search for Cloud Eye on the console.In", @@ -2084,8 +2417,8 @@ "title":"How Do I View DLI Queue Load?", "uri":"dli_03_0095.html", "doc_type":"usermanual", - "p_code":"227", - "code":"232" + "p_code":"268", + "code":"269" }, { "desc":"You need to check the large number of jobs in the Submitting and Running states on the queue.Use Cloud Eye to view jobs in different states on the queue. The procedure is", @@ -2093,8 +2426,8 @@ "title":"How Do I Determine Whether There Are Too Many Jobs in the Current Queue?", "uri":"dli_03_0183.html", "doc_type":"usermanual", - "p_code":"227", - "code":"233" + "p_code":"268", + "code":"270" }, { "desc":"Currently, DLI provides two types of queues, For SQL and For general use. SQL queues are used to run SQL jobs. General-use queues are compatible with Spark queues of earl", @@ -2102,8 +2435,8 @@ "title":"How Do I Switch an Earlier-Version Spark Queue to a General-Purpose Queue?", "uri":"dli_03_0065.html", "doc_type":"usermanual", - "p_code":"227", - "code":"234" + "p_code":"268", + "code":"271" }, { "desc":"DLI queues do not use resources or bandwidth when no job is running. 
In this case, the running status of DLI queues is not displayed on CES.", @@ -2111,8 +2444,8 @@ "title":"Why Cannot I View the Resource Running Status of DLI Queues on Cloud Eye?", "uri":"dli_03_0193.html", "doc_type":"usermanual", - "p_code":"227", - "code":"235" + "p_code":"268", + "code":"272" }, { "desc":"In DLI, 64 CU = 64 cores and 256 GB memory.In a Spark job, if the driver occupies 4 cores and 16 GB memory, the executor can occupy 60 cores and 240 GB memory.", @@ -2120,8 +2453,8 @@ "title":"How Do I Allocate Queue Resources for Running Spark Jobs If I Have Purchased 64 CUs?", "uri":"dli_03_0088.html", "doc_type":"usermanual", - "p_code":"227", - "code":"236" + "p_code":"268", + "code":"273" }, { "desc":"Queue plans create failed. The plan xxx target cu is out of quota is displayed when you create a scheduled scaling task.The CU quota of the current account is insufficien", @@ -2129,8 +2462,8 @@ "title":"Why Is Error \"Queue plans create failed. The plan xxx target cu is out of quota\" Reported When I Schedule CU Changes?", "uri":"dli_03_0159.html", "doc_type":"usermanual", - "p_code":"227", - "code":"237" + "p_code":"268", + "code":"274" }, { "desc":"After a SQL job was submitted to the default queue, the job runs abnormally. The job log reported that the execution timed out. 
The exception logs are as follows:[ERROR] ", @@ -2138,8 +2471,17 @@ "title":"Why Is a Timeout Exception Reported When a DLI SQL Statement Fails to Be Executed on the Default Queue?", "uri":"dli_03_0171.html", "doc_type":"usermanual", - "p_code":"227", - "code":"238" + "p_code":"268", + "code":"275" + }, + { + "desc":"In daily big data analysis work, it is important to allocate and manage compute resources properly to provide a good job execution environment.You can allocate resources ", + "product_code":"dli", + "title":"How Can I Check the Actual and Used CUs for an Elastic Resource Pool as Well as the Required CUs for a Job?", + "uri":"dli_03_0276.html", + "doc_type":"usermanual", + "p_code":"268", + "code":"276" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2147,8 +2489,17 @@ "title":"Datasource Connections", "uri":"dli_03_0022.html", "doc_type":"usermanual", - "p_code":"105", - "code":"239" + "p_code":"122", + "code":"277" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Datasource Connections", + "uri":"dli_03_0110.html", + "doc_type":"usermanual", + "p_code":"277", + "code":"278" }, { "desc":"You need to create a VPC peering connection to enable network connectivity. Take MRS as an example. 
If DLI and MRS clusters are in the same VPC, and the security group is", @@ -2156,8 +2507,8 @@ "title":"Why Do I Need to Create a VPC Peering Connection for an Enhanced Datasource Connection?", "uri":"dli_03_0128.html", "doc_type":"usermanual", - "p_code":"239", - "code":"240" + "p_code":"278", + "code":"279" }, { "desc":"An enhanced datasource connection failed to pass the network connectivity test. Datasource connection cannot be bound to a queue. The following error information is displ", @@ -2165,8 +2516,8 @@ "title":"Failed to Bind a Queue to an Enhanced Datasource Connection", "uri":"dli_03_0237.html", "doc_type":"usermanual", - "p_code":"239", - "code":"241" + "p_code":"278", + "code":"280" }, { "desc":"The outbound rule had been configured for the security group of the queue associated with the enhanced datasource connection. The datasource authentication used a passwor", @@ -2174,8 +2525,8 @@ "title":"DLI Failed to Connect to GaussDB(DWS) Through an Enhanced Datasource Connection", "uri":"dli_03_0238.html", "doc_type":"usermanual", - "p_code":"239", - "code":"242" + "p_code":"278", + "code":"281" }, { "desc":"A datasource connection is created and bound to a queue. 
The connectivity test fails and the following error information is displayed:failed to connect to specified addre", @@ -2183,8 +2534,8 @@ "title":"How Do I Do if the Datasource Connection Is Created But the Network Connectivity Test Fails?", "uri":"dli_03_0179.html", "doc_type":"usermanual", - "p_code":"239", - "code":"243" + "p_code":"278", + "code":"282" }, { "desc":"Configuring the Connection Between a DLI Queue and a Data Source in a Private NetworkIf your DLI job needs to connect to a data source, for example, MRS, RDS, CSS, Kafka,", @@ -2192,8 +2543,8 @@ "title":"How Do I Configure the Network Between a DLI Queue and a Data Source?", "uri":"dli_03_0186.html", "doc_type":"usermanual", - "p_code":"239", - "code":"244" + "p_code":"278", + "code":"283" }, { "desc":"The possible causes and solutions are as follows:If you have created a queue, do not bind it to a datasource connection immediately. Wait for 5 to 10 minutes. After the c", @@ -2201,8 +2552,8 @@ "title":"What Can I Do If a Datasource Connection Is Stuck in Creating State When I Try to Bind a Queue to It?", "uri":"dli_03_0257.html", "doc_type":"usermanual", - "p_code":"239", - "code":"245" + "p_code":"278", + "code":"284" }, { "desc":"DLI enhanced datasource connection uses VPC peering to directly connect the VPC networks of the desired data sources for point-to-point data exchanges.", @@ -2210,8 +2561,17 @@ "title":"How Do I Connect DLI to Data Sources?", "uri":"dli_03_0259.html", "doc_type":"usermanual", - "p_code":"239", - "code":"246" + "p_code":"278", + "code":"285" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Cross-Source Analysis", + "uri":"dli_03_0112.html", + "doc_type":"usermanual", + "p_code":"277", + "code":"286" }, { "desc":"To perform query on data stored on services rather than DLI, perform the following steps:Assume that the data to be queried is stored on multiple services (for example, O", @@ -2219,8 +2579,8 @@ "title":"How Can I Perform Query on Data Stored on Services Rather Than DLI?", "uri":"dli_03_0011.html", "doc_type":"usermanual", - "p_code":"239", - "code":"247" + "p_code":"286", + "code":"287" }, { "desc":"Connect VPCs in different regions.Create an enhanced datasource connection on DLI and bind it to a queue.Add a DLI route.", @@ -2228,8 +2588,8 @@ "title":"How Can I Access Data Across Regions?", "uri":"dli_03_0085.html", "doc_type":"usermanual", - "p_code":"239", - "code":"248" + "p_code":"286", + "code":"288" }, { "desc":"When data is inserted into DLI, set the ID field to NULL.", @@ -2237,8 +2597,17 @@ "title":"How Do I Set the Auto-increment Primary Key or Other Fields That Are Automatically Filled in the RDS Table When Creating a DLI and Associating It with the RDS Table?", "uri":"dli_03_0028.html", "doc_type":"usermanual", - "p_code":"239", - "code":"249" + "p_code":"286", + "code":"289" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Datasource Connection O&M", + "uri":"dli_03_0256.html", + "doc_type":"usermanual", + "p_code":"277", + "code":"290" }, { "desc":"Possible CausesThe network connectivity is abnormal. 
Check whether the security group is correctly selected and whether the VPC is correctly configured.The network connec", @@ -2246,8 +2615,8 @@ "title":"Why Is the Error Message \"communication link failure\" Displayed When I Use a Newly Activated Datasource Connection?", "uri":"dli_03_0047.html", "doc_type":"usermanual", - "p_code":"239", - "code":"250" + "p_code":"290", + "code":"291" }, { "desc":"The cluster host information is not added to the datasource connection. As a result, the KRB authentication fails, the connection times out, and no error is recorded in l", @@ -2255,8 +2624,8 @@ "title":"Connection Times Out During MRS HBase Datasource Connection, and No Error Is Recorded in Logs", "uri":"dli_03_0080.html", "doc_type":"usermanual", - "p_code":"239", - "code":"251" + "p_code":"290", + "code":"292" }, { "desc":"When you create a VPC peering connection for the datasource connection, the following error information is displayed:Before you create a datasource connection, check whet", @@ -2264,8 +2633,8 @@ "title":"Why Can't I Find the Subnet When Creating a DLI Datasource Connection?", "uri":"dli_03_0111.html", "doc_type":"usermanual", - "p_code":"239", - "code":"252" + "p_code":"290", + "code":"293" }, { "desc":"A datasource RDS table was created in the DataArts Studio, and the insert overwrite statement was executed to write data into RDS. 
DLI.0999: BatchUpdateException: Incorre", @@ -2273,8 +2642,8 @@ "title":"Error Message \"Incorrect string value\" Is Displayed When insert overwrite Is Executed on a Datasource RDS Table", "uri":"dli_03_0239.html", "doc_type":"usermanual", - "p_code":"239", - "code":"253" + "p_code":"290", + "code":"294" }, { "desc":"The system failed to create a datasource RDS table, and null pointer error was reported.The following table creation statement was used:The RDS database is in a PostGre c", @@ -2282,8 +2651,8 @@ "title":"Null Pointer Error Is Displayed When the System Creates a Datasource RDS Table", "uri":"dli_03_0250.html", "doc_type":"usermanual", - "p_code":"239", - "code":"254" + "p_code":"290", + "code":"295" }, { "desc":"The system failed to execute insert overwrite on the datasource GaussDB(DWS) table, and org.postgresql.util.PSQLException: ERROR: tuple concurrently updated was displayed", @@ -2291,8 +2660,8 @@ "title":"Error Message \"org.postgresql.util.PSQLException: ERROR: tuple concurrently updated\" Is Displayed When the System Executes insert overwrite on a Datasource GaussDB(DWS) Table", "uri":"dli_03_0251.html", "doc_type":"usermanual", - "p_code":"239", - "code":"255" + "p_code":"290", + "code":"296" }, { "desc":"A datasource table was used to import data to a CloudTable HBase table. This HBase table contains a column family and a rowkey for 100 million simulating data records. Th", @@ -2300,8 +2669,8 @@ "title":"RegionTooBusyException Is Reported When Data Is Imported to a CloudTable HBase Table Through a Datasource Table", "uri":"dli_03_0252.html", "doc_type":"usermanual", - "p_code":"239", - "code":"256" + "p_code":"290", + "code":"297" }, { "desc":"A table was created on GaussDB(DWS) and then a datasource connection was created on DLI to read and write data. 
An error message was displayed during data writing, indica", @@ -2309,8 +2678,8 @@ "title":"A Null Value Is Written Into a Non-Null Field When a DLI Datasource Connection Is Used to Connect to a GaussDB(DWS) Table", "uri":"dli_03_0253.html", "doc_type":"usermanual", - "p_code":"239", - "code":"257" + "p_code":"290", + "code":"298" }, { "desc":"A datasource GaussDB(DWS) table and the datasource connection were created in DLI, and the schema of the source table in GaussDB(DWS) were updated. During the job executi", @@ -2318,8 +2687,8 @@ "title":"An Insert Operation Failed After the Schema of the GaussDB(DWS) Source Table Is Updated", "uri":"dli_03_0254.html", "doc_type":"usermanual", - "p_code":"239", - "code":"258" + "p_code":"290", + "code":"299" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2327,8 +2696,8 @@ "title":"APIs", "uri":"dli_03_0056.html", "doc_type":"usermanual", - "p_code":"105", - "code":"259" + "p_code":"122", + "code":"300" }, { "desc":"In the REST API provided by DLI, the request header can be added to the request URI, for example, Content-Type.Content-Type indicates the request body type or format. 
The", @@ -2336,8 +2705,8 @@ "title":"Why Is Error \"unsupported media Type\" Reported When I Subimt a SQL Job?", "uri":"dli_03_0060.html", "doc_type":"usermanual", - "p_code":"259", - "code":"260" + "p_code":"300", + "code":"301" }, { "desc":"When different IAM users call an API under the same enterprise project in the same region, the project ID is the same.", @@ -2345,8 +2714,8 @@ "title":"Is the Project ID Fixed when Different IAM Users Call an API?", "uri":"dli_03_0125.html", "doc_type":"usermanual", - "p_code":"259", - "code":"261" + "p_code":"300", + "code":"302" }, { "desc":"When the API call for submitting a SQL job times out, and the following error information is displayed:There are currently no resources tracked in the state, so there is ", @@ -2354,8 +2723,8 @@ "title":"What Can I Do If an Error Is Reported When the Execution of the API for Creating a SQL Job Times Out?", "uri":"dli_03_0178.html", "doc_type":"usermanual", - "p_code":"259", - "code":"262" + "p_code":"300", + "code":"303" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2363,8 +2732,8 @@ "title":"SDKs", "uri":"dli_03_0058.html", "doc_type":"usermanual", - "p_code":"105", - "code":"263" + "p_code":"122", + "code":"304" }, { "desc":"When you query the SQL job results using SDK, the system checks the job status when the job is submitted. The timeout interval set in the system is 300s. 
If the job is no", @@ -2372,8 +2741,8 @@ "title":"How Do I Set the Timeout Duration for Querying SQL Job Results Using SDK?", "uri":"dli_03_0073.html", "doc_type":"usermanual", - "p_code":"263", - "code":"264" + "p_code":"304", + "code":"305" }, { "desc":"Run the ping command to check whether dli.xxx can be accessed.If dli.xxx can be accessed, check whether DNS resolution is correctly configured.If dli.xxx can be accessed,", @@ -2381,8 +2750,8 @@ "title":"How Do I Handle the dli.xxx,unable to resolve host address Error?", "uri":"dli_03_0255.html", "doc_type":"usermanual", - "p_code":"263", - "code":"265" + "p_code":"304", + "code":"306" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2391,6 +2760,6 @@ "uri":"dli_01_00006.html", "doc_type":"usermanual", "p_code":"", - "code":"266" + "code":"307" } ] \ No newline at end of file diff --git a/docs/dli/umn/dli_01_00006.html b/docs/dli/umn/dli_01_00006.html index e5b29e84..2406d648 100644 --- a/docs/dli/umn/dli_01_00006.html +++ b/docs/dli/umn/dli_01_00006.html @@ -8,7 +8,13 @@ -

2024-02-27

+

2024-04-28

+ +

Added the following section:

+ + + +

2024-02-27

Added the following content:

diff --git a/docs/dli/umn/dli_01_0002.html b/docs/dli/umn/dli_01_0002.html index e7aa6f36..57841538 100644 --- a/docs/dli/umn/dli_01_0002.html +++ b/docs/dli/umn/dli_01_0002.html @@ -22,8 +22,8 @@ -

Step 2: Create a Queue

A queue is the basis for using DLI. Before executing an SQL job, you need to create a queue.

-
  • An available queue default is preset in DLI.
  • You can also create queues as needed.
    1. Log in to the DLI management console.
    2. In the left navigation pane of the DLI management console, choose SQL Editor.
    3. On the left pane, select the Queues tab, and click next to Queues.

      For details, see Creating a Queue.

      +

      Step 2: Create a Queue

      A queue is the basis for using DLI. Before executing a SQL job, you need to create a queue.

      +
      • DLI provides a preconfigured queue named default.
      • You can also create queues as needed.
        1. Log in to the DLI management console.
        2. In the left navigation pane of the DLI management console, choose SQL Editor.
        3. On the left pane, select the Queues tab, and click next to Queues.

          For details, see Creating a Queue.

      @@ -32,7 +32,7 @@

      The default database is a built-in database. You cannot create the default. database.

      1. In the left navigation pane of the DLI management console, choose SQL Editor.
      2. In the editing window on the right of the SQL Editor page, enter the following SQL statement and click Execute. Read and agree to the privacy agreement, and click OK.
        create database db1;
        -

        After the database is successfully created, click in the middle pane to refresh the database list. The new database db1 is displayed in the list.

        +

        After the database is successfully created, click in the middle pane to refresh the database list. The new database db1 is displayed in the list.

        When you execute a query on the DLI management console for the first time, you need to read the privacy agreement. You can perform operations only after you agree to the agreement. For later queries, you will not need to read the privacy agreement again.

      diff --git a/docs/dli/umn/dli_01_0005.html b/docs/dli/umn/dli_01_0005.html index f325c710..b077a7b4 100644 --- a/docs/dli/umn/dli_01_0005.html +++ b/docs/dli/umn/dli_01_0005.html @@ -11,7 +11,7 @@

      Precautions

      • If a folder and a file have the same name in the OBS directory, the file path is preferred as the path of the OBS table to be created.

      Creating a Database

      1. You can create a database on either the Data Management page or the SQL Editor page.

        • To create a database on the Data Management page:
          1. On the left of the management console, choose Data Management > Databases and Tables.
          2. In the upper right corner of the Databases and Tables page, click Create Database to create a database.
          -
        • To create a database on the SQL Editor page:
          1. On the left of the management console, click SQL Editor.
          2. In the navigation pane on the left, click beside Databases.
          +
        • To create a database on the SQL Editor page:
          1. On the left of the management console, click SQL Editor.
          2. In the navigation pane on the left, click next to Databases.

      2. In the displayed Create Database dialog box, specify Name and Description by referring to Table 1.

        @@ -144,7 +144,7 @@ @@ -177,7 +177,7 @@ - @@ -213,7 +213,7 @@
        Table 1 Description

        Parameter

        @@ -40,7 +40,7 @@
        • Tag key: Enter a tag key name in the text box.
          NOTE:

          A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

        -
        • Tag value: Enter a tag value in the text box.
          NOTE:

          A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

          +
          • Tag value: Enter a tag value in the text box.
            NOTE:

            A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

          @@ -52,10 +52,10 @@

        • Creating a Table

          Before creating a table, ensure that a database has been created.

          -
          1. You can create a table on either the Databases and Tables page or the SQL Editor page.

            Datasource connection tables, such as View tables, HBase (MRS) tables, OpenTSDB (MRS) tables, DWS tables, RDS tables, and CSS tables, cannot be created. You can use SQL to create views and datasource connection tables. For details, see sections Creating a View and Creating a Datasource Connection Table in the Data Lake Insight SQL Syntax Reference.

            +
            1. You can create a table on either the Databases and Tables page or the SQL Editor page.

              Datasource connection tables, such as View tables, HBase (MRS) tables, OpenTSDB (MRS) tables, GaussDB(DWS) tables, RDS tables, and CSS tables, cannot be created. You can use SQL to create views and datasource connection tables. For details, see sections Creating a View and Creating a Datasource Connection Table in the Data Lake Insight SQL Syntax Reference.

              -
              • To create a table on the Data Management page:
                1. On the left of the management console, choose Data Management > Databases and Tables.
                2. On the Databases and Tables page, select the database for which you want to create a table. In the Operation column, click More > Create Table to create a table in the current database.
                -
              • To create a table on the SQL Editor page:
                1. On the left of the management console, click SQL Editor.
                2. In the navigation pane of the displayed SQL Editor page, click Databases. You can create a table in either of the following ways:
                  • Click a database name. In the Tables area, click on the right to create a table in the current database.
                  • Click on the right of the database and choose Create Table from the shortcut menu to create a table in the current database.
                  +
                  • To create a table on the Data Management page:
                    1. On the left of the management console, choose Data Management > Databases and Tables.
                    2. On the Databases and Tables page, select the database for which you want to create a table. In the Operation column, click More > Create Table to create a table in the current database.
                    +
                  • To create a table on the SQL Editor page:
                    1. On the left of the management console, click SQL Editor.
                    2. In the navigation pane of the displayed SQL Editor page, click Databases. You can create a table in either of the following ways:
                      • Click a database name. In the Tables area, click on the right to create a table in the current database.
                      • Click on the right of the database and choose Create Table from the shortcut menu to create a table in the current database.

                3. In the displayed Create Table dialog box, set parameters as required.

                  • If you set Data Location to DLI, set related parameters by referring to Table 2.
                  • If you set Data Location to OBS, set related parameters by referring to Table 2 and Table 3. @@ -108,7 +108,7 @@

        Type

        Data type of a column. This parameter corresponds to Column Name.

        -
        • string: The data is of the string type.
        • int: Each integer is stored on four bytes.
        • date: The value ranges from 0000-01-01 to 9999-12-31.
        • double: Each number is stored on eight bytes.
        • boolean: Each value is stored on one byte.
        • decimal: The valid bits are positive integers between 1 to 38, including 1 and 38. The decimal digits are integers less than 10.
        • smallint/short: The number is stored on two bytes.
        • bigint/long: The number is stored on eight bytes.
        • timestamp: The data indicates a date and time. The value can be accurate to six decimal points.
        • float: Each number is stored on four bytes.
        • tinyint: Each number is stored on one byte. Only OBS tables support this data type.
        +
        • string: The data is of the string type.
        • int: Each integer is stored on four bytes.
        • date: The value ranges from 0000-01-01 to 9999-12-31.
        • double: Each number is stored on eight bytes.
        • boolean: Each value is stored on one byte.
        • decimal: The valid bits are positive integers between 1 and 38, including 1 and 38. The decimal digits are integers less than 10.
        • smallint/short: The number is stored on two bytes.
        • bigint/long: The number is stored on eight bytes.
        • timestamp: The data indicates a date and time. The value can be accurate to six decimal points.
        • float: Each number is stored on four bytes.
        • tinyint: Each number is stored on one byte. Only OBS tables support this data type.

        string

        Data Format

        DLI supports the following data formats:

        -
        • Parquet: DLI can read non-compressed data or data that is compressed using Snappy and gzip.
        • CSV: DLI can read non-compressed data or data that is compressed using gzip.
        • ORC: DLI can read non-compressed data or data that is compressed using Snappy.
        • JSON: DLI can read non-compressed data or data that is compressed using gzip.
        • Avro: DLI can read uncompressed Avro data.
        +
        • Parquet: DLI can read non-compressed data or data that is compressed using Snappy and gzip.
        • CSV: DLI can read non-compressed data or data that is compressed using gzip.
        • ORC: DLI can read non-compressed data or data that is compressed using Snappy.
        • JSON: DLI can read non-compressed data or data that is compressed using gzip.
        • Avro: DLI can read uncompressed Avro data.

        CSV

        User-defined Quotation Character

        This parameter is valid only when Data Format is set to CSV and you select User-defined Quotation Character.

        +

        This parameter is valid only when Data Format is set to CSV and you select User-defined Quotation Character.

        The following quotation characters are supported:

        • Single quotation mark (')
        • Double quotation marks (")
        • Others: Enter a user-defined quotation character.
-

  • Click OK.

    After a table is created, you can view and select the table for use on the Data Management page or SQL Editor page.

    +

  • Click OK.

    After a table is created, you can view and select the table for use on the Data Management page or SQL Editor page.

  • (Optional) After a DLI table is created, you can decide whether to directly import data to the table.
  • diff --git a/docs/dli/umn/dli_01_0006.html b/docs/dli/umn/dli_01_0006.html index 3dc50590..b1e3aecf 100644 --- a/docs/dli/umn/dli_01_0006.html +++ b/docs/dli/umn/dli_01_0006.html @@ -65,7 +65,7 @@
    • Tag key: Enter a tag key name in the text box.
      NOTE:

      A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

    -
    • Tag value: Enter a tag value in the text box.
      NOTE:

      A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

      +
      • Tag value: Enter a tag value in the text box.
        NOTE:

        A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

      @@ -112,7 +112,7 @@

      Type of IP addresses.

      -

      IPV4

      +

      IPv4

      Source

      diff --git a/docs/dli/umn/dli_01_0007.html b/docs/dli/umn/dli_01_0007.html index 414e1795..263204d8 100644 --- a/docs/dli/umn/dli_01_0007.html +++ b/docs/dli/umn/dli_01_0007.html @@ -3,7 +3,7 @@

      Previewing Data

      The Preview page displays the first 10 records in the table.

      Procedure

      You can preview data on either the Data Management page or the SQL Editor page.
      • To preview data on the Data Management page:
        1. On the left of the management console, choose Data Management > Databases and Tables.
        2. On the displayed Data Management page, click the name of the database where the target table whose data you want to export resides to switch to the Manage Tables page.
        3. Click More in the Operation column of the target table and select View Properties.
        4. Click the Preview tab to preview the table data.
        -
      • To preview data on the SQL Editor page:
        1. On the left of the management console, click SQL Editor.
        2. In the navigation pane of the displayed SQL Editor page, click Databases.
        3. Click the corresponding database name to view the tables in the database.
        4. Click on the right of the corresponding table, choose View Properties from the list menu, and click the Preview tab to preview the data of the table.
        +
      • To preview data on the SQL Editor page:
        1. On the left of the management console, click SQL Editor.
        2. In the navigation pane of the displayed SQL Editor page, click Databases.
        3. Click the corresponding database name to view the tables in the database.
        4. Click on the right of the corresponding table, choose View Properties from the list menu, and click the Preview tab to preview the data of the table.
      diff --git a/docs/dli/umn/dli_01_0008.html b/docs/dli/umn/dli_01_0008.html index 846fa36c..21f5410a 100644 --- a/docs/dli/umn/dli_01_0008.html +++ b/docs/dli/umn/dli_01_0008.html @@ -4,7 +4,7 @@

      Metadata Description

      • Metadata is used to define data types. It describes information about the data, including the source, size, format, and other data features. In database fields, metadata interprets data content in the data warehouse.
      • When you create a table, metadata is defined, consisting of the column name, type, and description.
      • The Metadata page displays information about the target table, including Column Name, Column Type, Data Type, and Description.

      Procedure

      You can view metadata on either the Data Management page or the SQL Editor page.
      • To view metadata on the Data Management page:
        1. On the left of the management console, choose Data Management > Databases and Tables.
        2. On the displayed Data Management page, click the name of the database where the target table whose data you want to export resides to switch to the Manage Tables page.
        3. Click More in the Operation column of the target table and select View Properties. In the Metadata tab, view the metadata of the table.
        -
      • To view metadata on the SQL Editor page:
        1. On the left of the management console, click SQL Editor.
        2. In the navigation pane of the displayed SQL Editor page, click Databases.
        3. Click the corresponding database name to view the tables in the database.
        4. Click on the right of the table and choose View Properties from the shortcut menu. On the Metadata tab page, view the metadata of the table.
        +
      • To view metadata on the SQL Editor page:
        1. On the left of the management console, click SQL Editor.
        2. In the navigation pane of the displayed SQL Editor page, click Databases.
        3. Click the corresponding database name to view the tables in the database.
        4. Click on the right of the table and choose View Properties from the shortcut menu. On the Metadata tab page, view the metadata of the table.
      diff --git a/docs/dli/umn/dli_01_0010.html b/docs/dli/umn/dli_01_0010.html index 1cc248d2..9a3e6287 100644 --- a/docs/dli/umn/dli_01_0010.html +++ b/docs/dli/umn/dli_01_0010.html @@ -37,7 +37,7 @@

      Compression Format

      Compression format of the data to be exported. The following compression formats are supported:

      -
      • none
      • bzip2
      • deflate
      • gzip
      +
      • none
      • bzip2
      • deflate
      • gzip

      Storage Path

      diff --git a/docs/dli/umn/dli_01_0011.html b/docs/dli/umn/dli_01_0011.html index 82363e7a..87400259 100644 --- a/docs/dli/umn/dli_01_0011.html +++ b/docs/dli/umn/dli_01_0011.html @@ -7,7 +7,7 @@

    Deleting a Table

    You can delete a table on either the Data Management page or the SQL Editor page.
    • Delete the table on the Data Management page.
      1. On the left of the management console, choose Data Management > Databases and Tables.
      2. Locate the row containing the database whose tables you want to delete, and click the database name to switch to the Table Management page.
      3. Locate the row where the target table is located and click More > Delete in the Operation column.
      4. In the displayed dialog box, click Yes.
      -
    • Delete a table on the SQL Editor page.
      1. On the top menu bar of the DLI management console, click SQL Editor.
      2. In the navigation tree on the left, click Databases. Click the name of a database where the table you want belongs. The tables of the selected database are displayed.
      3. Click on the right of the table and choose Delete from the shortcut menu.
      4. In the dialog box that is displayed, click OK.
      +
    • Delete a table on the SQL Editor page.
      1. On the top menu bar of the DLI management console, click SQL Editor.
      2. In the navigation tree on the left, click Databases. Click the name of a database where the table you want belongs. The tables of the selected database are displayed.
      3. Click on the right of the table and choose Delete from the shortcut menu.
      4. In the dialog box that is displayed, click OK.
    diff --git a/docs/dli/umn/dli_01_0012.html b/docs/dli/umn/dli_01_0012.html index 31425337..b6ccd14f 100644 --- a/docs/dli/umn/dli_01_0012.html +++ b/docs/dli/umn/dli_01_0012.html @@ -12,6 +12,8 @@ + + diff --git a/docs/dli/umn/dli_01_0013.html b/docs/dli/umn/dli_01_0013.html index 236e45cd..62cd1b36 100644 --- a/docs/dli/umn/dli_01_0013.html +++ b/docs/dli/umn/dli_01_0013.html @@ -14,7 +14,7 @@
  • Click OK.
  • How Do I Obtain MRS Host Information?

    • Method 1: View MRS host information on the management console.

      To obtain the host name and IP address of an MRS cluster, for example, MRS 3.x, perform the following operations:

      -
      1. Log in to the MRS management console.
      2. On the Active Clusters page displayed, click your desired cluster to access its details page.
      3. Click the Components tab.
      4. Click ZooKeeper.
      5. Click the Instance tab to view corresponding service IP addresses. You can select any service IP address.
      6. Modify host information by referring to Modifying Host Information.
      +
      1. Log in to the MRS management console.
      2. On the Active Clusters page displayed, click your desired cluster to access its details page.
      3. Click the Components tab.
      4. Click ZooKeeper.
      5. Click the Instance tab to view the corresponding service IP addresses. You can select any service IP address.
      6. Modify host information by referring to Modifying Host Information.

      If the MRS cluster has multiple IP addresses, enter any service IP address when creating a datasource connection.

    • Method 2: Obtain MRS host information from the /etc/hosts file on an MRS node.
      1. Log in to any MRS node as user root.
      2. Run the following command to obtain MRS hosts information. Copy and save the information.

        cat /etc/hosts

        diff --git a/docs/dli/umn/dli_01_0014.html b/docs/dli/umn/dli_01_0014.html index b00e8f58..649c1fc1 100644 --- a/docs/dli/umn/dli_01_0014.html +++ b/docs/dli/umn/dli_01_0014.html @@ -24,6 +24,7 @@

        IP Address

        Custom route CIDR block. The CIDR blocks of different routes can overlap but cannot be identical.

        +

        Do not add the CIDR blocks 100.125.xx.xx and 100.64.xx.xx to prevent conflicts with the internal CIDR blocks of services such as SWR. This can lead to failure of the enhanced datasource connection.

        diff --git a/docs/dli/umn/dli_01_0017.html b/docs/dli/umn/dli_01_0017.html index 226644b8..fe1a9b05 100644 --- a/docs/dli/umn/dli_01_0017.html +++ b/docs/dli/umn/dli_01_0017.html @@ -1,7 +1,7 @@

        SQL Job Management

        -

        SQL jobs allow you to execute SQL statements entered in the SQL job editing window, import data, and export data.

        +

        SQL jobs allow you to execute SQL statements in the SQL job editing window, import data, and export data.

        SQL job management provides the following functions:

        SQL Jobs Page

        On the Overview page of the DLI console, click SQL Jobs to go to the SQL job management page. Alternatively, you can click Job Management > SQL Jobs. The job list displays all SQL jobs. If there are a large number of jobs, they will be displayed on multiple pages. You can switch to the specified page as needed. DLI allows you to view jobs in all statuses. By default, jobs in the job list are displayed in descending order of the job creation time.

        @@ -25,7 +25,7 @@

        Type

        Job type. The following types are supported:

        -
        • IMPORT: A job that imports data to DLI
        • EXPORT: A job that exports data from DLI
        • DCL: Conventional DCLs and operations related to queue permissions
        • DDL:Conventional DDLs, including creating and deleting databases and tables
        • QUERY: A job that querys data by running SQL statements
        • INSERT: A job that inserts data by running SQL statements
        • UPDATE: A job that updates data.
        • DELETE: A job that deletes a SQL job.
        • DATA_MIGRATION: A job that migrates data.
        • RESTART_QUEUE: A job that restarts a queue.
        • SCALE_QUEUE: A job that changes queue specifications, including sale-out and scale-in.
        +
        • IMPORT: A job that imports data to DLI
        • EXPORT: A job that exports data from DLI
        • DCL: Conventional DCLs and operations related to queue permissions
        • DDL: Conventional DDLs, including creating and deleting databases and tables
        • QUERY: A job that queries data by running SQL statements
        • INSERT: A job that inserts data by running SQL statements
        • UPDATE: A job that updates data.
        • DELETE: A job that deletes a SQL job.
        • DATA_MIGRATION: A job that migrates data.
        • RESTART_QUEUE: A job that restarts a queue.
        • SCALE_QUEUE: A job that changes queue specifications, including scale-out and scale-in.

        Status

        @@ -37,7 +37,7 @@

        Query

        SQL statements for operations such as exporting and creating tables

        -

        You can click to copy the query statement.

        +

        You can click to copy the query statement.

        Duration

        @@ -81,11 +81,11 @@

        Exporting Query Results

        A maximum of 1000 records can be displayed in the query result on the console. To view more or all data, you can export the data to OBS. The procedure is as follows:

        You can export results on the SQL Jobs page or the SQL Editor page.

        • On the Job Management > SQL Jobs page, you can click More > Export Result in the Operation column to export the query result.
        • After the query statements are successfully executed on the SQL Editor page, click next to the View Result tab page to export query results.
        -

        If no column of the numeric type is displayed in the query result, the result cannot be exported.

        +

        If there are no numerical columns in the query results, job results cannot be exported.

        -
        Table 2 Exporting parameters

        Parameter

        +
        @@ -98,13 +98,13 @@ - - diff --git a/docs/dli/umn/dli_01_0022.html b/docs/dli/umn/dli_01_0022.html index a13e448d..c4ab9648 100644 --- a/docs/dli/umn/dli_01_0022.html +++ b/docs/dli/umn/dli_01_0022.html @@ -29,7 +29,7 @@ diff --git a/docs/dli/umn/dli_01_0253.html b/docs/dli/umn/dli_01_0253.html index 2ba7e5e2..5efbeac4 100644 --- a/docs/dli/umn/dli_01_0253.html +++ b/docs/dli/umn/dli_01_0253.html @@ -7,7 +7,7 @@

        Prerequisites

        The data to be imported has been stored on OBS.

        Procedure

        1. You can import data on either the Data Management page or the SQL Editor page.

          • To import data on the Data Management page:
            1. On the left of the management console, choose Data Management > Databases and Tables.
            2. Click the name of the database corresponding to the table where data is to be imported to switch to the table management page.
            3. Locate the row where the target table resides and choose More > Import in the Operation column. The Import dialog box is displayed.
            -
          • To import data on the SQL Editor page:
            1. On the left of the management console, click SQL Editor.
            2. In the navigation tree on the left of SQL Editor, click Databases to see all databases. Click the database where the target table belongs. The table list is displayed.
            3. Click on the right of the table and choose Import from the shortcut menu. The Import page is displayed.

              +
            4. To import data on the SQL Editor page:
              1. On the left of the management console, click SQL Editor.
              2. In the navigation tree on the left of SQL Editor, click Databases to see all databases. Click the database where the target table belongs. The table list is displayed.
              3. Click on the right of the table and choose Import from the shortcut menu. The Import page is displayed.

        2. In the Import dialog box, set the parameters based on Table 1.

          @@ -49,8 +49,8 @@

        - @@ -121,7 +121,7 @@

      3. Click OK.
      4. You can view the imported data in either of the following ways:

        Currently, only the first 10 records are displayed.

        -
        • Choose Data Management > Databases and Tables in the navigation pane of the console. Locate the row that contains the database where the target table belongs and click More > View Properties in the Operation column. In the displayed dialog box, click the Preview tab to view the imported data.
        • In the Databases tab of the SQL Editor, click the database name to go to the table list. Click on the right of a table name and choose View Properties from the shortcut menu. In the displayed dialog box, click Preview to view the imported data.
        +
        • Choose Data Management > Databases and Tables in the navigation pane of the console. Locate the row that contains the database where the target table belongs and click More > View Properties in the Operation column. In the displayed dialog box, click the Preview tab to view the imported data.
        • On the Databases tab of the SQL Editor, click the database name to go to the table list. Click on the right of a table name and choose View Properties from the shortcut menu. In the displayed dialog box, click Preview to view the imported data.

      5. (Optional) View the status and execution result of the importing job on the Job Management > SQL Jobs page.
      6. diff --git a/docs/dli/umn/dli_01_0320.html b/docs/dli/umn/dli_01_0320.html index a48a40c4..8cddb3ff 100644 --- a/docs/dli/umn/dli_01_0320.html +++ b/docs/dli/umn/dli_01_0320.html @@ -10,7 +10,7 @@

        On the OBS console, you can configure lifecycle rules for a bucket to periodically delete objects in it or change object storage classes.

        • SQL statements can be executed in batches on the SQL editor page.
        -
        • Commonly used keyworks in the job editing window are highlighted in different colors.
        • Both single-line comment and multi-line comment are allowed. Use two consecutive hyphens (--) in each line to comment your statements.
        +
        • Commonly used syntax in the job editing window is highlighted in different colors.
        • Both single-line comment and multi-line comment are allowed. Use two consecutive hyphens (--) in each line to comment your statements.

        Navigation pane

        The navigation pane on the left consists of Databases, Queues, and Templates tabs.

        @@ -49,10 +49,10 @@
        -

        SQL Editing Window

        SQL job editing window is displayed in the upper right part of the page.

        +

        SQL Editing Window

        SQL job editing window is displayed in the upper right part of the page. For details about the parameters, see Table 2.

        The SQL statement editing area is below the operation bar. For details about keyboard shortcuts, see Table 3.

        -
        Table 2 Parameters

        Parameter

        Description

        Queues

        The queue where the jobs are executed. SQL jobs can be executed only in SQL queues. For details about how to create a queue, see Creating a Queue.

        +

        The queue where the jobs are executed. SQL jobs can only be executed on SQL queues. For details about how to create a queue, see Creating a Queue.

        Compression Format

        Compression format of the data to be exported. The following options are supported:

        -
        • none
        • bzip2
        • deflate
        • gzip
        +

        Compression format of the data to be exported. The options are:

        +
        • none
        • bzip2
        • deflate
        • gzip

        Storage Path

        diff --git a/docs/dli/umn/dli_01_0019.html b/docs/dli/umn/dli_01_0019.html index 615a5f4c..cf8a514c 100644 --- a/docs/dli/umn/dli_01_0019.html +++ b/docs/dli/umn/dli_01_0019.html @@ -30,7 +30,7 @@

        Tag value

        You can perform the following operations:

        -
        • Click the text box and select a predefined tag value from the drop-down list.
        • Enter a tag value in the text box.
          NOTE:

          A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

          +
          • Click the text box and select a predefined tag value from the drop-down list.
          • Enter a tag value in the text box.
            NOTE:

            A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

        Tag value

        You can specify the tag value in either of the following ways:

        -
        • Click the text box and select a predefined tag value from the drop-down list.
        • Enter a tag value in the text box.
          NOTE:

          A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

          +
          • Click the text box and select a predefined tag value from the drop-down list.
          • Enter a tag value in the text box.
            NOTE:

            A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

        Path

        You can directly enter a path or click and select an OBS path. If no bucket is available, you can directly switch to the OBS management console and create an OBS bucket.

        -
        • When creating an OBS table, you must specify a folder as the directory. If a file is specified, data import may be failed.
        • If a folder and a file have the same name in the OBS directory, the file path is preferred as the path of the data to be imported.
        +

        You can directly enter a path or click and select an OBS path. If no bucket is available, you can directly switch to the OBS management console and create an OBS bucket.

        +
        • When creating an OBS table, you must specify a folder as the directory. If a file is specified, data import may fail.
        • If a folder and a file have the same name in the OBS directory, the file path is preferred as the path of the data to be imported.
        NOTE:

        The path can be a file or folder.

        Table 2 Components of the SQL job editing window

        No.

        +
        @@ -65,7 +65,7 @@
        Table 2 Components of the SQL job editing window

        No.

        Button & Drop-Down List

        Queues

        Select a queue from the drop-down list box. If no queue is available, the default queue is displayed. Refer to Creating a Queue and create a queue.

        -

        SQL jobs can be executed only on SQL queues.

        +

        SQL jobs can only be executed on SQL queues.

        3

        @@ -189,7 +189,7 @@
        • View Result -
          Table 5 Operations in the result tab

          Operation

          +
          @@ -205,16 +205,16 @@ - -
          Table 5 Operations on the result tab

          Operation

          Description

          Click to view the query result in a chart or table.

          Export the result

          +

          Export job results

          Export the job execution results to the created OBS bucket. For details, see SQL Job Management.

          +

          Export the job execution results to the created OBS bucket.

          -

          SQL Query Procedure

          1. Log in to the DLI management console. On the page displayed, choose Job Management > SQL Jobs. On the page displayed, click Create Job.

            On the SQL editor page, the system prompts you to create an OBS bucket to store temporary data generated by DLI jobs. In the Set Job Bucket dialog box, click Setting. On the page displayed, click the edit button in the upper right corner of the job bucket card. In the Set Job Bucket dialog box displayed, enter the job bucket path and click OK.

            +

            SQL Query Procedure

            1. Log in to the DLI management console. On the page displayed, choose Job Management > SQL Jobs. On the page displayed, click Create Job.

              On the SQL editor page, the system prompts you to create an OBS bucket to store temporary data generated by DLI jobs. In the Set Job Bucket dialog box, click Setting. On the page displayed, click the edit button in the upper right corner of the job bucket card. In the Set Job Bucket dialog box displayed, enter the job bucket path and click OK.

            2. Select a queue from the queue list in the upper left corner of the SQL job editing window. For details about how to create a queue, see Creating a Queue.
            3. In the upper right corner of the SQL job editing window, select a database, for example, qw, from the Databases drop-down list.
            4. Create a table, for example, qw. For details about how to create a database and table, see Creating a Database or a Table.
            5. In the SQL job editing window, enter the following SQL statement:
              1
              SELECT * FROM qw.qw LIMIT 10;
               
              diff --git a/docs/dli/umn/dli_01_0363.html b/docs/dli/umn/dli_01_0363.html index 1fb0600d..4100a31b 100644 --- a/docs/dli/umn/dli_01_0363.html +++ b/docs/dli/umn/dli_01_0363.html @@ -8,53 +8,53 @@
            6. To create a queue on the SQL Editor page:
              1. In the navigation pane of the DLI management console, click SQL Editor.
              2. Click Queues. On the tab page displayed, click on the right to create a queue.
            7. On the Create Queue page displayed, set the parameters according to Table 1. -
              Table 1 Parameters

              Parameter

              +
              - - - - - - - - - - - - - diff --git a/docs/dli/umn/dli_01_0367.html b/docs/dli/umn/dli_01_0367.html index 8c80f04d..e4bb90f6 100644 --- a/docs/dli/umn/dli_01_0367.html +++ b/docs/dli/umn/dli_01_0367.html @@ -47,7 +47,7 @@
              • Tag key: Enter a tag key name in the text box.
                NOTE:

                A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

              -
              • Tag value: Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                +
                • Tag value: Enter a tag value in the text box.
                  NOTE:

                  A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                diff --git a/docs/dli/umn/dli_01_0376.html b/docs/dli/umn/dli_01_0376.html index 1632ded9..b4821638 100644 --- a/docs/dli/umn/dli_01_0376.html +++ b/docs/dli/umn/dli_01_0376.html @@ -3,7 +3,7 @@

                Modifying the Owners of Databases and Tables

                During actual use, developers create databases and tables and submit them to test personnel for testing. After the test is complete, the databases and tables are transferred to O&M personnel for user experience. In this case, you can change the owner of the databases and tables to transfer data to other owners.

                Modifying the Database Owner

                You can change the owner of a database on either the Data Management page or the SQL Editor page.
                • On the Data Management page, change the database owner.
                  1. On the left of the management console, choose Data Management > Databases and Tables.
                  2. On the Databases and Tables page, locate the database you want and click More > Modify Database in the Operation column.
                  3. In the displayed dialog box, enter a new owner name (an existing username) and click OK.
                  -
                • Change the database owner on the SQL Editor page.
                  1. On the left of the management console, click SQL Editor.
                  2. In the navigation tree on the left, click Databases, click on the right of the database you want to modify, and choose Modify Database from the shortcut menu.
                  3. In the displayed dialog box, enter a new owner name (an existing username) and click OK.
                  +
                • Change the database owner on the SQL Editor page.
                  1. On the left of the management console, click SQL Editor.
                  2. In the navigation tree on the left, click Databases, click on the right of the database you want to modify, and choose Modify Database from the shortcut menu.
                  3. In the displayed dialog box, enter a new owner name (an existing username) and click OK.
                diff --git a/docs/dli/umn/dli_01_0378.html b/docs/dli/umn/dli_01_0378.html index 5af837a1..c4ef4d49 100644 --- a/docs/dli/umn/dli_01_0378.html +++ b/docs/dli/umn/dli_01_0378.html @@ -6,12 +6,61 @@

                Functions

                You can query and analyze heterogeneous data sources such as RDS and GaussDB(DWS) on the cloud using access methods, such as visualized interface, RESTful API, JDBC, and Beeline. The data format is compatible with four mainstream data formats: CSV, JSON, Parquet, and ORC.

                • Basic functions
                  • You can use standard SQL statements to query in SQL jobs.
                  • Flink jobs support Flink SQL online analysis. Aggregation functions such as Window and Join, geographic functions, and CEP functions are supported. SQL is used to express service logic, facilitating service implementation.
                  • For Spark jobs, fully-managed Spark computing can be performed. You can submit computing tasks through interactive sessions or in batch to analyze data in the fully managed Spark queues.
                  -
                • Federated analysis of heterogeneous data sources
                  • Spark datasource connection: Data sources such as DWS, RDS, and CSS can be accessed through DLI.
                  • Interconnection with multiple cloud services is supported in Flink jobs to form a rich stream ecosystem. The DLI stream ecosystem consists of cloud service ecosystems and open source ecosystems.
                    • Cloud service ecosystem: DLI can interconnect with other services in Flink SQL. You can directly use SQL to read and write data from cloud services.
                    • Open-source ecosystems: After connections to other VPCs are established through datasource connections, you can access all data sources and output targets (such as Kafka, HBase, and Elasticsearch) supported by Flink and Spark in your dedicated DLI queue.
                    +
                  • Federated analysis of heterogeneous data sources
                    • Spark datasource connection: Data sources such as GaussDB(DWS), RDS, and CSS can be accessed through DLI.
                    • Interconnection with multiple cloud services is supported in Flink jobs to form a rich stream ecosystem. The DLI stream ecosystem consists of cloud service ecosystems and open source ecosystems.
                      • Cloud service ecosystem: DLI can interconnect with other services in Flink SQL. You can directly use SQL to read and write data from cloud services.
                      • Open-source ecosystems: After connections to other VPCs are established through datasource connections, you can access all data sources and output targets (such as Kafka, HBase, and Elasticsearch) supported by Flink and Spark in your dedicated DLI queue.

                  • Storage-compute decoupling

                    DLI is interconnected with OBS for data analysis. In this architecture where storage and compute are decoupled, resources of these two types are charged separately, helping you reduce costs and improve resource utilization.

                    You can choose single-AZ or multi-AZ storage when you create an OBS bucket for storing redundant data on the DLI console. The differences between the two storage policies are as follows:

                    • Multi-AZ storage means data is stored in multiple AZs, improving data reliability. If the multi-AZ storage is enabled for a bucket, data is stored in multiple AZs in the same region. If one AZ becomes unavailable, data can still be properly accessed from the other AZs. The multi-AZ storage is ideal for scenarios that demand high reliability. You are advised to use this policy.
                    • Single-AZ storage means that data is stored in a single AZ, with lower costs.
                    +
                  • Elastic resource pool

                    Elastic resource pools support the CCE cluster architecture for heterogeneous resources so you can centrally manage and allocate them. For details, see Elastic Resource Pool.

                    +

                    Elastic resource pools have the following advantages:

                    +
                    • Unified management
                      • You can manage multiple internal clusters and schedule jobs. You can manage millions of cores for compute resources.
                      • Elastic resource pools can be deployed across multiple AZs to support high availability.
                      +
                    • Tenant resource isolation

                      Resources of different queues are isolated to reduce the impact on each other.

                      +
                    • Shared access and flexibility
                      • Minute-level scaling helps you to handle request peaks.
                      • Queue priorities and CU quotas can be set at different time to improve resource utilization.
                      +
                    • Job-level isolation (supported in later versions)

                      SQL jobs can run on independent Spark instances, reducing mutual impacts between jobs.

                      +
                    • Automatic scaling (supported in later versions)

                      The queue quota is updated in real time based on workload and priority.

                      +
                    +

                    Using elastic resource pools has the following advantages.

                    + +
              Table 1 Parameters

              Parameter

              Description

              +

              Description

              Name

              +

              Name

              Name of a queue.

              +

              Name of a queue.

              • The queue name can contain only digits, letters, and underscores (_), but cannot contain only digits, start with an underscore (_), or be left unspecified.
              • The length of the name cannot exceed 128 characters.
              NOTE:

              The queue name is case-insensitive. Uppercase letters will be automatically converted to lowercase letters.

              Type

              +

              Type

              • For SQL: compute resources used for SQL jobs.
              • For general purpose: compute resources used for Spark and Flink jobs.
                NOTE:

                Selecting Dedicated Resource Mode enables you to create a dedicated queue. Enhanced datasource connections can only be created for dedicated queues.

                +
              • For SQL: compute resources used for SQL jobs.
              • For general purpose: compute resources used for Spark and Flink jobs.
                NOTE:

                Selecting Dedicated Resource Mode enables you to create a dedicated queue. Enhanced datasource connections can only be created for dedicated queues.

              Specifications

              +

              Specifications

              The compute nodes' total number of CUs. One CU equals one vCPU and 4 GB of memory. DLI automatically assigns CPU and memory resources to each compute node, and the client does not need to know how many compute nodes are being used.

              +

              The compute nodes' total number of CUs. One CU equals one vCPU and 4 GB of memory. DLI automatically assigns CPU and memory resources to each compute node, and the client does not need to know how many compute nodes are being used.

              Description

              +

              Description

              Description of the queue to be created. The description can contain a maximum of 128 characters.

              +

              Description of the queue to be created. The description can contain a maximum of 128 characters.

              Advanced Settings

              +

              Advanced Settings

              In the Queue Type area, select Dedicated Resource Mode and then click Advanced Settings.
              • Default: The system automatically configures the parameter.
              • Custom

                CIDR Block: You can specify the CIDR block. For details, see Modifying the CIDR Block. If DLI enhanced datasource connection is used, the CIDR block of the DLI queue cannot overlap with that of the data source.

                +
              In the Queue Type area, select Dedicated Resource Mode and then click Advanced Settings.
              • Default: The system automatically configures the parameter.
              • Custom

                CIDR Block: You can specify the CIDR block. For details, see Modifying the CIDR Block. If DLI enhanced datasource connection is used, the CIDR block of the DLI queue cannot overlap with that of the data source.

                Queue Type: When running an AI-related SQL job, select AI-enhanced. When running other jobs, select Basic.

              Tags

              +

              Tags

              Tags used to identify cloud resources. A tag includes the tag key and tag value. If you want to use the same tag to identify multiple cloud resources, that is, to select the same tag from the drop-down list box for all services, you are advised to create predefined tags on the Tag Management Service (TMS).

              +

              Tags used to identify cloud resources. A tag includes the tag key and tag value. If you want to use the same tag to identify multiple cloud resources, that is, to select the same tag from the drop-down list box for all services, you are advised to create predefined tags on the Tag Management Service (TMS).

              NOTE:
              • A maximum of 20 tags can be added.
              • Only one tag value can be added to a tag key.
              • The key name in each resource must be unique.
              • Tag key: Enter a tag key name in the text box.
                NOTE:

                A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

                -
              • Tag value: Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                +
              • Tag value: Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

              + + + + + + + + + + + + + + + + + + + + +

              Advantage

              +

              No Elastic Resource Pool

              +

              Use Elastic Resource Pool

              +

              Efficiency

              +

              You need to set scaling tasks repeatedly to improve the resource utilization.

              +

              Dynamic scaling can be done in seconds.

              +

              Resource utilization

              +

              +

              Resources cannot be shared among different queues.

              +

              For example, a queue has idle CUs and another queue is heavily loaded. Resources cannot be shared. You can only scale up the second queue.

              +

              Queues added to the same elastic resource pool can share compute resources.

              +

              When you set a data source, you must allocate different network segments to each queue, which requires a large number of VPC network segments.

              +

              You can add multiple general-purpose queues in the same elastic resource pool to one network segment, simplifying the data source configuration.

              +

              Resource allocation

              +

              If resources are insufficient for scale-out tasks of multiple queues, some queues will fail to be scaled out.

              +

              You can set the priority for each queue in the elastic resource pool based on the peak hours to ensure proper resource allocation.

              +
              +

              DLI Core Engine: Spark+Flink

              • Spark is a unified analysis engine that is ideal for large-scale data processing. It focuses on query, compute, and analysis. DLI optimizes performance and reconstructs services based on open-source Spark. It is compatible with the Apache Spark ecosystem and interfaces, and improves performance by 2.5x when compared with open-source Spark. In this way, DLI enables you to perform query and analysis of EBs of data within hours.
              • Flink is a distributed compute engine that is ideal for batch processing, that is, for processing static data sets and historical data sets. You can also use it for stream processing, that is, processing real-time data streams and generating data results in real time. DLI enhances features and security based on the open-source Flink and provides the Stream SQL feature required for data processing.
              diff --git a/docs/dli/umn/dli_01_0384.html b/docs/dli/umn/dli_01_0384.html index aaa00c55..27f7efa0 100644 --- a/docs/dli/umn/dli_01_0384.html +++ b/docs/dli/umn/dli_01_0384.html @@ -5,11 +5,11 @@

              On the Overview page, click Create Job in the upper right corner of the Spark Jobs tab or click Create Job in the upper right corner of the Spark Jobs page. The Spark job editing page is displayed.

              On the Spark job editing page, a message is displayed, indicating that a temporary DLI data bucket will be created. The created bucket is used to store temporary data generated by DLI, such as job logs and job results. You cannot view job logs if you choose not to create it. The bucket will be created and the default bucket name is used.

              If you do not need to create a DLI temporary data bucket and do not want to receive this message, select Do not show again and click Cancel.

              -

              Prerequisites

              • You have uploaded the dependencies to the corresponding OBS bucket on the Data Management > Package Management page. For details, see Creating a Package.
              • Before creating a Spark job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a cross-source connection to enable the network between the job running queue and external data sources.
                • For details about the external data sources that can be accessed by Spark jobs, see Cross-Source Analysis Development Methods.
                • For details about how to create a datasource connection, see Enhanced Datasource Connections.

                  On the Resources > Queue Management page, locate the queue you have created, and choose More > Test Address Connectivity in the Operation column to check whether the network connection between the queue and the data source is normal. For details, see Testing Address Connectivity.

                  +

                  Prerequisites

                  • You have uploaded the dependencies to the corresponding OBS bucket on the Data Management > Package Management page. For details, see Creating a Package.
                  • Before creating a Spark job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a datasource connection to enable the network between the job running queue and external data sources.
                    • For details about the external data sources that can be accessed by Spark jobs, see Cross-Source Analysis Development Methods.
                    • For details about how to create a datasource connection, see Enhanced Datasource Connections.

                      On the Resources > Queue Management page, locate the queue you have created, and choose More > Test Address Connectivity in the Operation column to check whether the network connection between the queue and the data source is normal. For details, see Testing Address Connectivity.

                  -

                  Procedure

                  1. In the left navigation pane of the DLI management console, choose Job Management > Spark Jobs. The Spark Jobs page is displayed.

                    Click Create Job in the upper right corner. In the job editing window, you can set parameters in Fill Form mode or Write API mode.

                    +

                    Procedure

                    1. In the left navigation pane of the DLI management console, choose Job Management > Spark Jobs. The Spark Jobs page is displayed.

                      Click Create Job in the upper right corner. In the job editing window, you can set parameters in Fill Form mode or Write API mode.

                      The following uses the Fill Form as an example. In Write API mode, refer to the Data Lake Insight API Reference for parameter settings.

                    1. Select a queue.

                      Select the queue you want to use from the drop-down list box.

                    2. Configure the job.

                      Configure job parameters by referring to Table 1.

                      @@ -29,6 +29,7 @@

              Select the package to be executed. The value can be .jar or .py.

              You can select the name of a JAR or pyFile package that has been uploaded to the DLI resource management system. You can also specify an OBS path, for example, obs://Bucket name/Package name.

              +

              Spark 3.3.x or later supports only packages in OBS paths.

              Main Class (--class)

              diff --git a/docs/dli/umn/dli_01_0403.html b/docs/dli/umn/dli_01_0403.html index bf55a6e3..5caf870f 100644 --- a/docs/dli/umn/dli_01_0403.html +++ b/docs/dli/umn/dli_01_0403.html @@ -6,74 +6,64 @@
              • Flink SQL uses SQL statements to define jobs and can be submitted to any general purpose queue.
              • Flink Jar customizes a JAR package job based on Flink APIs. It runs on dedicated queues.

              Flink job management provides the following functions:

              -

              Assigning Agency Permissions

              Agencies are required for DLI to execute Flink jobs. You can set the agency when logging in to the management console for the first time or go to Global Configurations > Service Authorization to modify the agencies.

              -

              The permissions are as follows:

              -
              • Tenant Administrator (global) permissions are required to access data from OBS to execute Flink jobs on DLI, for example, obtaining OBS/GaussDB(DWS) data sources, log dump (including bucket authorization), checkpointing enabling, and job import and export.

                Due to cloud service cache differences, permission setting operations require about 60 minutes to take effect.

                -
                -
              • DIS Administrator permissions are required to use DIS data as the data source of DLI Flink jobs.

                Due to cloud service cache differences, permission setting operations require about 30 minutes to take effect.

                -
                -
              • To use CloudTable data as the data source of DLI Flink jobs, CloudTable Administrator permissions are required.

                Due to cloud service cache differences, permission setting operations require about 3 minutes to take effect.

                -
                -
              -

              Flink Jobs Page

              On the Overview page, click Flink Jobs to go to the Flink job management page. Alternatively, you can choose Job Management > Flink Jobs from the navigation pane on the left. The page displays all Flink jobs. If there are a large number of jobs, they will be displayed on multiple pages. DLI allows you to view jobs in all statuses.

              -
              Table 1 Job management parameters

              Parameter

              +
              - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/dli/umn/dli_01_0418.html b/docs/dli/umn/dli_01_0418.html index 6786c04b..cd6d3aea 100644 --- a/docs/dli/umn/dli_01_0418.html +++ b/docs/dli/umn/dli_01_0418.html @@ -2,7 +2,7 @@

              Creating an IAM User and Granting Permissions

              You can use Identity and Access Management (IAM) to implement fine-grained permissions control on DLI resources. For details, see Overview.

              -

              If your cloud account does not need individual IAM users, then you may skip over this chapter.

              +

              If your cloud account does not need individual IAM users, then you may skip over this section.

              This section describes how to create an IAM user and grant DLI permissions to the user. Figure 1 shows the procedure.

              Prerequisites

              Before assigning permissions to user groups, you should learn about system policies and select the policies based on service requirements. For details about system permissions supported by DLI, see DLI System Permissions.

              @@ -12,7 +12,7 @@
              1. Create a user group and grant the permission to it.

                Create a user group on the IAM console, and assign the DLI ReadOnlyAccess permission to the group.

              2. Create a user and add the user to the user group.

                Create a user on the IAM console and add the user to the group created in 1.

              3. Log in and verify the permission.

                Log in to the management console using the newly created user, and verify the user's permissions.

                -
                • Choose Service List > Data Lake Insight. The DLI management console is displayed. If you can view the queue list on the Queue Management page but cannot buy DLI queues by clicking Buy Queue in the upper right corner (assume that the current permission contains only DLI ReadOnlyAccess), the DLI ReadOnlyAccess permission has taken effect.
                • Choose any other service in Service List. If a message appears indicating that you have insufficient permissions to access the service, the DLI ReadOnlyAccess permission has already taken effect.
                +
                • Choose Service List > Data Lake Insight. The DLI management console is displayed. If you can view the queue list on the Queue Management page but cannot buy DLI queues by clicking Buy Queue in the upper right corner (assume that the current permission contains only DLI ReadOnlyAccess), the DLI ReadOnlyAccess permission has taken effect.
                • Choose any other service in Service List. If a message appears indicating that you have insufficient permissions to access the service, the DLI ReadOnlyAccess permission has already taken effect.

              diff --git a/docs/dli/umn/dli_01_0440.html b/docs/dli/umn/dli_01_0440.html index c3bce4b8..55c6c466 100644 --- a/docs/dli/umn/dli_01_0440.html +++ b/docs/dli/umn/dli_01_0440.html @@ -4,11 +4,11 @@

              DLI has a comprehensive permission control mechanism and supports fine-grained authentication through Identity and Access Management (IAM). You can create policies in IAM to manage DLI permissions. You can use both the DLI's permission control mechanism and the IAM service for permission management.

              Application Scenarios of IAM Authentication

              When using DLI on the cloud, enterprise users need to manage DLI resources (queues) used by employees in different departments, including creating, deleting, using, and isolating resources. In addition, data of different departments needs to be managed, including data isolation and sharing.

              DLI uses IAM for refined enterprise-level multi-tenant management. IAM provides identity authentication, permissions management, and access control, helping you securely access your cloud resources.

              -

              With IAM, you can use your cloud account to create IAM users for your employees, and assign permissions to the users to control their access to specific resource types. For example, some software developers in your enterprise need to use DLI resources but must not delete them or perform any high-risk operations. To achieve this result, you can create IAM users for the software developers and grant them only the permissions required for using DLI resources.

              +

              With IAM, you can use your account to create IAM users for your employees, and assign permissions to the users to control their access to specific resource types. For example, some software developers in your enterprise need to use DLI resources but must not delete them or perform any high-risk operations. To achieve this result, you can create IAM users for the software developers and grant them only the permissions required for using DLI resources.

              For a new user, you need to log in for the system to record the metadata before using DLI.

              -

              IAM is free of charge. You pay only for the resources you use.

              -

              If your cloud account does not need individual IAM users for permissions management, skip this chapter.

              +

              IAM is free to use, and you only need to pay for the resources in your account.

              +

              If your account does not need individual IAM users for permissions management, skip over this section.

              DLI System Permissions

              Table 1 lists all the system-defined roles and policies supported by DLI.

              Type: There are roles and policies.
              • Roles: A type of coarse-grained authorization mechanism that defines permissions related to user responsibilities. Only a limited number of service-level roles are available. When using roles to grant permissions, you also need to assign other roles on which the permissions depend. However, roles are not an ideal choice for fine-grained authorization and secure access control.
              • Policies: A type of fine-grained authorization mechanism that defines permissions required to perform operations on specific cloud resources under certain conditions. This mechanism allows for more flexible policy-based authorization, meeting requirements for secure access control. For example, you can grant DLI users only the permissions for managing a certain type of ECSs.
              @@ -156,7 +156,7 @@

              Examples

              An Internet company mainly provides game and music services. DLI is used to analyze user behaviors and assist decision making.

              -

              As shown in Figure 1, the Leader of the Basic Platform Team has applied for a Tenant Administrator account to manage and use cloud services. The Leader of the Basic Platform Team creates a subaccount with the DLI Service Administrator permission to manage and use DLI, as the Big Data Platform Team requires DLI for data analysis. The Leader of the Basic Platform Team creates a Queue A and assigns it to Data Engineer A to analyze the gaming data. A Queue B is also assigned to Data Engineer B to analyze the music data. Besides granting the queue usage permission, the Leader of the Basic Platform Team grants data (except the database) management and usage permissions to the two engineers.

              +

              As shown in Figure 1, the Leader of the Basic Platform Team has applied for a Tenant Administrator account to manage and use cloud services. The Leader of the Basic Platform Team creates a subaccount with the DLI Service Administrator permission to manage and use DLI, as the Big Data Platform Team requires DLI for data analysis. The Leader of the Basic Platform Team creates a Queue A and assigns it to Data Engineer A to analyze the gaming data. A Queue B is also assigned to Data Engineer B to analyze the music data. Besides granting the queue usage permission, the Leader of the Basic Platform Team grants data (except the database) management and usage permissions to the two engineers.

              Figure 1 Granting permissions

              The Data Engineer A creates a table named gameTable for storing game prop data and a table named userTable for storing game user data. The music service is a new service. To explore potential music users among existing game users, the Data Engineer A assigns the query permission on the userTable to the Data Engineer B. In addition, Data Engineer B creates a table named musicTable for storing music copyrights information.

              Table 3 describes the queue and data permissions of Data Engineer A and Data Engineer B.

              diff --git a/docs/dli/umn/dli_01_0441.html b/docs/dli/umn/dli_01_0441.html index 65511366..d649fdf6 100644 --- a/docs/dli/umn/dli_01_0441.html +++ b/docs/dli/umn/dli_01_0441.html @@ -3,585 +3,601 @@

              Common Operations Supported by DLI System Policy

              Table 1 lists the common operations supported by each system policy of DLI. Choose proper system policies according to this table. For details about the SQL statement permission matrix in DLI in terms of permissions on databases, tables, and roles, see SQL Syntax of Batch Jobs > Data Permissions Management > Data Permissions List in the Data Lake Insight SQL Syntax Reference.

              -
              Table 1 Job management parameters

              Parameter

              Description

              +

              Description

              ID

              +

              ID

              ID of a submitted Flink job, which is generated by the system by default.

              +

              ID of a submitted Flink job, which is generated by the system by default.

              Name

              +

              Name

              Name of the submitted Flink job.

              +

              Name of the submitted Flink job.

              Type

              +

              Type

              Type of the submitted Flink job. Including:

              +

              Type of the submitted Flink job, including:

              • Flink SQL: Flink SQL jobs
              • Flink Jar: Flink Jar jobs

              Status

              +

              Status

              Job statuses, including:

              +

              Job statuses, including:

              • Draft
              • Submitting
              • Submission failed
              • Running: After the job is submitted, a normal result is returned.
              • Running exception: The job stops running due to an exception.
              • Downloading
              • Idle
              • Stopping
              • Stopped
              • Stopping failed
              • Creating the savepoint
              • Completed

              Description

              +

              Description

              Description of the submitted Flink job.

              +

              Description of the submitted Flink job.

              Username

              +

              Username

              Name of the user who submits a job.

              +

              Name of the user who submits a job.

              Created

              +

              Created

              Time when a job is created.

              +

              Time when a job is created.

              Started

              +

              Started

              Time when a Flink job starts to run.

              +

              Time when a Flink job starts to run.

              Duration

              +

              Duration

              Time consumed by job running.

              +

              Time consumed by job running.

              Operation

              +

              Operation

              • Edit: Edit a created job. For details, see Editing a Job.
              • Start: Start and run a job. For details, see Starting a Job.
              • More
                • FlinkUI: After you click this button, the Flink job execution page is displayed.
                  NOTE:

                  When you execute a job on a created queue, the cluster is restarted. It takes about 10 minutes. If you click FlinkUI before the cluster is created, an empty projectID will be cached. The FlinkUI page cannot be displayed.

                  +
              • Edit: Edit a created job. For details, see Editing a Job.
              • Start: Start and run a job. For details, see Starting a Job.
              • More
                • FlinkUI: After you click this button, the Flink job execution page is displayed.
                  NOTE:

                  When you execute a job on a created queue, the cluster is restarted. It takes about 10 minutes. If you click FlinkUI before the cluster is created, an empty projectID will be cached. The FlinkUI page cannot be displayed.

                  You are advised to use a dedicated queue so that the cluster will not be released. Alternatively, wait for a while after the job is submitted (the cluster is created), and then check FlinkUI.

                • Stop: Stop a Flink job. If this function is unavailable, jobs in the current status cannot be stopped.
                • Delete: Delete a job.
                  NOTE:

                  A deleted job cannot be restored.

                  diff --git a/docs/dli/umn/dli_01_0410.html b/docs/dli/umn/dli_01_0410.html index 8b788b4a..810edd3c 100644 --- a/docs/dli/umn/dli_01_0410.html +++ b/docs/dli/umn/dli_01_0410.html @@ -119,7 +119,7 @@

              √

              RDS MySQL

              +

              RDS for MySQL

              √

              Table 1 Common operations supported by each system policy

              Resources

              +
              - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + diff --git a/docs/dli/umn/dli_01_0447.html b/docs/dli/umn/dli_01_0447.html index b64b33d3..46b309c2 100644 --- a/docs/dli/umn/dli_01_0447.html +++ b/docs/dli/umn/dli_01_0447.html @@ -59,7 +59,7 @@

              Modifying Permissions for an Existing User or Project

              For a user or project that has some permissions on the database, you can revoke the existing permissions or grant new ones.

              -

              If the options in Set Permission are gray, the corresponding account does not have the permission to modify the database. You can apply to the administrator, database owner, or other authorized users for granting and revoking permissions of databases.

              +

              If the options in Set Permission are gray, the corresponding account does not have the permission to modify the database. You can apply to the administrator, database owner, or other authorized users for granting and revoking permissions of databases.

              1. In the User Permission Info list, find the user whose permission needs to be set.
                • If the user is a sub-user, you can set permissions for it.
                • If the user is already an administrator, you can only view the permissions information.

                In the Project Permission Info list, locate the project for which you want to set permissions and click Set Permission.

                diff --git a/docs/dli/umn/dli_01_0448.html b/docs/dli/umn/dli_01_0448.html index 3d2b929c..0507e675 100644 --- a/docs/dli/umn/dli_01_0448.html +++ b/docs/dli/umn/dli_01_0448.html @@ -47,29 +47,29 @@
            8. For details about the OBS table permissions, see Table 2.

              -
            9. Table 1 Common operations supported by each system permission

              Resource

              Operation

              +

              Operation

              Description

              +

              Description

              DLI FullAccess

              +

              DLI FullAccess

              DLI ReadOnlyAccess

              +

              DLI ReadOnlyAccess

              Tenant Administrator

              +

              Tenant Administrator

              DLI Service Administrator

              +

              DLI Service Administrator

              Queue

              +

              Queue

              DROP_QUEUE

              +

              DROP_QUEUE

              Deleting a queue

              +

              Deleting a queue

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SUBMIT_JOB

              +

              SUBMIT_JOB

              Submitting the job

              +

              Submitting a job

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              CANCEL_JOB

              +

              CANCEL_JOB

              Terminating the job

              +

              Terminating a job

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              RESTART

              +

              RESTART

              Restarting a queue

              +

              Restarting a queue

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              GRANT_PRIVILEGE

              +

              GRANT_PRIVILEGE

              Granting permissions to the queue

              +

              Granting permissions to a queue

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              REVOKE_PRIVILEGE

              +

              REVOKE_PRIVILEGE

              Revoking permissions from the queue

              +

              Revoking permissions from a queue

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_PRIVILEGES

              +

              SHOW_PRIVILEGES

              Viewing the queue permissions of other users

              +

              Viewing the queue permissions of other users

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              Database

              +

              Database

              DROP_DATABASE

              +

              DROP_DATABASE

              Deleting a database

              +

              Deleting a database

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              CREATE_TABLE

              +

              CREATE_TABLE

              Creating a table

              +

              Creating a table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              CREATE_VIEW

              +

              CREATE_VIEW

              Creating a view

              +

              Creating a view

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              EXPLAIN

              +

              EXPLAIN

              Explaining the SQL statement as an execution plan

              +

              Explaining the SQL statement as an execution plan

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              CREATE_ROLE

              +

              CREATE_ROLE

              Creating a role

              +

              Creating a role

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              DROP_ROLE

              +

              DROP_ROLE

              Deleting a role

              +

              Deleting a role

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_ROLES

              +

              SHOW_ROLES

              Displaying a role

              +

              Displaying a role

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              GRANT_ROLE

              +

              GRANT_ROLE

              Binding a role

              +

              Binding a role

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              REVOKE_ROLE

              +

              REVOKE_ROLE

              Unbinding the role

              +

              Unbinding a role

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_USERS

              +

              SHOW_USERS

              Displaying the binding relationships between all roles and users

              +

              Displaying the binding relationships between all roles and users

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              GRANT_PRIVILEGE

              +

              GRANT_PRIVILEGE

              Granting permissions to the database

              +

              Granting permissions to the database

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              REVOKE_PRIVILEGE

              +

              REVOKE_PRIVILEGE

              Revoking permissions to the database

              +

              Revoking permissions from the database

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_PRIVILEGES

              +

              SHOW_PRIVILEGES

              Viewing database permissions of other users

              +

              Viewing database permissions of other users

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              DISPLAY_ALL_TABLES

              +

              DISPLAY_ALL_TABLES

              Displaying tables in the database

              +

              Displaying tables in a database

              √

              +

              √

              √

              +

              √

              √

              +

              √

              √

              +

              √

              DISPLAY_DATABASE

              +

              DISPLAY_DATABASE

              Displaying databases

              +

              Displaying databases

              √

              +

              √

              √

              +

              √

              √

              +

              √

              √

              +

              √

              CREATE_FUNCTION

              +

              CREATE_FUNCTION

              Creating a function

              +

              Creating a function

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              DROP_FUNCTION

              +

              DROP_FUNCTION

              Deleting a function

              +

              Deleting a function

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_FUNCTIONS

              +

              SHOW_FUNCTIONS

              Displaying all functions

              +

              Displaying all functions

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              DESCRIBE_FUNCTION

              +

              DESCRIBE_FUNCTION

              Displaying function details

              +

              Displaying function details

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              Table

              +

              Table

              DROP_TABLE

              +

              DROP_TABLE

              Deleting a table

              +

              Deleting tables

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SELECT

              +

              SELECT

              Querying a table

              +

              Querying tables

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              INSERT_INTO_TABLE

              +

              INSERT_INTO_TABLE

              Inserting

              +

              Inserting table data

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_ADD_COLUMNS

              +

              ALTER_TABLE_ADD_COLUMNS

              Adding a column

              +

              Adding a column

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              INSERT_OVERWRITE_TABLE

              +

              INSERT_OVERWRITE_TABLE

              Rewriting

              +

              Overwriting a table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_RENAME

              +

              ALTER_TABLE_RENAME

              Renaming a table

              +

              Renaming a table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_ADD_PARTITION

              +

              ALTER_TABLE_ADD_PARTITION

              Adding partitions to the partition table

              +

              Adding partitions to the partition table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_RENAME_PARTITION

              +

              ALTER_TABLE_RENAME_PARTITION

              Renaming a table partition

              +

              Renaming a table partition

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_DROP_PARTITION

              +

              ALTER_TABLE_DROP_PARTITION

              Deleting partitions from a partition table

              +

              Deleting partitions from a partition table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_PARTITIONS

              +

              SHOW_PARTITIONS

              Displaying all partitions

              +

              Displaying all partitions

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_RECOVER_PARTITION

              +

              ALTER_TABLE_RECOVER_PARTITION

              Restoring table partitions

              +

              Restoring table partitions

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              ALTER_TABLE_SET_LOCATION

              +

              ALTER_TABLE_SET_LOCATION

              Setting the partition path

              +

              Setting the partition path

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              GRANT_PRIVILEGE

              +

              GRANT_PRIVILEGE

              Granting permissions to the table

              +

              Granting permissions to the table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              REVOKE_PRIVILEGE

              +

              REVOKE_PRIVILEGE

              Revoking permissions from the table

              +

              Revoking permissions from the table

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              SHOW_PRIVILEGES

              +

              SHOW_PRIVILEGES

              Viewing table permissions of other users

              +

              Viewing table permissions of other users

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              DISPLAY_TABLE

              +

              DISPLAY_TABLE

              Displaying a table

              +

              Displaying a table

              √

              +

              √

              √

              +

              √

              √

              +

              √

              √

              +

              √

              DESCRIBE_TABLE

              +

              DESCRIBE_TABLE

              Displaying table information

              +

              Displaying table information

              √

              +

              √

              ×

              +

              ×

              √

              +

              √

              √

              +

              √

              +

              Enhanced datasource connection

              +

              BIND_QUEUE

              +

              Binding an enhanced datasource connection to a queue

              +

              It is only used to grant permissions across projects.

              +

              ×

              +

              ×

              +

              ×

              +

              ×

              diff --git a/docs/dli/umn/dli_01_0464.html b/docs/dli/umn/dli_01_0464.html index 8c19c3c4..5077550b 100644 --- a/docs/dli/umn/dli_01_0464.html +++ b/docs/dli/umn/dli_01_0464.html @@ -7,25 +7,25 @@

              Flink SQL Sample Template

              The template list displays existing sample templates for Flink SQL jobs. Table 1 describes the parameters in the template list.

              The scenarios covered by the sample templates may vary and are subject to what is displayed on the console.

              -
              Table 2 Parameter description

              Parameter

              +
              - - - - - - - - - -
              Table 2 Parameter description

              Parameter

              Description

              +

              Description

              Authorization Object

              +

              Authorization Object

              Select User or Project.

              +

              Select User or Project.

              Username/Project

              +

              Username/Project

              • If you select User, enter the IAM username when granting table permissions to the user.
                NOTE:

                The username is an existing IAM user name and has logged in to the DLI management console.

                +
              • If you select User, enter the IAM username when granting table permissions to the user.
                NOTE:

                The username must be that of an existing IAM user who has logged in to the DLI management console.

                -
              • If you select Project, select the project to be authorized in the current region.
                NOTE:

                If you select Project, you can only view information about the authorized tables and their databases.

                +
              • If you select Project, select the project to be authorized in the current region.
                NOTE:

                If you select Project, you can only view information about the authorized tables and their databases.

              Non-inheritable Permissions

              +

              Non-inheritable Permissions

              Select a permission to grant it to the user, or deselect a permission to revoke it.
              • The following permissions are applicable to both user and project authorization:
                • View Table Creation Statement: This permission allows you to view the statement for creating the current table.
                • View Table Information: This permission allows you to view information about the current table.
                • Select Table: This permission allows you to query data of the current table.
                • Drop Table: This permission allows you to delete the current table.
                • Rename Table: Rename the current table.
                • Insert: This permission allows you to insert data into the current table.
                • Overwrite: This permission allows you to insert data to overwrite the data in the current table.
                • Add Column: This permission allows you to add columns to the current table.
                • Grant Permission: This permission allows you to grant table permissions to other users or projects.
                • Revoke Permission: This permission allows you to revoke the table's permissions that other users or projects have but cannot revoke the table owner's permissions.
                • View Other Users' Permissions: This permission allows you to query other users' permission on the current table.
                +
              Select a permission to grant it to the user, or deselect a permission to revoke it.
              • The following permissions are applicable to both user and project authorization:
                • View Table Creation Statement: This permission allows you to view the statement for creating the current table.
                • View Table Information: This permission allows you to view information about the current table.
                • Select Table: This permission allows you to query data of the current table.
                • Drop Table: This permission allows you to delete the current table.
                • Rename Table: Rename the current table.
                • Insert: This permission allows you to insert data into the current table.
                • Overwrite: This permission allows you to insert data to overwrite the data in the current table.
                • Add Column: This permission allows you to add columns to the current table.
                • Grant Permission: This permission allows you to grant table permissions to other users or projects.
                • Revoke Permission: This permission allows you to revoke the table's permissions that other users or projects have but cannot revoke the table owner's permissions.
                • View Other Users' Permissions: This permission allows you to query other users' permissions on the current table.
                The partition table also has the following permissions:
                • Add Partition: This permission allows you to add a partition to a partition table.
                • Delete Partition: This permission allows you to delete existing partitions from a partition table.
                • Configure Path for Partition: This permission allows you to set the path of a partition in a partition table to a specified OBS path.
                • Rename Table Partition: This permission allows you to rename partitions in a partition table.
                • Restore Table Partition: This permission allows you to export partition information from the file system and save the information to metadata.
                • View All Partitions: This permission allows you to view all partitions in a partition table.
              @@ -92,7 +92,7 @@

              Authorization Object

              Select User or Project.

              +

              Select User or Project.

              Username/Project

              diff --git a/docs/dli/umn/dli_01_0451.html b/docs/dli/umn/dli_01_0451.html index ef9bda21..b770dde8 100644 --- a/docs/dli/umn/dli_01_0451.html +++ b/docs/dli/umn/dli_01_0451.html @@ -143,12 +143,12 @@

              DLI:*:*:table:databases.dbname.tables.*

              DLI, any region, any account ID, all table resources of database dbname

              +

              DLI, any region, any account ID, all table resources of database dbname

              DLI:*:*:database:databases.dbname

              DLI, any region, any account ID, resource of database dbname

              +

              DLI, any region, any account ID, resource of database dbname

              DLI:*:*:queue:queues.*

              @@ -240,7 +240,7 @@
              -
            10. Combine all the preceding fields into a JSON file to form a complete policy. You can set multiple actions and resources. You can also create a policy on the visualized page provided by IAM. For example:

              The authorized user has the permission to create and delete any database, submit jobs for any queue, and delete any table under any account ID in any region of DLI.

              +
            11. Combine all the preceding fields into a JSON file to form a complete policy. You can set multiple actions and resources. You can also create a policy on the visualized page provided by IAM. For example:

              Create a policy that grants users the permission to create and delete databases, submit jobs for any queue, and delete tables under any account ID in any region of DLI.

              {
                   "Version": "1.1",
                   "Statement": [
              diff --git a/docs/dli/umn/dli_01_0455.html b/docs/dli/umn/dli_01_0455.html
              index 691fb9cf..49ac41b5 100644
              --- a/docs/dli/umn/dli_01_0455.html
              +++ b/docs/dli/umn/dli_01_0455.html
              @@ -3,7 +3,7 @@
               

              Creating a Flink SQL Job

              This section describes how to create a Flink SQL job. You can use Flink SQLs to develop jobs to meet your service requirements. Using SQL statements simplifies logic implementation. You can edit Flink SQL statements for your job in the DLI SQL editor. This section describes how to use the SQL editor to write Flink SQL statements.

              DLI Flink OpenSource SQL jobs are fully compatible with the syntax of Flink 1.10 and 1.12 provided by the community. In addition, Redis, GaussDB(DWS), and DIS data source types are added based on the community connector.

              -

              Prerequisites

              • You have prepared the data input and data output channels. For details, see Preparing Flink Job Data.
              • When you use a Flink SQL job to access other external data sources, such as OpenTSDB, HBase, Kafka, DWS, RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a cross-source connection to connect the job running queue to the external data source.
                • For details about the external data sources that can be accessed by Flink jobs, see Cross-Source Analysis Development Methods.
                • For details about how to create a datasource connection, see Enhanced Datasource Connections. After a datasource connection is created, you can choose More > Test Address Connectivity in the Operation column on the Queue Management page to check whether the network connection between the queue and the external data source is normal. For details, see Testing Address Connectivity.
                +

                Prerequisites

                • You have prepared the data input and data output channels. For details, see Preparing Flink Job Data.
                • When you use a Flink SQL job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a datasource connection to connect the job running queue to the external data source.
                  • For details about the external data sources that can be accessed by Flink jobs, see Cross-Source Analysis Development Methods.
                  • For details about how to create a datasource connection, see Enhanced Datasource Connections. After a datasource connection is created, you can choose More > Test Address Connectivity in the Operation column on the Queue Management page to check whether the network connection between the queue and the external data source is normal. For details, see Testing Address Connectivity.

                Creating a Flink SQL Job

                1. In the left navigation pane of the DLI management console, choose Job Management > Flink Jobs. The Flink Jobs page is displayed.
                2. In the upper right corner of the Flink Jobs page, click Create Job.
                3. Specify job parameters.

                  @@ -43,7 +43,7 @@

                  • Tag key: Enter a tag key name in the text box.
                    NOTE:

                    A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

                  -
                  • Tag value: Enter a tag value in the text box.
                    NOTE:

                    A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                    +
                    • Tag value: Enter a tag value in the text box.
                      NOTE:

                      A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                    diff --git a/docs/dli/umn/dli_01_0457.html b/docs/dli/umn/dli_01_0457.html index 240999b5..63d5939e 100644 --- a/docs/dli/umn/dli_01_0457.html +++ b/docs/dli/umn/dli_01_0457.html @@ -2,7 +2,7 @@

                    Creating a Flink Jar Job

                    This section describes how to create a Flink Jar job. You can perform secondary development based on Flink APIs, build your own JAR file, and submit the JAR file to DLI queues. DLI is fully compatible with open-source community APIs. To create a custom Flink job, you need to compile and build application JAR files. You must have a certain understanding of Flink secondary development and have high requirements related to stream computing complexity.

                    -

                    Prerequisites

                    • Ensure that a dedicated queue has been created. To create a dedicated queue, select Dedicated Resource Mode when you choose the type of a queue during purchase.
                    • When creating a Flink Jar job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a cross-source connection to connect the job running queue to the external data source.
                      • For details about the external data sources that can be accessed by Flink jobs, see Cross-Source Analysis Development Methods.
                      • For details about how to create a datasource connection, see Enhanced Datasource Connections.

                        On the Resources > Queue Management page, locate the queue you have created, and choose More > Test Address Connectivity in the Operation column to check whether the network connection between the queue and the data source is normal. For details, see Testing Address Connectivity.

                        +

                        Prerequisites

                        • Ensure that a dedicated queue has been created. To create a dedicated queue, select Dedicated Resource Mode when you choose the type of a queue during purchase.
                        • When you use a Flink Jar job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a datasource connection to connect the job running queue to the external data source.
                          • For details about the external data sources that can be accessed by Flink jobs, see Cross-Source Analysis Development Methods.
                          • For details about how to create a datasource connection, see Enhanced Datasource Connections.

                            On the Resources > Queue Management page, locate the queue you have created, and choose More > Test Address Connectivity in the Operation column to check whether the network connection between the queue and the data source is normal. For details, see Testing Address Connectivity.

                        • When running a Flink Jar job, you need to build the secondary development application code into a Jar package and upload the JAR package to the created OBS bucket. Choose Data Management > Package Management to create a package. For details, see Creating a Package.

                          DLI does not support the download function. If you need to modify the uploaded data file, please edit the local file and upload it again.

                          @@ -43,7 +43,7 @@
                          • Tag key: Enter a tag key name in the text box.
                            NOTE:

                            A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

                          -
                          • Tag value: Enter a tag value in the text box.
                            NOTE:

                            A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                            +
                            • Tag value: Enter a tag value in the text box.
                              NOTE:

                              A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                            diff --git a/docs/dli/umn/dli_01_0461.html b/docs/dli/umn/dli_01_0461.html index dd839112..1381b13e 100644 --- a/docs/dli/umn/dli_01_0461.html +++ b/docs/dli/umn/dli_01_0461.html @@ -19,7 +19,7 @@

                            Stopping a Job

                            You can stop a job in the Running or Submitting state.

                            1. In the left navigation pane of the DLI management console, choose Job Management > Flink Jobs. The Flink Jobs page is displayed.
                            2. Stop a job using either of the following methods:

                              • Stopping a job

                                Locate the row that contains the job to be stopped, click More in the Operation column, and select Stop.

                                -

                                Alternatively, you can select the row where the job you want to stop locates and click Stop in the upper left of the job list.

                                +

                                 Alternatively, you can select the row containing the job you want to stop and click Stop in the upper left of the job list.

                              • Batch stopping jobs

                                Locate the rows containing the jobs you want to stop and click Stop in the upper left of the job list.

                            3. In the displayed Stop Job dialog box, click OK to stop the job.

                              • Before stopping a job, you can trigger a savepoint to save the job status information. When you start the job again, you can choose whether to restore the job from the savepoint.
                              • If you select Trigger savepoint, a savepoint is created. If Trigger savepoint is not selected, no savepoint is created. By default, the savepoint function is disabled.
                              • The lifecycle of a savepoint starts when the savepoint is triggered and stops the job, and ends when the job is restarted. The savepoint is automatically deleted after the job is restarted.
                              @@ -51,7 +51,7 @@
                              • When switching to another project or user, you need to grant permissions to the new project or user. For details, see Managing Flink Job Permissions.
                              • Only jobs whose data format is the same as that of Flink jobs exported from DLI can be imported.
                              1. In the left navigation pane of the DLI management console, choose Job Management > Flink Jobs. The Flink Jobs page is displayed.
                              1. Click Import Job in the upper right corner. The Import Job dialog box is displayed.

                                -

                              2. Select the complete OBS path of the job configuration file to be imported. Click Next.
                              3. Configure the same-name job policy and click next. Click Next.

                                • Select Overwrite job of the same name. If the name of the job to be imported already exists, the existing job configuration will be overwritten and the job status switches to Draft.
                                • If Overwrite job of the same name is not selected and the name of the job to be imported already exists, the job will not be imported.
                                +

                              4. Select the complete OBS path of the job configuration file to be imported. Click Next.
                              5. Configure the same-name job policy. Click Next.

                                • Select Overwrite job of the same name. If the name of the job to be imported already exists, the existing job configuration will be overwritten and the job status switches to Draft.
                                • If Overwrite job of the same name is not selected and the name of the job to be imported already exists, the job will not be imported.

                              6. Ensure that Config File and Overwrite Same-Name Job are correctly configured. Click Confirm to import the job.

                              Modifying Name and Description

                              You can change the job name and description as required.

                              diff --git a/docs/dli/umn/dli_01_0462.html b/docs/dli/umn/dli_01_0462.html index ed73481a..989e5407 100644 --- a/docs/dli/umn/dli_01_0462.html +++ b/docs/dli/umn/dli_01_0462.html @@ -2,7 +2,7 @@

                              Flink Job Details

                              After creating a job, you can view the job details to learn about the following information:

                              - +

                              Viewing Job Details

                              This section describes how to view job details. After you create and save a job, you can click the job name to view job details, including SQL statements and parameter settings. For a Jar job, you can only view its parameter settings.

                              1. In the left navigation pane of the DLI management console, choose Job Management > Flink Jobs. The Flink Jobs page is displayed.
                              2. Click the name of the job to be viewed. The Job Detail tab is displayed.

                                In the Job Details tab, you can view SQL statements and configured parameters.

                                The following uses a Flink SQL job as an example. @@ -158,7 +158,7 @@

                              -

                              Checking the Job Monitoring Information

                              You can use Cloud Eye to view details about job data input and output.

                              +

                              Checking Job Monitoring Information

                              You can use Cloud Eye to view details about job data input and output.

                              1. In the left navigation pane of the DLI management console, choose Job Management > Flink Jobs. The Flink Jobs page is displayed.
                              2. Click the name of the job you want. The job details are displayed.

                                Click Job Monitoring in the upper right corner of the page to switch to the Cloud Eye console.

                                The following table describes monitoring metrics related to Flink jobs.

                                diff --git a/docs/dli/umn/dli_01_0463.html b/docs/dli/umn/dli_01_0463.html index dfd6ead3..3b873fc2 100644 --- a/docs/dli/umn/dli_01_0463.html +++ b/docs/dli/umn/dli_01_0463.html @@ -31,7 +31,7 @@
            12. Tag value

              You can perform the following operations:

              -
              • Click the text box and select a predefined tag value from the drop-down list.
              • Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                +
                • Click the text box and select a predefined tag value from the drop-down list.
                • Enter a tag value in the text box.
                  NOTE:

                  A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

              @@ -53,7 +53,7 @@ diff --git a/docs/dli/umn/dli_01_0563.html b/docs/dli/umn/dli_01_0563.html new file mode 100644 index 00000000..aac50a18 --- /dev/null +++ b/docs/dli/umn/dli_01_0563.html @@ -0,0 +1,51 @@ + + +

              Setting Queue Properties

              +

              Scenario

              DLI allows you to set properties for queues.

              +

              You can set Spark driver parameters to improve the scheduling efficiency of queues.

              +

              This section describes how to set queue properties on the management console.

              +
              +

              Constraints and Limitations

              • Only SQL queues of the Spark engine support configuring queue properties.
              • Setting queue properties is only supported after the queue has been created.
              • Currently, only queue properties related to the Spark driver can be set.
              • Queue properties cannot be set in batches.
              • For a queue in an elastic resource pool, if the minimum CUs of the queue is less than 16 CUs, both Max. Spark Driver Instances and Max. Prestart Spark Driver Instances set in the queue properties do not apply.
              +
              +

              Procedure

              1. In the navigation pane of the DLI management console, choose Resources > Queue Management.
              2. In the Operation column of the queue, choose More > Set Property.
              3. Go to the queue property setting page and set property parameters. For details about the property parameters, see Table 1. +
              Table 1 Parameters in the Flink SQL sample template list

              Parameter

              +
              - - - - - - - @@ -98,7 +98,7 @@
              • Tag key: Enter a tag key name in the text box.
                NOTE:

                A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

              -
              • Tag value: Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                +
                • Tag value: Enter a tag value in the text box.
                  NOTE:

                  A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                @@ -107,50 +107,50 @@
              Table 1 Parameters in the Flink SQL sample template list

              Parameter

              Description

              +

              Description

              Name

              +

              Name

              Name of a template, which has 1 to 64 characters and only contains letters, digits, hyphens (-), and underscores (_).

              +

              Name of a template, which has 1 to 64 characters and only contains letters, digits, hyphens (-), and underscores (_).

              Description

              +

              Description

              Description of a template. It contains 0 to 512 characters.

              +

              Description of a template. It contains 0 to 512 characters.

              Operation

              +

              Operation

              Create Job: Create a job directly by using the template. After a job is created, the system switches to the Edit page under Job Management.

              +

              Create Job: Create a job directly by using the template. After a job is created, the system switches to the Edit page under Job Management.

            13. Click OK to enter the editing page.
              Table 4 describes the parameters on the template editing page. -
              diff --git a/docs/dli/umn/dli_01_0515.html b/docs/dli/umn/dli_01_0515.html new file mode 100644 index 00000000..37e993a8 --- /dev/null +++ b/docs/dli/umn/dli_01_0515.html @@ -0,0 +1,94 @@ + + +

              Creating an Elastic Resource Pool and Running a Job

              +
              This section walks you through the procedure of adding a queue to an elastic resource pool and binding an enhanced datasource connection to the elastic resource pool.
              Figure 1 Process of creating an elastic resource pool
              +
              + +
              Table 4 Template parameters

              Parameter

              +
              - - - - - - - - - - - - - - - - - diff --git a/docs/dli/umn/dli_01_0485.html b/docs/dli/umn/dli_01_0485.html index b6fbd347..ed0c3da6 100644 --- a/docs/dli/umn/dli_01_0485.html +++ b/docs/dli/umn/dli_01_0485.html @@ -12,8 +12,6 @@ - diff --git a/docs/dli/umn/dli_01_0486.html b/docs/dli/umn/dli_01_0486.html deleted file mode 100644 index 54f641d0..00000000 --- a/docs/dli/umn/dli_01_0486.html +++ /dev/null @@ -1,57 +0,0 @@ - - -

              Service Authorization

              -

              Prerequisites

              Only the tenant account or a subaccount of user group admin can authorize access.

              -
              -

              Procedure

              After entering the DLI management console, you are advised to set agency permissions to ensure that DLI can be used properly.

              -

              If you need to adjust the agency permissions, modify them on the Service Authorization page. For details about the required agency permissions, see Table 1.

              -
              1. Select required agency permissions and click Update Authorization. Only the tenant account or a subaccount of user group admin can authorize access. If the message "Agency permissions updated" is displayed, the update is successful.
              2. Once service authorization has succeeded, an agency named dli_admin_agency on IAM will be created. Go to the agency list to view the details. Do not delete dli_admin_agency.

                -
              - -
              Table 4 Template parameters

              Parameter

              Description

              +

              Description

              Name

              +

              Name

              You can modify the template name.

              +

              You can modify the template name.

              Description

              +

              Description

              You can modify the template description.

              +

              You can modify the template description.

              Saving Mode

              +

              Saving Mode

              • Save Here: Save the modification to the current template.
              • Save as New: Save the modification as a new template.
              +
              • Save Here: Save the modification to the current template.
              • Save as New: Save the modification as a new template.

              SQL statement editing area

              +

              SQL statement editing area

              In the area, you can enter detailed SQL statements to implement business logic. For details about how to compile SQL statements, see Data Lake Insight SQL Syntax Reference.

              +

              In the area, you can enter detailed SQL statements to implement business logic. For details about how to compile SQL statements, see Data Lake Insight SQL Syntax Reference.

              Save

              +

              Save

              Save the modifications.

              +

              Save the modifications.

              Create Job

              +

              Create Job

              Use the current template to create a job.

              +

              Use the current template to create a job.

              Format

              +

              Format

              Format SQL statements. After SQL statements are formatted, you need to compile SQL statements again.

              +

              Format SQL statements. After SQL statements are formatted, you need to compile SQL statements again.

              Theme Settings

              +

              Theme Settings

              Change the font size, word wrap, and page style (black or white background).

              +

              Change the font size, word wrap, and page style (black or white background).

              - - - - - - - - - - - - - - - - - - - - - -
              Table 1 DLI agency permissions

              Permission

              -

              Details

              -

              Remarks

              -

              Tenant Administrator (global service)

              -

              Tenant Administrator permissions are required to access data from OBS to execute Flink jobs on DLI, for example, obtaining OBS/DWS data sources, log dump (including bucket authorization), checkpointing enabling, and job import and export.

              -

              Due to cloud service cache differences, permission setting operations require about 60 minutes to take effect.

              -

              DIS Administrator

              -

              DIS Administrator permissions are required to use DIS data as the data source of DLI Flink jobs.

              -

              Due to cloud service cache differences, permission setting operations require about 30 minutes to take effect.

              -

              VPC Administrator

              -

              VPC Administrator permissions are required to use the VPC, subnet, route, VPC peering connection, and port for DLI datasource connections.

              -

              Due to cloud service cache differences, permission setting operations require about 3 minutes to take effect.

              -

              SMN Administrator

              -

              To receive notifications when a DLI job fails, SMN Administrator permissions are required.

              -

              Due to cloud service cache differences, permission setting operations require about 3 minutes to take effect.

              -
              -
              - - -
              - -
              - diff --git a/docs/dli/umn/dli_01_0489.html b/docs/dli/umn/dli_01_0489.html index 1bdfa0ac..133f1c4a 100644 --- a/docs/dli/umn/dli_01_0489.html +++ b/docs/dli/umn/dli_01_0489.html @@ -1,7 +1,7 @@

              Testing Address Connectivity

              -

              It can be used to test the connectivity between the DLI queue and the peer IP address specified by the user in common scenarios, or the connectivity between the DLI queue and the peer IP address bound to the datasource connection in cross-source connection scenarios. The operation is as follows:

              +

              It can be used to test the connectivity between the DLI queue and the peer IP address specified by the user in common scenarios, or the connectivity between the DLI queue and the peer IP address bound to the datasource connection in datasource connection scenarios. The operation is as follows:

              1. On the Queue Management page, locate the row containing the target queue, click More in the Operation column, and select Test Address Connectivity.
              2. On the Test Address Connectivity page, enter the address to be tested. The domain name and IP address are supported, and the port number can be specified.
              3. Click Test.

                If the test address is reachable, a message is displayed on the page, indicating that the address is reachable.

                If the test address is unreachable, the system displays a message indicating that the address is unreachable. Check the network configurations and try again. Network configurations include the VPC peering and the datasource connection. Check whether they have been activated.

              diff --git a/docs/dli/umn/dli_01_0498.html b/docs/dli/umn/dli_01_0498.html index 58e84739..6bd0eaab 100644 --- a/docs/dli/umn/dli_01_0498.html +++ b/docs/dli/umn/dli_01_0498.html @@ -3,7 +3,7 @@

              (Recommended) Creating a Flink OpenSource SQL Job

              This section describes how to create a Flink OpenSource SQL job.

              DLI Flink OpenSource SQL jobs are fully compatible with the syntax of Flink 1.10 and 1.12 provided by the community. In addition, Redis, GaussDB(DWS), and DIS data source types are added based on the community connector. For details about the syntax and restrictions of Flink SQL DDL, DML, and functions, see Table API & SQL.

              -

              Prerequisites

              • You have prepared the data input and data output channels. For details, see Preparing Flink Job Data.
              • Before creating a Flink OpenSource SQL job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a cross-source connection to connect the job running queue to the external data source.
                • For details about the external data sources that can be accessed by Flink jobs, see Cross-Source Analysis Development Methods.
                • For details about how to create a datasource connection, see Enhanced Datasource Connections.

                  On the Resources > Queue Management page, locate the queue you have created, and choose More > Test Address Connectivity in the Operation column to check whether the network connection between the queue and the data source is normal. For details, see Testing Address Connectivity.

                  +

                  Prerequisites

                  • You have prepared the data input and data output channels. For details, see Preparing Flink Job Data.
                  • Before creating a Flink OpenSource SQL job to access other external data sources, such as OpenTSDB, HBase, Kafka, GaussDB(DWS), RDS, CSS, CloudTable, DCS Redis, and DDS, you need to create a datasource connection to connect the job running queue to the external data source.
                    • For details about the external data sources that can be accessed by Flink jobs, see Cross-Source Analysis Development Methods.
                    • For details about how to create a datasource connection, see Enhanced Datasource Connections.

                      On the Resources > Queue Management page, locate the queue you have created, and choose More > Test Address Connectivity in the Operation column to check whether the network connection between the queue and the data source is normal. For details, see Testing Address Connectivity.

                  @@ -45,7 +45,7 @@
                  • Tag key: Enter a tag key name in the text box.
                    NOTE:

                    A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

                  -
                  • Tag value: Enter a tag value in the text box.
                    NOTE:

                    A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                    +
                    • Tag value: Enter a tag value in the text box.
                      NOTE:

                      A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                    diff --git a/docs/dli/umn/dli_01_0504.html b/docs/dli/umn/dli_01_0504.html new file mode 100644 index 00000000..6c85a9c6 --- /dev/null +++ b/docs/dli/umn/dli_01_0504.html @@ -0,0 +1,113 @@ + + +

                    Overview

                    +

                    Elastic Resource Pool

                    An elastic resource pool provides compute resources (CPU and memory) for running DLI jobs. The unit is CU. One CU contains one CPU and 4 GB memory.

                    +

                    You can create multiple queues in an elastic resource pool. Compute resources can be shared among queues. You can properly set the resource pool allocation policy for queues to improve compute resource utilization.

                    +
                    +

                    Specifications

                    DLI offers compute resources in the specifications listed in Table 1.

                    + +
                    + + + + + + + + + + + + + + + + +
                    Table 1 Elastic resource pool specifications

                    Edition

                    +

                    Specification

                    +

                    Constraint

                    +

                    Scenario

                    +

                    Basic

                    +

                    16–64 CUs

                    +
                    • High reliability and availability are not supported.
                    • Queue properties and job priorities cannot be set.
                    • Interconnection with notebook instances is not supported.
                    +

                    For more constraints and limitations on elastic resource pools, see Constraints.

                    +

                    This edition is suitable for testing scenarios with low resource consumption and low requirements for resource reliability and availability.

                    +

                    Standard

                    +

                    64 CUs or higher

                    +

                    For more constraints and limitations on elastic resource pools, see Constraints.

                    +

                    This edition offers powerful computing capabilities, high availability, and flexible resource management. It is suitable for large-scale computing tasks and business scenarios with long-term resource planning needs.

                    +
                    +
                    +
                    +

                    Constraints

                    • The region of an elastic resource pool cannot be changed.
                    • Jobs of Flink 1.10 or later can run in elastic resource pools.
                    • The network segment of an elastic resource pool cannot be changed after being set.
                    • You can only view the scaling history of resource pools in the last 30 days.
                    • Elastic resource pools cannot access the Internet.

                      +

                      +
                    +
                    +

                    Scenario

                    Resources are too fixed to meet a range of requirements.

                    +

                    The quantities of compute resources required for jobs change at different times of the day. If the resources cannot be scaled based on service requirements, they may be wasted or insufficient. Figure 1 shows the resource usage during a day.

                    +
                    • After ETL jobs are complete, no other jobs are running during 04:00 to 07:00 in the early morning. The resources could be released at that time.
                    • From 09:00 to 12:00 a.m. and 02:00 to 04:00 p.m., a large number of ETL report and job queries are queuing for compute resources.
                      Figure 1 Fixed resources
                      +
                    +

                    Resources are isolated and cannot be shared.

                    +
                    A company has two departments, and each runs its jobs on a DLI queue. Department A is idle from 08:00 to 12:00 a.m. and has remaining resources, while department B has a large number of service requests during this period and needs more resources to meet the requirements. Since the resources are isolated and cannot be shared between departments A and B, the idle resources are wasted.
                    Figure 2 Resource waste due to resource isolation
                    +
                    +

                    Elastic resource pools can be accessed by different queues and automatically scaled to improve resource utilization and handle resource peaks.

                    +

                    You can use elastic resource pools to centrally manage and allocate resources. Multiple queues can be bound to an elastic resource pool to share the pooled resources.

                    +
                    +

                    Architecture and Advantages

                    Elastic resource pools support the CCE cluster architecture for heterogeneous resources so you can centrally manage and allocate them.

                    +

                    Elastic resource pools have the following advantages:

                    +
                    • Unified management
                      • You can manage multiple internal clusters and schedule jobs. You can manage millions of cores for compute resources.
                      • Elastic resource pools can be deployed across multiple AZs to support high availability. (This function will be supported in later versions.)
                      +
                    • Tenant resource isolation

                      Resources of different queues are isolated to reduce the impact on each other.

                      +
                    • Shared access and flexibility
                      • Specifications can be scaled in seconds to help you handle request peaks.
                      • Queue priorities and CU quotas can be set at different times to improve resource utilization.
                      +
                    • Job-level isolation (supported in later versions)

                      SQL jobs can run on independent Spark instances, reducing mutual impacts between jobs.

                      +
                    • Automatic scaling (supported in later versions)

                      The queue quota is updated in real time based on workload and priority.

                      +
                    +

                    Using elastic resource pools has the following advantages.

                    + +
                    + + + + + + + + + + + + + + + + + + + + +

                    Advantage

                    +

                    No Elastic Resource Pool

                    +

                    Use Elastic Resource Pool

                    +

                    Efficiency

                    +

                    You need to set scaling tasks repeatedly to improve the resource utilization.

                    +

                    Dynamic scaling can be done in seconds.

                    +

                    Resource utilization

                    +

                    Resources cannot be shared among different queues.

                    +

                    For example, if queue 1 has 10 unused CUs and queue 2 requires more resources due to heavy load, queue 2 cannot utilize the resources of queue 1. It has to be scaled up.

                    +

                    Queues added to the same elastic resource pool can share compute resources.

                    +

                    When you set a data source, you must allocate different network segments to each queue, which requires a large number of VPC network segments.

                    +

                    You can add multiple general-purpose queues in the same elastic resource pool to one network segment, simplifying the data source configuration.

                    +

                    Resource allocation

                    +

                    If resources are insufficient for scale-out tasks of multiple queues, some queues will fail to be scaled out.

                    +

                    You can set the priority for each queue in the elastic resource pool based on the peak hours to ensure proper resource allocation.

                    +
                    +
                    +

                    You can perform the following operations on elastic resource pools:

                    + +
                    +
                    +
                    + +
                    + diff --git a/docs/dli/umn/dli_01_0505.html b/docs/dli/umn/dli_01_0505.html new file mode 100644 index 00000000..464fb803 --- /dev/null +++ b/docs/dli/umn/dli_01_0505.html @@ -0,0 +1,83 @@ + + +

                    Creating an Elastic Resource Pool

                    +

                    For details about the application scenarios of elastic resource pools, see the Overview. This section describes how to create an elastic resource pool.

                    +

                    Precautions

                    • If you use an enhanced datasource connection, the network segment of the elastic resource pool cannot overlap that of the data source.
                    • The network segment of an elastic resource pool cannot be changed after being set.
                    +
                    +

                    Creating an Elastic Resource Pool

                    1. In the navigation pane on the left, choose Resources > Resource Pool.
                    2. On the Resource Pool page, click Buy Resource Pool in the upper right corner.
                    3. On the displayed page, set the following parameters:

                      +

                      + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
                      Table 1 Parameters

                      Parameter

                      +

                      Description

                      +

                      Region

                      +

                      Select a region. Select a region near you to ensure the lowest latency possible.

                      +

                      Project

                      +

                      Each region corresponds to a project.

                      +

                      Name

                      +

                      Name of the elastic resource pool.

                      +
                      • The name can contain only digits, letters, and underscores (_), but cannot contain only digits or start with an underscore (_) or digit.
                      • The description can contain a maximum of 128 characters.
                      +
                      NOTE:

                      The elastic resource pool name is case-insensitive. Uppercase letters will be automatically converted to lowercase letters.

                      +
                      +

                      CU Range

                      +

                      The maximum and minimum CUs allowed for the elastic resource pool.

                      +

                      Description

                      +

                      Description of the elastic resource pool

                      +

                      CIDR Block

                      +

                      The CIDR block to which the elastic resource pool belongs. If you use an enhanced datasource connection, the CIDR block of the elastic resource pool cannot overlap that of the data source. The CIDR block of an elastic resource pool cannot be modified after being set.

                      +

                      Recommended CIDR block:

                      +

                      10.0.0.0~10.255.0.0/16~19

                      +

                      172.16.0.0~172.31.0.0/16~19

                      +

                      192.168.0.0~192.168.0.0/16~19

                      +

                      Enterprise Project

                      +

                      If the created elastic resource pool belongs to an enterprise project, select the enterprise project.

                      +

                      An enterprise project facilitates project-level management and grouping of cloud resources and users.

                      +
                      NOTE:

                      This parameter is displayed only for users who have enabled the Enterprise Management Service.

                      +
                      +

                      Tags

                      +

                      Tags used to identify cloud resources. A tag includes the tag key and tag value. If you want to use the same tag to identify multiple cloud resources, that is, to select the same tag from the drop-down list box for all services, you are advised to create predefined tags on the Tag Management Service (TMS).

                      +
                      NOTE:
                      • A maximum of 20 tags can be added.
                      • Only one tag value can be added to a tag key.
                      • The key name in each resource must be unique.
                      +
                      +
                      • Tag key: Enter a tag key name in the text box.
                        NOTE:

                        A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

                        +
                        +
                      +
                      • Tag value: Enter a tag value in the text box.
                        NOTE:

                        A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                        +
                        +
                      +
                      +
                      +

                    4. Click Buy and confirm the configurations.
                    5. Click Pay. Wait until the status of the elastic resource pool changes to Available. The elastic resource pool is successfully created.
                    6. Refer to Creating an Elastic Resource Pool and Running a Job and Configuring Scaling Policies for Queues to perform subsequent operations as needed.
                    +
                    +
                    +
                    + +
                    + diff --git a/docs/dli/umn/dli_01_0506.html b/docs/dli/umn/dli_01_0506.html new file mode 100644 index 00000000..73af2415 --- /dev/null +++ b/docs/dli/umn/dli_01_0506.html @@ -0,0 +1,177 @@ + + +

                    Managing Queues

                    +

                    Multiple queues can be added to an elastic resource pool. For details about how to add a queue, see Adding a Queue. You can configure the number of CUs you want based on the compute resources used by DLI queues during peaks and troughs and set priorities for the scaling policies to ensure stable running of jobs.

                    +

                    Precautions

                    • In any time segment of a day, the total minimum CUs of all queues in an elastic resource pool cannot be more than the minimum CUs of the pool.
                    • In any time segment of a day, the maximum CUs of any queue in an elastic resource pool cannot be more than the maximum CUs of the pool.
                    • The periods of scaling policies cannot overlap.
                    • The period of a scaling policy can only be set by hour and specified by the start time and end time. For example, if you set the period to 00-09, the time range when the policy takes effect is [00:00, 09:00). The period of the default scaling policy cannot be modified.
                    • In any period, compute resources are preferentially allocated to meet the minimum number of CUs of all queues. The remaining CUs (total CUs of the elastic resource pool – total minimum CUs of all queues) are allocated in accordance with the scaling policy priorities.
                    • After the queue is scaled out, the system starts billing you for the added CUs. So, if you do not need the added resources, scale in your queue to release unnecessary CUs to save cost. +
                      + + + + + + + + + + + + + + + + +
                      Table 1 CU allocation (without jobs)

                      Scenario

                      +

                      CUs

                      +

                      An elastic resource pool has a maximum number of 256 CUs for queue A and queue B. The scaling policies are as follows:

                      +
                      • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                      • Queue B: priority 10; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                      +

                      From 00:00 a.m. to 09:00 a.m.:

                      +
                      1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are 160 CUs remaining.
                      2. The remaining CUs are allocated based on the priorities. Queue B is prior to queue A. Therefore, queue B gets 128 CUs, and queue A has 32 CUs.
                      +

                      An elastic resource pool has a maximum number of 96 CUs for queue A and queue B. The scaling policies are as follows:

                      +
                      • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                      • Queue B: priority 10; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                      +

                      From 00:00 a.m. to 09:00 a.m.:

                      +
                      1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are no remaining CUs.
                      2. The allocation is complete.
                      +

                      An elastic resource pool has a maximum number of 128 CUs for queue A and queue B. The scaling policies are as follows:

                      +
                      • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                      • Queue B: priority 10; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                      +

                      From 00:00 a.m. to 09:00 a.m.:

                      +
                      1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are 32 CUs remaining.
                      2. The remaining 32 CUs are preferentially allocated to queue B.
                      +

                      An elastic resource pool has a maximum number of 128 CUs for queue A and queue B. The scaling policies are as follows:

                      +
                      • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                      • Queue B: priority 5; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                      +

                      From 00:00 a.m. to 09:00 a.m.:

                      +
                      1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are 32 CUs remaining.
                      2. The two queues have the same priority, the remaining 32 CUs are randomly allocated to the two queues.
                      +
                      +
                      + +
                      + + + + + + + + + + + + + + + + + + + + + + + +
                      Table 2 CU allocation (with jobs)

                      Scenario

                      +

                      Actual CUs of Elastic Resource Pool

                      +

                      CUs Allocated to Queue A

                      +

                      CUs Allocated to Queue B

                      +

                      Allocation Description

                      +

                      Queues A and B are added to the elastic resource pool. The scaling policies are as follows:

                      +
                      • Queue A: period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                      • Queue B: period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                      +

                      192 CUs

                      +

                      64 CUs

                      +

                      128 CUs

                      +

                      If the actual CUs of the elastic resource pool are greater than or equal to the sum of the maximum CUs of the two queues,

                      +

                      the maximum CUs are allocated to both queues.

                      +

                      96 CUs

                      +

                      32 CUs

                      +

                      64 CUs

                      +

                      The elastic resource pool preferentially meets the minimum CUs of the two queues.

                      +

                      After the minimum CUs are allocated to the two queues, no CUs are allocatable.

                      +

                      128 CUs

                      +

                      32 CUs to 64 CUs

                      +

                      64 CUs to 96 CUs

                      +

                      The elastic resource pool preferentially meets the minimum CUs of the two queues. That is, 32 CUs are allocated to queue A, 64 CUs are allocated to queue B, and the remaining 32 CUs are available.

                      +

                      The remaining CUs are allocated based on the queue load and priority. The actual CUs of the queue change within the range listed.

                      +
                      +
                      +
                    +
                    +

                    Managing Queues

                    1. In the navigation pane on the left, choose Resources > Resource Pool.
                    2. Locate the target elastic resource pool and click Queue MGMT in the Operation column. The Queue Management page is displayed.
                    3. View the queues added to the elastic resource pool.

                      +

                      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
                      Table 3 Queue parameters

                      Parameter

                      +

                      Description

                      +

                      Name

                      +

                      Queue name

                      +

                      Type

                      +

                      Queue type

                      +
                      • For SQL
                      • For general purpose
                      +

                      Period

                      +

                      The start and end time of the queue scaling policy. This time range includes the start time but not the end time, that is, [start time, end time).

                      +

                      Min CUs

                      +

                      Minimum number of CUs allowed by the scaling policy.

                      +

                      Max CUs

                      +

                      Maximum number of CUs allowed by the scaling policy.

                      +

                      Priority

                      +

                      Priority of the scaling policy for a queue in the elastic resource pool. The priority ranges from 1 to 100. A smaller value indicates a lower priority.

                      +

                      Engine

                      +

                      For a queue running SQL jobs, the engine is Spark.

                      +

                      For a queue for general purpose, the engine can be Spark or Flink, but it is displayed as -- on this page.

                      +

                      Created

                      +

                      Time when a queue is added to the elastic resource pool

                      +

                      Enterprise Project

                      +

                      Enterprise project the queue belongs to.

                      +

                      Queues under different enterprise projects can be added to an elastic resource pool.

                      +

                      Owner

                      +

                      User who added this queue

                      +

                      Operation

                      +
                      • Edit: Modify or add a scaling policy.
                      • Delete: Delete the queue.
                      +
                      +
                      +

                    4. Locate the target queue and click Edit in the Operation column.
                    5. In the displayed Queue Management pane, perform the following operations as needed:

                      • Add: Click Create to add a scaling policy. Set Priority, Period, Min CU, and Max CU, and click OK.
                      • Modify: Modify parameters of an existing scaling policy and click OK.
                      • Delete: Locate the row that contains the scaling policy you want, click Delete and click OK.

                        The Priority and Period parameters must meet the following requirements:

                        +
                        • Priority: The default value is 1. The value ranges from 1 to 100. A bigger value indicates a higher priority.
                        • Period:
                          • You can only set the period to hours in [start time,end time) format.
                          • For example, if you set Period to 01 and 17, the scaling policy takes effect from 01:00 a.m. till 05:00 p.m.
                          • The periods of scaling policies with different priorities cannot overlap.
                          +
                        • Max CUs and Min CUs:
                          • In any time segment of a day, the total minimum CUs of all queues in an elastic resource pool cannot be more than the minimum CUs of the pool.
                          • In any time segment of a day, the maximum CUs of any queue in an elastic resource pool cannot be more than the maximum CUs of the pool.
                          +
                        +
                        +
                      +

                    6. After you finish the settings, click the statistics icon in the upper right corner of the queue list to view all scaling policies of all queues in the elastic resource pool.

                      +

                    7. View the scaling task generated when the scaling starts. Go to Job Management > SQL Jobs and view the jobs of the SCALE_QUEUE type.
                    +
                    +
                    +
                    + +
                    + diff --git a/docs/dli/umn/dli_01_0507.html b/docs/dli/umn/dli_01_0507.html new file mode 100644 index 00000000..13395af9 --- /dev/null +++ b/docs/dli/umn/dli_01_0507.html @@ -0,0 +1,26 @@ + + +

                    Setting CUs

                    +

                    CU settings are used to control the maximum and minimum CU ranges for elastic resource pools to avoid unlimited resource scaling.

                    +

                    For example, an elastic resource pool has a maximum of 256 CUs and two queues, and each queue must have at least 64 CUs. If you want to add another queue that needs at least 256 CUs to the elastic resource pool, the operation is not allowed due to the maximum CUs of the elastic resource pool.

                    +

                    Precautions

                    • In any time segment of a day, the total minimum CUs of all queues in an elastic resource pool cannot be more than the minimum CUs of the pool.
                    • In any time segment of a day, the maximum CUs of any queue in an elastic resource pool cannot be more than the maximum CUs of the pool.
                    • When you change the minimum CUs of a created elastic resource pool, ensure that the value is no more than the current CU value. Otherwise, the modification fails.
                    +
                    +

                    Setting CUs

                    1. In the navigation pane on the left, choose Resources > Resource Pool.
                    2. Locate the row that contains the desired elastic resource pool, click More in the Operation column, and select Set CUs.
                    3. In the Set CUs dialog box, set the minimum CUs on the left and the maximum CUs on the right. Click OK.
                    +
                    +

                    FAQ

                    • How Do I Change the Minimum CUs of the Existing Queues in an Elastic Resource Pool If the Total CUs of the Queues Equal the Minimum CUs of the Elastic Resource Pool?

                      Answer:

                      +
                      • Step 1: Increase the maximum number of CUs of the existing queues so that the current number of CUs of the elastic resource pool is no less than the target minimum number of CUs (the sum of the minimum number of CUs of the queues you want to change to).

                        If the maximum number of CUs of the elastic resource pool is equal to its minimum number of CUs, increase the maximum number of CUs.

                        +
                        +
                      • Step 2: Set the minimum CUs of the elastic resource pool.
                      • Step 3: Change the minimum CUs of the existing queues in the elastic resource pool.
                      +
                    • How Do I Add Queues to an Elastic Resource Pool If the Total CUs of the Queues Equal the Minimum CUs of the Elastic Resource Pool?

                      Answer:

                      +
                      • Step 1: Increase the maximum number of CUs of the existing queues so that the current number of CUs of the elastic resource pool is no less than the target minimum number of CUs (the sum of the minimum number of CUs of the queues you want to change to).

                        If the maximum number of CUs of the elastic resource pool is equal to its minimum number of CUs, increase the maximum number of CUs.

                        +
                        +
                      • Step 2: Set the minimum CUs of the elastic resource pool.
                      • Step 3: Add queues to the elastic resource pool.
                      • Step 4: Restore the maximum CUs of the queues you have increased in Step 1.
                      +
                    +
                    +
                    +
                    + +
                    + diff --git a/docs/dli/umn/dli_01_0508.html b/docs/dli/umn/dli_01_0508.html new file mode 100644 index 00000000..9c6bb96c --- /dev/null +++ b/docs/dli/umn/dli_01_0508.html @@ -0,0 +1,13 @@ + + +

                    Elastic Resource Pool

                    +
                    + + diff --git a/docs/dli/umn/dli_01_0509.html b/docs/dli/umn/dli_01_0509.html new file mode 100644 index 00000000..86396744 --- /dev/null +++ b/docs/dli/umn/dli_01_0509.html @@ -0,0 +1,104 @@ + + +

                    Adding a Queue

                    +

                    You can add one or more queues to an elastic resource pool to run jobs. This section describes how to add a queue to an elastic resource pool.

                    +

                    Precautions

                    Automatic scaling of an elastic resource pool cannot be triggered for Flink jobs.

                    +
                    +

                    Adding a Queue

                    1. In the navigation pane on the left, choose Resources > Resource Pool.
                    2. Locate the target elastic resource pool and click Add Queue in the Operation column.
                    3. On the Add Queue page, configure basic queue information. The following table describes the parameters.

                      +

                      + + + + + + + + + + + + + + + + + + + + + + +
                      Table 1 Queue information

                      Parameter

                      +

                      Description

                      +

                      Name

                      +

                      Name of the queue to add

                      +

                      Type

                      +
                      • For SQL: The queue is used to run SQL jobs.
                      • For general purpose: The queue is used to run Spark and Flink jobs.
                      +

                      Engine

                      +

                      If Type is For SQL, the queue engine can be spark or trino.

                      +

                      Enterprise Project

                      +

                      Select the enterprise project the queue belongs to. Queues under different enterprise projects can be added to an elastic resource pool.

                      +

                      Enterprise projects let you manage cloud resources and users by project.

                      +
                      NOTE:

                      This parameter is displayed only for users who have enabled the Enterprise Management Service.

                      +
                      +

                      Description

                      +

                      Description about the queue.

                      +

                      Tags

                      +

                      Tags used to identify cloud resources. A tag includes the tag key and tag value. If you want to use the same tag to identify multiple cloud resources, that is, to select the same tag from the drop-down list box for all services, you are advised to create predefined tags on the Tag Management Service (TMS).

                      +
                      NOTE:
                      • A maximum of 20 tags can be added.
                      • Only one tag value can be added to a tag key.
                      • The key name in each resource must be unique.
                      +
                      +
                      • Tag key: Enter a tag key name in the text box.
                        NOTE:

                        A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the value cannot start or end with a space or start with _sys_.

                        +
                        +
                      +
                      • Tag value: Enter a tag value in the text box.
                        NOTE:

                        A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                        +
                        +
                      +
                      +
                      +

                    4. Click Next. On the displayed page, configure a scaling policy for the queue in the elastic resource pool.

                      Click Create to add a scaling policy with specified priority, period, Minimum CUs, and Maximum CUs. The parameters of each scaling policy are as follows:

                      + +
                      + + + + + + + + + + + + + + + + +
                      Table 2 Auto scaling policy

                      Parameter

                      +

                      Description

                      +

                      Priority

                      +

                      A bigger priority value indicates a higher priority. You can set a number ranging from 1 to 100.

                      +

                      Period

                      +

                      Time segment when the policy takes effect. It can be set only by hour. The start time is on the left, and the end time is on the right.

                      +
                      • The time range includes the start time but not the end time, that is, [start time, end time).

                        For example, if the Period is set to 01 and 17, the scaling policy takes effect from 01:00 a.m. till 05:00 p.m.

                        +
                      +
                      • The periods of scaling policies with different priorities cannot overlap.
                      +

                      Min CU

                      +

                      Minimum number of CUs allowed by the scaling policy.

                      +
                      • In any time segment of a day, the total minimum CUs of all queues in an elastic resource pool cannot be more than the minimum CUs of the pool.
                      • If the minimum CUs of the queue is less than 16 CUs, both Max. Spark Driver Instances and Max. Prestart Spark Driver Instances set in the queue properties do not apply. Refer to Setting Queue Properties.
                      +

                      Max CU

                      +

                      Maximum number of CUs allowed by the scaling policy.

                      +

                      In any time segment of a day, the maximum CUs of any queue in an elastic resource pool cannot be more than the maximum CUs of the pool.

                      +
                      +
                      +

                      The first scaling policy is the default policy and cannot be deleted or modified.

                      +
                      +

                    5. Click OK. View all queues and scaling policies added to the elastic resource pool by referring to Managing Queues.
                    +
                    +
                    +
                    + +
                    + diff --git a/docs/dli/umn/dli_01_0513.html b/docs/dli/umn/dli_01_0513.html index 76be00db..30981876 100644 --- a/docs/dli/umn/dli_01_0513.html +++ b/docs/dli/umn/dli_01_0513.html @@ -14,7 +14,7 @@
                  • -
              diff --git a/docs/dli/umn/dli_01_0514.html b/docs/dli/umn/dli_01_0514.html index 59156f94..95de5686 100644 --- a/docs/dli/umn/dli_01_0514.html +++ b/docs/dli/umn/dli_01_0514.html @@ -32,7 +32,7 @@

              Datasource connection status. The following three statuses are available:

              • Creating
              • Active
              • Failed
              -
              NOTE:

              If the connection status is Failed, click on the left to view the detailed error information.

              +
              NOTE:

              If the connection status is Failed, click on the left to view the detailed error information.

              + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Procedure

              Step

              +

              Description

              +

              Reference

              +

              Create an elastic resource pool

              +

              Create an elastic resource pool and configure basic information, such as the billing mode, CU range, and CIDR block.

              +

              Creating an Elastic Resource Pool

              +

              Add a queue to the elastic resource pool

              +

              Add the queue where your jobs will run on to the elastic resource pool. The operations are as follows:

              +
              1. Set basic information about the queue, such as the name and type.
              2. Configure the scaling policy of the queue, including the priority, period, and the maximum and minimum CUs allowed for scaling.
              +

              Adding a Queue

              +

              Managing Queues

              +

              (Optional) Create an enhanced datasource connection.

              +

              If a job needs to access data from other data sources, for example, GaussDB(DWS) and RDS, you need to create a datasource connection.

              +

              The created datasource connection must be bound to the elastic resource pool.

              +

              Creating an Enhanced Datasource Connection

              +

              Run a job.

              +

              Create and submit the job as you need.

              +

              SQL Job Management

              +

              Overview

              +

              Creating a Spark Job

              +
              +
              +

              Step 1: Create an Elastic Resource Pool

              1. Log in to the DLI management console. In the navigation pane on the left, choose Resources > Resource Pool.
              2. On the displayed Resource Pool page, click Buy Resource Pool in the upper right corner.
              3. On the displayed page, set the following parameters:
                • Name: Enter the name of the elastic resource pool. For example, pool_test.
                • CU range: Minimum and maximum CUs of the elastic resource pool.
                • CIDR Block: Network segment of the elastic resource pool. For example, 172.16.0.0/18.
                • Set other parameters as required.
                +

                For details about how to create an elastic resource pool, see Creating an Elastic Resource Pool.

                +
              4. Click Buy. Confirm the configuration and click Pay.
              5. Go to the Resource Pool page to view the creation status. If the status is Available, the elastic resource pool is ready for use.
              +
              +

              Step 2: Add a Queue to the Elastic Resource Pool

              1. In the Operation column of the created elastic resource pool, click Add Queue.
              2. Specify the basic information about the queue. The configuration parameters are as follows:
                • Name: Queue name
                • Type: Queue type. In this example, select For general purpose.

                  For SQL: The queue is used to run Spark SQL jobs.

                  +

                  For general purpose: The queue is used to run Flink and Spark Jar jobs.

                  +
                • Set other parameters as required.
                +
              3. Click Next. On the displayed page, set Min CU to 64 and Max CU to 64.
              4. Click OK. The queue is added.
              +
              +

              (Optional) Step 3: Create an Enhanced Datasource Connection

              In this example, a datasource connection is required to connect to RDS. You need to create a datasource connection. If your job does not need to connect to an external data source, skip this step.

              +
              1. Log in to the RDS console and create an RDS DB instance. For details, see . Log in to the RDS instance, create a database and name it test2.
              2. Locate the row that contains the test2 database, click Query SQL Statements in the Operation column. On the displayed page, enter the following statement to create table tabletest2. Click Execute SQL. The table creation statement is as follows:
                CREATE TABLE `tabletest2` (
                +	`id` int(11) unsigned,
                +	`name` VARCHAR(32)
                +)	ENGINE = InnoDB	DEFAULT CHARACTER SET = utf8mb4;
                +
              3. On the RDS console, choose Instances from the navigation pane. Click the name of a created RDS DB instance to view its basic information.
              4. In the Connection Information pane, obtain the floating IP address, database port, VPC, and subnet.
              5. Click the security group name. In the Inbound Rules tab, add a rule to allow access from the CIDR block of the elastic resource pool. For example, if the CIDR block of the elastic resource pool is 172.16.0.0/18 and the database port is 3306, set the rule Priority to 1, Action to Allow, Protocol to TCP and Port to 3306, Type to IPv4, and Source to 172.16.0.0/18.

                Click OK. The security group rule is added.

                +
              6. Log in to the DLI management console. In the navigation pane on the left, choose Datasource Connections. On the displayed page, click Create in the Enhanced tab.
              7. In the displayed dialog box, set the following parameters:
                • Connection Name: Name of the enhanced datasource connection
                • Resource Pool: Select the elastic resource pool created in Step 1: Create an Elastic Resource Pool.

                  If you cannot decide the elastic resource pool in this step, you can skip this parameter, go to the Enhanced tab, and click More > Bind Resource Pool in the Operation column of the row that contains this datasource connection after it is created.

                  +
                  +
                • VPC: Select the VPC of the RDS DB instance obtained in 4.
                • Subnet: Select the subnet of the RDS DB instance obtained in 4.
                • Set other parameters as you need.
                +

                Click OK. Click the name of the created datasource connection to view its status. You can perform subsequent steps only after the connection status changes to Active.

                +
              8. Click Resources > Queue Management, select the target queue, for example, general_test. In the Operation column, click More and select Test Address Connectivity.
              9. In the displayed dialog box, enter Floating IP address:Database port of the RDS database in the Address box and click Test to check whether the database is reachable.
              +
              +

              Step 4: Run a Job

              Run a Flink SQL job on a queue in an elastic resource pool.

              +
              1. On the DLI management console, choose Job Management > Flink Jobs. On the Flink Jobs page, click Create Job.
              2. In the Create Job dialog box, set Type to Flink SQL and Name to testFlinkSqlJob. Click OK.
              3. On the job editing page, set the following parameters:
                • Queue: Select the general_test queue added to the elastic resource pool in Step 2: Add a Queue to the Elastic Resource Pool.
                • Save Job Log: Enable this function.
                • OBS Bucket: Select an OBS bucket for storing job logs and grant access permissions of the OBS bucket as prompted.
                • Enable Checkpointing: Enable.
                • Enter the SQL statement in the editing pane. The following is an example. Modify the parameters in bold as you need.
                  CREATE SINK STREAM car_info (id INT, name STRING) WITH (
                  +  type = "rds",
                  +  region = "", /* Change the value to the current region ID. */
                  +   'pwd_auth_name'="xxxxx", // Name of the datasource authentication of the password type created on DLI. If datasource authentication is used, you do not need to set the username and password for the job.
                  +db_url = "mysql://192.168.x.x:3306/test2", /* The format is mysql://floating IP address:port number of the RDS database/database name. */
                  +table_name = "tabletest2" /* Table name in RDS database */
                  +);
                  +INSERT INTO
                  +  car_info
                  +SELECT
                  +  13,
                  +  'abc';
                  +
                +
              4. Click Check Semantic and ensure that the SQL statement passes the check. Click Save. Click Start, confirm the job parameters, and click Start Now to execute the job.
              5. Wait until the job is complete. The job status changes to Completed.
              6. Log in to the RDS console, click the name of the RDS DB instance. On the displayed page, click the name of the created database, for example, test2, and click Query SQL Statements in the Operation column of the row containing the tabletest2 table.
              7. On the displayed page, click Execute SQL. Check whether data has been written into the RDS table.
              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0516.html b/docs/dli/umn/dli_01_0516.html new file mode 100644 index 00000000..195139da --- /dev/null +++ b/docs/dli/umn/dli_01_0516.html @@ -0,0 +1,121 @@ + + +

              Configuring Scaling Policies for Queues

              +

              Scenario

              A company has multiple departments that perform data analysis in different periods during a day.

              +
              • Department A requires a large number of compute resources from 00:00 a.m. to 09:00 a.m. In other time segments, only small tasks are running.
              • Department B requires a large number of compute resources from 10:00 a.m. to 10:00 p.m. Some periodical tasks are running in other time segments during a day.
              +

              In the preceding scenario, you can add two queues to an elastic resource pool: queue test_a for department A, and queue test_b for department B. You can add scaling policies for 00:00-09:00 and 10:00-23:00 respectively to the test_a and test_b queues. For jobs in other periods, you can modify the default scaling policy.

              + +
              + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              Table 1 Scaling policy

              Queue

              +

              Period

              +

              Priority

              +

              CUs

              +

              Default Period

              +

              Default Priority

              +

              Default CUs

              +

              Remarks

              +

              test_a

              +

              [00:00, 09:00)

              +

              20

              +

              Minimum CU: 64

              +

              Maximum CU: 128

              +

              The time segments beyond [00:00, 09:00)

              +

              5

              +

              Minimum CU: 16

              +

              Maximum CU: 32

              +

              Jobs of department A

              +

              test_b

              +

              [10:00, 23:00)

              +

              20

              +

              Minimum CU: 64

              +

              Maximum CU: 128

              +

              The time segments beyond [10:00, 23:00)

              +

              5

              +

              Minimum CU: 32

              +

              Maximum CU: 64

              +

              Jobs of department B

              +
              +
              +
              +

              Precautions

              • In any time segment of a day, the total minimum CUs of all queues in an elastic resource pool cannot be more than the minimum CUs of the pool.
              • In any time segment of a day, the maximum CUs of any queue in an elastic resource pool cannot be more than the maximum CUs of the pool.
              • The periods of scaling policies cannot overlap.
              • The period of a scaling policy can only be set by hour and specified by the start time and end time. For example, if you set the period to 00-09, the period when the policy takes effect is [00:00, 09:00). The period of the default scaling policy cannot be modified.
              • In any period, compute resources are preferentially allocated to meet the minimum number of CUs of all queues. The remaining CUs (maximum CUs of the elastic resource pool – total minimum CUs of all queues) are allocated in accordance with the scaling policy priorities.
                • A scaling policy with a smaller priority value (for example, 1) is prior to those with a bigger priority value (for example, 100).
                • If the scaling policies of two queues have the same priority, resources are randomly allocated to a queue. If there are remaining resources, they are randomly allocated until there is no more left. +
                  + + + + + + + + + + + + + + + + +
                  Table 2 CU allocation

                  Scenario

                  +

                  CUs

                  +

                  An elastic resource pool has a maximum number of 256 CUs for queue A and queue B. The scaling policies are as follows:

                  +
                  • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 128
                  • Queue B: priority 10; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                  +

                  From 00:00 a.m. to 09:00 a.m.:

                  +
                  1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are 160 CUs remaining.
                  2. The remaining CUs are allocated based on the priorities. Queue B is prior to queue A. Therefore, queue B gets 64 CUs, and queue A has 96 CUs.
                  +

                  An elastic resource pool has a maximum number of 96 CUs for queue A and queue B. The scaling policies are as follows:

                  +
                  • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                  • Queue B: priority 10; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                  +

                  From 00:00 a.m. to 09:00 a.m.:

                  +
                  1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are no remaining CUs.
                  2. The allocation is complete.
                  +

                  An elastic resource pool has a maximum number of 128 CUs for queue A and queue B. The scaling policies are as follows:

                  +
                  • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                  • Queue B: priority 10; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                  +

                  From 00:00 a.m. to 09:00 a.m.:

                  +
                  1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are 32 CUs remaining.
                  2. The remaining 32 CUs are all preferentially allocated to queue B.
                  +

                  An elastic resource pool has a maximum number of 128 CUs for queue A and queue B. The scaling policies are as follows:

                  +
                  • Queue A: priority 5; period: 00:00–9:00; minimum CU: 32; maximum CU: 64
                  • Queue B: priority 5; time period: 00:00–9:00; minimum CU: 64; maximum CU: 128
                  +

                  From 00:00 a.m. to 09:00 a.m.:

                  +
                  1. The minimum CUs are allocated to the two queues. Queue A has 32 CUs, and queue B has 64 CUs. There are 32 CUs remaining.
                  2. The two queues have the same priority, the remaining 32 CUs are randomly allocated to the two queues.
                  +
                  +
                  +
                +
              +
              +

              Setting a Scaling Policy

              1. Log in to the DLI management console and create an elastic resource pool. Set the minimum and maximum number of CUs of the pool to 128 and 256 respectively. For details, see Creating an Elastic Resource Pool.
              2. Choose Resources > Resource Pool. Locate the row that contains the created elastic resource pool, and click Queue MGMT in the Operation column.
              3. Refer to Adding a Queue to create the test_a queue and set the scaling policy.

                1. Set the priority of the default scaling policy to 5, Min CU to 16, and Max CU to 32.
                2. Click Create to add a scaling policy. Set the priority to 20, Period to 00--09, Min CU to 64, and Max CU to 128.
                +

              4. View the scaling policy on the Queue Management page of the specific elastic resource pool.

                Click to view graphical statistics of priorities and CU settings for all time segments.

                +

              5. Refer to Adding a Queue to create the test_b queue and set the scaling policy.

                1. Set the priority of the default scaling policy to 5, Min CU to 32, and Max CU to 64.
                2. Click Create to add a scaling policy. Set the priority to 20, Period to 10--23, Min CU to 64, and Max CU to 128.
                +

              6. View the scaling policy on the Queue Management page of the specific elastic resource pool.

                Click to view graphical statistics on priorities and CU settings of the two queues for all time segments.

                +

              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0524.html b/docs/dli/umn/dli_01_0524.html new file mode 100644 index 00000000..bc54155f --- /dev/null +++ b/docs/dli/umn/dli_01_0524.html @@ -0,0 +1,23 @@ + + +

              Modifying Specifications

              +

              Scenario

              If CUs of a yearly/monthly elastic resource pool cannot meet your service requirements, you can modify the CUs. In this case, you will be charged based on the number of CUs exceeding that of the yearly/monthly elastic resource pool.

              +

              For example, you have purchased an elastic resource pool with 64 CUs, and you find that most time data processing needs 128 CUs. You can add 64 CUs to the elastic resource pool and be billed based on a CU/hour basis. To save more, you can scale up your elastic resource pool to 128 CUs and be billed on a yearly/monthly basis for the 128-CU package.

              +
              +

              Precautions

              Currently, only yearly/monthly elastic resource pools can be scaled.

              +
              +

              Scaling Up

              1. In the navigation pane on the left of the console, choose Resources > Resource Pool.
              2. Select the elastic resource pool you want and choose More > Modify Specifications in the Operation column.
              3. In the Modify Specifications dialog page, set Operation to Scale-out and specify the number of CUs you want to add.
              4. Confirm the changes and click OK.
              5. Choose Job Management > SQL Jobs to view the status of the SCALE_POOL SQL job.

                If the job status is Scaling, the elastic resource pool is scaling up. Wait until the job status changes to Finished.

                +
              +
              +

              Scaling Down

              By default, the minimum number of CUs is 16. That is, when the specifications of an elastic resource pool are 16 CUs, you cannot scale the pool down.

              +
              +
              1. In the navigation pane on the left, choose Resources > Resource Pool.
              2. Select the elastic resource pool you want and choose More > Modify Specifications in the Operation column.
              3. In the Modify Specifications dialog page, set Operation to Scale-in and specify the number of CUs you want to remove.
              4. Confirm the changes and click OK.
              5. Choose Job Management > SQL Jobs to view the status of the SCALE_POOL SQL job.

                If the job status is Scaling, the elastic resource pool is scaling down. Wait until the job status changes to Finished.

                +
              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0525.html b/docs/dli/umn/dli_01_0525.html new file mode 100644 index 00000000..f185f651 --- /dev/null +++ b/docs/dli/umn/dli_01_0525.html @@ -0,0 +1,48 @@ + + +

              Managing Tags

              +

              Tag Management

              A tag is a key-value pair that you can customize to identify cloud resources. It helps you to classify and search for cloud resources. A tag consists of a tag key and a tag value.

              +

              If you use tags in other cloud services, you are advised to create the same tag (key-value pairs) for cloud resources used by the same business to keep consistency.

              +

              DLI supports the following two types of tags:

              +
              • Resource tags: non-global tags created on DLI
              +
              • Predefined tags: global tags created on Tag Management Service (TMS).

                +
              +

              DLI allows you to add, modify, or delete tags for queues.

              +
              1. In the left navigation pane of the DLI console, choose Resources > Resource Pool.
              2. In the Operation column of the queue, choose More > Tags.
              3. The tag management page is displayed, showing the tag information about the current queue.
              4. Click Add/Edit Tag. The Add/Edit Tag dialog is displayed. Enter a tag and a value, and click Add.

                +

                + + + + + + + + + + +
                Table 1 Tag parameters

                Parameter

                +

                Description

                +

                Tag key

                +

                You can specify the tag key in either of the following ways:

                +
                • Click the text box and select a predefined tag key from the drop-down list.

                  To add a predefined tag, you need to create one on TMS and then select it from the Tag key drop-down list. You can click View predefined tags to go to the Predefined Tags page of the TMS console. Then, click Create Tag in the upper corner of the page to create a predefined tag.

                  +

                  +
                +
                • Enter a tag key in the text box.
                  NOTE:

                  A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the key cannot start or end with a space or start with _sys_.

                  +
                  +
                +

                Tag value

                +

                You can specify the tag value in either of the following ways:

                +
                • Click the text box and select a predefined tag value from the drop-down list.
                • Enter a tag value in the text box.
                  NOTE:

                  A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                  +
                  +
                +
                +
                +

              5. Click OK.
              6. (Optional) To delete a tag, locate the row containing the tag in the tag list and click Delete in the Operation column.
              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0526.html b/docs/dli/umn/dli_01_0526.html new file mode 100644 index 00000000..6716b3c0 --- /dev/null +++ b/docs/dli/umn/dli_01_0526.html @@ -0,0 +1,45 @@ + + +

              Managing Permissions

              +

              Administrators can assign permissions of different operation scopes to users for each elastic resource pool.

              +

              Precautions

              • The administrator and elastic resource pool owner have all permissions, which cannot be set or modified by other users.
              +
              +

              Procedure

              1. In the navigation pane on the left of the DLI console, choose Resources > Resource Pool.
              2. Select the desired elastic resource pool and choose More > Permissions in the Operation column. The User Permissions area displays the list of users who have permissions of elastic resource pools.

                You can assign permissions to new users, modify permissions for users who already have some permissions of elastic resource pools, and revoke all permissions of a user on a pool.

                +
                • Assign permissions to a new user.
                  A new user does not have permissions on the elastic resource pool.
                  1. Click Set Permission in the Operation column on the User Permissions page. The Set Permission dialog box is displayed.
                  2. Set Username to the name of the desired IAM user, and select the required permissions for the user.
                  3. Click OK.

                    Table 1 describes the related parameters.

                    + +
                    + + + + + + + + + + +
                    Table 1 Parameters

                    Parameter

                    +

                    Description

                    +

                    Username

                    +

                    Name of the user you want to grant permissions to

                    +
                    NOTE:

                    The username must be an existing IAM username, and the user must have logged in to the DLI management console.

                    +
                    +

                    Select the permissions to be granted to the user

                    +
                    • Update: Update the description of an elastic resource pool.
                    • Resources: Add queues, delete queues, and configure scaling policies for queues in an elastic resource pool.
                    • Delete: Delete the elastic resource pool.
                    • Grant Permission: Grant the elastic resource pool permissions to other users.
                    • Revoke Permission: Revoke the permissions that other users have on the elastic resource pool. However, the owner's permissions cannot be revoked.
                    • View Other User's Permissions: View the elastic resource pool permissions of other users.
                    +
                    +
                    +
                  +
                  +
                • To assign or revoke permissions of a user who has some permissions on the elastic resource pool, perform the following steps:
                  1. In the list under User Permissions, select the user whose permissions need to be modified and click Set Permission in the Operation column.
                  2. In the displayed Set Permission dialog box, modify the permissions of the user. Table 1 lists the detailed permission descriptions.

                    If Set Permission is gray, you are not allowed to change permissions on this elastic resource pool. You can apply to the administrator, elastic resource pool owner, or other authorized users for granting and revoking permissions.

                    +
                  3. Click OK.
                  +
                • To revoke all permissions of a user on an elastic resource pool, perform the following steps:

                  In the list under User Permissions, locate the user whose permissions need to be revoked and click Set Permission in the Operation column. Click Yes in the Revoke Permission dialog box.

                  +
                +

              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0528.html b/docs/dli/umn/dli_01_0528.html new file mode 100644 index 00000000..c6775822 --- /dev/null +++ b/docs/dli/umn/dli_01_0528.html @@ -0,0 +1,19 @@ + + +

              Before You Start

              +
              + + diff --git a/docs/dli/umn/dli_01_0529.html b/docs/dli/umn/dli_01_0529.html new file mode 100644 index 00000000..2357a99f --- /dev/null +++ b/docs/dli/umn/dli_01_0529.html @@ -0,0 +1,33 @@ + + +

              Regular Operations

              +
              + + diff --git a/docs/dli/umn/dli_01_0530.html b/docs/dli/umn/dli_01_0530.html new file mode 100644 index 00000000..6cab58e6 --- /dev/null +++ b/docs/dli/umn/dli_01_0530.html @@ -0,0 +1,21 @@ + + +

              Binding a Queue

              +

              Scenario

              If you want a queue to use resources in an elastic resource pool, bind the queue to the pool.

              +

              You can click Associate Queue on the Resource Pool page to bind a queue to an elastic resource pool, or bind a queue on the Queue Management page.

              +

              Elastic resource pools support only Flink 1.10 or later. If jobs using Flink 1.7 run on a queue that is bound to an elastic resource pool, errors may occur due to incompatibility.

              +
              +
              +

              Prerequisites

              • Both the elastic resource pool and queue are available.
              • The queue you want to bind must be a dedicated queue in pay-per-use billing mode.
              • No resources are frozen.
              • Only queues under the same enterprise project can be bound to an elastic resource pool.
              +
              +

              Associating a Queue

              1. In the navigation pane on the left, choose Resources > Resource Pool.
              2. Locate the row that contains the desired elastic resource pool, click More in the Operation column, and select Associate Queue.
              3. In the displayed dialog box, select the desired queue and click OK.
              +
              +

              Allocating a Queue to an Elastic Resource Pool

              1. In the navigation pane on the left, choose Resources > Queue Management.
              2. Locate the target queue and choose More > Bind Resource Pool in the Operation column.
              3. Select the desired elastic resource pool and click OK.
              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0531.html b/docs/dli/umn/dli_01_0531.html index d3431845..d9a30ffd 100644 --- a/docs/dli/umn/dli_01_0531.html +++ b/docs/dli/umn/dli_01_0531.html @@ -15,7 +15,7 @@

              Step 1: Prepare a Data Source

              In this example, Kafka is the data source.

              For more information about Flink job data, see Preparing Flink Job Data.

              Enable DIS to import Kafka data to DLI. For details, see "Buying a Kafka Instance" in the Distributed Message Service Kafka User Guide.

              -
              1. Create the dependent Kafka resources.
                Before creating a Kafka instance, ensure the availability of resources, including a virtual private cloud (VPC), subnet, security group, and security group rules.
                • For details about how to create a VPC and subnet, see "Creating a VPC and Subnet" in the Virtual Private Cloud User Guide. For details about how to create and use a subnet in an existing VPC, see "Create a subnet for the VPC" in the Virtual Private Cloud User Guide.
                  • The created VPC and the Kafka instance you will create must be in the same region.
                  • Retain the default settings unless otherwise specified.
                  +
                  1. Create the dependent Kafka resources.
                    Before creating a Kafka instance, ensure the availability of resources, including a virtual private cloud (VPC), subnet, security group, and security group rules.
                    • For details about how to create a VPC and subnet, see "Creating a VPC and Subnet" in Virtual Private Cloud User Guide. For details about how to create and use a subnet in an existing VPC, see "Create a Subnet for the VPC" in Virtual Private Cloud User Guide.
                      • The created VPC and the Kafka instance you will create must be in the same region.
                      • Retain the default settings unless otherwise specified.
                    • For details about how to create a security group, see "Creating a Security Group" in the Virtual Private Cloud User Guide. For details about how to add rules to a security group, see "Adding a Security Group Rule" in the Virtual Private Cloud User Guide.
                    @@ -23,7 +23,7 @@
                  2. Create a DMS for Kafka Instance for job input streams.
                    1. Log in to the DMS for Kafka console.
                    2. Select a region in the upper left corner.
                    3. On the DMS for Kafka page, click Buy Instance in the upper right corner and set related parameters. The required instance information is as follows:
                      • Region: Select the region where DLI is located.
                      • Project: Keep the default value.
                      • AZ: Keep the default value.
                      • Instance Name: kafka-dliflink
                      • Specifications: Default
                      • Enterprise Project: default
                      • Version: Keep the default value.
                      • CPU Architecture: Keep the default value.
                      • Broker Flavor: Select a flavor as needed.
                      • Brokers: Retain the default value.
                      • Storage Space: Keep the default value.
                      • Capacity Threshold Policy: Keep the default value.
                      • VPC and Subnet: Select the VPC and subnet created in 1.
                      • Security Group: Select the security group created in 1.
                      • Manager Username: Enter dliflink (used to log in to the instance management page).
                      • Password: **** (The system cannot detect your password.)
                      • Confirm Password: ****
                      • More Settings: Do not configure this parameter.
                    4. Click Buy. The confirmation page is displayed.
                    5. Confirm that the instance information is correct, read and agree to the agreement, and click Submit. It takes about 10 to 15 minutes to create an instance.
                  3. Create a Kafka topic.
                    1. Click the name of the created Kafka instance. The basic information page of the instance is displayed.
                    2. Choose Topics in the navigation pane on the left. On the displayed page, click Create Topic. Configure the following parameters:
                      • Topic Name: For this example, enter testkafkatopic.
                      • Partitions: Set the value to 1.
                      • Replicas: Set the value to 1.
                      -

                      Retain default values for other parameters.

                      +

                      Retain the default values for other parameters.

                  @@ -51,7 +51,7 @@
                  1. In the navigation pane on the OBS management console, choose Object Storage.
                  2. In the upper right corner of the page, click Create Bucket and set bucket parameters.
                    • Region: Select the region where DLI is located.
                    • Bucket Name: Enter a bucket name. For this example, enter obstest.
                    • Default Storage Class: Standard
                    • Bucket Policy: Private
                    • Default Encryption: Do not enable
                    • Direct Reading: Do not enable
                    • Enterprise Project: default
                    • Tags: Leave it blank.
                  3. Click Create Now.
                  -

                  Step 4: Create a Queue

                  Flink OpenSource SQL jobs cannot run on the default queue. You need to create a queue, for example, Flinktest. For details, see Creating a Queue.

                  +

                  Step 4: Create a Queue

                  Flink OpenSource SQL jobs cannot run on the default queue. You need to create a queue, for example, Flinktest. For details, see "Creating a Queue".

                  1. Log in to the DLI management console. On the Overview page, click Buy Queue in the upper right corner.

                    If this is your first time logging in to the DLI management console, you need to be authorized to access OBS.

                  2. Configure the following parameters:
                    • Name: Flinktest
                    • Type: For general purpose. Select Dedicated Resource Mode.
                    • Specifications: 16 CUs
                    • Enterprise Project: default
                    • Description: Leave it blank.
                    • Advanced Settings: Custom
                    • CIDR Block: Set a CIDR block that does not conflict with the Kafka instance's CIDR block.
                  3. Click Buy and confirm the configuration.
                  4. Submit the request.

                    It takes 10 to 15 minutes to bind the queue to a cluster after the queue is created.

                    @@ -60,19 +60,19 @@

                    Step 5: Create an Enhanced Datasource Connection Between DLI and Kafka

                    You need to create an enhanced datasource connection for the Flink OpenSource SQL job. For details, see "Creating an Enhanced Datasource Connection".

                    • The CIDR block of the DLI queue bound with a datasource connection cannot overlap with the CIDR block of the data source.
                    • Datasource connections cannot be created for the default queue.
                    • To access a table across data sources, you need to use a queue bound to a datasource connection.
                    -
                    1. Create a Kafka security group rule to allow access from the CIDR block of the DLI queue.

                      1. On the Kafka management console, click an instance name on the DMS for Kafka page. Basic information of the Kafka instance is displayed.
                      2. In the Connection pane, obtain the Instance Address (Private Network). In the Network pane, obtain the VPC and subnet of the instance.
                      3. Click the security group name in the Network pane. On the displayed page, click the Inbound Rules tab and add a rule to allow access from the DLI queue.

                        For example, if the CIDR block of the queue is 10.0.0.0/16, set Protocol to TCP, Type to IPv4, Source to 10.0.0.0/16, and click OK.

                        +
                        1. Create a Kafka security group rule to allow access from the CIDR block of the DLI queue.

                          1. On the Kafka management console, click an instance name on the DMS for Kafka page. Basic information of the Kafka instance is displayed.
                          2. In the Connection pane, obtain the Instance Address (Private Network). In the Network pane, obtain the VPC and subnet of the instance.
                          3. Click the security group name in the Network pane. On the displayed page, click the Inbound Rules tab and add a rule to allow access from the DLI queue.

                            For example, if the CIDR block of the queue is 10.0.0.0/16, set Protocol to TCP, Type to IPv4, Source to 10.0.0.0/16, and click OK.

                        2. Create an enhanced datasource connection to Kafka.

                          1. Log in to the DLI management console. In the navigation pane on the left, choose Datasource Connections. On the displayed page, click Create in the Enhanced tab.
                          2. In the displayed dialog box, set the following parameters: For details, see the following section:
                            • Connection Name: Name of the enhanced datasource connection For this example, enter dli_kafka.
                            • Resource Pool: Select the name of the queue created in Step 4: Create a Queue.
                            • VPC: Select the VPC of the Kafka instance.
                            • Subnet: Select the subnet of Kafka instance.
                            • Set other parameters as you need.

                            Click OK. Click the name of the created datasource connection to view its status. You can perform subsequent steps only after the connection status changes to Active.

                            -
                          3. Choose Resources > Queue Management and locate the queue created in Step 4: Create a Queue. In the Operation column, choose More > Test Address Connectivity.
                          4. In the displayed dialog box, enter Kafka instance address (private network):port in the Address box and click Test to check whether the instance is reachable. Note that multiple addresses must be tested separately.
                          +
                        3. Choose Resources > Queue Management and locate the queue created in Step 4: Create a Queue. In the Operation column, click More and select Test Address Connectivity.
                        4. In the displayed dialog box, enter Kafka instance address (private network):port in the Address box and click Test to check whether the instance is reachable. Note that multiple addresses must be tested separately.

                    -

                    Step 6: Create an Enhanced Datasource Connection Between DLI and RDS

                    1. Create an RDS security group rule to allow access from CIDR block of the DLI queue.

                      If the RDS DB instance and Kafka instance are in the same security group of the same VPC, skip this step. Access from the DLI queue has been allowed in 1.
                      1. Go to the RDS console, click the name of the target RDS for MySQL DB instance on the Instances page. Basic information of the instance is displayed.
                      2. In the Connection Information pane, obtain the floating IP address, database port, VPC, and subnet.
                      3. Click the security group name. On the displayed page, click the Inbound Rules tab and add a rule to allow access from the DLI queue. For example, if the CIDR block of the queue is 10.0.0.0/16, set Priority to 1, Action to Allow, Protocol to TCP, Type to IPv4, Source to 10.0.0.0/16, and click OK.
                      +

                      Step 6: Create an Enhanced Datasource Connection Between DLI and RDS

                      1. Create an RDS security group rule to allow access from CIDR block of the DLI queue.

                        If the RDS DB instance and Kafka instance are in the same security group of the same VPC, skip this step. Access from the DLI queue has been allowed in 1.
                        1. Go to the RDS console, click the name of the target RDS for MySQL DB instance on the Instances page. Basic information of the instance is displayed.
                        2. In the Connection Information pane, obtain the floating IP address, database port, VPC, and subnet.
                        3. Click the security group name. On the displayed page, click the Inbound Rules tab and add a rule to allow access from the DLI queue. For example, if the CIDR block of the queue is 10.0.0.0/16, set Priority to 1, Action to Allow, Protocol to TCP, Type to IPv4, Source to 10.0.0.0/16, and click OK.

                      2. Create an enhanced datasource connection to RDS.

                        If the RDS DB instance and Kafka instance are in the same VPC and subnet, skip this step. The enhanced datasource connection created in 2 has connected the subnet.

                        If the two instances are in different VPCs or subnets, perform the following steps to create an enhanced datasource connection:
                        1. Log in to the DLI management console. In the navigation pane on the left, choose Datasource Connections. On the displayed page, click Create in the Enhanced tab.
                        2. In the displayed dialog box, set the following parameters: For details, see the following section:
                          • Connection Name: Name of the enhanced datasource connection For this example, enter dli_rds.
                          • Resource Pool: Select the name of the queue created in Step 4: Create a Queue.
                          • VPC: Select the VPC of the RDS DB instance.
                          • Subnet: Select the subnet of RDS DB instance.
                          • Set other parameters as you need.

                          Click OK. Click the name of the created datasource connection to view its status. You can perform subsequent steps only after the connection status changes to Active.

                          -
                        3. Choose Resources > Queue Management and locate the queue created in Step 4: Create a Queue. In the Operation column, choose More > Test Address Connectivity.
                        4. In the displayed dialog box, enter floating IP address:database port of the RDS DB instance in the Address box and click Test to check whether the database is reachable.
                        +
                      3. Choose Resources > Queue Management and locate the queue created in Step 4: Create a Queue. In the Operation column, click More and select Test Address Connectivity.
                      4. In the displayed dialog box, enter floating IP address:database port of the RDS DB instance in the Address box and click Test to check whether the database is reachable.

                    @@ -120,7 +120,7 @@ CREATE TABLE jdbcSink ( insert into jdbcSink select * from kafkaSource;
                • Click Check Semantics.
                • Click Start. On the displayed Start Flink Job page, confirm the job specifications and the price, and click Start Now to start the job.

                  After the job is started, the system automatically switches to the Flink Jobs page, and the created job is displayed in the job list. You can view the job status in the Status column. After a job is successfully submitted, Status of the job will change from Submitting to Running.

                  -

                  If Status of a job is Submission failed or Running exception, the job fails to be submitted or fails to run. In this case, you can move the cursor over the status icon to view the error details. You can click to copy these details. After handling the fault based on the provided information, resubmit the job.

                  +

                  If Status of a job is Submission failed or Running exception, the job fails to be submitted or fails to run. In this case, you can hover over the status icon to view the error details. You can click to copy these details. After handling the fault based on the provided information, resubmit the job.

                • Connect to the Kafka cluster and send the following test data to the Kafka topics:
                  {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"} 
                   
                   {"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
                  diff --git a/docs/dli/umn/dli_01_0532.html b/docs/dli/umn/dli_01_0532.html new file mode 100644 index 00000000..128c06fe --- /dev/null +++ b/docs/dli/umn/dli_01_0532.html @@ -0,0 +1,20 @@ + + +

                  Viewing Scaling History

                  +

                  Scenario

                  If you added a queue to or deleted one from an elastic resource pool, or you scaled an added queue, the CU quantity of the elastic resource pool may be changed. You can view historical CU changes of an elastic resource pool on the console.

                  +

                  When scaling in an elastic resource pool, Spark and SQL jobs may automatically retry. However, if the number of retries exceeds the limit, the jobs will fail and need to be manually executed again.

                  +
                  +
                  +

                  Prerequisites

                  Currently, you can only view the historical records generated within the last 30 days on the console.

                  +
                  +

                  Viewing Scaling History of an Elastic Resource Pool

                  1. In the navigation pane on the left, choose Resources > Resource Pool.
                  2. Select the desired elastic resource pool and choose More > Expansion History in the Operation column.
                  3. On the displayed page, select a duration to view the CU usage.

                    You can view the number of CUs before and after a scaling, and the target number of CUs.

                    +

                    The historical records can be displayed in charts or tables. Click in the upper right corner to switch the display.

                    +
                  +
                  +
                  +
                  + +
                  + diff --git a/docs/dli/umn/dli_01_0550.html b/docs/dli/umn/dli_01_0550.html index 36744be4..00230a3b 100644 --- a/docs/dli/umn/dli_01_0550.html +++ b/docs/dli/umn/dli_01_0550.html @@ -1,19 +1,19 @@ -

                  Quotas

                  -

                  What Is a Quota?

                  A quota limits the quantity of a resource available to users, thereby preventing spikes in the usage of the resource.

                  -

                  You can also request for an increased quota if your existing quota cannot meet your service requirements.

                  +

                  Quota Management

                  +

                  What Is a Quota?

                  A quota limits the quantity of a resource available to users, thereby preventing spikes in the usage of the resource.

                  +

                  You can also request for an increased quota if your existing quota cannot meet your service requirements.

                  -

                  How Do I View My Quotas?

                  1. Log in to the management console.
                  2. Click in the upper left corner and select Region and Project.
                  3. Click (the My Quotas icon) in the upper right corner.

                    The Service Quota page is displayed.

                    -
                  4. View the used and total quota of each type of resources on the displayed page.

                    If a quota cannot meet service requirements, increase a quota.

                    +

                    How Do I View My Quotas?

                    1. Log in to the management console.
                    2. Click in the upper left corner and select a region and a project.
                    3. Click the My Quota icon in the upper right corner of the page.

                      The Service Quota page is displayed.

                      +
                    4. View the used and total quota of each type of resources on the displayed page.

                      If a quota cannot meet service requirements, increase a quota.

                    -

                    How Do I Apply for a Higher Quota?

                    The system does not support online quota adjustment. To increase a resource quota, dial the hotline or send an email to the customer service. We will process your application and inform you of the progress by phone call or email.

                    -

                    Before dialing the hotline number or sending an email, ensure that the following information has been obtained:

                    -
                    • Domain name, project name, and project ID

                      Log in to the management console using the cloud account, click the username in the upper right corner, select My Credentials from the drop-down list, and obtain the domain name, project name, and project ID on the My Credentials page.

                      -
                    • Quota information, including:
                      • Service name
                      • Quota type
                      • Required quota
                      +

                      How Do I Apply for a Higher Quota?

                      The system does not support online quota adjustment. To increase a resource quota, dial the hotline or send an email to the customer service. We will process your application and inform you of the progress by phone call or email.

                      +

                      Before dialing the hotline number or sending an email, ensure that the following information has been obtained:

                      +
                      • Domain name, project name, and project ID

                        To obtain the preceding information, log in to the management console, click the username in the upper right corner, and choose My Credentials from the drop-down list.

                        +
                      • Quota information, including:
                      • Service name
                        • Quota type
                        • Required quota
                      -

                      Learn how to obtain the service hotline and email address.

                      +

                      Learn how to obtain the service hotline and email address.

                    diff --git a/docs/dli/umn/dli_01_0552.html b/docs/dli/umn/dli_01_0552.html index 8ded1d7b..e8470cee 100644 --- a/docs/dli/umn/dli_01_0552.html +++ b/docs/dli/umn/dli_01_0552.html @@ -29,7 +29,7 @@
            14. Tag value

              You can specify the tag value in either of the following ways:

              -
              • Click the tag value text box and select a predefined tag value from the drop-down list.
              • Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                +
                • Click the tag value text box and select a predefined tag value from the drop-down list.
                • Enter a tag value in the text box.
                  NOTE:

                  A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

              Tag key

              You can specify the tag key in either of the following ways:

              -
              • Click the text box for tag key and select a predefined tag key from the drop-down list.

                To add a predefined tag, you need to create one on TMS and then select it from the Tag key drop-down list. You can click View predefined tags to go to the Predefined Tags page of the TMS console. Then, click Create Tag in the upper corner of the page to create a predefined tag.

                +
                • Click the text box for tag key and select a predefined tag key from the drop-down list.

                  To add a predefined tag, you need to create one on TMS and then select it from the Tag key drop-down list. You can click View predefined tags to go to the Predefined Tags page of the TMS console. Then, click Create Tag in the upper corner of the page to create a predefined tag.

                • Enter a tag key in the text box.
                  NOTE:

                  A tag key can contain a maximum of 128 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed, but the key cannot start or end with a space or start with _sys_.

                  @@ -64,7 +64,7 @@

              Tag value

              You can specify the tag value in either of the following ways:

              -
              • Click the tag value text box and select a predefined tag value from the drop-down list.
              • Enter a tag value in the text box.
                NOTE:

                A tag value can contain a maximum of 225 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

                +
                • Click the tag value text box and select a predefined tag value from the drop-down list.
                • Enter a tag value in the text box.
                  NOTE:

                  A tag value can contain a maximum of 255 characters. Only letters, digits, spaces, and special characters (_.:=+-@) are allowed. The value cannot start or end with a space.

              + + + + + + + + + + + + + + + + + +
              Table 1 Queue properties

              Property

              +

              Description

              +

              Value Range

              +

              Max. Spark Driver Instances

              +

              Maximum number of Spark drivers that can be started on this queue, including the Spark driver that is prestarted and the Spark drivers that run jobs.

              +
              • For a 16-CU queue, the value is 2.
              • For a queue that has more than 16 CUs, the value range is [2, queue CUs/16].
              • If the minimum CUs of the queue is less than 16 CUs, this configuration item does not apply.
              +

              Max. Prestart Spark Driver Instances

              +

              Maximum number of Spark drivers that can be prestarted on this queue. When the number of Spark drivers that run jobs exceeds the value of Max. Concurrency per Instance, the jobs are allocated to the Spark drivers that are prestarted.

              +
              • For a 16-CU queue, the value range is 0 to 1.
              • For a queue that has more than 16 CUs, the value range is [2, queue CUs/16].
              • If the minimum CUs of the queue is less than 16 CUs, this configuration item does not apply.
              +

              Max. Concurrency per Instance

              +

              Maximum number of jobs that can be concurrently executed by a Spark driver. When the number of jobs exceeds the value of this parameter, the jobs are allocated to other Spark drivers.

              +

              1–32

              +
              +
              +
            15. Click OK.
            16. + + +
              + +
              + diff --git a/docs/dli/umn/dli_01_0565.html b/docs/dli/umn/dli_01_0565.html new file mode 100644 index 00000000..2ae1b24a --- /dev/null +++ b/docs/dli/umn/dli_01_0565.html @@ -0,0 +1,26 @@ + + +

              Allocating a Queue to an Enterprise Project

              +

              You can create enterprise projects matching the organizational structure of your enterprises to centrally manage cloud resources across regions by project. Then you can create user groups and users with different permissions and add them to enterprise projects.

              +

              DLI allows you to select an enterprise project when creating a queue. This section describes how to bind a DLI queue to and modify an enterprise project.

              +

              Currently, enterprise projects can be modified only for queues that have not been added to elastic resource pools.

              +
              +

              Prerequisites

              You have logged in to the Enterprise Project Management Service console and created an enterprise project.

              +
              +

              Binding an Enterprise Project

              When creating a DLI queue, you can select a created enterprise project for Enterprise Project.

              +

              Alternatively, you can click Create Enterprise Project to go to the Enterprise Project Management Service console to create an enterprise project and view existing ones.

              +

              For details about how to create a queue, see Creating a Queue.

              +
              +

              Modifying an Enterprise Project

              You can modify the enterprise project bound to a created queue as needed.

              +
              1. Log in to the DLI management console.
              2. In the navigation pane on the left, choose Resources > Queue Management.
              3. In the queue list, locate the queue for which you want to modify the enterprise project, click More in the Operation column, and select Modify Enterprise Project.
              4. In the Modify Enterprise Project dialog box displayed, select an enterprise project.

                Alternatively, you can click Create Enterprise Project to go to the Enterprise Project Management Service console to create an enterprise project and view existing ones.

                +
              5. After the modification, click OK to save the enterprise project information of the queue.
              +
              +

              Related Operations

              For details about how to modify the enterprise project of an elastic resource pool, see Allocating to an Enterprise Project.

              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_01_0566.html b/docs/dli/umn/dli_01_0566.html new file mode 100644 index 00000000..19726f5e --- /dev/null +++ b/docs/dli/umn/dli_01_0566.html @@ -0,0 +1,27 @@ + + +

              Allocating to an Enterprise Project

              +

              You can create enterprise projects matching the organizational structure of your enterprises to centrally manage cloud resources across regions by project. Then you can create user groups and users with different permissions and add them to enterprise projects.

              +

              DLI allows you to select an enterprise project when creating an elastic resource pool. This section describes how to bind an elastic resource pool to and modify an enterprise project.

              +

              Modifying the enterprise project of an elastic resource pool will modify the enterprise projects of the queues in the elastic resource pool.

              +

              Only queues under the same enterprise project can be bound to an elastic resource pool.

              +
              +

              Prerequisites

              You have logged in to the Enterprise Project Management Service console and created an enterprise project.

              +
              +

              Binding an Enterprise Project

              When creating an elastic resource pool, you can select a created enterprise project for Enterprise Project.

              +

              Alternatively, you can click Create Enterprise Project to go to the Enterprise Project Management Service console to create an enterprise project and view existing ones.

              +

              For details about how to create an elastic resource pool, see Creating an Elastic Resource Pool.

              +
              +

              Modifying an Enterprise Project

              You can modify the enterprise project bound to a created elastic resource pool as needed.

              +
              1. Log in to the DLI management console.
              2. In the navigation pane on the left, choose Resources > Resource Pool.
              3. In the elastic resource pool list, locate the elastic resource pool for which you want to modify the enterprise project, click More in the Operation column, and select Allocate to Enterprise Project.
              4. In the Modify Enterprise Project dialog box displayed, select an enterprise project.

                Alternatively, you can click Create Enterprise Project to go to the Enterprise Project Management Service console to create an enterprise project and view existing ones.

                +
              5. After the modification, click OK to save the enterprise project information of the elastic resource pool.
              +
              +

              Related Operations

              For details about how to modify the enterprise project of a queue, see Allocating a Queue to an Enterprise Project.

              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_03_0001.html b/docs/dli/umn/dli_03_0001.html index 2a035801..b3ee2248 100644 --- a/docs/dli/umn/dli_03_0001.html +++ b/docs/dli/umn/dli_03_0001.html @@ -4,29 +4,11 @@
              diff --git a/docs/dli/umn/dli_03_0002.html b/docs/dli/umn/dli_03_0002.html index dce9fdce..e2523026 100644 --- a/docs/dli/umn/dli_03_0002.html +++ b/docs/dli/umn/dli_03_0002.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0008.html b/docs/dli/umn/dli_03_0008.html index 5f51cbb4..9d76034f 100644 --- a/docs/dli/umn/dli_03_0008.html +++ b/docs/dli/umn/dli_03_0008.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0009.html b/docs/dli/umn/dli_03_0009.html index 94c995e9..ba3dba02 100644 --- a/docs/dli/umn/dli_03_0009.html +++ b/docs/dli/umn/dli_03_0009.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0010.html b/docs/dli/umn/dli_03_0010.html index f465fe3b..eedf2334 100644 --- a/docs/dli/umn/dli_03_0010.html +++ b/docs/dli/umn/dli_03_0010.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0011.html b/docs/dli/umn/dli_03_0011.html index 463da933..c11ee61e 100644 --- a/docs/dli/umn/dli_03_0011.html +++ b/docs/dli/umn/dli_03_0011.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0013.html b/docs/dli/umn/dli_03_0013.html index 13a73692..7733a01b 100644 --- a/docs/dli/umn/dli_03_0013.html +++ b/docs/dli/umn/dli_03_0013.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0014.html b/docs/dli/umn/dli_03_0014.html index 210b5533..077f41f0 100644 --- a/docs/dli/umn/dli_03_0014.html +++ b/docs/dli/umn/dli_03_0014.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0017.html b/docs/dli/umn/dli_03_0017.html index 806fa011..75b87126 100644 --- a/docs/dli/umn/dli_03_0017.html +++ b/docs/dli/umn/dli_03_0017.html @@ -29,7 +29,7 @@ sc.hadoopConfiguration.set("fs.obs.session.token", sts)
              diff --git a/docs/dli/umn/dli_03_0020.html b/docs/dli/umn/dli_03_0020.html index ecf35d99..f2067d07 100644 --- a/docs/dli/umn/dli_03_0020.html +++ b/docs/dli/umn/dli_03_0020.html @@ -4,83 +4,13 @@
              diff --git a/docs/dli/umn/dli_03_0021.html b/docs/dli/umn/dli_03_0021.html index e6a0efe6..f220113c 100644 --- a/docs/dli/umn/dli_03_0021.html +++ b/docs/dli/umn/dli_03_0021.html @@ -4,37 +4,13 @@
              diff --git a/docs/dli/umn/dli_03_0022.html b/docs/dli/umn/dli_03_0022.html index f93ee993..1fe0c02e 100644 --- a/docs/dli/umn/dli_03_0022.html +++ b/docs/dli/umn/dli_03_0022.html @@ -5,43 +5,11 @@
              diff --git a/docs/dli/umn/dli_03_0025.html b/docs/dli/umn/dli_03_0025.html index a73c0333..7e48a1d2 100644 --- a/docs/dli/umn/dli_03_0025.html +++ b/docs/dli/umn/dli_03_0025.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0028.html b/docs/dli/umn/dli_03_0028.html index 5ea12f32..2a5ea254 100644 --- a/docs/dli/umn/dli_03_0028.html +++ b/docs/dli/umn/dli_03_0028.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0029.html b/docs/dli/umn/dli_03_0029.html index c1f8fa13..09033cc9 100644 --- a/docs/dli/umn/dli_03_0029.html +++ b/docs/dli/umn/dli_03_0029.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0036.html b/docs/dli/umn/dli_03_0036.html index e5b6c1ad..a4bff550 100644 --- a/docs/dli/umn/dli_03_0036.html +++ b/docs/dli/umn/dli_03_0036.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0037.html b/docs/dli/umn/dli_03_0037.html index 51d2c1ca..a02a1dce 100644 --- a/docs/dli/umn/dli_03_0037.html +++ b/docs/dli/umn/dli_03_0037.html @@ -4,81 +4,15 @@
              diff --git a/docs/dli/umn/dli_03_0038.html b/docs/dli/umn/dli_03_0038.html index 7ab46c3a..c3bb6924 100644 --- a/docs/dli/umn/dli_03_0038.html +++ b/docs/dli/umn/dli_03_0038.html @@ -47,7 +47,7 @@ StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironm
              diff --git a/docs/dli/umn/dli_03_0040.html b/docs/dli/umn/dli_03_0040.html index 720f221a..e4847d71 100644 --- a/docs/dli/umn/dli_03_0040.html +++ b/docs/dli/umn/dli_03_0040.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0043.html b/docs/dli/umn/dli_03_0043.html index 883adca6..c1a4b5d6 100644 --- a/docs/dli/umn/dli_03_0043.html +++ b/docs/dli/umn/dli_03_0043.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0044.html b/docs/dli/umn/dli_03_0044.html index 99b8f6a0..98044b55 100644 --- a/docs/dli/umn/dli_03_0044.html +++ b/docs/dli/umn/dli_03_0044.html @@ -66,7 +66,7 @@ private void initConf() {
              diff --git a/docs/dli/umn/dli_03_0045.html b/docs/dli/umn/dli_03_0045.html index 9b8ec75e..301889fa 100644 --- a/docs/dli/umn/dli_03_0045.html +++ b/docs/dli/umn/dli_03_0045.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0046.html b/docs/dli/umn/dli_03_0046.html index 8ff9bfba..822d7ca9 100644 --- a/docs/dli/umn/dli_03_0046.html +++ b/docs/dli/umn/dli_03_0046.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0047.html b/docs/dli/umn/dli_03_0047.html index 71a6e34f..8da071eb 100644 --- a/docs/dli/umn/dli_03_0047.html +++ b/docs/dli/umn/dli_03_0047.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0048.html b/docs/dli/umn/dli_03_0048.html index 6f98e404..91fcb8fa 100644 --- a/docs/dli/umn/dli_03_0048.html +++ b/docs/dli/umn/dli_03_0048.html @@ -10,7 +10,7 @@ insert into es2 select * from ssource;
              diff --git a/docs/dli/umn/dli_03_0049.html b/docs/dli/umn/dli_03_0049.html index c3173cdd..21acaedc 100644 --- a/docs/dli/umn/dli_03_0049.html +++ b/docs/dli/umn/dli_03_0049.html @@ -4,27 +4,9 @@
              diff --git a/docs/dli/umn/dli_03_0054.html b/docs/dli/umn/dli_03_0054.html index 74e74eb9..18bfcccc 100644 --- a/docs/dli/umn/dli_03_0054.html +++ b/docs/dli/umn/dli_03_0054.html @@ -4,21 +4,9 @@
              diff --git a/docs/dli/umn/dli_03_0057.html b/docs/dli/umn/dli_03_0057.html index a02d6647..e9910c00 100644 --- a/docs/dli/umn/dli_03_0057.html +++ b/docs/dli/umn/dli_03_0057.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0061.html b/docs/dli/umn/dli_03_0061.html index a66ec3f3..beab1114 100644 --- a/docs/dli/umn/dli_03_0061.html +++ b/docs/dli/umn/dli_03_0061.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0064.html b/docs/dli/umn/dli_03_0064.html index cb086139..a8a7300d 100644 --- a/docs/dli/umn/dli_03_0064.html +++ b/docs/dli/umn/dli_03_0064.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0065.html b/docs/dli/umn/dli_03_0065.html index 7171d5a8..b6b9ddc9 100644 --- a/docs/dli/umn/dli_03_0065.html +++ b/docs/dli/umn/dli_03_0065.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0066.html b/docs/dli/umn/dli_03_0066.html index 6701df2c..a7e842fb 100644 --- a/docs/dli/umn/dli_03_0066.html +++ b/docs/dli/umn/dli_03_0066.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0067.html b/docs/dli/umn/dli_03_0067.html index f4bd398b..4d9f150a 100644 --- a/docs/dli/umn/dli_03_0067.html +++ b/docs/dli/umn/dli_03_0067.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0068.html b/docs/dli/umn/dli_03_0068.html index b5a5866a..fe77615b 100644 --- a/docs/dli/umn/dli_03_0068.html +++ b/docs/dli/umn/dli_03_0068.html @@ -11,7 +11,7 @@ st.execute("set spark.sql.shuffle.partitions=20")
              diff --git a/docs/dli/umn/dli_03_0069.html b/docs/dli/umn/dli_03_0069.html index 77d25473..7bcd07c6 100644 --- a/docs/dli/umn/dli_03_0069.html +++ b/docs/dli/umn/dli_03_0069.html @@ -27,7 +27,7 @@ WHERE
              diff --git a/docs/dli/umn/dli_03_0071.html b/docs/dli/umn/dli_03_0071.html index 32f1d276..659ff90c 100644 --- a/docs/dli/umn/dli_03_0071.html +++ b/docs/dli/umn/dli_03_0071.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0072.html b/docs/dli/umn/dli_03_0072.html index 00196fa3..754c3551 100644 --- a/docs/dli/umn/dli_03_0072.html +++ b/docs/dli/umn/dli_03_0072.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0075.html b/docs/dli/umn/dli_03_0075.html index 04db3484..b059069a 100644 --- a/docs/dli/umn/dli_03_0075.html +++ b/docs/dli/umn/dli_03_0075.html @@ -58,7 +58,7 @@
              diff --git a/docs/dli/umn/dli_03_0076.html b/docs/dli/umn/dli_03_0076.html index d103ef63..be60c760 100644 --- a/docs/dli/umn/dli_03_0076.html +++ b/docs/dli/umn/dli_03_0076.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0077.html b/docs/dli/umn/dli_03_0077.html index 5e62d7cc..a60751ac 100644 --- a/docs/dli/umn/dli_03_0077.html +++ b/docs/dli/umn/dli_03_0077.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0080.html b/docs/dli/umn/dli_03_0080.html index 856b41f5..0038f27f 100644 --- a/docs/dli/umn/dli_03_0080.html +++ b/docs/dli/umn/dli_03_0080.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0082.html b/docs/dli/umn/dli_03_0082.html index 75b57c5e..bfcb9467 100644 --- a/docs/dli/umn/dli_03_0082.html +++ b/docs/dli/umn/dli_03_0082.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0083.html b/docs/dli/umn/dli_03_0083.html index 61b2dbaa..ec783ec2 100644 --- a/docs/dli/umn/dli_03_0083.html +++ b/docs/dli/umn/dli_03_0083.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0085.html b/docs/dli/umn/dli_03_0085.html index 6ab75c37..92ede8cc 100644 --- a/docs/dli/umn/dli_03_0085.html +++ b/docs/dli/umn/dli_03_0085.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0086.html b/docs/dli/umn/dli_03_0086.html index 2a126246..f67a1c92 100644 --- a/docs/dli/umn/dli_03_0086.html +++ b/docs/dli/umn/dli_03_0086.html @@ -9,7 +9,7 @@ select * FROM tablename distribute by rand()
              diff --git a/docs/dli/umn/dli_03_0087.html b/docs/dli/umn/dli_03_0087.html index aca9ae27..c429a6a1 100644 --- a/docs/dli/umn/dli_03_0087.html +++ b/docs/dli/umn/dli_03_0087.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0088.html b/docs/dli/umn/dli_03_0088.html index 3e6babb9..26d22ac4 100644 --- a/docs/dli/umn/dli_03_0088.html +++ b/docs/dli/umn/dli_03_0088.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0089.html b/docs/dli/umn/dli_03_0089.html index 7ce71038..ab9cf67f 100644 --- a/docs/dli/umn/dli_03_0089.html +++ b/docs/dli/umn/dli_03_0089.html @@ -3,7 +3,7 @@

              How Do I Create an OBS Partitioned Table for a Flink SQL Job?

              Scenario

              When using a Flink SQL job, you need to create an OBS partitioned table for subsequent batch processing.

              -

              Procedure

              In the following example, the day field is used as the partition field with the parquet encoding format (only the parquet format is supported currently) to dump car_info data to OBS.

              +

              Procedure

              In the following example, the day field is used as the partition field with the parquet encoding format to dump car_info data to OBS.

              + + + + + + + +
               1
                2
                3
              @@ -61,7 +61,7 @@
               
              diff --git a/docs/dli/umn/dli_03_0090.html b/docs/dli/umn/dli_03_0090.html index cb4f81c1..ab404100 100644 --- a/docs/dli/umn/dli_03_0090.html +++ b/docs/dli/umn/dli_03_0090.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0091.html b/docs/dli/umn/dli_03_0091.html index b7e0b770..3eaa1067 100644 --- a/docs/dli/umn/dli_03_0091.html +++ b/docs/dli/umn/dli_03_0091.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0092.html b/docs/dli/umn/dli_03_0092.html index 99dcc82c..702a907f 100644 --- a/docs/dli/umn/dli_03_0092.html +++ b/docs/dli/umn/dli_03_0092.html @@ -14,7 +14,7 @@ LOCATION 'obs://akc-bigdata/akdc.db'
              diff --git a/docs/dli/umn/dli_03_0093.html b/docs/dli/umn/dli_03_0093.html index afd6ec18..4a8132e4 100644 --- a/docs/dli/umn/dli_03_0093.html +++ b/docs/dli/umn/dli_03_0093.html @@ -20,7 +20,7 @@ spark.sql.adaptive.skewedPartitionMaxSplits:10
              diff --git a/docs/dli/umn/dli_03_0095.html b/docs/dli/umn/dli_03_0095.html index 22a0b58f..93dfa7ec 100644 --- a/docs/dli/umn/dli_03_0095.html +++ b/docs/dli/umn/dli_03_0095.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0096.html b/docs/dli/umn/dli_03_0096.html index 0d854ca8..b4f4cdbd 100644 --- a/docs/dli/umn/dli_03_0096.html +++ b/docs/dli/umn/dli_03_0096.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0098.html b/docs/dli/umn/dli_03_0098.html index 443e9011..12e37958 100644 --- a/docs/dli/umn/dli_03_0098.html +++ b/docs/dli/umn/dli_03_0098.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0099.html b/docs/dli/umn/dli_03_0099.html index 261ba5c7..2a7db5c6 100644 --- a/docs/dli/umn/dli_03_0099.html +++ b/docs/dli/umn/dli_03_0099.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0100.html b/docs/dli/umn/dli_03_0100.html index 21a0e117..55859cd2 100644 --- a/docs/dli/umn/dli_03_0100.html +++ b/docs/dli/umn/dli_03_0100.html @@ -67,7 +67,7 @@
              diff --git a/docs/dli/umn/dli_03_0102.html b/docs/dli/umn/dli_03_0102.html index 45c51a53..1ff20d94 100644 --- a/docs/dli/umn/dli_03_0102.html +++ b/docs/dli/umn/dli_03_0102.html @@ -15,7 +15,7 @@
              diff --git a/docs/dli/umn/dli_03_0103.html b/docs/dli/umn/dli_03_0103.html index 719eed80..bc81825e 100644 --- a/docs/dli/umn/dli_03_0103.html +++ b/docs/dli/umn/dli_03_0103.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0105.html b/docs/dli/umn/dli_03_0105.html index 4db45d8a..8673e9d6 100644 --- a/docs/dli/umn/dli_03_0105.html +++ b/docs/dli/umn/dli_03_0105.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0106.html b/docs/dli/umn/dli_03_0106.html index 04fd6e26..81ffcbc7 100644 --- a/docs/dli/umn/dli_03_0106.html +++ b/docs/dli/umn/dli_03_0106.html @@ -39,7 +39,7 @@
              diff --git a/docs/dli/umn/dli_03_0107.html b/docs/dli/umn/dli_03_0107.html index eeac9832..70a1a79a 100644 --- a/docs/dli/umn/dli_03_0107.html +++ b/docs/dli/umn/dli_03_0107.html @@ -30,7 +30,7 @@ counts.saveAsTextFile(out_file_name)
              diff --git a/docs/dli/umn/dli_03_0108.html b/docs/dli/umn/dli_03_0108.html index fbd8f2a1..f62f9c71 100644 --- a/docs/dli/umn/dli_03_0108.html +++ b/docs/dli/umn/dli_03_0108.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0109.html b/docs/dli/umn/dli_03_0109.html index bcaec919..171cebec 100644 --- a/docs/dli/umn/dli_03_0109.html +++ b/docs/dli/umn/dli_03_0109.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0110.html b/docs/dli/umn/dli_03_0110.html new file mode 100644 index 00000000..d80be8bb --- /dev/null +++ b/docs/dli/umn/dli_03_0110.html @@ -0,0 +1,28 @@ + + +

              Datasource Connections

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0111.html b/docs/dli/umn/dli_03_0111.html index 43e8cbcf..76683518 100644 --- a/docs/dli/umn/dli_03_0111.html +++ b/docs/dli/umn/dli_03_0111.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0112.html b/docs/dli/umn/dli_03_0112.html new file mode 100644 index 00000000..38193fea --- /dev/null +++ b/docs/dli/umn/dli_03_0112.html @@ -0,0 +1,20 @@ + + +

              Cross-Source Analysis

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0115.html b/docs/dli/umn/dli_03_0115.html index 2fa39828..0f4c635a 100644 --- a/docs/dli/umn/dli_03_0115.html +++ b/docs/dli/umn/dli_03_0115.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0116.html b/docs/dli/umn/dli_03_0116.html index 88ea396f..763ce600 100644 --- a/docs/dli/umn/dli_03_0116.html +++ b/docs/dli/umn/dli_03_0116.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0117.html b/docs/dli/umn/dli_03_0117.html index 8217dff6..3610b8ae 100644 --- a/docs/dli/umn/dli_03_0117.html +++ b/docs/dli/umn/dli_03_0117.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0118.html b/docs/dli/umn/dli_03_0118.html index 6da9f2ed..9a8ab5cc 100644 --- a/docs/dli/umn/dli_03_0118.html +++ b/docs/dli/umn/dli_03_0118.html @@ -30,7 +30,7 @@ object DliTest {
              diff --git a/docs/dli/umn/dli_03_0119.html b/docs/dli/umn/dli_03_0119.html index 43319121..5e0d016c 100644 --- a/docs/dli/umn/dli_03_0119.html +++ b/docs/dli/umn/dli_03_0119.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0126.html b/docs/dli/umn/dli_03_0126.html index b57d1317..67ee6d71 100644 --- a/docs/dli/umn/dli_03_0126.html +++ b/docs/dli/umn/dli_03_0126.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0127.html b/docs/dli/umn/dli_03_0127.html index cee710e2..c7ea12d1 100644 --- a/docs/dli/umn/dli_03_0127.html +++ b/docs/dli/umn/dli_03_0127.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0128.html b/docs/dli/umn/dli_03_0128.html index fcb0d421..bcf70760 100644 --- a/docs/dli/umn/dli_03_0128.html +++ b/docs/dli/umn/dli_03_0128.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0129.html b/docs/dli/umn/dli_03_0129.html index 0a4c3e09..7ec1a4d1 100644 --- a/docs/dli/umn/dli_03_0129.html +++ b/docs/dli/umn/dli_03_0129.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0130.html b/docs/dli/umn/dli_03_0130.html index 025ee34b..01cdffd7 100644 --- a/docs/dli/umn/dli_03_0130.html +++ b/docs/dli/umn/dli_03_0130.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0131.html b/docs/dli/umn/dli_03_0131.html new file mode 100644 index 00000000..eb34497c --- /dev/null +++ b/docs/dli/umn/dli_03_0131.html @@ -0,0 +1,33 @@ + + +

              Flink SQL

              +
              +
              + + + +
              + diff --git a/docs/dli/umn/dli_03_0132.html b/docs/dli/umn/dli_03_0132.html new file mode 100644 index 00000000..7666b9e8 --- /dev/null +++ b/docs/dli/umn/dli_03_0132.html @@ -0,0 +1,29 @@ + + +

              Flink Jar Jobs

              +
              + + diff --git a/docs/dli/umn/dli_03_0133.html b/docs/dli/umn/dli_03_0133.html new file mode 100644 index 00000000..bac2b71a --- /dev/null +++ b/docs/dli/umn/dli_03_0133.html @@ -0,0 +1,20 @@ + + +

              Performance Tuning

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0135.html b/docs/dli/umn/dli_03_0135.html new file mode 100644 index 00000000..06cbcf9b --- /dev/null +++ b/docs/dli/umn/dli_03_0135.html @@ -0,0 +1,32 @@ + + +

              O&M Guide

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0136.html b/docs/dli/umn/dli_03_0136.html index 51717b4c..f6bad72b 100644 --- a/docs/dli/umn/dli_03_0136.html +++ b/docs/dli/umn/dli_03_0136.html @@ -59,7 +59,7 @@
              diff --git a/docs/dli/umn/dli_03_0137.html b/docs/dli/umn/dli_03_0137.html new file mode 100644 index 00000000..505c824d --- /dev/null +++ b/docs/dli/umn/dli_03_0137.html @@ -0,0 +1,30 @@ + + +

              Usage

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0138.html b/docs/dli/umn/dli_03_0138.html index 6645c794..9db67a6f 100644 --- a/docs/dli/umn/dli_03_0138.html +++ b/docs/dli/umn/dli_03_0138.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0139.html b/docs/dli/umn/dli_03_0139.html index 868918a9..288393ba 100644 --- a/docs/dli/umn/dli_03_0139.html +++ b/docs/dli/umn/dli_03_0139.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0140.html b/docs/dli/umn/dli_03_0140.html index 5ed3b909..858d0a13 100644 --- a/docs/dli/umn/dli_03_0140.html +++ b/docs/dli/umn/dli_03_0140.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0145.html b/docs/dli/umn/dli_03_0145.html index 38a9e0b3..b3e548e2 100644 --- a/docs/dli/umn/dli_03_0145.html +++ b/docs/dli/umn/dli_03_0145.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0156.html b/docs/dli/umn/dli_03_0156.html index de30c37a..9a15efd1 100644 --- a/docs/dli/umn/dli_03_0156.html +++ b/docs/dli/umn/dli_03_0156.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0157.html b/docs/dli/umn/dli_03_0157.html index 1dd9f63b..cc92c3ac 100644 --- a/docs/dli/umn/dli_03_0157.html +++ b/docs/dli/umn/dli_03_0157.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0159.html b/docs/dli/umn/dli_03_0159.html index 17965ced..b50ba0ef 100644 --- a/docs/dli/umn/dli_03_0159.html +++ b/docs/dli/umn/dli_03_0159.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0160.html b/docs/dli/umn/dli_03_0160.html index 336b0e21..ff120b74 100644 --- a/docs/dli/umn/dli_03_0160.html +++ b/docs/dli/umn/dli_03_0160.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0161.html b/docs/dli/umn/dli_03_0161.html index c0f7e55c..8977e505 100644 --- a/docs/dli/umn/dli_03_0161.html +++ b/docs/dli/umn/dli_03_0161.html @@ -16,7 +16,7 @@
              diff --git a/docs/dli/umn/dli_03_0162.html b/docs/dli/umn/dli_03_0162.html index 2b7e9456..61db61cb 100644 --- a/docs/dli/umn/dli_03_0162.html +++ b/docs/dli/umn/dli_03_0162.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0163.html b/docs/dli/umn/dli_03_0163.html new file mode 100644 index 00000000..0d8e6733 --- /dev/null +++ b/docs/dli/umn/dli_03_0163.html @@ -0,0 +1,16 @@ + + +

              Usage

              +

              +
              +
              + + + +
              + diff --git a/docs/dli/umn/dli_03_0164.html b/docs/dli/umn/dli_03_0164.html index fe668a6c..986dfcc0 100644 --- a/docs/dli/umn/dli_03_0164.html +++ b/docs/dli/umn/dli_03_0164.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0165.html b/docs/dli/umn/dli_03_0165.html index 2d1a21d7..3bd7d1cc 100644 --- a/docs/dli/umn/dli_03_0165.html +++ b/docs/dli/umn/dli_03_0165.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0166.html b/docs/dli/umn/dli_03_0166.html index 23a4a792..e8b1d4e5 100644 --- a/docs/dli/umn/dli_03_0166.html +++ b/docs/dli/umn/dli_03_0166.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0167.html b/docs/dli/umn/dli_03_0167.html index 28565b0c..115d4b0b 100644 --- a/docs/dli/umn/dli_03_0167.html +++ b/docs/dli/umn/dli_03_0167.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0168.html b/docs/dli/umn/dli_03_0168.html index 9a2a0412..16f10ff5 100644 --- a/docs/dli/umn/dli_03_0168.html +++ b/docs/dli/umn/dli_03_0168.html @@ -34,7 +34,7 @@
              diff --git a/docs/dli/umn/dli_03_0169.html b/docs/dli/umn/dli_03_0169.html index f3f3a3b1..bb473ba3 100644 --- a/docs/dli/umn/dli_03_0169.html +++ b/docs/dli/umn/dli_03_0169.html @@ -14,7 +14,7 @@
              diff --git a/docs/dli/umn/dli_03_0170.html b/docs/dli/umn/dli_03_0170.html index a3a6f625..8096c214 100644 --- a/docs/dli/umn/dli_03_0170.html +++ b/docs/dli/umn/dli_03_0170.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0171.html b/docs/dli/umn/dli_03_0171.html index 4193548c..64370860 100644 --- a/docs/dli/umn/dli_03_0171.html +++ b/docs/dli/umn/dli_03_0171.html @@ -13,7 +13,7 @@
              diff --git a/docs/dli/umn/dli_03_0172.html b/docs/dli/umn/dli_03_0172.html index 8d4d729d..2abde769 100644 --- a/docs/dli/umn/dli_03_0172.html +++ b/docs/dli/umn/dli_03_0172.html @@ -19,7 +19,7 @@ at java.lang.Thread.run(Thread.java:748)
              diff --git a/docs/dli/umn/dli_03_0173.html b/docs/dli/umn/dli_03_0173.html index 0e8287f0..31100079 100644 --- a/docs/dli/umn/dli_03_0173.html +++ b/docs/dli/umn/dli_03_0173.html @@ -16,7 +16,7 @@ Cause by: ObsException: com.obs.services.exception.ObsException: OBSs servcie Er
              diff --git a/docs/dli/umn/dli_03_0174.html b/docs/dli/umn/dli_03_0174.html index 5f70d0a2..ea0c3b6f 100644 --- a/docs/dli/umn/dli_03_0174.html +++ b/docs/dli/umn/dli_03_0174.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0175.html b/docs/dli/umn/dli_03_0175.html index 01560ca1..c4481786 100644 --- a/docs/dli/umn/dli_03_0175.html +++ b/docs/dli/umn/dli_03_0175.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0176.html b/docs/dli/umn/dli_03_0176.html index 96720398..672d6328 100644 --- a/docs/dli/umn/dli_03_0176.html +++ b/docs/dli/umn/dli_03_0176.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0177.html b/docs/dli/umn/dli_03_0177.html index e446007a..e44a3af9 100644 --- a/docs/dli/umn/dli_03_0177.html +++ b/docs/dli/umn/dli_03_0177.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0179.html b/docs/dli/umn/dli_03_0179.html index 6b386618..c5904560 100644 --- a/docs/dli/umn/dli_03_0179.html +++ b/docs/dli/umn/dli_03_0179.html @@ -6,7 +6,7 @@

              Check Whether a Port Number Is Added to the End of the Domain Name or IP Address

              The port number is required for the connectivity test.

              The following example tests the connectivity between a queue and a specified RDS DB instance. The RDS DB instance uses port 3306.

              @@ -15,17 +15,14 @@

              Check Whether the Information of the Peer VPC and Subnet Is Correct.

              When you create an enhanced datasource connection, you need to specify the peer VPC and subnet.

              For example, to test the connectivity between a queue and a specified RDS DB instance, you need to specify the RDS VPC and subnet information.

              -

              Check Whether the CIDR Block of the Queue Overlaps That of the Data Source

              The CIDR block of the DLI queue bound with a datasource connection cannot overlap the CIDR block of the data source.

              +

              Check Whether the CIDR Block of the Queue Overlaps with That of the Data Source

              The CIDR block of the DLI queue bound with a datasource connection cannot overlap the CIDR block of the data source.

              You can check whether they overlap by viewing the connection logs.

              CIDR block conflicts of queue A and queue B. In this example, queue B is bound to an enhanced datasource connection to data source C. Therefore, a message is displayed, indicating that the network segment of queue A conflicts with that of data source C. As a result, a new enhanced datasource connection cannot be established.

              Solution: Modify the CIDR block of the queue or create another queue.

              Planning the CIDR blocks for your queues helps you to avoid this problem.

              Check Whether the VPC Administrator Permission Is Granted to DLI

              View the connection logs to check whether there is the required permission.

              -

              Figure 1 and Figure 2 show the logs when subnet ID and route ID of the destination cannot be obtained because there is no permission.

              Solution: Grant DLI the VPC Administrator permission and cancel the IAM ReadOnlyAccess authorization.

              -
              Figure 1 Viewing connection logs
              -
              Figure 2 Viewing connection logs

              Check Whether the Destination Security Group Allows Access from the CIDR Block of the Queue

              To connect to Kafka, GaussDB(DWS), and RDS instances, add security group rules for the DLI CIDR block to the security group where the instances belong. For example, to connect a queue to RDS, perform the following operations:
              1. Log in to the DLI console, choose Resources > Queue Management in the navigation pane on the left. On the displayed page, select the target queue, and click to expand the row containing the target queue to view its CIDR block.
              2. On the Instance Management page of the RDS console, click the instance name. In the Connection Information area, locate Database Port to obtain the port number of the RDS DB instance.
              3. In the Connection Information area locate the Security Group and click the group name to switch to the security group management page. Select the Inbound Rules tab and click Add Rule. Set the priority to 1, protocol to TCP, port to the database port number, and source to the CIDR block of the DLI queue. Click OK.
              @@ -39,7 +36,7 @@
              diff --git a/docs/dli/umn/dli_03_0180.html b/docs/dli/umn/dli_03_0180.html index 1e862dfb..c8502d37 100644 --- a/docs/dli/umn/dli_03_0180.html +++ b/docs/dli/umn/dli_03_0180.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0181.html b/docs/dli/umn/dli_03_0181.html index b3c9f3a1..c4c1ba63 100644 --- a/docs/dli/umn/dli_03_0181.html +++ b/docs/dli/umn/dli_03_0181.html @@ -18,7 +18,7 @@ bb" null null
              diff --git a/docs/dli/umn/dli_03_0182.html b/docs/dli/umn/dli_03_0182.html index 5bf17a88..c1cecc9b 100644 --- a/docs/dli/umn/dli_03_0182.html +++ b/docs/dli/umn/dli_03_0182.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0183.html b/docs/dli/umn/dli_03_0183.html index 7aa4717c..abe1c4f1 100644 --- a/docs/dli/umn/dli_03_0183.html +++ b/docs/dli/umn/dli_03_0183.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0184.html b/docs/dli/umn/dli_03_0184.html index b131277a..3e6e94ce 100644 --- a/docs/dli/umn/dli_03_0184.html +++ b/docs/dli/umn/dli_03_0184.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0186.html b/docs/dli/umn/dli_03_0186.html index 1dec468c..27209a27 100644 --- a/docs/dli/umn/dli_03_0186.html +++ b/docs/dli/umn/dli_03_0186.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0187.html b/docs/dli/umn/dli_03_0187.html index bf904404..11e5dd19 100644 --- a/docs/dli/umn/dli_03_0187.html +++ b/docs/dli/umn/dli_03_0187.html @@ -29,7 +29,7 @@ where to_char(from_unixtime(fs.special_start_time), 'yyyymmdd') = substr('202206
              diff --git a/docs/dli/umn/dli_03_0188.html b/docs/dli/umn/dli_03_0188.html index 15f23484..864dc107 100644 --- a/docs/dli/umn/dli_03_0188.html +++ b/docs/dli/umn/dli_03_0188.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0189.html b/docs/dli/umn/dli_03_0189.html index 07dccdc7..d9280e2f 100644 --- a/docs/dli/umn/dli_03_0189.html +++ b/docs/dli/umn/dli_03_0189.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0190.html b/docs/dli/umn/dli_03_0190.html index 43961854..ac860393 100644 --- a/docs/dli/umn/dli_03_0190.html +++ b/docs/dli/umn/dli_03_0190.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0191.html b/docs/dli/umn/dli_03_0191.html index 8cea3ea0..dde695be 100644 --- a/docs/dli/umn/dli_03_0191.html +++ b/docs/dli/umn/dli_03_0191.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0192.html b/docs/dli/umn/dli_03_0192.html index df90f7cc..79c664f4 100644 --- a/docs/dli/umn/dli_03_0192.html +++ b/docs/dli/umn/dli_03_0192.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0193.html b/docs/dli/umn/dli_03_0193.html index 2e750693..2aa80b6a 100644 --- a/docs/dli/umn/dli_03_0193.html +++ b/docs/dli/umn/dli_03_0193.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0195.html b/docs/dli/umn/dli_03_0195.html index 5fb93689..7aa36710 100644 --- a/docs/dli/umn/dli_03_0195.html +++ b/docs/dli/umn/dli_03_0195.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0196.html b/docs/dli/umn/dli_03_0196.html index 65255012..9471e4f2 100644 --- a/docs/dli/umn/dli_03_0196.html +++ b/docs/dli/umn/dli_03_0196.html @@ -34,7 +34,7 @@ SELECT count(1) as count,num from lefttbl group by lefttbl.num ORDER BY count d
              diff --git a/docs/dli/umn/dli_03_0200.html b/docs/dli/umn/dli_03_0200.html index 1fc511ac..d96b3758 100644 --- a/docs/dli/umn/dli_03_0200.html +++ b/docs/dli/umn/dli_03_0200.html @@ -16,7 +16,7 @@
              diff --git a/docs/dli/umn/dli_03_0201.html b/docs/dli/umn/dli_03_0201.html index 64719534..d0e6f9be 100644 --- a/docs/dli/umn/dli_03_0201.html +++ b/docs/dli/umn/dli_03_0201.html @@ -14,7 +14,7 @@
              diff --git a/docs/dli/umn/dli_03_0204.html b/docs/dli/umn/dli_03_0204.html new file mode 100644 index 00000000..ab5ab004 --- /dev/null +++ b/docs/dli/umn/dli_03_0204.html @@ -0,0 +1,28 @@ + + +

              Job Development

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0206.html b/docs/dli/umn/dli_03_0206.html new file mode 100644 index 00000000..42657827 --- /dev/null +++ b/docs/dli/umn/dli_03_0206.html @@ -0,0 +1,40 @@ + + +

              Job O&M Errors

              +

              +
              +
              + + + +
              + diff --git a/docs/dli/umn/dli_03_0207.html b/docs/dli/umn/dli_03_0207.html index 2501b1ed..ed8045b0 100644 --- a/docs/dli/umn/dli_03_0207.html +++ b/docs/dli/umn/dli_03_0207.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0208.html b/docs/dli/umn/dli_03_0208.html index be2b8906..5859cee2 100644 --- a/docs/dli/umn/dli_03_0208.html +++ b/docs/dli/umn/dli_03_0208.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0209.html b/docs/dli/umn/dli_03_0209.html index 22bac4ff..0bdd5b03 100644 --- a/docs/dli/umn/dli_03_0209.html +++ b/docs/dli/umn/dli_03_0209.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0210.html b/docs/dli/umn/dli_03_0210.html index 05971e57..ffb212f2 100644 --- a/docs/dli/umn/dli_03_0210.html +++ b/docs/dli/umn/dli_03_0210.html @@ -9,7 +9,7 @@
              diff --git a/docs/dli/umn/dli_03_0211.html b/docs/dli/umn/dli_03_0211.html new file mode 100644 index 00000000..811df367 --- /dev/null +++ b/docs/dli/umn/dli_03_0211.html @@ -0,0 +1,50 @@ + + +

              O&M Guide

              +

              +
              +
              + + + +
              + diff --git a/docs/dli/umn/dli_03_0212.html b/docs/dli/umn/dli_03_0212.html index 04d2fc0f..f21abd78 100644 --- a/docs/dli/umn/dli_03_0212.html +++ b/docs/dli/umn/dli_03_0212.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0213.html b/docs/dli/umn/dli_03_0213.html index beda0b22..fa24d027 100644 --- a/docs/dli/umn/dli_03_0213.html +++ b/docs/dli/umn/dli_03_0213.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0214.html b/docs/dli/umn/dli_03_0214.html index e77830be..80ec38bf 100644 --- a/docs/dli/umn/dli_03_0214.html +++ b/docs/dli/umn/dli_03_0214.html @@ -8,7 +8,7 @@
              diff --git a/docs/dli/umn/dli_03_0215.html b/docs/dli/umn/dli_03_0215.html index c9c80f89..166831ef 100644 --- a/docs/dli/umn/dli_03_0215.html +++ b/docs/dli/umn/dli_03_0215.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0216.html b/docs/dli/umn/dli_03_0216.html new file mode 100644 index 00000000..6c45e83b --- /dev/null +++ b/docs/dli/umn/dli_03_0216.html @@ -0,0 +1,16 @@ + + +

              Usage

              +

              +
              +
              + + + +
              + diff --git a/docs/dli/umn/dli_03_0217.html b/docs/dli/umn/dli_03_0217.html new file mode 100644 index 00000000..6af3d3a9 --- /dev/null +++ b/docs/dli/umn/dli_03_0217.html @@ -0,0 +1,30 @@ + + +

              Job Development

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0218.html b/docs/dli/umn/dli_03_0218.html new file mode 100644 index 00000000..215efdd9 --- /dev/null +++ b/docs/dli/umn/dli_03_0218.html @@ -0,0 +1,24 @@ + + +

              Job O&M Errors

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0219.html b/docs/dli/umn/dli_03_0219.html new file mode 100644 index 00000000..50e85426 --- /dev/null +++ b/docs/dli/umn/dli_03_0219.html @@ -0,0 +1,18 @@ + + +

              O&M Guide

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0220.html b/docs/dli/umn/dli_03_0220.html index e64c7094..a68060e7 100644 --- a/docs/dli/umn/dli_03_0220.html +++ b/docs/dli/umn/dli_03_0220.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0221.html b/docs/dli/umn/dli_03_0221.html new file mode 100644 index 00000000..46cfdd36 --- /dev/null +++ b/docs/dli/umn/dli_03_0221.html @@ -0,0 +1,32 @@ + + +

              Usage

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0222.html b/docs/dli/umn/dli_03_0222.html new file mode 100644 index 00000000..1c77d15d --- /dev/null +++ b/docs/dli/umn/dli_03_0222.html @@ -0,0 +1,18 @@ + + +

              Job Management

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0223.html b/docs/dli/umn/dli_03_0223.html new file mode 100644 index 00000000..7ec2c8e7 --- /dev/null +++ b/docs/dli/umn/dli_03_0223.html @@ -0,0 +1,17 @@ + + +

              Usage

              +
              + + diff --git a/docs/dli/umn/dli_03_0226.html b/docs/dli/umn/dli_03_0226.html new file mode 100644 index 00000000..c18d1118 --- /dev/null +++ b/docs/dli/umn/dli_03_0226.html @@ -0,0 +1,26 @@ + + +

              O&M Guide

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0227.html b/docs/dli/umn/dli_03_0227.html index 63056f78..5ad8021c 100644 --- a/docs/dli/umn/dli_03_0227.html +++ b/docs/dli/umn/dli_03_0227.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0228.html b/docs/dli/umn/dli_03_0228.html index e711254a..48e74058 100644 --- a/docs/dli/umn/dli_03_0228.html +++ b/docs/dli/umn/dli_03_0228.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0229.html b/docs/dli/umn/dli_03_0229.html new file mode 100644 index 00000000..48993c0e --- /dev/null +++ b/docs/dli/umn/dli_03_0229.html @@ -0,0 +1,22 @@ + + +

              Usage

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0230.html b/docs/dli/umn/dli_03_0230.html new file mode 100644 index 00000000..77622dfe --- /dev/null +++ b/docs/dli/umn/dli_03_0230.html @@ -0,0 +1,30 @@ + + +

              O&M Guide

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0231.html b/docs/dli/umn/dli_03_0231.html index 00d4db3e..ae0efc8a 100644 --- a/docs/dli/umn/dli_03_0231.html +++ b/docs/dli/umn/dli_03_0231.html @@ -18,7 +18,7 @@
              diff --git a/docs/dli/umn/dli_03_0232.html b/docs/dli/umn/dli_03_0232.html index 77bb0950..c1eef455 100644 --- a/docs/dli/umn/dli_03_0232.html +++ b/docs/dli/umn/dli_03_0232.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0233.html b/docs/dli/umn/dli_03_0233.html index d470fb6c..b5329039 100644 --- a/docs/dli/umn/dli_03_0233.html +++ b/docs/dli/umn/dli_03_0233.html @@ -11,7 +11,7 @@
              diff --git a/docs/dli/umn/dli_03_0234.html b/docs/dli/umn/dli_03_0234.html index 3d77106b..d86c4e47 100644 --- a/docs/dli/umn/dli_03_0234.html +++ b/docs/dli/umn/dli_03_0234.html @@ -28,7 +28,7 @@
              diff --git a/docs/dli/umn/dli_03_0235.html b/docs/dli/umn/dli_03_0235.html index a4f87120..9252b110 100644 --- a/docs/dli/umn/dli_03_0235.html +++ b/docs/dli/umn/dli_03_0235.html @@ -10,7 +10,7 @@
              diff --git a/docs/dli/umn/dli_03_0236.html b/docs/dli/umn/dli_03_0236.html index c89f2ae6..424248ee 100644 --- a/docs/dli/umn/dli_03_0236.html +++ b/docs/dli/umn/dli_03_0236.html @@ -13,7 +13,7 @@
              diff --git a/docs/dli/umn/dli_03_0237.html b/docs/dli/umn/dli_03_0237.html index 7d8d5960..5275b91a 100644 --- a/docs/dli/umn/dli_03_0237.html +++ b/docs/dli/umn/dli_03_0237.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0238.html b/docs/dli/umn/dli_03_0238.html index 464bd2f5..db83be76 100644 --- a/docs/dli/umn/dli_03_0238.html +++ b/docs/dli/umn/dli_03_0238.html @@ -14,7 +14,7 @@
              diff --git a/docs/dli/umn/dli_03_0239.html b/docs/dli/umn/dli_03_0239.html index 35a3643a..7b3890f6 100644 --- a/docs/dli/umn/dli_03_0239.html +++ b/docs/dli/umn/dli_03_0239.html @@ -16,7 +16,7 @@
              diff --git a/docs/dli/umn/dli_03_0250.html b/docs/dli/umn/dli_03_0250.html index 73f27613..147e7e44 100644 --- a/docs/dli/umn/dli_03_0250.html +++ b/docs/dli/umn/dli_03_0250.html @@ -18,7 +18,7 @@
              diff --git a/docs/dli/umn/dli_03_0251.html b/docs/dli/umn/dli_03_0251.html index 315ef407..ffa64a33 100644 --- a/docs/dli/umn/dli_03_0251.html +++ b/docs/dli/umn/dli_03_0251.html @@ -15,7 +15,7 @@
              diff --git a/docs/dli/umn/dli_03_0252.html b/docs/dli/umn/dli_03_0252.html index 52cb8c52..516f3d27 100644 --- a/docs/dli/umn/dli_03_0252.html +++ b/docs/dli/umn/dli_03_0252.html @@ -13,7 +13,7 @@
              diff --git a/docs/dli/umn/dli_03_0253.html b/docs/dli/umn/dli_03_0253.html index 9e28195e..af83c943 100644 --- a/docs/dli/umn/dli_03_0253.html +++ b/docs/dli/umn/dli_03_0253.html @@ -15,7 +15,7 @@ Detail: Failing row contains (400070309, 9.00, 25, null, 2020-09-22, 2020-09-23
              diff --git a/docs/dli/umn/dli_03_0254.html b/docs/dli/umn/dli_03_0254.html index 110e0911..f7363335 100644 --- a/docs/dli/umn/dli_03_0254.html +++ b/docs/dli/umn/dli_03_0254.html @@ -12,7 +12,7 @@
              diff --git a/docs/dli/umn/dli_03_0256.html b/docs/dli/umn/dli_03_0256.html new file mode 100644 index 00000000..f2a1739c --- /dev/null +++ b/docs/dli/umn/dli_03_0256.html @@ -0,0 +1,32 @@ + + +

              Datasource Connection O&M

              +

              +
              +
              + + + +
              + diff --git a/docs/dli/umn/dli_03_0257.html b/docs/dli/umn/dli_03_0257.html index 0d252f32..650e62a2 100644 --- a/docs/dli/umn/dli_03_0257.html +++ b/docs/dli/umn/dli_03_0257.html @@ -6,7 +6,7 @@
              diff --git a/docs/dli/umn/dli_03_0259.html b/docs/dli/umn/dli_03_0259.html index 619f1243..5fee2c9a 100644 --- a/docs/dli/umn/dli_03_0259.html +++ b/docs/dli/umn/dli_03_0259.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0260.html b/docs/dli/umn/dli_03_0260.html index 512f49c7..2b6619b4 100644 --- a/docs/dli/umn/dli_03_0260.html +++ b/docs/dli/umn/dli_03_0260.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0261.html b/docs/dli/umn/dli_03_0261.html new file mode 100644 index 00000000..e7d23c9c --- /dev/null +++ b/docs/dli/umn/dli_03_0261.html @@ -0,0 +1,16 @@ + + +

              Privacy and Security

              +

              +
              + + diff --git a/docs/dli/umn/dli_03_0263.html b/docs/dli/umn/dli_03_0263.html index 9aa6428c..a552aaab 100644 --- a/docs/dli/umn/dli_03_0263.html +++ b/docs/dli/umn/dli_03_0263.html @@ -7,7 +7,7 @@
              diff --git a/docs/dli/umn/dli_03_0264.html b/docs/dli/umn/dli_03_0264.html index 5b527b08..420a8997 100644 --- a/docs/dli/umn/dli_03_0264.html +++ b/docs/dli/umn/dli_03_0264.html @@ -7,7 +7,7 @@

              How Do I Apply for a Higher Quota?

              The system does not support online quota adjustment. To increase a resource quota, dial the hotline or send an email to the customer service. We will process your application and inform you of the progress by phone call or email.

              Before dialing the hotline number or sending an email, ensure that the following information has been obtained:

              -
              • Domain name, project name, and project ID

                Log in to the management console using the cloud account, click the username in the upper right corner, select My Credentials from the drop-down list, and obtain the domain name, project name, and project ID on the My Credentials page.

                +
                • Domain name, project name, and project ID

                  To obtain the preceding information, log in to the management console, click the username in the upper right corner, and choose My Credentials from the drop-down list.

                • Quota information, including:
                  • ServiceName
                  • Quota type
                  • Required quota

                Learn how to obtain the service hotline and email address.

                @@ -15,7 +15,7 @@
              diff --git a/docs/dli/umn/dli_03_0265.html b/docs/dli/umn/dli_03_0265.html index cb75808b..24f63c33 100644 --- a/docs/dli/umn/dli_03_0265.html +++ b/docs/dli/umn/dli_03_0265.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0266.html b/docs/dli/umn/dli_03_0266.html index c1eb1748..7471fdaf 100644 --- a/docs/dli/umn/dli_03_0266.html +++ b/docs/dli/umn/dli_03_0266.html @@ -5,7 +5,7 @@
              diff --git a/docs/dli/umn/dli_03_0276.html b/docs/dli/umn/dli_03_0276.html new file mode 100644 index 00000000..aa7c0b8d --- /dev/null +++ b/docs/dli/umn/dli_03_0276.html @@ -0,0 +1,32 @@ + + +

              How Can I Check the Actual and Used CUs for an Elastic Resource Pool as Well as the Required CUs for a Job?

              +

              In daily big data analysis work, it is important to allocate and manage compute resources properly to provide a good job execution environment.

              +

              You can allocate resources and adjust task execution order based on the job's compute needs and data scale, and schedule different elastic resource pools or queues to adapt to different workloads. To ensure normal job execution, the CUs required for the submitted job should be less than or equal to the remaining available CUs in the elastic resource pool.

              +

              This section describes how to view the usage of compute resources in an elastic resource pool and the required CUs for a job.

              +

              Checking the Actual and Used CUs for an Elastic Resource Pool

              1. Log in to the DLI management console.
              2. Choose Resources > Resource Pool.

                Locate the target resource pool in the list and check its Actual CUs and Used CUs.

                +
                • Actual CUs: number of CUs that can be allocated in the elastic resource pool.
                • Used CUs: CUs that have been allocated to and used by the current elastic resource pool.
                +

                To ensure normal job execution, the CUs required for the submitted job should be less than or equal to the remaining available CUs in the elastic resource pool.

                +

                For details about the number of CUs required by different types of jobs, see Checking the Required CUs for a Job.

                +
              +
              +

              Checking the Required CUs for a Job

              • SQL job:

                Use the monitoring dashboard provided by Cloud Eye to check the number of running and submitted jobs, and use the job count to determine the overall resource usage of SQL jobs.

                +
              • Flink job:
                1. Log in to the DLI management console.
                2. In the navigation pane on the left, choose Job Management > Flink Jobs.
                3. In the job list, click the name of the target job.
                4. Click Flink Job Settings then Resources.
                5. Check the value of CUs, that is, the total number of CUs used by the job.

                  You can set the number of CUs on the job editing page using the following formula: CUs = Job Manager CUs + (Parallelism/Slots per TM) x CUs per TM.

                  +
                +
              • Spark job:
                1. Log in to the DLI management console.
                2. In the navigation pane on the left, choose Job Management > Spark Jobs.
                3. Locate the target job in the list and click Edit in the Operation column.

                  Check the compute resource specifications configured for the job.

                  +

                  The formula is as follows:

                  +

                  Number of CUs of a Spark job = Number of CUs used by executors + Number of CUs used by the driver

                  +

                  Number of CUs used by executors = max {[(Executors x Executor Memory)/4], (Executors x Executor Cores)} x 1

                  +

                  Number of CUs used by the driver = max [(Driver Memory/4), Driver Cores] x 1

                  +
                  • If Advanced Settings is set to Skip for a Spark job, resource specifications of type A are used by default.
                  • The unit of compute resource specifications for Spark jobs is CU. One CU consists of one CPU and 4 GB of memory. In the formulas above, x1 represents the conversion of CPU to CU.
                  • To calculate the required CUs for the executors or driver, use either the memory or the number of CPU cores. Choose the larger value between the two as the number of required CUs.
                  +
                  +
                +
              +
              +
              +
              + +
              + diff --git a/docs/dli/umn/dli_07_0002.html b/docs/dli/umn/dli_07_0002.html index 97924b78..5b980c33 100644 --- a/docs/dli/umn/dli_07_0002.html +++ b/docs/dli/umn/dli_07_0002.html @@ -8,7 +8,7 @@

              Federated Analysis of Heterogeneous Data Sources

              • Digital service transformation for car companies

                In the face of new competition pressures and changes in travel services, car companies build the IoV cloud platform and IVI OS to streamline Internet applications and vehicle use scenarios, completing digital service transformation for car companies. This delivers better travel experience for vehicle owners, increases the competitiveness of car companies, and promotes sales growth. For example, DLI can be used to collect and analyze daily vehicle metric data (such as batteries, engines, tire pressure, and airbags), and give maintenance suggestions to vehicle owners in time.

                -
              • Advantages
                • No need for migration in multi-source data analysis: RDS stores the basic information about vehicles and vehicle owners, table store saves real-time vehicle location and health status, and DWS stores periodic metric statistics. DLI allows federated analysis on data from multiple sources without data migration.
                • Tiered data storage: Car companies need to retain all historical data to support auditing and other services that require infrequent data access. Warm and cold data is stored in OBS and frequently accessed data is stored in DWS, reducing the overall storage cost.
                • Rapid and agile alarm triggering: There are no special requirements for the CPU, memory, hard disk space, and bandwidth.
                +
              • Advantages
                • No need for migration in multi-source data analysis: RDS stores the basic information about vehicles and vehicle owners, table store saves real-time vehicle location and health status, and GaussDB(DWS) stores periodic metric statistics. DLI allows federated analysis on data from multiple sources without data migration.
                • Tiered data storage: Car companies need to retain all historical data to support auditing and other services that require infrequent data access. Warm and cold data is stored in OBS and frequently accessed data is stored in GaussDB(DWS), reducing the overall storage cost.
                • Rapid and agile alarm triggering: There are no special requirements for the CPU, memory, hard disk space, and bandwidth.

              Big Data ETL Processing

              • Carrier big data analysis

                Carriers typically require petabytes, or even exabytes of data storage, for both structured (base station details) and unstructured (messages and communications) data. They need to be able to access the data with extremely low data latency. It is a major challenge to extract value from this data efficiently. DLI provides multi-mode engines such as batch processing and stream processing to break down data silos and perform unified data analysis.

                diff --git a/docs/dli/umn/dli_07_0003.html b/docs/dli/umn/dli_07_0003.html index 2fdeefd4..d9e76c70 100644 --- a/docs/dli/umn/dli_07_0003.html +++ b/docs/dli/umn/dli_07_0003.html @@ -10,8 +10,6 @@

              Metadata

              Metadata is used to define data types. It describes information about the data, including the source, size, format, and other data features. In database fields, metadata interprets data content in the data warehouse.

              -

              Compute Resource

              Queues in DLI are computing resources, which are the basis for using DLI. SQL jobs and Spark jobs performed by users require computing resources.

              -

              Storage Resource

              Storage resources in DLI are used to store data of databases and DLI tables. To import data to DLI, storage resources must be prepared. The storage resources reflect the volume of data you are allowed to store in DLI.

              SQL Job

              SQL job refers to the SQL statement executed in the SQL job editor. It serves as the execution entity used for performing operations, such as importing and exporting data, in the SQL job editor.

              diff --git a/docs/dli/umn/dli_07_0005.html b/docs/dli/umn/dli_07_0005.html index 9fd0c057..a96dfde7 100644 --- a/docs/dli/umn/dli_07_0005.html +++ b/docs/dli/umn/dli_07_0005.html @@ -11,6 +11,11 @@

              For more constraints on using a DLI queue, see Queue Overview.

              +

              On Elastic Resource Pools

              • The region of an elastic resource pool cannot be changed.
              • Jobs of Flink 1.10 or later can run in elastic resource pools.
              • The network segment of an elastic resource pool cannot be changed after being set.
              • You can only view the scaling history of resource pools in the last 30 days.
              • Elastic resource pools cannot access the Internet.

                +

                +
              +

              For more constraints on elastic resource pools, see Elastic Resource Pool Overview.

              +

              On Resources

              • Database
                • default is the database built in DLI. You cannot create a database named default.
                • DLI supports a maximum of 50 databases.
              • Table
                • DLI supports a maximum of 5,000 tables.
                • DLI supports the following table types:
                  • MANAGED: Data is stored in a DLI table.
                  • EXTERNAL: Data is stored in an OBS table.
                  • View: A view can only be created using SQL statements.
                  • Datasource table: The table type is also EXTERNAL.
                • You cannot specify a storage path when creating a DLI table.
                diff --git a/docs/dli/umn/dli_07_0006.html b/docs/dli/umn/dli_07_0006.html index 0fedc548..732c4d81 100644 --- a/docs/dli/umn/dli_07_0006.html +++ b/docs/dli/umn/dli_07_0006.html @@ -645,6 +645,22 @@

              √

              Enhanced datasource connection

              +

              BIND_QUEUE

              +

              Binding an enhanced datasource connection to a queue

              +

              It is only used to grant permissions across projects.

              +

              ×

              +

              ×

              +

              ×

              +

              ×

              +
              diff --git a/docs/dli/umn/dli_07_0009.html b/docs/dli/umn/dli_07_0009.html index c834bc96..bc4ad33f 100644 --- a/docs/dli/umn/dli_07_0009.html +++ b/docs/dli/umn/dli_07_0009.html @@ -10,7 +10,7 @@

              How Do I Apply for a Higher Quota?

              The system does not support online quota adjustment. To increase a resource quota, dial the hotline or send an email to customer service. We will process your application and inform you of the progress by phone or email.

              Before dialing the hotline number or sending an email, ensure that the following information has been obtained:

              -
              • Domain name, project name, and project ID

                Log in to the management console using the cloud account, click the username in the upper right corner, select My Credentials from the drop-down list, and obtain the domain name, project name, and project ID on the My Credentials page.

                +
                • Domain name, project name, and project ID

                  To obtain the preceding information, log in to the management console, click the username in the upper right corner, and choose My Credentials from the drop-down list.

                • Quota information, including:
                  • ServiceName
                  • Quota type
                  • Required quota

                Learn how to obtain the service hotline and email address.

                diff --git a/docs/dli/umn/en-us_image_0000001262007480.png b/docs/dli/umn/en-us_image_0000001262007480.png new file mode 100644 index 0000000000000000000000000000000000000000..82567adc85a45b657fd2f7fc6276818f4fcd53f7 GIT binary patch literal 305 zcmeAS@N?(olHy`uVBq!ia0vp^>Oic@!3HD`W`D2)QjEnx?oJHr&dIz4a#+$GeH|GX zHuiJ>Nn{1`ISV`@iy0XB4ude`@%$AjK*3|4E{-7)?r&!}ay1(WxK^KDSJiyaCn{#e z+g0f+A4KgrxaIOP>7Ry1J|!8?4#wB0oDoiT;7^g5KKW5m(1JUgBiHZKlLpy%U@kq?;dgR=@N-)%2kX7IT5#>THpBv^bv!ntDnm{r-UW|Bj$XS literal 0 HcmV?d00001 diff --git a/docs/dli/umn/en-us_image_0000001309687485.png b/docs/dli/umn/en-us_image_0000001309687485.png new file mode 100644 index 0000000000000000000000000000000000000000..34ad74b829900a74c6a7269d729dc2295f004b1b GIT binary patch literal 68213 zcmeEt_di?V-@aB=7j4lRHELDuqV_H|Ywr=Xw%8*!RV}S8h}zWNYOheVR%(mXt`Q@# zi4p62^z(WCh3_xVlNWMcC&#((`#rAr^}cfMFPiF#BzI`;5D*ZMC@aZp6A%!Q5fEG( zxpfmb^BDTN1$evds$}FrKtTHQ>i1d_C+QOcf~N$^^0K-<=^L}Y0lLW9p55&xfdnfu zC8eiaw$yp_JiO7@ZZb)9)knv{@W)R^P)W> zF-y0Ad0o9oe?)j+`_CbPXW$L8{~rI8fBNu0XJy{}-$}p{|62^Q9CH;8AXQ)ACK#8m z9vc0qq_`Ny;tMxJqtWZF`-7-I-z}F+JBNN}h#UO+6)EVp$TE;FDlqrU-auLT%gS$G z&6AUp=F=_zMNoQUn$)?9^5_R5jbBYHSK&meS4c#*-M9qN)77Qj`I???`iO#JZ84Hp z9|D2cJ|ZG;jf;q+1Yew;?46$P`N8{#hLTy3NMzsYKJc@?*)TuB-7rJ83cXGs#jB#C za&S29Qd(X8%FD|OnpalF1!S79c5_&kCop%XsA_9O^HtKio;6=yoa*W8m!zl9KoUrq2u*>H4IE02z7ieYGV{v(ZrMYBK*n)K zNAOf?v033A1X4dJ>7D+VhijCW;ds%Ag5Y>&x=m67^c$OPRd?F8KQZGI6AmNaUK=-6 za~WrSA!jQ#s5GOyNP|Mj;Je+}lCrYI`E3JzeV9Tt&FmJGVXV-+`0OkI1evtcmW_n6?F&?mK! z%_+Ncrfq5Keu`*XU%!^sm+lvEv&!x$>RfC^%SHAO+xwx0Z{L^ze*;b|?MhL#?1@2! 
zkvyq&eH$MU(fi;A!C%AAr#76D{x)96EpqD?K(si_wfdHey(TseoKH1G)E4~EpKa*?FR(!gf zUtO&|kR{#T#kG4_@u}3Ha&%*|27Ppzl$BMfm8($N)@EjHoevDL*8RlAQTWeG|3gRg z%8J8y(Fl;Y6N&6PfK_z$^(VGglarE^p@S0pzX>N;p}(L>N%^}=J)EMVuQ?51|2$k) z=2+KlxolX8db0l?Urdx40e6YH`2dT>)&c9?+uNHu7k>TvwQ<$c)^2?(>bR1UenOrT zEdztX$r@Wuf&H4H=bW6#nI?~`aL-pwb~sq$17cNEPcJ_}D$Btks4$$~PXtfTeW{IhiIy>i&?EXneRjv;28Z%Go;PV z+Ndh8lQ4MOg7}wlh3KF0ajFclpdu{&V1{@D@~@Hqc>*j{>ooa92Fplq7h2uR_t0^X zh@eM*hVV;%em*qs_wR{ozkqnC+xSsuDsGML=YK@WtFNbLUHU`|p{^?{8{+QnZsO?^ zI92a7t(8(O9DEVr3tzB*EGUSik?>I}ZT5xFef7fr-Y0nKrt3JB-!L&zk|UZOB&jX9 zkVxyB5WB@1u-rQ)=yyN`M5n_-C)3pcva&hAeLWYybWM}oP>{f{U3^9u|baODq=a6ha`_o!?6|$ zVs-H`ZmKw(S2J$H#3bVsaJs5gmIfl3H#V~c3WeG|Xh6?PxW`_Pqxx`h((&lnu%l3b&-a?I~BNgWgi;J}Y4+8MM=IZJI1fGNA0r>YNMW%*JKlA$+bIpcoC%dO2j&bGeEJCdbi)oa zR;cmvt~pss5rJ#eFYdTTKO6@fSRhblfHK#-3|0NsEuRv*2*?Q7>r-qcOh;jz#@u4Y2e7#MzTq#Py^pz#^HKzCAsAnR zI1&0l9JA!kP;4!vrCerM+b^QLCOmJwg4%qzs5Pc=fIh~=x#_jX>R>eFL4SW|sN`Kr zNUNNU+wP*BQ0Ik?fyqragEGSro49AA_73(3Ya_;uRmQ5Y)+vmKeuXK!ve?m#8*y1z zwe>>GNPe=}6b7G`kwLfV<2YSkj{JUACq0~6_xjiOPPVRc^W9A{g9@(5i!_)*j0DKC zJL-##zdTUTFm25-_G8x~tWd`RZIH@I?(Jl#uAxr^P(80|m(XfMcD4)<^Ib)(?iZJo@Z+bZUD~;eogQleHSL*3W?qxW zYQ3)u0SK|B!GW77N7+w{U+OO%!l8M~QC2vbz?i}2Z4Hc6o<|mIgX1IiH>gu$O}oEz@V zfPzjQr3!*4t_gF3CvAHXh_Nda)YqR{_^XAumfl52v448*bn`PQY<}BiO!~_Wfz^RD zZ_UZGaPZo24vm70dc8VArbQXBk zWYq^G0KsBw#=m3rrqWjaOWoYP8!oQ@{LVRr7@wK zre3`!^UhGW-b^}ZPja(LzzV8k#rkB^-4VVxor4i&HTFZj?%#j~=t>Jzle@d^zPn-& zt*x&B`(G^>1QHRj#EjdIfbCe;bb*1kP9HsfEH5B-*uGUzP*5itG+v|ygozS(qRbxW zXJxz(P#;pmi{8ZA!-7DyG@)`mYNnq0Hwqf_0-xk2CX}F90^+0Yt z1I0lj*_Lf1L&Ku0%7N}ra+<5j_1AGIX_!UjPuv|<@#)B_&bn z;b0A$l@4o0J^irxT)2mr8&imUc4>{A?D^>mxYqteyGdt8MMY!k$WJK^cA?5DY?dEs z7Eq&usDf-7D-*X>(5c1$u=YP3y0_g6OiNcwE2pfiOh~qNW5W>y@QPG{)eeX-=M!OP z(^4rp1Q@Mn077E^^h&b2UqVTPHM!W>j;cO<);>K?D6MGb@M|X~pluhEjH614dGgRm zkr{y2`Lh+_DY!4412Gst84ez^va1E29&Q4qapOhqi_wntH8}J3BlfO%C3SWCedFao zW}Tt#h zX_+=(N4fC?81BxUJ6AO7T5E6ZO3VkZhX7NCc4p)n0$lssWxL(TH%P3#<2~ZPhg(i- 
zJAq7e!!cs5f%pSp$}AQQHqn){8|}Tgb^%CUS$R2<@cvnj+VBfDHenhONwRp^@|fz+ z06!F?J-+@x`iYs*^Oh!M+@-Guj-i#a0mW@uwzeH@-IugD5_5kZ<$pArL(i7ML7jUo zp|Ibb0q!45k%{!+B4Lm+1;{55m(3?DX+ZH?`>N`~vV0bm zaof__10;Kz;)dMbj{7G8#4|rw+{c`F?>^fz^H`Da`B4@XiZrWwG3rYr72xwbcs>Ql zEmWNkP62fJSG%u1-+A5LnpBhJGZp%edtm|gVilzn9b|o|j7n=yEm>%i6w7}zIXOAv z*w)+Ze`<@k>V=ZPg%p_z*!`1(2@@B4psMGNrS?RN@6@hH`a>8dR$mt=dWF$Tie041 z7sN2A#^YL;A}^0HNKU`3On~7$Bqp;|Ru-+lcAbE@!>#tLy{)348Sd`Sibxpe_)PUS zDxU5h1};6r+@j3yybewhxAGbiDfvbH!s0zkan8cp{eoIHad8ur=|0^#nm2e(*igUx z)7`FX10Ozg3aR?lvh7LCnK8-A+uoRrvC?$xjjK-!shfFIkbf?$x?aRfL?CkU%R2fe z5dSJbw|2!muYBJ{B14)VDrnh#PwndqSx)`3xSmpE<&mMt)dZxA^6NK4aGFc z_h@IHy^(=%-~1Wz368X4s@H;Y}Zu&nH$Xl_YN z)s{5k8o`5y$ujQ{UavSfIHtHR{2S@YVAKNI$2RzW&INTHtAVb>#GU7F<+|n|^UVAu z$yr&!jY#oqrSQ|GSm_v}hCzw@1kDxcWxCPgtcvxUMFIOfP9#DIl|2>p$TE56Pc#ra zE3*P^Ngmwb8$Ad2jlua4dH!EO%jo`N;5Ac7HqaLo+8gP5UKl#*M(mcWxca8SZvpv9YG{7(@bLmMv}g@d%x+MoW5 zPNQll)S64R+so=G^U%uHRSY3!G}M;Bz!xnzT&9>tAGxzj&WAj z&JMKY_j+y+lJv^+pU12u&gTj~e>|e~q31#jqxzaQx}At18|)x%yFf)P4k_!7W#DC} zT`MUskG|j$7H+La0)&bb1E&ub=_1MrY~e1)(+w`k2S%&*12pC> z+mF_FUiS}c`_pCj9xQ}D@|{2T9*+772pF>g`vc`Mn^F4&+1O^EgQwi^)py)kUSBSb z;U3G*GZmU1p;l!(!TfhNOHk}heSYW3=K6h=1(*{PUfoNx*_bsCX4Dq|cwc&N&FG`7 z*=LN>#Jwwkb_lTbx5*GdLeSCI*Y_0eKc5Z0JWmBi1UkOUxPEe@;Pvg`%e1^hZy#q3 zEBrpWhQwq6sY~l_bKcO0ta$Z>uKUt6D}SyGhAC|?$5ysi3~V2wMo!Dj;3`+-(<8uY z6%C9$Wm*FiPOtp|VJvWA;5ebn`DyqzC(v{`VLUI-j~J5b?uwU~I60MP249>o2L%&k z{GiIXJieQHaHg#ma6w`nD`o zsm#5W(u+H@nAnk|0AR8F=C7Ef{{DWU%GZPC0L$HPuyfIL8c017FApGiKS-V&_46}= zxBV5s&;bT;)wg+u*QoL|$pd}x6#)soN#f|m3`793%&11!+`JH6aT3!VLsv>K$qzKn zIH0SmYqC#*g%uJhUjIz~?2YCX?+tH2-?BxJ&VRg3sbD_z`=0G=8K|WmdF!2))(E4< zNa~A)SKOdxT|n@f=31on_4#t+f%v`zfzmy5(WM)b_}TXtt9{6+HgKVpsr#z|8tIpE z9u<;0)b`4Xn>AN*MA*&t$6Q}4f%f65`UC8C?cGLvOfm|KtV5YibBOOufh&AwTm8lM2;^y%+{g6_3C?_jxbQslw9KrJxBK&GS#px*TV{vCDk zq@s5O_LmQB-!%h%36BANxzDRiM_cdCnntTFQ{y zOeu?}m5L%RbFD&If_B3)K&`Mp*|Sc4@%B|I_yWqR%A)M+>x3eA{8s_NPv?MqD2F&Z 
zi^h?G%jdiJ0IiYR@P#=bu~j*@Y}V_e&EbAYHba@*m8jCImR8Y8GJleDxq~FuK@u|1lksE;=kt8D+uKOhz_Z2|FVx7V(;bUYS4B)urGU>#}+vI>& z@+@npFwLdSh=^!`3szF1a#dq6KWuDmu~!<)*n-ofeY#YjLMd?)Z_sYQ71}rYe8*t1 zNhvACsi_Vp0c~KYTu zg#1Y`cTexW#7??~1Pv)8pv@}T+l$Z-Wfc`|x>EVPmkO?UJ|Km(nE5Oa0iKtysq=7m z2ZYzWuDK2NYVkl>+0zya$TG~tS?j+N+osjZ^0EUE0Zs)@iZP(3>Nl#%JV_Kr7VR>t z8QvGN#QV-o=*I(qgbRp2#f?60p79r*ogX0HHJLzw`Qb46!gD?#srgj9IPU7(Dz<9A zZGag1(q&Ep2u|Z90jRQ}zP?}Y-W zizsS=se9kD{I^87O%3zf&(K50H7kLvY=yl*v)9(9PDDghUg5#4m`NX4;Nqg$hs8Pc zC$f($g_Asa3<{Rm+Xw2h*O;30_!SK3>(c~2p1*8@3sh_>f;BfZt!!-SFj=*bgN}5fQKM^pQ^_#eb8sAH^%&Q zdk2}>OPYOJxv8bC{S7dfWb@XSaW~@9`g;88c^Rx)NU{jb;jGS-X`bSaE+dZVLz&7k=T`)ojbP-VdAe9!NAv6(ddR2@IFu> z#yMoD`eYopjU)h}<4Sc+FbdAT7(G}wP=P+?@+m=LJKWJXIP(^^O z_t~P^`FRto@#-M5HzyFy7k+sn=)yJ8yn0!onQJ~vyvmTFLT5QWm{&|Z>b6Ux7* zk?7X62+EVI_YX`ed3>fPJ`t3?6}q>qqW;A@p43QZR!C0@*I3`yq+1;5k(fBbJoj46 z3i2YmWHnP42j)d=s_8jorTb8>SLyQlDrdFr)Gmu)g#Lc?pMBgBeY>oEC3bm68TVZ#^CC`yM-IJJlct0fGqi zBqhcdCz`>R08^~|VRQ=}So5zIrEsn)+lG?Nz*lL>@RH@s zNtHg#Bj8%G9#Ux!iuR{XrYZ>P2is(HFg3tig}hRwD;eU6nZjIizeKsjr2@>8G$ac^ zdSK+z&kr91R{ogO$R^#rC}|cf?HpRT{ih<jVG&hN%H*ov;iA^|01_8B^TGg zmN}j%OW!wHU>kI3az4F`t>%5?a6}x5>&0Z^ae*k#)7(|h?JU*A%Y1~FajY>neqS4x z;a6U9+TRy}4R@fgXXQ>H9XUd4lvh7F)+f}lzB)Pnsjyj-FFsU1JGzV#$* z?rir6Sqs;Pu_=cZ1nCvd2bp(z{)vu3aqZQ9y{%t0PZHO&7N?BS2 zKD3FdT?z58J#-1ycRf;P#r0}0L^p7{;E}sLQ?dfxc3p@eFs^btF<7V0)ZU&(#5Yx8 z{@kK95~2jD&#rO0Hz#AE4W>Tig;2nTJJpvb!!@T`uiZ-|G+5! 
z5E1RYKx$mnq~&t4peo6>PB0hiX$)QOs)KzIlE_b zG}0$LSHPGYC;-fvC9CbYO#ggOpXhKSj&ri~tP+%~6MPWqLy66%RyI*h#%?m23-$fa zvjEcTyqD@wJwl#2lxS110ABF;-3E;ALtWk0Hl=oHXOy zFLApN$(GtZ+sks2NXoumEfokubl}={Kdqa}zm*xmRJUhN+pjYCm0GmQAf}ji=7JNK zb(Z8qxfNFyc&35hxeO{SpqoaoXI7k1ql$J4#1}zja@FT>exOA#DE$tpi9%}+5}A)q zwx32?k=ed-kQ=zk-F^X{(mRQ#HNga~7-rzGisOMh!#Z2f3EHqx%V6Q4@ zhqqztbj17R5C6Z88KsqN#<7xzh6lFEd8huWlk~~72&m<>`*LrR%>9Gz>gB$ESi23J zE7>GwNhu~lWb#@Jb3!7Q%b?7r88*SqKQGYhg>%E3PAdy)Q5b7Wla_))H`QcL99M%$ zw@OrG9Wy(7Y4S!G++nUx2ZmtnA?<48A)0ii@>_%}YFzOK| zcX&>++It%Sb+Ss3Ds-tgVMH!^x}mSSEYPcpoWp`inVm4aD96F+n3%b*p1f3zDtGxJ z6m8xBnU8(OH(l4mY{^ALbJ$yLx&U#aHsbMaRrS`S70=7tXmii^Le14(#-n#XVl7w@ z88Vy33*bcZJN<^I6>9EeC8RM+&qZqrQ?U&Q(oZbRqKqc6o&=d8TBv1Ix{QqWhkCHG ziVAGD$^go2c{p3it*Xy7P{OCsjn*%E>vA0AG!0EOJOU$FIoXvvK;l-$2drf4qBD)N z)!N#RispHYo}Fo8|L|}9bU;;eO@_!EpDE(WZhJbpzpB^9AL>}Fssmv>g-Win=Hgti zL5SvP)^5aDBEJ4C4doOy3Yl#&0dBYJw80%baI+5$r8VgYi)(XTo#kj=*lQ;PCk|#OJiF4US!q02N5oRU4k%We*o?bU}cl<>2 zq)T{2{=u!Hw?kX5;lp2df@7Egsj+$q)EK(V&p$hA?HK!JnQ?RzMt-Zeo_zWK0>8uI zsdZ}VVPF*5PEp$qjH1jB_T1-)24(o~xWTP1n?f=y;^7LT;s|S+TMz z5{7r9q2zbi$J42tI=|tj2Vv3c&H?5iPy% zN>y4aSuRK%3YDzZ9{o0?wh;4fu@1b%zmOLZh|))FlFnBW=7=P3{1vGmZv-btZRj6~ zu~9&0X+L}Kl72~A{&JHv`Zn?6hCHyO4U3%`Z~`CHk_;QfUCi$5T}#T((?`laK29_W@}j52FtZZFn~i&;^@UM{TUD49ni zO7JF~V&(z;6n<}i>bsLit(p&i^~;@>c@k-hXFujUsZ>ey80@i#%Q*5oVc21KINsDf zAL@sTU-CQ$jHHkQ_pe>}F6#a&$5r3XJ))W|*l1QEok{%KR2?5|`OnnOABIOWdc4%k zY|GF0H+K6auxB3|g{Up9gzs%Y7jGLu;k4EP3-_!uJVF5IM^ArbZh{w=1Y=2% z`g*O67WT(r+wZg&(!70kh z*rowPluLpGr|^q4&__Xssu-jRoVQt5c`~s1;;8Sot(jn1>F~<8cEs&1#JHDHf79ah z?Xlz+Jjtm-PP&_mpByTtpILs&GV%wB=*=opJp1XbeCOzkkfgW2b_1SzOkc>Cbh}V}n6FaeuhGa?3;LrpCB%lavCr zN6m?|Erq5}W~QpG8}b6v3`JypM|Bc0F_KA3_k)i^0Y$iNZ73#hli}oGCvlhND`o@_ zuNNvfKoZVhgruI4D7L@L=rFu9?v%h*nhZ%y0>u47ym??Ke^2@o`o}6ax}Pt*#EHYh z9VjW4og3UM8ja!Yp7uU0&#&=#E)4oA70ZQ{u1xxBKg~8H=g}WTwJ|3PbBA0!hH3AI zftOmtDT*p}x+w=o--DfXwTGmjv%eTvn4`2-QkHOi47PpkK^dca8sqQ5K}F*oU1TfU 
zf0Zz96TbN64*$B<+#~d0?|An1+_xw#Th-fp1k9EI`9h48(|9+y=2f+5|?yS%rF%gOT+);>D2uHGA-NbU?tkkYBp zoJUkd*etQ#upB>M9y?fPZjawQ;T~^uU(7GVySD3x4xIPxXt+TWet%7(` z(II@U+ZX6gwlt{$HRZw4N-JkwtP4-=?-Hx5PiqjipE7&#(x!-@3=rCOL(aa{rzKms zWb_uSV#XyvL$dvtmEDRHi1yP0eBnLXGH%}A$+X2otOh0X*7Hy|HYM|S^8qbeY*_`E z_VcV)z&AiZ8smepeobTkEQ3eN3bX2BYFSbNS*Z<9dPg7PX*_3R6UR$67RNiN{>??g zC-x8)T$DHya`jP?&_>?J61nY29%NxKJ{nTalY~}7{@9)?`z=6bJL^x)+)}LE7l^k< zL~0-s(#syAW{N^?*Uzo&as`1|wk%!n4MAsi@!dh2$*Hg`rb6fZpo9HFuf3#(HH-&d z$;$-`17{&W)dc0uxk%;sG8~R0mI zmMsn@uPT;O>lV_@yC0t^(OFH^f_q3e{j)VbiYYlw*O9lGPcL}gB+sQUpY}SKN?hBV zs_&XF{8${yl6yYo^$nDkxARPX!gkXbU{YTuD!4h%0>6QJMs|lQq+84tArq23Q(m2t z9!hi#*c+vCkyIgxe})$L6$o#VuK7{BW_j#h`isloPZ)QE0e8V5f-wXD{#x%tr3RcKdHBI%n zE#DxdOB9jCEQq*&==fPF_xyR`>`y4bFqty;HN{!g6U9CjDgxxOnG}HdjzdhGj2#Lj zx6CM&*t}tLag*4i-aIB#w=vvTfL;6Q~N}dvV%_QiOsaBKB11TDq zwB}r(f8|G-`t`25tX_t_=l(k{GUCviJLQ%}Es*lC`waBwCmS_G6COj7g<)axeM&d} zNjV}HA~Ctw2yZ_BBJ(bp>@rHe!2s^R$#Bn;9sJ`vR|=B590ZxQ*n-H9zN{-hkCj|NdazUc6!wTSrf*z&rHCD!;cnW7QUP2{!PSWH!xGTr}vo?mog-6I}%FOI7v)Jlp);^`OoX| z)2s;ol8Sw7ZmQ%|ZH-o%_se34K>`=zd7)xN^;p8oD%%ouX-2tqvj%=`Np};40md{4 zU=PyUSN0n&<{~Ezk}%NEHRt9)P2d%%8y=A|h0)@$>&U7$dkkz5wxr}w6KdSzTPcJ( zv2Nm??A9J>5kH7Uf)l5^@>_W5%6K!!RT-~8Jx}O^pp4|9Qe$ThE_1&b{;?8Tctg3fLzY-Aqko~9jiX}}~zx1B;JICkW*~AfB z`*KanDjKaTN>eL6-4CbS(h$cT1^)&TD3EcCJCbmbvAx#d}BAobVJ(eL&dfS!BaN9_(sdzV1gGU~iAC~-Z?j8R2%X*Po{JVfWIoxm*h|2*#RdB?I-_Gb^w zWw`Qj5@Mbf>P9^&Ym&Ri?{zxnw${Uy>`(Kv8}air z(DuYb$Iu&7Oo&H=hP6>zHmQo|{(g_{iQ@C>Wl2KfUNI_$WOMh$E4$n4fO1#bF22ki zyQVp(N*GecZg3{^Z9qu@XMdIfQb7dMiJ&$1E2vdyH>=gZC~$o+a$B_7%2WD$ag-cc zlcp#;Z7)zh3d!q!U(V7n(n#)aZ!yGe-0sov#yGX?hnZ-|&Do3J;eJ*t#We9_YHG3& z&+gqWC6z5s%UuCI0N?o4`jLOHgV^2x1@TW z3NScyLE59HfySTsVX*JXEia8Pbn;QRL`Oe;n&hC3s}OtwS#P*38&`aiW@QSSD=tF~ zWnL=PwE02(3fVXxFn#^}lfvkxMWJ-IXr=yoQ-iSm^dOMIs?{vwR){6H^^FBduamDq zVosW2G^wxlgr{lJH}zs)4uuGK(6O(H+?is;VPIw3!Mis=hH>TLW}reM7d_SPUc*rV z7Xw6muDKcHTdOipfhb^lp<6813Et;+Q?Xl2<4^V(uw5iAoh>}Xjv1m7s?sfr5*M*a>4O)`Z#5ck&Lmhz9qiuFRV-*jax}I*QborTD_$9u9dp8 
z{^(mIn<%yRpkhJ6asau@G_jIXxnMyHcbFDR@+6&ouP0YoIx5*Uhge_8Aav`0p zj`G~}B}DU&R+^P*>imFltshnD+S|=Lrv|9Fz^;jc{05tzugGE zx_)*5f=M?O(e?VRz#P-I=$54Af~$RLQe6o^aTR&TgRtwFvyrT|$`Eoo3a=^o5Yr|F zc@Nx-0SU4oWK=Y#pHyrxlhEz{c$8%4NQwcXbQq)I%|6++MAFQY>$Tg)lnC_gU{ftevw=nBBc%o!(0Qj(wZ^MbZB`FA}7UpiNbc9^dwGl zcPk^W#XH^%yiFNVVwo-2nthFi3N*pL_6PE#^=E2SQ zml=uEAhm0!U!5TDrnMcQs+WRqs}%12j-#t(ja40j>4PVGrqKnGK=ls%mjTT0sB<+O zo*eYHOC@nJPsfk8($9+6e+CkgA&Gt9w8lwsjzv{ijrLoO`Ud+n#7Y7NJGV`v_uC|g zq`gjFu8x}HszhmI;(W#a>CLzm$hPG9y1Q3=T~Ttq(iG7HkK) z#c4x9{Lt&y6l)?;_Jz_;pBAeI_p_by+rV86Z%{rBVRncWYRz3alubNy}c2;FY zPvcTuOhlgW*?|q6y#u$;WUFIk^(}xB_(gQ~AW}6PvBb|0D4(JDbIzoasi~*2k&1Lb z&7p|V?#-Zcp9^CAOEUR;f5dz(nlFwwbzVN>)fvdC_`cdWZGAAG5cZtCdZLK?+r*3wIpi z?r73ot*cd|c%S=3UM(cfR9^alqBFwobk64dpeeSlq}mMD1Eq8K?Eiaj;ADcmADdAi z(<_C+ex|PgIF_NhFi5O9YvVn)(dd+#|3Uw$$uh2S5hMXj#CFSv(jqu#lVMHc{<-pY z+4Pg34ny}Ae$TlhXCz9U6tf$CS!4|s+I;eTvydHiA(Nbe+^&*B41Xse_*+Hw38i4V zI9&q0-+*9gTE5{JEy-;{nK7lvc>1S&cSR&df=AUL_DPX2C#KG3mXx|7Ke%G5%&n`& zb9X(^Dtw%^8oqP#E=ud^@|riV0mhtRyx+&PI<)?#P2qYbr^54g4da0CBV+ABY#L20D=IYo zQibbFW04>p{|1Oqp52app}l7FYWCbwL7MX@@MyjXd}Tv8{3jK2Y!7qn>fbqXg7-t5 zv52+xQ3v>GAE{Brj_$x+_4U%e!b2UMVOWpHhJnU+)@iMJi{9$g`P9PN zzRtKcT|vdo15~*+{zB_SE;1%Gl2Ge|gXuI+ zhx(UgiZY;=*Y5RDBb2WB9!y)e)cPvsawW&pReRYu=Z3L4Ax*67c6r{tfyKA_ryZekPG7_$R7LjiV@`6LB!(k#Rn3KTYDMw4HcG&Pp%Op}buZhlPp}6fXDvCuKArE_;8IYhB&vCye zhCAW~AI{=CUt8AhcWBg3GH7ov69zZo4>wC0<-%(HxF6iI19mQqWC1%2fxUEw;tv6q z%hJRhs6CynX98XpQMe5;Mk{$vt9>3b=mDaoqn>WvuveXf8vs z$7tTSE%-9C@Ns$m0F+YMy2^2x$a3;Q)$Q(YMl<$g-lNwNwub4HM(?JiOg9X@;a!+X zY4X^Ay9q{~qn+l!X#qZmcg1($iAb%fer@&403H$e^tFi=sa!Q`KngbqU02(VyUR11^;` ze&&t+&$9rD^Uaq$5j+B<=Z%?Pn3$OM9-{bYH)HorbttJqae*ZqdnX&IO*d!-^5@om z>CiXuaI9P)_g_%G`LO`6mol64x{lj`m9qY&fdmXA?gs_kBX_N8vmR-^9<-YL_}yFM zPoFJmEstj~zJG0L=DhDeJ6R`&-=7FJDnnY!ttyzqgZc`Q=+`r@-LL#PTy% z`qOsU?)rq>PbX46RTmcHJbr_R597gTb7S?Lj**Tr#@jOu)#*ukpI8e2Qjp&b*c^Ob zxwW-oXHHAoB``g8ayGF4dHH66brG{pGGE*wmqr`ps}EE3l9=J&Cr73|XN&3VCp1k| zF(Gx4s<&+-0v&xu4~%7G>kyQ*w#?TK;qeXm!b%wO;iRky2XSt9 
zihd=3HJ6_Ra<^k`=eItNA;Zz}FWPbI90f8aKRxyW5S@Dx^l*VY6Ypr{PK&iJf)ptk z^>q3kJtl2DJ3gmRy?@_=HZ<#SvdWRPJwS)Z-UVMU(e`Pt^Ff2D`9~bL4{Vk$^nirT zO>dTyiHRS(2CDu1`Ex*-#g7GZNXB2+p<6MQKf-x~9HdZ5s~YP;p>1^m&Nzw31Dpi& z?*g+Z=f%|(x1U$!Qpi`_L&;paLX+pj2miX>c|oy!j%&ef_iu@7ped6HS>E&gj8k>r}MlLB6K|4`|Xa7hhzx`_lU|hOfuP0|T*o-2GEkd#~rc8(r%CqcP7aO5KF#S#g&9aN*!T z0klgrJKTtpJ5BztMZegKS)d&=U5s=y6>GWfOWdtk&^&q$ms9?)Q6v5tRkZRw-&t9C ztgwmy3t39+iKC%dcv>ZIrV!&7BJIo}N@>BYh*8=y-1jy#!@Ov>CR23oQ+cUtgss)s z8-!iX4o;&-O*idJ4yt55xX59Gj)Z zOZ5I;1^K<6F+(u>@B6zc&39gPtUrS0Ng}F=K8^FH5l;MVBo!w;tiJq{U2oHGE%3vx ze+dmEx8M(%K3k-fYH0pYD=qoqFfvKybbN4#jje}>5KibudK9HzI!}0o7k%tX6o5%R z_`6+;Y`-#WNrc>Pws`y6>!ZJZVuF%y0=!ifb_i_1fk()^NT4kkgSGqCkU!GxL*Kbc zTfZ&-Yat;wMVD`LAB*MqvV7Cl^at#atMu_>tvK@D#1opX3(L^FKy#Ao09x2QB)W=F zcXipGod6xm-NfYEckTuFf>p5qW|uz$)|4Kwo$rMCCH-BpL@B!J=Fq+d3H-AEKo80 z_^qmN-0cO-dy!`w$ zmXy$9j2`R!-1@B%eWox;!eQhCSBt*ZfEO0DUA$s9mHkStY?Y#RP+@d6kSnO`zIR48 z8|(*F==Yr;<@u`2eY@uwD#p@6WPQZl>b;c& zi4@n@gr5GATkF)u(YI2Pe-EINUp%HY22XnH5=LG*hWy_sP-}u7svy7YWWtP4D%3S_ z(^0qkV;EZfRJnHI16HIAWZIbyePKvNtFm`U92oU1FpBJ96M$!H^h!H5_fG{*0|OBx z`*$0^H^(Ik!_Ao~-*L+$+}R~SDI2G-mBSF~Npfvqv_Z9h$!jGVljFkjYr@fca{R(K!BQq%P;XdgGLm2~S z@B1?I^B>f}{0pz6$X`8us<~0~P4LmQNw$(v|8oB4m!%uQO{ZG+_)^s;aC;MczY!A|iH1Nmb9AX}fR$IJd|ZFg*BNC;)6Qej0>j_G+)1)L38TWGp{nE{ zxa9YBCeo@PgBxmze`k&7h#k=+cl|rauw%_9m8_Iz@tp>j3lafRJv&no2RnlJ{Hd4A zg976Zw&^|9mo5NQ|43`^hbL}xy%FKlBoxQlyrq`nb zB#{h$?jBgdho*jcO}o+k*vJ-daVIc=5wiA={Pt7-h+A34#{}JkhAHd_zC(dxV)is5ine|x62@dBKTJ4XUiFtmY?Voh z((fCe5E>lFH>1})dp~z8c_PW?-rJ_$Tl4hf2jc4!q>SRI_Us#2)L%h6#Nb(ysax_|`i-f%8@PJTh_$(RN^1em-rPK}d4z%Zvhj z)2m9$&>#p=x>9@h$Eh$%<%e8kUmK4+@SXj!C2+Yld+HKv z%)feEFC=&@O!>X2mOlbjv<_h`@0$0W^_V3veLz5i)1Ku{{1ok&Sbl?T06`B%jF>K$ zFbPpWcpcOAHg!Yg8kB)>6nY`z1(%!53J7W*@>U(ZzG$KTIlfmQ2{eKLvE z%3qH3bXwm?ddW@Zb4vyVP4Kqfmh2DUn8t*km3J+dQ#JjaVtCWb-OG`r*?1a&07kzUcuIV}#ME!k^SZBB;TzB}-T?FNa^9vYxcKzRd z533G-;YCFp=sNH0%qXz?NE0k!PX0ejc;bld(-b}nJtRsW=9$i_U@m6U4HeTPZ9`-l 
zJNYU@m$DUB-9@u3^lQv&P)f?mJl5uuC5M9Cp;fW>R7b&y@ZgE%f!XQlv9F?=HJglA z=kPCT)Opo-uFSLZOc1YStP;^igM~<nPiMUSP!sDlw|- zBuUk>Ozw{I;~Y}_VSt=)GCqM|^*I6i`*WEaBc2Kmr}7hR#2E`FKGkVaSN0E%Ybrjn zwmg!^WTjnR8ekG9|M~iiut#983qxAmR)|F3Pl~$5L^p2!Ezo&F?ceP@RA2(U9nj#= zLO&->Errw`=MRhuFLO~oaWIH$>KuGu9JL9`PL!JXO%cT(evw?*t=R*Qbl3m;(JZNy z?XWIX11XZ3PTuevt&yT{ENN#fB-PHZU_Y9AisnANgw?lHU4H&h`nccBJSt@@Xk9A9 zW9%u0i~iHpt)o>4o|})5!&il4AjIGB=8is1ZKTF#Wk$xy3D+-DfYpQNE%&5!~UXb9?N&{ zXdhjP0-5S`Bah|G-l3$+fd0p;|MvvA;)7=NRIBS<@Jz9Kd3`N3k@<$l7V|Bmja5uwSTUG4zEoW-#s%8<6qnp{PKDU#D>B-~PD|K~$#$z89EPn~e`$Omi} z&ROydL=voYDDL;NtCOBMKDUpSMML3|m{1cQxycE6tdAV^=&noQubetR3VB~;bv2$) zj1hNR-P?~_E4go~eKt0BXv^h+r!GhS7Z=oonqpN!cpesgAF*$mmKD`<45v9%WbYLz zqfhG&VoD9O4YBSz)==oxRSt(MfqVwZqs_RSO1ML+sP_>KVo4j^yixo|X`>v+SL;MXS}u(PEc#y$ZlBfllGp<@tyUh9pZyR^HO` z;+)Xn*Zk1%al(ihKxCFs}uuU!K{phk#-|Iq=b z>^jqtv7r}5yZ5Q^4+*&-w2ZP$I%J7*<}u_kR8&E4Ie|rPu5#S$VbSp6u}26`PrBHi z$mH*Z9?lISe5705`z<9xL#jdxWW4J?GLFo0w0`L`>-343&=Il0QbvGJQ%=74ZopuX z$e1-0i4AjmV?A7-BiyvrN$~F)1_9l|!xm*$bvJ2ml{=+Jgc)w8}>`9^xI^v@Ct4@P&rr}}urKTp+2OAq_9&e4c8>%# z9(T0?<@;t^)=TXttm?dLI>NB(N|uD`TeafB!600p@MkKX^VI?|MZ!Y^`EM`IvgOg_ zCb&@2HAn_?U*Dt>BB8gwgTHlUhZ(y#Tt;wAU+f6(9Yh{)sNw{Ph*2$*b^-zZ4y;>lYTW(NW9p$_^g? 
zecTcb#OPUU$pDk^bGE_nYa~)~hy8N-&2?{|Zm$Ur|M@zCU}0-hGY}eW4J%=I1+DW+ z6%&AKLdri)cs8gjU+kAU%)C*5t6q7>hpC!!dND>j*iG^{^W-mQYr4S2>8Z-QsbH^sy_z}at|Nnl% zF*d)HPvvLLij?)6^hGuCeJ(`XVq6B3dnjm;ms9lbL;*N$%+5Rbzy8M4`2SDkFn0$uRP*;n+>%}#^ zYh#aTRon34>8-`Yq1&}dy{x$W0NRnN`_JC;a4)6yPjf*iPxE^&0i_H_S|s2+B>|@v|{~VwI5}j z3Gk->WF&=$Hn0E11z2wl43y2yr;sZM<7E4C;(JVLp$8d`3-YulQPU@Ui*Ud=;@SAZT_eC1q~MMYuL(;~!f3yPflBuJmy+U}#D zYi#IppvWeP9&h{}A9n-s#Jg4PROAyWL}w{VH>K#f;>Z%;6Ul=ORwcOpV*J}{=PBX) zE55!vA6pw&a4B5udcX$UF^`a@mY^jtzCL;K-KaemG}l8QB_-YY^T%wWfD~$HLracR z2#-C3=jV%(udK{oanYYrnteO`b$ht@8dH*F=6%reDp4U5 z`=k~ecUlnrccs6kra))Ig}DGE+odL6GM4xpEiElMT7uOnt@@v{v*}AHIr6mH_4Z}o z@XXao;5Z7OQUNjiHc|17p}1bw*qWZA3~eYnzcws>7xHC86RgGJ-&(kkczO5v52d&2 zY-S85C?eDTbm1;UN>dZk^MQ7eHFkP5K9|t=-I6aar88JiNR&N4CyR#a12h0x4216H z_MEa#RO>qjv(XIjAXPo?LTA?-V1i3{1YC(A&vqm=vc__!eaqZDJT%_CaT*;Ob-ULd zlZGg8L+j=&?vszdhsoO~24;B`_`-e?Dz4a6*1;qF!hexIO4HBeG#LvrIHxorLNbU>u9UTJfy9Wnt zy`}g9?X}z7L=RS0@`kfT9DJX?b=Lq}S4|tCwsJ;jj+N8uBn{Aj6wn%LO^+(#zP_DPeGu&`3d!b`IHyK$@F-$$&!O`Sd?F z%DA=;&@E;tfByXWoS(nik~T9nW$><|J18jVl;YAP!3#8%1kKL|71>+meSCx_tWqv6 z+%vO0)C~6_kXI1^9g3|dfErWO#FGa64Z_aObFljo zTK}Mt(mx&vJa1>`Q(@tHOPXBa#1IC7Y1OP(iG_u(^}{urJW@5H>r?~-(@3hbbK4?>FX9yp&|b1A8TIMw`Quiy_~M+y}Y)- z(3zpA!*zfw1c)Tu4r$uobQ8jwzORd3%<|D^YpJXM0N1j8I~VNcYZZ`8%}(ZyAAU`5 zon`@VeDA}*ichATOo?MUkxUOhMB|GboD_}h*TKs8IwBGhrQ}txuBQ9RZe-guXxSOT((MZY2?nA4ru}Y?|6RR8|W&juU zYCFJZx8anKDej-AQXrxoRqGAp40rKE&(7etnp&5WHY0V6?VMq^h5J?fj7Q)*IN~`& zAB9h(sM^v3IGuTsOhYR|IRj$z4X%1dC0agl{*}ZtU*u?NYK`>Wz=}tV|NEmaBxa~` zPdpDDV`tnikNz$vlM8bSw{bSqvE&-{_kP@5Z2!Q<{0*ZDMVJ?t7(XM4&5hVxN(xC? 
zS;!N(8-t(wy1Kgjo<~R&o-3V-&0RJtdv$fSwUy>Wr?$(tc%X|`a*H7RX^=V7*CA~%z%O^Q&a$}++m+S3a4gvPR`{MIdiqt<({yx4WN-%1TaKdhg z&$r|l2=Hr+yOTrx{aj&kjypwxI0VP@sSVf1=-$9Okjc@GLFXsmk2xjQ6WdslLb^>u zO{@MwotIJ)J_`;c$n*uI`gTO`c^VO|{d{c{=mlrc^HI4Cv^3GA{wJzR!C#4K=M6=7 z#w}3K+#2Lek&lx|agflGU>W|FbJHYYdARG7yNXD~?`K673nP5JUZPI45&k77Z_F-6 zQQJsQ);NAh{>Qo{CC-to4Q{g#ibAnE$x4Zj&s9zdq3_L1n#=B*x^Tc>tq;+WN|sOi zF$G($?1>Md=SDy6#+K(7K^UT?#jrcQG)m^sBB83vx9XUlnc1@l8kVGhZcbP0u;JzV z44znvs!_volvDH4Tlf*2`r%9#Z692X6o0MZ^lU1ak{~y!)RT8#ChL zaRSjVg^ED{@LXtl)UcwdDbsD&zNEbTAloKJws7u-g_U));{Fd4XiLd$tal-Hzb;nk zvaRBOcu7G+8uxQ~dGum`!P-Vho*MFkAga>)ob;9bqNl>xaZx7QA+try+n2m^iirxJ zX7VHZ&rHj5@P!mCGr|7jxSnz!N|}i5TENCc9<_B&`#~O#Hp#9!5+F5PnPs9ZMxM_N z*r86s2X8WbRwI&>=*6IqoUE)Ox3;#JjJ{e0eKOrqZfI#~retU*Uq=Z<$kiHd5l3yW z?mlUp>e<-rK3>b2eZ$D%XjEiuV4e7IcfnWzy^i&!%KkJIR2XtQaO!iN+^m?9mPKD? zH%d*NJjl<+-p1X%a>Rfuf`WqL33f`S)f?Mm$&Gk_f1ihUA7N!Rg3{u2E5dU10UkiGhqoJ?5!`EzJ-@pB#? z9{6d1=mgR6Xa#j|u`yYi8eFfsa0}c!2R+JaRyX6HE8EVU2sbtgZ0qYeI5=GR7fvwp z^74`kdpiDD7xg@P0UBgW7ry!X_pj$x#vy(bQI)ZjNKa2sZGAmFG6)H&DxD{Do+&rb z+Mjj)oyu+``oFus?nMFmss81%iq7zsFj$kd&aYlv1b*YM)Qrxi~H-nSYQ>W~N1x69CSYRD-D9I_;7AnZ8_nt}#hPg>y z8D?@Y-B0zBT@C-#N0FPnnpU|IYh>v=>TCC=$vbqpx?tYl+l^g)On%{kbMn9g?gF*D zi@3!6_>;j%&7~lT7_oIBVYuLeL3q0n@XfS}1Rm~!c7ZQycpFMflR`&tVrZdH@kH$VK7;)YIUZ-4h}Y&~Xt zNGEq(l~mO93U%-e;G=ZKUVHo;2m7(7ao(mLE-Up0sW7JZTy&(q5Xw>6=77gV(h<31 z`EZ(G>ANygkEZJCm(3rq^e+yUFwLMeQQMaz|5FQ44}z;^76PV%GCN=7_nMkh4U?AP zvD?kCs!E6bdAId)4Rv*A1&tD|GToli3VZX*gC&qbga1B+Fh7q&5Jlv)(T^WbE|An} z$4@>gx?K8%md&u`>KnNh+t{2)jk){yu9~#j->PW~Sn0qo&duQNd!t*zgoiN$;iJ5J zOtK2xt`47;+R{WJ%I}5L$D#rC`>fu4{!!|_%YOSJ_S)Kg1wRr3NMQFD_172m?IH&) z;QSLaGc)(BZCHQ1dJndB4x=}<_^r@B95`H#yZe$5a=7M_u0nh07V8RPcZXaWeRP=0 zk-q-w6AR~26KW4^zZxOCo`3kTsvSVS-XYe$(DTQN3OAq$gS~7O(2_GrHdvI|BwIas z9>(M@Z=ReJuMg*j64OBB5i0HU)@2OD7#KW$c=`GHkqbD7g0L}Z;IW8^$oh|=i3ufU zh$`vI$CMPM*=66O6^RcYK2#?NLFC~$C6#q7@07$e;qpWJw?pQQHR1DL$;4PoLAIL* z86qO=X28}|Yqn?ciMbARg#gTC1>!_%$fu?zZ+)4H>@33wYU;^^x{K&m;#6kKLFA~- 
zQn9Xz>C39~_B+vs^s{~^Oh&Jjm_mDPq5r=@yz7!M zkSIXfvV;^cuRD&?3TBh*U`b`?r}&NG!H8Y3)};ZfpIuH^?nwd2537{-6Lb-YiCKl zBPKf1+wW`44_jH^7ySb9*}Gecn00jDaLh5c{c#S)5xc3(6Z0?s7$O(JQ66%5&626r zVI1me;ge=wWE zMGT~FcZf642ih!k{QQnTms>yF`w?TQjczw?=hBelsFDnxoK3<;?tNGgtQ{Qk)F9{E z0r%UW<8Piv%-I~kynWMU8T%voYUz&r!sV7U;&O7^bYeIVPRk37*4bTGH#dTqo>8xb z8a)*K?^P7}A3tiQ=?}mW?}>tP>kIrqyisTYXulml2s*CieaRZg17 z?aq_>nsmTyAjO+rMR+vvPYXq(^Cv=mS5v)`t5ZP~m6_>2zPKpNCcn44yDkK~W(Ft9 z){w&mk*_rAiFTqg0k}IShce8ou1+yZn(IllB?>F>pdc7Yun}|S{P5waW!I0)%;URp z;GQIGb}2x_+Z)ENS8s2=0PP)`H+<<_7N@KpF`W2`yIvMpqoaSYKKYuuptv|3G)gmH z96Mi62uQv;Sde#d;T92Dnt#d)b|lR<8YSuxo+DO-YW&fm(QIEQVHZCls@T4Zi!%Dd zY5luukk{CF^2d1RI`!u*UVD30ILxn!PH*9A!zMz3eOSpfFJPYR+aV5tC zb>M!FkB_egz=ZDg!u2++S!fw#s&vK?G{wVDNKl!UfVq@@Ha0WmAWl5# zK*Hs+RPlP1i14MnyzR@MR(9W2%ofm)yyfqocL;++%y5cEKw#BYa|%STDtT!`U}GSy z@Mh8nt(P%#%dy+K3ylEa9v4!!N>W48E1wI2kKHLWG6!vqZ5O7~_m>(hMo16ZZgN%6 z7Nhi;C#-my>jVg*%#0!ypuFk^1|uNSI^S~xu~8qmc3p2}YfFCV9Vs2n_8|)j2fU4p z^4Y=qeH@u**EIpw>v2mWB~sFPJ*Svgc}9&P?B_$$Y*F|CQtQP}nvS;ol zs+dqOJ4{=mRygggt~n@8(aJQ%0{?Zpw2*T}a+F^!oY0(>wr*r1a8<0F)f@KMGEWqC z4Rqr=SuBI!2@1%z;Hd}z?6$F<*x010>ouQc*9!=32*EhnYN4kF5BdqlN<5^kJGn8` zmd)eRTMW5Z)4civo~*2_v;^p2sDtP#=wUho_e2e>cT-cEGv4q58Lkkp9uuq#+!UCd z@Q+lFCXOH_y0w-h{rf#dX6-kcS5ZFHhVp|oETt?PoeM9${!HO=mP^^l#cIJ0SL=6) zP1t-sgZxT%g6YaWQfuvVvccM_Bi74h?a`A#$u%a;kQgQ8G;Y@&U4Eyv#|S+=gs9x0 zzaMae`D$0+Diq*U85?qxhSIp3m6hmda>er>2-CXJry+2-R`s`uI!TLDdgJU4*&t%9 zJa-5;D`C>f`El%OV=>!7Mb=PD0f)8G8=#CId9shLN@o%RZH~%70yk3b@H;)^T_huy zrj<}7I5sr?0n^Rm8SHd>k^ zlY}^7qc)*mrd4cUu_5V7po*BuRU8pBIzd+C5tphuR;NAJj zr{6k*r!|_?)_GxYp!mX0mh9v_ZD34jf>+jg3u&pxhyFz zQX0g)U<2Zgr-L*WeN2(9RfE9RF+9QGza1)H=b#Ha>3-6DFhxqKEjxuS~@*&p5-7fpdsGj zbak9-e{Z2D)Ejv2`)zyUxG&E@QpZ1X!^|(UHnn2$O%FIsP}lKULp0#Qo%nJ|8=_MJ zU5tv!z;~$C}!%k;;+_yUhJj7K2yh^Y-|zwNv=PlUJ-W=l(-DLOhPYU$!u)UcTfn54*w|Qo zfEeQQg;G}(AJ#=gg*ibrha_SbVOCnrH58e4GWk;CCug|l;5zQTJFeKg7rDNaj&URd zWowiwJ5%eo$wC=vizc;>)g`IB+pT1wXRo~zFB&YT^mDy8==EoIh5&63SfZ)#b#Jw_ z2ryJyn0t$V+%&9KFGv=MKL%>0TU!!sg?eg#qBiE-SX5G(8 
z7*o%~<8+8m6Hkqj9Ur1d;D|+efpt26BZ2@bxxP>*2&JX28RQ#%xw$-CKc6uwDJ%?e zHXS!%N=H~yu#771d4k#9vy3eC?s^1EOx#1#H$5ZPEcHE_l4StprLo%-6Ztz6-6h11}+41 zYvZ_1h?u~2B;~6tdGZO1;uf(waU8rK&*ELr_o8)ne^)TAri`=I^7|HmqaogFFzc}>~=ix^_|w!J=hObBQU zDTy^&P*Lb%D5i8NPHu|D4>w3(4b(U`zv1_hn8M4VIZ6UdWRoc#emXzW5|f~&CT86c z1T5t+Gwaf45z5`0U+p8R{chDpV{{UVY?xI1D+kW%sr67?42o2D( zI3AV4GqI_u$*FIl&ClneDM~unZFjobeP0kXsAXbeGVY0f0`e^rGc#$ehJfQHhEce zHCXdafe61*J|4;8`Hrc>4T4HXr#3u&K9jZ$X_AzJg83z1zI`kB{F&CrP(cBmmp9E2 zvBBfe5CLo%aG7%t0GUeCs`KQ}S3=!?H%yuuua+A$S8wlwM3p=(*J{(hedId-D!0bF zK`LcS+j9w}9N?Mtcn(r5?S39ykJ)a8G(MX&-|3L)K<_J4 zAvHB+A7^;3b};LqYw+%hPiokcYv22=Yi$;-6LxVgub$9TYDhmFCBy#(kPpL01YDga zv;_I!JkQV1&(6;$rl)(sd=EA+R_f%<|1x{!XHY%vHb^51t6eTvqIvumSxy*q;o zIeKqK+jHSpa-_8bAO72CDjF6r2$GsGr-P;sPK)00w5L1j=+8UUpy)xz4R^PI=b^!$I~;%=40w zrjTMALqParu7xaz^fW+`4!zhnW!?Kg`Kiz&G_VvK@NvW@LG23@ zAFu1f7AuFersWijf)JT4F(SI=eh*r{rY9tJyNQpFN8jFuwI5c`a6ctIn6Hb0LPJv0 zv=}=Qtv7*fa8sCyoSXorL24=zypZ#ow-C=bew5|A%DlW5SO`4zcB05UI9KV46ZMNn z9)I9*lnadjyg*gYr|B38=g_13%1ckosKpJ@dMs-XfFdOY0f`7uORr*MiDla1EsDFS z-jn2Y?Wx5t|NX0ulHP*E{K7&_kO4qIsJJTGxxySA90#t7ibD;PAix7A3(mQMw4V9c z;s0VM(O15avW4Ki$EV#BM@L7U`tUPwMwjPr0jX;SnzaMWQEfR!0W=3dfjMI-;JM*! 
zh3<4#ZZFI4`@IJm`6msXmkLmeRQPKW|7gVDQ3zd1zRhw(=RYfbM z_V&sGBwLNR0zMT`wUzHB=)j?cb{T^lwHYwGTcbIcX2A`Xxz`jBd3?a^YUt}LflMJl z7hL=#>p`d#!xZg}UJRO=n($Q3-Q67^YVU8y9Rk1p;p-vl|zyKf*f^RJ3vjThivAzbXEgM9y3PT)TCp|KyQAYVn^Z<2#rltC-} znC&f>0(Kn4h~CzctNmsIP;;r&+($$0To)F?RZo%^ZJX}!J3zsr&hk0JrC*T zgk1YzLcr}yAaY z#m()4KJO4EdXPqY5eAwa932hU<^0~GQZW5TPf7znX7DOo*auA+u)X&u><_{F3-y+z zPvNOlkb?L8m@)Zq)5^D?Krcc*)9Hq7q+`U(I&|ra2_a8wP94ktT80aYkm`%zceG6h zrBVZixsm-TYb0HuFfPH}gIhQyI8f>{$8d$;2T_?us{tw2JQsfh$7(z7FPib9Jd#E(-f^QJ zvUhE&+2M0}FoF^Vtid|;$qb;u#qRqae133_wZWiJCCrc-qYBG+Lb+bBZI`XY&tQZZ zI^hhUXR{1$=2-EmAq9Jaz)_fO!B-jxVde2_a4iCaN8v>bL?agZL-djJp;N} zQ;_{||Az`icJe(|#hzeic#(N>D-VeAdfc!MDWZ?pja;S1gK9USbacMn3QPk`qF=9H zg9(!UC>`H4OXPTo){w#$lKIocUIQRVRO*(3F-yF;f9Uj=_;-KZLqcaiz1a`9&tyx{B5oc+o` z&9zUGY>j%nw~*5vu7Sr9>hfr@{af(u(b*lDBNHhZnSfVygow{YrNv05)0219H50&o z^lG9SMpscWCR($rX{XOw+yc_2VuK(^33+2S@iTCRd(f>a(OA<_ihg+Xz?~P|UCOvZ zGT#*8d3TqrjrG~jUdOPv5N*)7Xthp>}a|b|b)UsWyy)@8yXy08)_Z zMDK&howaFcheW;;Yy2o&eB4;d?%8R~kI#3%pVN{ek)@{MGob||v)g&?iXZ;YO{At? 
zBkdAvaY^yy%DG#*@@aP!$?w>>{k;ULXjFxd%WS;!jBe&h^ zS=#>iJDZr!%}}0zZ7tx#uK_%3z~2(s8o;M5J2!B|@muCelvWfc2hpYH>b6{`TV<}Q zj5S+3m(46JY zv)I%S3IIpGQ35r$k-fbI!qUxQJFZ!9z0hGxG6*bM&VI%K&jO5oc8MrvSwB$1WenT~m}IqcAj)ZMw@nZy3naW40Nc>0 zT~J>FhPS~}ssH1)0CBfBJ0Z>Tw0Xsm9*!}WffuEV=iCz!><6~lQ9tAme$(ga=s*s> zUVtA=J)3xAw8*@r`jq4`IVb|JE-Xu9FDo^!Vpnm0QK-G}UgA#}pDB|ipE#LPAjFbu}R|E54@*L-oaE1E; z2MTBeOVoaVm}|UFrHF-#0db?`&^tdTM7sX}K=8h6rlv|U_d#XZC#AztiV!KT|C zH-&%uETz0iWGJZ-nWci|@t9->xN(IDqI{~AFM0)$!=fniMSk~yDb8D{aeui_sypu^5yV9_v=wKmZ!%nIZ9}E9ku(Heg^6kz(dy=^q_)fB-l z530AdBpOvHo}~U;0{NEyY@Cp#y8XQ`)(N?9D6nglmEy=!86fz2F6OBb-&07&NjKG6 zT3R8{0}hNRS_~>#oJqC4(!-5wJGY6;05bZbxV2@Q-bowmJ1V`kyE)Qc`lbO2GF;us z1-q&mdj9btf`1QND@Yb!ld)jzNkuB#t4Kg0$YBd9Wnh4iY?Hsc!%wiZX>hW5NrF*C zgl#8cTk%0|9{wl-sm^2vO~O5_LaeaqOjXwOK!$0~@AJBm$vX4&BZN|;vu`GHJ72q0 zevRp}6G=ZYW(2E3qYB4Ir!|xgS9d*=p4C~3hw{v3Ix#uv^HMo!ThM^ZHJN0VMaEwW zRT;2%U;ha=XR61O%#I#1J~Gryobf=e?%{VpsW_)QQ=E<`BCCxZ z8ON11HTZBeauNTn%H%2$tjdhh>d(%mK>#;D*7%AdSx@Zn;~$V!z3Ji!PYsiFAp!J- z=}*SH_wO|D*eM5&ISERA@qbTCJ^2`@EyMk)rKYnvH8e3o#edEcOAxiyKNRA?C!accbeEj5VmmrzJ88MWHo12g% zi->Ul+E{#`^A-3(=Qqm#Bm;|#V6!oj1>51k8f8-4MmctJW1fjgI@~MSPuaDzI|fgHxrv{`&S zoe(QqX7LA7TY>S{dRp4Le*c~oX)Fbn^`^Ueu3=mLY*<~&-J6`SuiFIoke?b)Bq)=E zK6NoF3U9&+d!cmUi}gWo%Do!ci$R_zMG?Wbt2v}8li~{;A~Zqfr@fOxv3k0Z8k4%o zd`#$*#-CEw-b*n=WHv#VKm6ys8o*_uN8y+e`JOW{NvTRy8CTtHWv$O9q#vZu`l@Y} zb%V1?=^3h=ENP&|YC13pFFThkxFR9(m&(e-pFasC^YOlZwO;hzWd+%1jg4*M z$~2LuKL1k-;QCt~jK%^1Gxks_D+3>2{h@owyrHIM4@kpB!Hg6biGHjfb6BcSfZ~PU z%}ZUl0UUeOpDC$IC1UCR;`Bv9w{gNm)~Cbn19%=(G|zquS$Rs{%iQ83%`yy3V_MN#$ZDC;}D6LFE7p(96&UUFq|uLb9*oTZw)Rlo<$J{slV6B+}D(-Et2R2 z&yQ7&ezq+2P6ZEY>)c9FE!#Kl_?e6x<#5ktOFklv6P+#Gt5lw<8_k3?`r-i1Cfddt z`=VgjifOm6eCP+^Zc4`lS}_qz`m6A=`*$h%{CGPiMt%(3T!fW!;$c4=HQrQ3oSYc= zWkiS@Sr|JV7rTfqgS6q}4iGLYsHlkk_5?VsYxn%o$}#Q55`%O|B3KQmSH^L6EUmG5<PV$$o`$E(}3OC*#gO zD?2-OY`d_!`lR7}p9;^adwWQ<;{2aVW-|}Sx1ePgcOEpkKiY5o)WPu{$_IJFhT4j6 z^C`_kCyaF@_&3w--}8{rAFoI~?JNTulZ(Vs@Ju{8hR6-VvE` 
zd8fo29QAv{i0`biGYU=9MVO+>?w_)hd0859=jreuPQH!(v>Vekc>F9-+-Q2G5hG0U z@uN7vdv5s!1jG()ivGS7HG1XWo);Z00@en`dOZ(J;l+gS>Ip=A4O#uApw;Js@{rte zj&zi-g{A`(LKrC&Z;D3n{8-9|PdH3{h3QZi3hwgVc~02tuN(z79=}H8JbM;y(iJaz zZ#WTaEbBuvdQMmM!jeaXW^WC`pJ8W{w3e;U%LFO&NeHxFaoGF3rH4edKdf{7 z${K8`XgqkkTmxC@@=CEtL_Du9)!t2oONpXJWSi3UG2rk6Y%fr{prxsqQ(M;5wAC_` zoSi)kH0H90y)1;&}zF>1|VvSUsM94_hJ-)?(?XUN?XSoR{C4W41J(okeyHQl6=bQo*a zoKBR|&{3dyVVy;Cd)VLi@@(I*Il)Y9)E#CjQBcG;QnJ_%#zDfy>gw)7!og5?T0E=I zU<@zp0#1D$b;0Km+&UPF3>Or!s1ji^@w7HVz`T zw2)`d&>4S4GTVsCE^xe>7-?ZDWSXCPY&D2e_r~pW*6{!*SEM)^Cv77+O7n-{TQ8gE zQ1g)dexJDzCu0L&Y*<^~JrBwrjHD#MDj zmKI@vB^(lfLD3J1uWVPLs-YmW<5rQ;S}X9Le;+)tDNO;=&%! z+`L}TNO>Az9=CYudwl;?S(Qq3G)i{z=kgHjl9#ZGCn=e~cG2h1k43vO5m+yvM=0&E zu^bQoozHiGU6Rn%B_(_H-Eygk*sI`DNkja&~kqED&CYH6uVqxp>bUMYk*j>D*M)qfi=C>kQ7{hcYS^^CL2zs~(8mNbm zQc)t-4sS9A#(#x%rRZY;S^r5^b^ZJAo$I+aBXaTNhUqDH1_!-;H_kLcNeOR54vNQC z9P*Qq4<-CcH6MXT7Y(@Hh|Cz)9ZBCa;!Jx~H$#z%HsMCdwSgQZz^@{x{m`b9g2z?yR`hM*C9to(OvJysz2KP_5LalH`XhHNHlCt*%D^8_5(R%e zQRM(k$;dA$Sl=NyWuIc|IrMsEm^1dcp%53MF5F%gf8fYdD2GOc!j0U`kB!6Yc6_RJ zX)0q2jLfU+wKauUXQ7Qd!|YVqWM08kTouX?(RsO=z4v5A5%ZC5gIXL;_D#*rjBLx& zAP0!~l%;$39Oo1{W~1ikj?ni^>k?4lG73tpg@+pB(ovY zcK98K%8j5Dd^3Q|s z2p&Vy&?@l@6De~)2CH%C*6Rw?lCH#~Stsh$Er3}ETg3OaNoi3danO zs4G4E^NtalipKG!A1hKT7F-A;sg!>RY!C1d)v+$sx?ad?Xw_QG&tC1kz!#|BGqo9+ zN=`~od4&O>!Rp8TIM*|IFA#Gbsdp$YWLedKLiX3P%>*4e!ygTClY3*SeIZ<#-A5(7 z6Nt9)B$3L9TEZ44FS9N^oIZ=t=7ps}7L1R!?IKXUpZ#06ROVxXv1DsT_jjn)hjj&( z*-#5zSz$H}1&|b#Q*1j-Po~IO{=V{FgP4pte#O0i60DwI087S3i&l_zeYUyDe3k2o zN{K!P!FI3pOLCNP;=t+mc&%5AY|PgO@Wr7y8E-87^5VjlF_$71p~4KKrtj_CsC`4C zUhpdcg}k`)T!5)N!MQ-q)lCi_9xO+uDghPyWJ((Sjb`aBUtRLnfy8;Ou^W^_usDlO zD_+JXiYB^-xQdEqZsaF(vSYX&6;bz&=p%P_x3c^o!{x~Qz}!mZ*DXGu(x_1)eK(1~ zI@xfZL7j7(d{`-(oTzb^_^V(SYTAQKD!<68GPpS*m;SYtFn7JbG3Zn%&x*vEdf4yX z1M7zp5>v^$wB?w!lodAagPHWRM0A(8djuwQO&LQH{9eRx976f3DY*2~oWTOc2tGC8X!o;etl2^e76vQTH4Bh=K6uad44B zbdzh!0Fh3`B3A~J>H2RB9i&!i&c}1Qgu7gg4$GeR>eJ$0%0i5urmBRfSxxDQ1PsJ8 zY@FsD&G%0)rlIRMC$LDD4BykyufE(N#SA8jcupi$wY}D) 
zzMD@ekwN1`W3n!uS%c>rtCsGQ(@#37>>}Y3k4YUQ4PM}p8soIR=y3DL5yprh=xT`8 zXD2;-(pL3R7Pcg>Hd^Oix=(g-P#ySxi{2D>+6a+eUx(j?vXV(CHmMlSxt^;#2-CB31im<&!Gj8ZH7)&);v8L6CS94 z2mYkVRL8605%c<*lzO4&n%Oe#d(j&;eBq6z=;H4Lu__Q*EYEut9((3lTZSG%-JC~_XIp+uS7Wm5Njv(24O&^0k%2$9 z$AUXcN=|?HmZzqqO=>WU9WkwIBP^*Y;EaFjC<@t734I>N=M`R+o#-pE7$EnA&#O4G zWVHI%vc~p`&(z2`RRo^EnVZp%zTWUBR_Pc2fX((0I%rP#@r>h9?yl7EDbbFs_oilA zI6;6^Z#z4#Kl$3-D=2RL2v9h)_%A>+UVATm5N%emc9U%?&qCv-NrCunDaofGGSh@k zzh~@8b2xW$F%-O*cyFTlDcM1+K4LJOZ9oaG-J=j+XZV95DZz${HGk?t-u|jJ<;QlF zcDYJknO;{e{sC+zS4C0`L#uj6pb!7&Cs z@C$MA@s&VIgb=efHpg7$>jqq_PT<8a>WxugsNxBFnH)S5I;X53$55*4&ay^acSjwo z5;OX3p(sXVFTQ^RDJvS0p@aB}n)7U(AvQq1BZ29!j#E>GKL41*`lB#Dfw6vdPTs`r z-f12kgFpO8Ma7Bi7j2DviQa1riFwTI89ut~=?u@_aLT@NaZ_snBS9hdxR0kRdr5&^ zGw6Dd(qz;q8Wx@8tF3p4?$fg}H#$Yt)^n7@L&>*)WrkNu?VIV^PT~}1k;a4l0=)#Y z;e6#DUlJPxMk}+{&nzr0A&LLv_yC2#U6Hi=OZP;9>0?a${oQ#UzG=z}HwG4C5=fNY8LmBa5$J#uS&Lo*UuLDCk zWuWOgApvKj23G&$m+F(=%!^e#p2q8)#Srtkp>ly6zOG{LuA|C)Ol8<4k10MGDJY(e zzSN&~D&i5JKg~J6V05pntVo~1T}sNx@EY%|Z$=3OuAUVB{*72Z>oL=>+4ODC&6t^x zfrGyB+Y@0Ubh}u+( z4u!+t$chdX@js|?$&$QY#f75O-Yu3v$(Hi1G1@iC?(F8Rg*c5b(?lIfysxveNn;B< zSz?Q~<*QDuD6M3BeEbWYN7i^JAw<3&oFLc+Nx8Uw4AIy{J6E&KR1FV8{GQsd6~S}o zQD45=NB^iX>FbX$w$gZ=t7TJFFlL~2IB)#kdrw+-V~`^Au>Pp0BS#Uja4MwR{=6IX z?Tp?qa7$WP;Ci>WpX}byT=3aHgyfGDDypeTn42SHE5K)t^)I`uPMzEoa+_FNBfohw zdk);tXz;8X&dq0cRgK`e(GqfU$IaIEXXHT72L+thd_rw4TgTpTv8ILw=<7nXoDH~b~u)gxAOPz_- zVLyboTdw(9!%W7q&kKT1er}@E;(jMdl|wo=I{PoHC{y^ZeUWxEB1qs=Ikz z8U*zZ>%k+anXA$zQ%3VGGF|NF_ZY&#LK}*zF`hLL=o~(Kxuj8eSWjF_g7I7- zmOw&N(+YZtxH+xI?|HZXIknOAa0<2JBg0d8>mcZb`geH9d&Hlfp56ki8kd)sX_w7F zh)&-nn$}fU$H~tpGcq<#S7P6ocfH>$q+(bV_4a-R3eVrasa49Kr*Ld%fhDQQi2=g* zf#*%<{QeyZ?i>{FX$LeE=KJ@R`rYn;S_96cx22_}R!eH1DKS6M&>VP0El*B%foF|N zGS2mF^h!%eNf|8Gl~Tl0Vqw{WY5@(bXqEBst6wEc;=r(#t$>>OxK#+D-E3RVV4=V# z8C?FGfb#0st0=LT~F|BSw;xM|c j#l>VRcJ1I3tvPf%H|)~Y0To3vHv`SQDW%>{GKEsd8(b6y}lzSm&|9jwsTOLMgV({}V2k%X{Hq zUt{$T;7T~tM}2idIY-ytk~Z#bS)s4S@#dRGy>(X9gKpQb7@{!z#STPxjL%0H+-kAuKsk-O%9j<=cAMHmzH_Y^_mSY{cKZ 
z#xBTd%TKlCt4|C{%wu8D)Ib7wRj`^C5W{fUomdhu@C5n?mOjQF{hWw4Pn7fOx%T_; zKT9OHWFz>RV#C~AKGr+b8b~&vQwYGOhXYZ@0o#N29*>v3V%9N$Srn|%uhaXdZkJ?9~vj^T0wa0dD@ipzP~IK1O$0Z<20Cl zA?6(~vhoz;U;65XKE!ork>#m1!C-)-0A*fE)@X```_1?B^520}pQpFT zu{Jvx^^pR{SR;>T0&OE@(^2QTnVC`0BX#ZZF)YtgExDsYgI!sB}& zaK+&HPvmC&?2^TE|H9|f=)Zs@E&lT}QPOnN#>bFu75YTL3su+~ep$_m>7>U*)INXs z^=qzs|8FrufUqksA-E@l0|TVQom1sK_g^Eg0$~P*hCUH@ihytn=&$yzgAc+;-iQIE z>o>Nl7c0;IBy$Mr)W3;R5APc54I<=V>$Sd1eSe43@^wbAXL7D?CwZjy&#)Y9GLgp& zC~({EQC3Q=ULh}34W_4b{rTfMc!+sIV`W}7C4NRUYbuT5*HRvE!e1Yx zThj7cK3pk71c0!fFMjmV|1NdLT!`ed%W8vswESIuCa)9Ic`@lGaYndE)BKyLlu7Yo zo2O!eZd{TE3%Xx&bLGnGRnNu$_Dn1C%o{LvfN3 zh|+#`-R5Dtcb#)%os@GJu#k}WzNqo1Gw{~>_35DoPOX*liQisVd)%(KPDzA2Nd;B_ zDDw&%y7fCoa%FC^{gu;_-KOl)x2Z~>w@DdP~HVRNjiK>Kpt`2gO9YBP5 zs>{UI;zd+&sHr=Q66Z(rOTNlpp)o9`PL=f%45SJFAi9k5Bo{)Fh!84p5(ok~4b& zGn01{kIkBf((C2e?>9q?VNymWY_l)9NbQ^% z%aqt^eoG!_WK6*OO;HB6)v#D{XcwK6=fON*jdOr~lHc*PB!nb9gw+vL zFwpV^Y8fVPPcdEcoy?a1w+j zY)~s=49_UCQp2Jm&&tYnakp&1~d zgrwP@#W>N9YV=f54an_yxa^*e{N1i`7FR&=>ngp!(8L(BdDao2-f%d3#V3*b<1Z#P3ERPZ1D0%KypTdXRetG@Py z0=;qBK$ZmH5(2!=>VPvINL?nIeV;&MsEDmC1E4Vv>&LWRmPJiW$RW%)g#S)Rh{@C) zzrn_TOFfz?Km?=-0ELW=jUjjV@$pBSQ>o_7YbbDK2#QKmrw>n9+b3AUNPIRyoq)f| z@GQm+H_Pnl`6Acm8HOrfBGdiePr7F&kD^gMh2sNl1T*rZ6G4meMED|=?(FDiOEqmR zX-)&^lqBrfhquh6P&A_S{fwJn3e z$xNqkw1 zx~*(@QwrxPn8Lx*GM2JHr>0K?>fxtIlEj8y^}FYrcJVJ?zO-8dCojlM6j$!x7k%?E z=Wq#WxO)=3=pf{gl?}+nUKh;K7yL|WR~qc9@=_a3X>n#2pt_CFmOcE1U?7D!{lFZ* z(KzDt?fSEz?`xGUQ$1?8KRtJqQ6}UoJTANf4W_sm*l-$_HG!)w-qGUE8X?A(NN$n9 zQ-ST9C!WJe10=Nn1JZna=`%AbQqs~7|6NXJM>Z1R={ho3iXvd&k)(uAbGEro8O3a- z`5T}Az|O0I?fyzTf;UXF2>tESKV&Y&w0@e&#eq4{*Jv)i!b|pI2XAER=`c#e#LB<|=-D?akT$vxn(A)p77vM2-7WLKX%W_JD;&~I`L71u} z#K0xes4PW|CCY+o3vAkMAVlRA6l6>vd;vs$Ot->x?bYdkF3sLQ1ZX{OOdzTHE+G&C znokH4k$v{fs-`xf7-jqtS1s4hMx>$DF+gVo#xut8VL<)e z7*mS&%slF8zj?spW|V>EF{tLWr6qb2oS7g-0(rKe2tsq*J}ND~`iCOwYPCuH{Is%n zDdjW9j}o+@r^La~I*Ue-VPsxl1R85b0pps2Ek>)k$%x$MtjV5rcZ3%dtTqwet6Wa> z8T!Rkl6A9{!V2T(GlcIB8=oIcE27Z5ItC0Jry!n 
zXG9W8EKYU1_UEgKyo|6Wd)-V+U2#b36#=L0-Te+DCPVvd$LPKuYgVrQ^E4)aGw*($ z+uDby)j{art2(P^^9dNchA+93jGGvpEP-9;gib7sG}RYK@XD%G{F_RSBHCfdX){N$p!nhkQ2d_`zR{s|>kO0rQ^i)0eft=J0VuA2 zxw{Aj=974JcLyHGFbf{`*d%w#)Zk8V0@Il~Hn}}uiLjl4pFwvB_ja=YH%?#AQdYwO zY4v&ht5`n|*{_d*w8)>WMn+T%VwlK3ST9m`uSKO=a4s!4)|5CHg*v71wqG$m?DHrD z3I|yq|G|?Uy>)dP2^;RJVq>C@^(d3-v@!44-}`=Sg8qka29s##KVqDKkbMvy0{6~< zGWTJN<~y1&5X~+Te|$GEGuIF zt1=c#YG2&ydzU^nN<9!aCIxNFBW6-22(f_Cub+%;b%Pl!b^8i)h)+9@-(n%P&7d=y zp@rbZ#te7c(;?vxK7IW27RUzu10gm2(qFrinEEg;0Hy%UN01-*pO4{S@%eT$YW?Ox z9k2n7jg3#&+JFN9UZZT_AFyqIVh1gu{=&pifOfdp1&<{##f2(Xc?t~=mwD|g_UJ7D z00wwsItdWWfyrd9&YH%%o#^}YH0uXCjcFI3e!wJLm+pSOd8GgxO`z-bpGyg_`c&Ah zceQyDdf(zf+)&^4CcXOw>>uv04yYIyDA3G!kDglZc2aMj;=w2L5~4y(MMi@$)!v5z zApDq9rYv~el_yfq>(2VFGeV3Bv&Qx(OZNU_>t33{owQc-B`u@F3oZv{$N%2s->%kM zHr%9~g@?FY$;J{;$%ve}yJ2WPf+Q#?Ze=q_FkC_#rNxVu7!Qu?%%FJL$lCMkk+6JZ ztEFXl+_-MN)|WCjUI%r%F2}hj{oR%PM}rq0u+ooxVhktf(D?b)k=_Rpdcg+Kqp>8 zJ)mg`3fv1tz#&KW$962#LI~IClWPD<`i~;@L9YksSnvnXEyL&e&ilZ9BV_d*QeCyH z{Wm%=oxp0YA_I7;c1noeeg~8hNNN6KyM~ZAX*b2`RiytdR#j3~e)AdnH*k>i0AvJ! 
zj}iFspCqQ(2)wY-yJrX@o;j^XhI922CkdSDsAFr4jlJEFjSaTPC82Nl6v&_nG}qMl zSz`h^TNT!t%TwU=5z}(3Hm8iNxj8SCtu@1|sl$oI?@m|EA3o4Qf?u3bs}svsOGzB9 z7l;#4OvA7?M+(}wB6!ty->-dGcGPhcJw*>vO1Ga8X+}sWJFh@u;2^;Jn1HnUkA{^7 z0=T%QD5qT4W3Wx2X zwuga{k!^^b4UCj(blknt+}t}{sC^sNKgD3l1egzSCmnC?=l~o~)N$X7yMdb!+-h9P zL^HT`1fIE?qJ-6U?Ffey;N)Z-8ss?nLrg=aQpMj>k&C(HHNvU0vLfi_g!WdgzBFUe zv7dSIfEW`8XUr{sEg;5S>bgD$AJN+UgN(HP@s^SMwi5$vbSWFG-D-<{Ls}>ie|sV@ z6(JRNCU}iJX1*+Ug8SP2!#sBnL0r=wB1MYCYvJ-hi`JC<(*jY`Qt&oSf=g9@PT(=!>V>wnB{ z6x95Dn|Hsoy!^>c_3BCg?(ne-5!9@fBU=ZTte2dO44BY%4kkifmJ);Lu=;d-bN?gN z+lP!${!xQXAVy0btRm zGB76(4>6u!Lc+|F@$2@bSHYSEz6|jm1t<1&b*QYcbk>69OXP-;25IW(FZXZc%(t3BtlY>GupHRn2xkGbMb1_ zT%m5kkAeX6>y+0sC{;E$hv_l8Dk_whl!Sv}IbhQPJ{!#b$oTe{gzeIv+1%^_1vqU{ zx4SZ>vb{O}iN*f5lZB`^54?n9R4-+YPTj~H=pd3 za(;lrZt#-helwptc9*lOu*r(-{m>_b!f}J2K5cb2aTnXrMbrAOcTL}NJfYyD2Wz{x z9s8=cGe`69AJ`~<-E_;ISMtrAlM};1#;**&j>V z!GRf-?~)6!nF?PjvD#Nxu}%;7_Kr1ALb}a?GUF;m#V!0nz}=pR?D+#yVF*E8GKUkZ zZd>;fTE%C7p%<&pISLjG2> z*HmY6cZWd7WZIJWc!m`B13E%`OHP8faM9SrL{XhA_U z+h*qGdpFJfL_XmsJ}({=)&c~XV?nFBZj;t^R`ZCDxp1Uu_;$SPA$PYAK9>{X57rm% ztzJ*4#N3Z$AdOSOH+nz0L()8Sj3kvmHa_OsH*CPAjM?f6z<>70?d?r@T=besX5tt6 z5p2*ij=O{s8)TCt5xxk6TxWa{ri!ZIl?PYI^>|+fob1Iscz4p~(409!J(iA&4J-@< zMTqbHwCAdDQn9<`iIRE?v1B`}@gqoaL)#N(E{ay$`a!!vQb9}`#GMg{1nLCnd|-Q- ziofV9P#HoUIYZwrF0mlZ$9sw#Kqe^_E2vYfj z7a@E$BI0>SNC+6DR0;@g@W?+Ph3$-Ii^8ifW#NAmc6)6W#XQJb;kF-@gt904DsJC)hLy+Sv{ZoB$pTsky`6u-t`U` z%%>DnkezkFRkQ)x z^C(T{S0@L}6tm81V~HG;@DN`m?|oIKo(l}He5ey_y_L7w9j?E>9ar9Dit9>n z_X0V`FAeyO1hXN&W}W(xp&?-NBqbrKwq3kE!x3cdaDM!}vuM+d3^pG+Af49FT$2E+ zQ@`xY197TT)7+%9XC(l(1&f!KmiDkY>do5^RRHWcYPiUavI5qpbJ4ba1dv@1k+rU8 zF>Px^9YCAx21e}95cLuKLEs;+GTs0ZQEGV2MHM) zQnb(6eIGq$l%h%t+<40&`-z6^gC+EZryaEq%?>)DgXv1^MRg5V^ZwS{r zSK}&dH2DHU>uvx+MhuIGK$;N*GfkGh&5ehf zEk+U+{Q*-A1be1AZrpD#3)b^S1%3L9xW?GL?fH?W_4s}IC?n;rpra^&_W2?H=_`I@ z0_#D=$Og?+1BorIhO{^|)8&;I>t2XWn};e2>MB>jmMhl6B*?(~B+H(L)?5Nw^$^^K z9|%#5gpue)E;M(Hb1?Du{^fIg(p3*Y+ikm8 
zFb8B9AQK4bgF1nY^rBTjb$r=UpE4kF&eO^_5t2COCM7<_pixxUlJ}XPCgYJiI5{&f z)G?*Orc$TeorOL8Lz$;!yY&kJf8qK!ds~|chc@xsZUwX9oy}R}N>%+62O)_CHUSq}{v@_e+aw43a83$&dWpBR&ONlfmfUy{7R; zOZ>5DULi2vi>f;(t!zj!#5%4Y7Qmu(_sqvd+Q(f1PG}Df;9F;9=evSI;3!SWEL$B*-ImP)uG6vFkJEV~A1+uL z+!fK^X$~`f0$DRL00wgy(lQI0yQ1Ubdi)VE=sT%FgHF}J zzyPq60A=f6y*zN^0P6y%*)c#lTEBd7rBmeuz9kdb&@1;SZIvb*00&P%{U}XNVh_zM zNg50fhe7xVZy#9$Te{Z$1_$5fXgwrCS&bW4>)%`QmBa4uug1=KK797PP3C*F7Khqq ztf(a?9vyT!8C_qX%H8`JipKO~9@lEvW-1cZnGVKG6; zXov)e3SiJ=(4{Sz90Q6@EDvy^Ci=~BYrX~E?a@5UYfXeZh*d{Vq8r|xo#Xr&l8J`; zfwntX#a2ll>(k1ISIQQJrR?`3xb$wj7_NeL1j{C+hq7LBuIX_CHb3X&RXlR~Fx*>96?DePtk=pH$0RX=ccI()xrh;Yk$uqpl3zdU==MYEF+BYxXdd_S4JY9JufA1fPPXx_xz}ka`7?9Va2G3;039(!=ta=>3n4Yw4 zZvHyyd1>cosaDpwveC3*KAyvtk?e?r^7T)Nh9XArm8Rbo_8#OoQlxKeu92n}cPD!l zqkyq?2or?P$%t)bWr(M0H7uJzR$Qtwd7Yj-b`qsN7=t&5O*((FyZLG***a7GXw-9u z?_}(uI-tGOLi7%)=0H*N!v-A3pS6~vm;llqJmY5EVaD49<-#n5r7JCr$hVPi+g%+f znt5V9&z6y8il{!OJGm7LBPV3ru|2N#p|sVS(L|Kmm(%|~QvCXFS94Z&lkdWEaFAax znD{=3E>lsn$z!vaR*bnaJab_++H5aDKNoM2f+MPm`{oAjHHT- zbYmJ+VuMcjO(5-VC1~0ZFqhn(^%HGAUN8VD81zZH^KyW`*1!N`%A64VRh^o6aUF}! zQ@8G9%VA;9mp~jX#?~LyMBqAALf8k@H zOYkCzP`N8{RSn|b{>gPe29LH^uRHAZU0joGt1_SJ^Q(GGA)C@u?(;+g(fs&tJ5f1wao66G1H~kWBkyBsHZyE2mPL$cwcKqFV(%Z^g(URPj$Hz!K z%yO{kR#}-^RQpEDQH<_{D>tF)wSb1DR%uYH+;709PDsBF3ChAGtwQ19iCMwk-L$*2 z8BQpLg8NHSQeJL)zBOpIP-8!EUuhV{>o%kZR@?CI+&HL39`bzyH9MHi_T(;}dLwku zCRoQ8iZ`!`PSS_s3;#yPf6PI%MO?gkea(@{5jm)=-*($Uu8~W{_`cF8vN?hEY&sCd zMq6Pl{=(uH^|>K-Xo=S$20Jj!n&Rd;pe!z;FFhErM3q?Nb7X)dXEts|pRP30t~1|3 zRU1%+inc*HUnce{wixktNEz94Esg2XX3!=>ZC1`y9#+OL{HVf46%{XK^YJ)&x#i>! 
zla`ALUHng|pIISC`<@B80N0$|1MLV6GkUP`+a=-RXPThNi3t>Zca<$LP^!*$mETC4 z5!-#a+$&E#)%b%6agW_ua%l9Jk&<-Ui_1yiQ#A`uN>|okd#fq+iGU@N22nc_C2>#Z zl4r;I*BZ2vu{cZe4lRlqUP zX^V3!Q$C~L=M1N_$cmXz#E8_}rWS}i;>Fw8mfl-l^KJx=4QaSzgzIZU;O%Ln;oZOw z_fJC7RX+nb#c&6>=%qUSe>1MOd&3gBKX4wq>7&Zq?&sA`HJfGC7Uc7c$P?Ob-HaXW zgf{+dF*G5o?ia;Y{UE%}5>y-bZM2u*m0{KcOUgrXUnhm}(*s#)Yt`3R!-AF!hEoY~ z(!~C*Q-Hxii&G@6M)Am@I{C%=8l^z|+}*_bgK%(tj5(#s$^Le-YA)^aWbZJ~h0teJ zCH1Y{Fje-`wmU<+tKijMM0??JSBlxfY0Ps0qNKr`#GA9lmql46@vgV$$)SC9Fstrn zEQY@klQeaE{#1(o!N=2gP)-Vo*GI~CHF3m}u6I&WS=$_HRGv-@k}x8uJCUwv%#XSn z@v>T3>`&3=BSq&rrHV7o(vAq*$eCC6yzDwl6DUn9p^FuSx92rO*05wQGr3QKUv2h= zEq3WuYpF{YXs=0B)BPY5z>I`$9t4-M+D$^Enao!fT#YAHM1@jS&LI6|U}M3JZ5s2T zGJVtIr#$jwxsQ7G^z)Tp;(Ijd`EQrTjB%=XRrx$3GxlG;hmjw$6j7t-;f7YdCK_4B zAq4Ws7^~~s{Xb#QQL_a|DoZIg>F_rY=O*^YxUDQKxGAM8 z1T;YmmV+tCR%;~bv%ay>*X4#!N=hK2NX)Gw?TE5T>HSe;JXe+gIaZY=N9YI|OPg&0 z-ovMLomUGafc*03bOs?DCO?b8y1JOom${Xg*LHfksIbEGju{B}JfzWc)6+d*m|QZi z+c};$cmuW#BK)1K_j$%TRp9^Y1=zB=HvWYD<{c(3*00E(yG*tI^pbwX*kivT#@F+Z z=OJxs!8(#BV~)d$uF(z+do5EOW_OoHy~5F+ATbbB%%-0FRWz6*zE3(-0g+ra=+5Xb zjE(z)8VG2eU5T1=OEFnA39uqx@Tsz9DSJ=F1)Cpr4S6Ek-IJ?vhmv%#^bM4g&V>cM zhd&;&Al!y`_n}wUNi!d0)sp|TA6TH5)uzfh!>3!O;@;#+X;Q3lF5HvdZK>OT!ik5D z5O);3-_YlnuEE?(ll~g7|5-|~``9%u>e-u4D6?}9LyFT$>z!jC7E=MMgea+?a*zDn zNxV&k_Ffdg3==Ve8^mT=r4)P>p1)G?#Vz-z%^|wh_Hg3F;^Hvi;lTr~Xm#7d1wVF> zC2G0Ts&drr@&W+{Axpnu zxze2&_4po^QO5eO@Y(uVfofiENw%#n)E-TWy!B*5G_p)HM|+0SmOreRca5>A8Ic0? 
zQ&!yC6qERgB8T8mE}RgDq<#PZr6=dbw96WIUPon&2_sWgCq4QT3LGV}Qk*22{EvDL zPxDSKF5Ywu>|$>EmT`Z146Q7Nvym|JSt+51cXNwqq?AmDJ5IQ@{!yUUP|}=!!K^|d za8-(ySp3%O$XQmvk|An}f3hL`;%aP*#3JR^odlQh8@3k}jzfoN<2vgGI>7R~J~sBy z=WX3pRxdMfue&Gdv}u=@5XuUEQFhcU2p9YnOX8#Nx?~FYgO5S{k0XEj2Y@7R5J(As z9CQ^Va8d?{Z4MW9wSNIKnc(gYbYF%FWbx)}%t?F&zYoUZsevgEcqy31V;OwlsdT#) zH{fwXR%STW0r8*c(yA7@wV)}aDJH*SYJE2%0sS{|m|nd`ny24I_O zl}hfivXKe#UtU*gxtP$6Ot3zUWr5I2E!1wdjKLfCOv~T6dWbI{F((D1M_fY<@6!x@ekmdo)RjiM?h- z{I^>WF{?-rJi0d3XsDo+=W3xB_JPD7aR8O?8kZ4F`o6;YE9hC-L@6lg8`Tu^G5-y} zRF}6!C^V;r5`Go7(g%EaJ?b}u;Udzs5i=FpjZy8^!Ru?#tYX}O#o8x)Y&winUoXiW zOXfZp_ZDlb356|8e#D>=`BTcUlY{df6CG%rEx5rZ5?B-VB+-}}9VbYazcx%)pcZ5CFLq#; z=od|N6Jg~fpGGtM-3&!?)rSD=H*v3r6#$~KzZv_!-#~xEU~9adBKOJLPrlU<|AOMZ zAtgY@kNgZ~;{wCgj|&0VjhJiINcW0d$L`(?J|6k$IayJX@4Xj1SgdU2-MeEQQ3gvA zOAD=MhZTA8r&6^fU%kp)KfK>FeB7q~Ax^ww)S0VYWSr=W~e!rC0($9u+Q z_-hii503!#AI1*mYYJz|mXDc7?#ZEG3vM_OEVZ{F&pI_cP9y%{?_JPtwOYn1Y-Nzv z9U@O2_08>M#SZ>p0eo}9-hRJxpb|&RHYVreWqzFoeuF!|BF%sK)N?wn!D6$-`RM&( z$03UJ$+t%Y20eC=boSYm>>$M*s=-x-ti91`)Q(LFu%{*8$MhE4U<9<51~zJ~qlwD$ ztVfI;i|=+kYy48aTxP%QeO~J_Z&tM~ zry?1fipPBJD=4K9VctZ-Nm!D%q(oHz`T?#YE~kl8kkshQmw$yNhC& z$38Ye0EWt(xWteKBS);4t9t7a+kGJ%omb9BX8i6GuKaFiFw}@od5(%0UxFvBd`c9E zm~bTFneNSyuZhp4JQECdlHjZR-|KHdM~Jnxt*`4^zVLUAn3CO~qB?7PEe+%U6XL)x z<)KU))d9Po_Qyr@W4>?vw+tIE#r@*l^y4+v+}!kV9|6;AeImzx zEG8cVgk4#>-OG^PTRQ0etLqR7Td7F24I^M;g2tArEM^hvbN+JID7G8UKzM8tqQAj{ zOohVG^NLnUpC^SUV}3+Fb52yA*H0g(AyQgseB27ywV8&%KVJQouD&-`(2C6^|@X zR#QH3A6ql3nD!00C9U#{bXn3%31IWvR)8HjUW zbv7(y*;mUskVTchd{5bcx**5A*pntdmr`PoFS9juGvx3vG;cE8d;TiZgrTltJyvBQ zjw;}8#dDIRKG?iF!&q7tU1=;tEAq2a8-Ws$Sk4u|4$zC&%%e0HnHZ^yi=coR6SZe_ z)hb?pc61|0f)9p`q;eMFAOic}>9*J7IT6o%Lm{d$73+%C2v(ZL;A~mSP)(EYB-h}^ zd&m_I6rf_GTPKIIP$ZsSD@pC`4_CRnpfXrTLEu%fOMk}|xyctqcO{glmgz2%VvS@a z-9L*3arP_I%%f>qQj7vZVGs_If;uFcpo0~&{BW4k4s#x3vZWlCs29wrKI3=YktED|k_ z_WWi%$x!kdFqjrj`}LS0M811|Sb*|;Fkx+FzITwh@@{rU<`?~1*on=Wwqr4)!vC!< zLg(L8yFHY7`oZ*<|r8yUfB*#BUO>tcRiUUzv`m_GukVk3*0WEkE~up*Zwsdk&L 
zaQ&qE8!#6c7I8=dNC$X(gNbMX)M@O%oRW?)w!&Gv<>cCy7NC$EaH}=ure1lt59V1_ zAZf8KhtQ(ZTJHE7kNhFf?NYw@oy^k~tknM=10~7GTD1zSb}pFt1qa%R9a%+bf4;#? zST%BR!frR#<3Z=;#%bij+GhDDX>zwiq6rTtGn)35I$3qYKMyl0Q0SAj<5-x7tKGZ~ zD6h5-UQucF8*7PzWaj#5HtmbFP#X#Df71tYrf^wt&qdLR%$-&VLBa3<@h4R>*K>Ad;-=Jw2i!YM^>)> z?5csW=3N!X&Kig3y+?PI<6!%fa}##9eo0dThSz&6W=C>U3*c#_vgwc4$;)YMjs6Ft zC5G1xpSrwbzZ++r1uEl7{jAnTpa=RJDFP`EEpS z0Sz-YTh0|=_Njk3Vi}v?uawTd&Gm)c+jB|=fTcl+kD}oQi14>ait34M%^nSwk{F;+ z>vm!tQqZIPHmWb?g^=0g`uZeAGr*kEN>jRD1#;S6Gw?9QJ2A@k-i9)6(sXHgiP4p_ zUR9X=@@#Yv1#pxurAWA#-=$zwui zQ-KbIxnXC!F~CNgb>=d);DS&M7{y&N@nm9T!(SW9cJhyyKD4$(wU=8K7BYa`0M)=1 zbRL5?J5s*gy&GxhDJt=L7X&pB!@+Qj*9k%(Twv9Ss@KsCz17UE`9=I+|okRC+^)GXr}uiA9KOpZE@Pc+inwy{A=Lg0HY}r zBDR!1_DaA{)fD%}gGTZMo4p*CgjY&#qUhQY%FuXa>qxu*gtU#k`tHAL7cUgZ z!XBGDG?TCLmM20jT^e3kZoSJM)6+mC(Tc?T4U=yd!#DlrzQNNO0sMjPlFE0F3F}UO zwTybwIL#$K+8cpdDN8V?+yr(fK$kdO2K_SCbGK=4$xbePx(frw3s%1DWK9~?CWj1D!Wi_J zT|D2_Ycq#97R0@~#M#1EWv+sR7vdFAwoYqU#;7MQr%Qe)x85v@AUHgfh(1>}43vf% zcZRJuWtcM;_6}n*CN2{wj(jsUgrkI;S2G@UV6j&if{_8{2$F+6k4XAE1S@N2Io#5; zC>ZAUNML0BGK6d{5@>MFh_*==7zaGJvjVo@>cK>vvRpte$DL>kqytRqaIXE1B-ogm zwp-8EB~+QmfL5b5s*v4zZB7UIbACrHTaule2#)GV@%sLhB_)#DT-i67|6Mwgl&>P9 zG?mrtc6R*3?ezw^`yu=-t%S2k0qVALnvf-O898yL#PsG>BISN33^3kP%joYDDC8YV{7`!H6LpEJJU`<{T*k&8S&v8wd% zM*h(g=Uu04%}w5yAweE`3`@k@`eJ#=;`l?8 z^G`*04Q$93LQQ>5lKSbW1rP%u!ET2`Kd2msPfVn~S$s1LrRp~W^P0T6@59;8H&-BE zin;YfZJJr#wLwCtaLYENEHN0wugj%^Ps*!80dxZ7xKnV{Onn%R9^96u!qL&T)RN^q zEG%OkFfLTFzuIF9XL$P#G07|LQM;Lwx(G^2f{G(tk~WP3Fi>Iv6Y4d^CTr7AWio)AHX|5`!WU zsOtIazEN<*%z>P|95N&|UAuRRMs zSz0D%iH1v|OW-FPnK@=qr~Z3iJdPW%aTus*^tGBZtz?rKVComIZlt#JUD=kZW;lwk z{sffW>@v@%20WWmPmdP?R}J(IWD59LPa+9k1w!Jsfq$FbUCli>ES;|^>WYG7hF~*& zf1;vU^ov@Kl-wJ9kfGUqG~Z6+Ys8Gt3{YoA``;HpQMD{DKu3}kJJo~*fpKk8J>JSD zq`HJ@2q2r1xfNM?+rEPrs(?+4jOFRUYCN7KX7a8dMcXZ2QdFkwaf-Otv9KxEK6L7$35!@B@ujDBHeTvJcMS z@mypIP=MH~mV@``<*CW#`u`eqmhnZXpmNS=i(PI`Z=2ZbYRKpCWT%#6uRnz+oO5U3 zGPa!Ta3SXdGJ(KxKdguW)c`xxYE<+;TbMsuZtU2S&lfjwaj=k0cNd|phyNr{V2E4a 
zQQ>MO#z=f0{0r!V*0kc?zY4m9eWy(coQm>a!tG*yaxzPPwBP@#0Ncm&Oc2_?w!jX- zbmByrM; z086@la%kiVQoDCSNTKeLzEx4?ypujV`wgTN;G;^lnmL=)y198RB$U@74wxrt6J>|1 z7TmssHwd>ff`&XAuY>j8+YH=v1uyGV@u! zjDBsY$hhSx&S7T;i>Z#*f78MtQfB;<7z(5`MwdwAe*f1F!Q%R%<(;pF;|+z}Zr9#6 zvthOCvS&dKg>kisDyns>I-%CepWoKYdT!l{+KX?xi(B0W52;tUnnUl;^AkmS&x|5_mm)FBa>@ykh z0mf${FoYcT^{0R<+0o5c)r9eZvHYJr^`Yx4oFOlgXi}?mC)5wSR*ncRF(Tq7mYW5-XH$T&b6)>gr31byDweRVn<~_WF^mTcX3cMLJw6HUlQq z-bBT+l}T{s)*0w?@;TLTr?pXB(5&FO)(=&kYW3wW7h z?`|u=QT_e>^G|m`-uC;J6DPmt>+Gq?$IM^K@<{CDhj>{}u8yXkdHwvDQ0OGA>_Y@Q?vVwGF!0wUxS2$6h@rx?0;-I|$E2q!iTLSZoFs?|vA1ICgj} z(Pvd%!v%V0czl`Cg{KIBHH&Py^@XaN``e+V`&5+&21(VRHAd0DC^9oknK|{`lI*L} z@9@x)cDF1I?cklHlH&9)D=5n`qw{tHL;j72?|h-#RuuDMOu#5}=(C({^|mAQZl3

                u=7Z0KZXTy~{3pk)7BRMm#_2Il6T4KNol`9P|N z@;U3Va0%>xT9R3m>9o~^6!E%U^ai`tH@xY*KvItdEuC07eEZ|y2OOM@<$?f5AgLBn zKJ8XfA)l=3U@q_n-$@?rR*28XD!BLW>z@2aN-L7U@p&o!75A^3x^18Ju6ByCwbP{i zWbg6JM$T|wL)Oo5CvS1Zc(A439amQ{D2`9nLd_07ExJHV!}@D(-Z)Ec^XG=K=w1R3 z3gCB`q#K-^ardcg_PIER9uA$uB4{sX!&TMCGZL5jscwY{+!1ZsU=?rLuYBrGjcY4L z9Gz(${QnrGeW)QLtWZqIw#LdlvQAOBwXrTerQY3mzlPv(|Ju0@QcYNAW@P(4)tFYH zg4yJYF3pS;T_iKEA<2`N{P{{=2k5LZkJq~j{JNJeQ$Et=>2{$F?#&-#Cfsk>Xm`j&aj{a9=C5!(;6x}awZWe>3Bi6uxi7Nr)xvS7Y5PFSSG`{R%8XS*MF zIKaynk7!Fs!y|5xcVD-yQo8+zZklQ7HG@+>@O0NMVe}bB#8<`uE+SM!{N*W<@BkbI zf&KWg#ft5ke-1a`AbvrC?!<4OIObWDSZ!d%G@q&3Q$GGi+5TkbUaQ68MAoR~We;~3 zLhZeA zc1HSeaI6Xf3&kOaci!BlWK8izd#qIth#zk>wY&VKxL$Z@Ilwga+BDmWImK+tTj~ zY*(O}q8IY|KiSr}LF?8hAtH~96IUUkFvG#)L-q1bz9;&`j#oW$Gc(Pp%By!$SiG!t zhrNzPS6n5Q*d>30%EXJ0QdNX8f!R30&8iFDPYefp08`aua!!a0pyH z6ds}uK6Ero`>h&0Y0Y1;BFzU&qBrVP*1TeQtMxg?76oIWMI{{+P_L-RK04+Jx58GU z*Pl@iIQCBv-r=!k*k1+n1&49^hIFjoY)k1}FPBHc1dIM zacK?+RHPfAbzVt`5i%OX38fXE!!6r+c7NG)b@@_NY;Wrlsf%Fl@$Y;;}U|h=v{L0AHL+6XF~jENsll`f z&O;0D`StOvR#H_y;CGvDu=Qem?{b}7dYe3o7h%3QH$6~O;kL;Pmp4PNP@&c zng|-&z8A+<#KZKP3XT4M@cNy;R5tVEdVP&nUb!y&74Q+I;moDl45h#s5$(R;-*f#?)coXyLPuNqJ>jmc;x$l;Up>JO4>JSS2k_Ixgb z-XZ1P49B^n9gf-*%$wYxh?;Oab_%M%wi6!$$M{2yB98+GH)8$ieAZ;f?B2cK$b|@C zSS_EisErlkD?v)7Eis+6nB8iFZy zaqgGTh9BN(Z|O?9>(;srE~b4Gh{7Q%P?_BLmXnGR70l6SZ9Xp?LF62&Yap}(&VM6=0u*on`R408kP41V&J zO9;eEgbaYL>z^vj{+kOR!_QJ1gjb9-LY|pirCS`$ZiN(vFf4gL!Vy@|*!ailXlw25 z^4+laK92hc&S9JkRZffGFc9|U-;Pw%Ec-G9FxDTOMaL@?$`+|-xUV5`rgAfD{xO)! 
z{Q&@M%bWJn0_L>=*a@zXgDArFe)cCzOC~`#n|**i&Y&k4+h>(Rw}s4&TbQ4)8f>f9-7IjKJ*1sH{%dvnzTd2l22lc(u9 zzFY`j?pu-de!cSfv4Ojv64B=REY5~O{BFp+r%6R+lhP){XEw&AQsog8KSA;LnOz#5 z)B@8G+TCk4_|j-YU0f)4gx;ngvf=i#a|JI%1Q$gb7m5J=71T;aAT&n&b5ykdqo583 z!LLO357X?vjlqXwa`Wr$KHeeiR?qD=FEgV9G9dzjZfNnk04-acCt|P7bj$CwsD0!x zosYqk|CmI@L-AwIYy;Ma7Vaqov|nz%4E-xU#K%a5UVz`)nWaXe*(nJAnD@tj-X51A zH?Z$lzw>0@Lr*4yNrJ(b`#6~ptG^pN;m|i$=)oD&;O$_B~cn zCJ(MZ0Ah__wtM7`ii-L>8?`C=ye*0z=%2u#yf-d*ZZi8j2|1t_Kn`J};Mup`YQAr~ z$waR3g+D=SET+W&A(5_cE8xptTTMfFypY1mBX)UT5K}m4_te`0bkFF?$N>maQc^t&xp^sLW`~D|q}A_VhXDz$ zAth1+^i(kkas_C~5fK27XxhW`#?Ld<26Qzt&O&#MvC1oS>mK=;bMuF*!$;3y|86U` zQ}D%cupG5^hW^~E_t0vp9-OvA-<|Fxi;+N=3egeY{InkXV}WEU+NpcP_Q zyvCRy@vNN#)F5$K(hEH&Pt55ukDFawU9nldCIgtQL-zWE1O8|UZZgX1_Y^qlD(pTz z;?Yv?l4wONYCi%LF*zy+|1JUONDl2h3++sKWFQ}$o%Pp4vm=vIQjAFk+(CC{5z={l zK~{!ZpFjVK);@RNIXpV*c7;O0z9OKvZ)FBAKmj->Jr7TQ;SmubRZ=o+DIMh3CUNN8_v|xSkoH&PQ9eioK470gEi^yV< z{=>s^WMpK3zU5B!$Qu6}BGbe0n?CILu+q!GR?Erwb1s!mRyx(QJe@AMy09sxY}KJWvoK3kk! zZo0_s0Jt)L;4cr{W1}h8op`FJ!Oe8)tOx;81CVz6RyecAK~6|8(BJ2P?%7tn{Cm>s zvy=|l+kbg3{|tSve%Wq&Ule}dc_j>8=^@nmc1hp1OhfAs(%kLxUA#NQgBtYE4R)TG zff^@jZb(Bf%Gg#Edh$Qr(ARLg_6{xJlthk|LqwdSrI`lzD7@nNxgUPHt1$uxwXo5y zOn=!)Wj+JPM*jS|;PBjyUyEfQP{lw+e~C6N9CQ^e4gJGrfAHblu>glE+9j{)m77`p z=@g9vm&n001`Erj$irjYVyev=!JY4%_HE7+!YRah9)<>eG%SD*y_1o3V`Rc{bz~omr?c?W%j<-2AX>z^nG;eZ24FTU^7&Szf}PGY;A462KO1xFqDDSgPzD@$vPq(gUO9q&ra9Bj2Ze}vvmMnp$=IOFgf1XR|->t=5o}@*Q zNbNR;F|I$uWGXfM3&;6br%?C@-Y%txF%9le7!94MLhI|$vQ1UgUUjOhRU0yf_xSlF z2h63(4$AmS(Ii`LG^eFav(LWGfn9$fl%nZR#Ky*HCVLm&^9@qYtleQwE3Nv$PN@Ix zhRp4c_oj*>X;m7;mE&1ATY@l33Q)=`ldasF08Wf%lu48=Zd~`dU)lS!gOvJHxoQG& zGoWjmc5{Z5OpDYMA}%0fhu3Og6-UU4PKeJ5kX@xyZSta2vkV9@s9q*Hb>+YFyM}F_ zw89b;uppN)AzGYy&$RReR9_3n7sSc++L7n=53to)nl|q0?^zutV^38NRF)lb?dC6? 
ze;W4M$J*&&TZ205#0`oX4YmgO_HFU?uRSx{Lb84hs!S42yplJ;X|AAsoy&J3$av^b zIc(hOMD)cOy{(FS;RPehg&Z-KhqFiv?~DaZII^DyBwKH|!{;EV^!-!-uO3k;@{|v+ z-jG`WPsDvOIBrvNAy|Txa@n=>VrRUp^1<_)K8e#O5r;kEl59TDL9QilH4?eF(S_Rn z_yGc0tOrFax$d9AWEgLfWpLD$FBwxya6EPopEv?v+*Um0=y2i47nSvqR>bU^BWsE@ zxQq3?{S|UV_>BEVIxpL=4H6kx6SC{nRZl_W)CU)p(CuVI~PTdy7_-U2>VGhB{KV~xw_={L7YbPgoG@FodxmqXqUfP3txAm-+3i&h>uunJK2SN8i#6saif5y|_!sp4 zw?P5Cp$F<(yur!IZLkw_Q4j|g7ij1pqMOuDZiKnyHw(B&thY<*cL0slq)bFtU$@~- zelt~6m(=p(E{>MAJBoZ!$T+QQ@hRCM4(yrY?um>Qjd+F1$Ga8~ew9eZUD1j{`*KXP zp529S8cBqX!z6HV&Phgb)WX#m`)Cn2W){;ZL`HGf6w|*=K>-sap_DieCTkU~w)OsO z*Ap;MkDr65H=Hi;`*DJn=t(I`q8HNelu@vw>h94%uEf^=?Y+!%%oguN=|Co}Gk zL?lsT0KXyEAPpN>1|Y$W=k1SZ$)%;GAypp+%!U)0B3y$T7pu|F)1{aH%mDU)V=(t;N{eCtsI;Q6-47g< z>v_FLxj98QqsY6;ZDJJmZ#f6F82va+3RkCi)Y&^J-B&mqjEWP1Cj5IQw<*})RTJZI zTTbWGJQh7C6i=BzP8NaxVykz)oLP7}!xsWP+w6SNEFBcV*6uOL15luPlYDhPX@}HR zI9`wO;<6}b3EgAFP^Gp%uAbwhV*IYJcLP3P<9C&ijg=bZTdUZQs`Bld&<-yLSXeG&M9aUTGRu-E_1yRm4+ z0uX@zXHbp~`N*?#j|Zdu>GZsH&lyk`tC(6?7%UR? 
zT^8%=Ah&Cz@&{Du9Sl|hl`()tGLLrImZX6v>Pk9Pt z7)R$J5^Ppm{g&^}vk|18ol&yZqM-Xc+PLI2x@k8#7f6;(qj zz|eMS%4u6QZP|DMrG!FV#>C|V2!R2wppI3VNR-gknDJ$FxxNJbltll#e8x(+)?`PA zM!V5bfAThHGkqujbuT4I>c=xOL<%SD45fXG7DflFw%}X~!jWXWIJGa%jGS&|Lwgpt zI{u9jd0{))zikqU5Lan$pj+6GnLN(7^ioy#t3-gk2{_e7K&+eR1u+_Kb9}k;zhvyV zxMyD{br*&YQmBTbuz^fPm+7UobWyh;jn89^;emL*C*wCa8^Ltn(Vpn5g(+*@Za1~@ z`u}QiPUDig{Z8RCloKb4&1-2&J+pyRB)=RXrv9car(4Tnqf;>yznm94?Qw>BJjt}&`z>T9Rd=Oo`97e}7Nyv#SYt(C+# zAW{*oPYc9@xewmf&P)@^Jcg5Ul~b%ELYKoxI2dF-rz`~LMXDmV9LC6ZCtJ^SvUKh_ z!SGpQ1^B%FRn&1Iu6tf|e4+2-HSDYiY@%q)tXqY#@4G=S;&@E>H&}XPV%YG!pW%MU z9p2fm4dRm;?bz(`*$N&hAuV}4pc~@8EUNHLH}`tKXYSsXJ-ul+G3AhU-&Z8kBVqe# zxSQye$@7^0?(w;(FTp7zg@C5eP-YCv4t$Pxo4ITk5P=F_F6U1Q#9PCqie2f;Tz`Bo z@9v8DQodlJ*^JHpakB^Z+@IRB+B{!Yu`A_w^CVjBw@ek^ z^+b)?BEHZ=0r&g@*Zq_qKaz-QPF08X65qw$Dftj1gg?#_tt$g03Ynk#D9V1 zw6J)h(-FC^`Df=Ge2)(aIos??Hw!%5IEUZ9JzGvi>HN%}rf<7<1Ae9yeVkq%+CEw`%ol-9&o zGwY)LN0i=&9K%fq!^#&wV%jrv6`0{3C!RbN+JooN&zBLvVQl3nZjt8PR(HrA?8D1e zu`VUU(%Z_s-os$u5*M~9q6xcvf97-M38{q`&1Ma|X2Xzp$0|s5h%ua2S=x@>SK3bA zeu3U@RfuNd`dnSN+S{R4X~D;F!Cs=wZZfJ$Ohx(*!r4zNgr8BRmvFlaT$f+_!DmFP z++@gCt7ymJ7a@0dV=3yH9kJ(-y6c4rkr86Sa?{zEHQpQT_iJi&N00Ei>E0H|Ghli-nWiai?1lp3Mqs1<|-DIij9!)C;2L#zQ+}mhGDC}-{y@G-F#vt5*_V` z`Atnb*^rtoQk_N$x?{`IZv80(zJ39Dh7b$YF!QgldOy+5+zqom|AdnZ4GY7cRYfk~ zPv~&ZF^lHOIJ0|@-_V{VXp;@BKe`2meGk*cy)HFR4{0%!q3OZ|Kd?s4*zMs)5ZByY zwlwv2q_)_D!?QN+maM06s_KPpY~|HdUVYqs46}102Dez_&4Qk2V&3bG=0R8F_NzOp z98P6d)~%+eH0ycYj0jFAT9GE*;^zuaUCb0-`qG_6T8U;B7@xm)leVS(5rm|ENJP~L z7jaW;{m}>hI=T(@Bi#>^xnDPjMt1sz%D39$E~j}_(xN`rwBQRiU9U+k?1PgAh}rh4 zI$-{3`^gtO;d7fdCx)hvOAGaNgagF177mm96w5VP!b<$6A51OWcb*6lR$Ooe>(8jg z{2e-%I2u!X ze16@Qy`Q>ICA1JcpUhT4#rx8;CzOWPfBDm43_8EZu(-cIf(UVi^*%?k(o^I@7rsv8 zpV$czFRaHDE>G*PmSxG_u6D!9AoKH_8Wc2Cqjry1g>TNXF2%<9?+m0xnxIj;Q+SKP zn0Sab`^t_E=c*ZEU3YUC806~&y*Sg%vVXVP={Ju5 zv2yXVuVUk!;jL408IDPJ3rJ1-j)Ut=-smg?)#hE*tWzI+SjurT-KUZ((hD2O`m~)#F)u@Uf#mD?aC6wCTZTiP2 z2NbfNIzAkv>G0X1u`yJgh>J$7eS>H3iO`*iSublm$BfR?UWcYMIcmUsK-{ccSZwjE 
zPpBq09$oK?o3zq20=uq^_As9f~^ACAS#% zN?*h?Meuc0wFk{d=;JDFsX*rf`pa+O-$$H6-&TzF&0bp)5fWV;wm=;&Mm$*-G*^q3 zMi`e`h~34Jkpf{@3tUh3_aL0tzLriiZGd3lNGLhi3nwyLhs#mZDwT?!k7wtpIR5rV zM2cwx#&yfqP1WFRaxO*tlL1}czuV$c{C&&y4tp8t-yP`cbwvlpvu2O^$m6AtM;p_! zi9Hj-69z6t9|_gH_ofR*Y$LtgIQj_JTy+y|z78}E8pvrXDf5w|-rzMZb|W4*q^8i2TPA-Byu(MYNF}923EEz>_stukgdw0NWalbB zzOfnqeZ5gBC~Lc(;QgShe|w(G)!450`kAK%5uA(vF<}jQ`MIkG>(G<3@Q9UoMM=R?}54QlmV=t5g|pbW02QDkbpfEfg8`*z_0|8 zzqE)6HAmDZ=yTU`f^q)#Mb6$S?_HXVc`l!V6HJ92t0Dij|AJ2HCGIJ&0 zYf?@x#xXiXAB&}GZ0v7JHo;_-l%E^pNqNlJNpleVYAt5d2ttq}MihIff)2pO@8&Rf zDb}ig_yU}5cZ)TujVwIVPG?5FtR)C{^R{x*GukXX3>$l$(YGgcMe(a|@bvpa>&8oo z$e+wVJ9O=J+IB)#@tw6LoX`u6kur8gt1pi$E^$XU$CbkBbeB5Dc)6p}(epP&AgmoA z-OfEq(dnD^{ext)(U3T6ey!Fp;oj*|PhTY}K+}~CgRZ_qv?zC4O#H?b%lA|-?R!cU zWNhE#rb$jO+kC!pi6NMOb4=$UK6h&r$;l0G zN7wtClS&uyrhg8$eN=pJA`DKR9&G-uUmwag`J&J%f<`O-v!jZtWs6nI z0b^;WStTK&mMPPKk@K7jX}clQ&4CU;aQS2A zPOSP-)h{uX<_nA~lfPbA#{;a5>K3G?hT9BWO`>quy@|GL6Js;&kEBTudE$z?8v^T; zu{ZgLE`@AfBr{X?9*$Q&xtvQgla=~R41~$Qt+({yKHJ`y{LboBG2Ai5QQdxVSD48o zXBB2-4HGmcK?5W|p=4rq-*TTmgCXGCYGhs;Z|jee&-qY#Y{a|w;B7lmWICRi*6!@BZRV}JPF~(jJt?7x}5Q)N+Vc{ai9s{=}ZrMJx$IpdpIb33>Essis zeC#JI(9i2J3H}Z>Y;Dw}`+FwH&DL!Gj21^Hq3Jk>;>cr4$5k`YX?RQCMh)F;Oh5Yg zuXI-C9nm?f{pk4>nMa>IXU2a0^-=zi4USb`wCf0eldEt#6cw+MJ3zH<>9tL-lwKCO2Ne}Y6~A|r(CQkaiW(-tzt5c`d9@4Iu|ULLmIdZOido02-+PAfN#=x`X5 zvDrIM=6GO=m_w`f)A2`rOK|XFu<}*&d0nVq#X2%$7Os7{Zh91^TWs6<)V{>~y-R0} zqu|g*;6>EXHWYgEOAqDMOs3LmyUAgH(2ebm;{M22fEA` z2`^6eaVpPRPAT{_KWzs{93&F;Z6b@@-3e4IgmjrW8CD6FKdsLaCA_F~a2Rp7S@-tm zqG26`OMGp$dbtAFK>D^+`eM5?sBb^-5NcunnoZ>SbB4jTd-|f*XYtJF9+I_bj|l>L z{Kj!g)9u2t6J`NL@rKQtvB(}hRk@N`1qBy*DsSJn#v>3;-@l&=%dGmjno8<)AxR4K z3JZLd#+WV8lkH-E}T`y0OtHm49ksf$F0>X=d zPm*GvecWH=jgjjm=uR(`YQ z&fNEbAtzpfp208bc#BdUwS&kF;`_*7%9}}D;OdU%trKd2^bXhFWau%1-^bBW!2{om z+o=k?F2?q9$yfF$cEG>f_ETik+xPUZ;TtNnd2|@;-XlLZO`ro6*Ztw=Y*DN4(Q(ZA zast+_O$aGcB#k>&6XF^j`7=`>pc^tJNX3$WiJd^%blW@Wk}bZmc3pIfg>tFe@A`aY zUwWO+2B8_;YvPU8%?^7QMJtCe$0g>`)LY`Y 
zi?(Ck*|wK2o`09Jj@_Qx8!@u_9@ZJ}uH`__7exid&S^oEkVMd0^b^T|qMB|-4U%(w zFHhB2C!CL~NWFPZNz}w0FRYX{StscKttk4}sBYfK7|NS{(T9T>Wp~hX@k|+ZgX$C5 zbQW!RW0xv!V?)6amve_E^NJ8}%NL)ggLZAk6o$@k%X5O0H>T#P*G>tLh{n@~30Yh^^MRV=iDS`lRlbzyhd8jVJES9DWkNEQW1 z9DaVfH^`3&AGSbSUl@&`8b=O`VA;Hfnr+}bP%aL==XtOF(O^ES_9}PYT&A`;RnV9I zV*I4V|bsycj1B!uz^WdYy9>Qm2sFnAie3mKZ%OYpruaKRs)U;j1|Ovch+wX)~LSf%b+c zZ!sz&mCG10E@(-meph`iX@QizH7v)RP!~r~^HF1qsR6U4&um5bwRL0Fa@j&D@48@v zJ2t5O5;YaNE?=3K_$2kV@ddR2M?mqbym7Q<$A!~{-`)kWjgoT8uuQG}_ogOL`i1E7 z{Uo+;wVr%4lTigDh68r)pMTgD8l<_77U&Edb+FvW{}|wz_Lil(#m+!%(#81$iC{~< zpb_aB4Z$}HSa)!We_E{BVr{of9H~T;KFeL<`=d!QJO5qZiUG|veIhdT`o(c}RNEc1 zX2tKFdSW%}1_j&<9P+u5Y|zTCPscfnpDHB=@0H>$r(F?pX`L7Dmkw>R0tT+*-9_%P zj-+Nk1|1p>-+1+fKO zuE*V&lF=gtrTnnG#|Vd}E($7TD`{`16Kid5cf3va`aM{EjK_Rxe=pBao$jr2#y75z z67n%2ZfeAtdpd%oW&mIvTAD8V6MXw)!_BB>8Cy_KFy@a@H|EzS=g~}*rr!gmAW_=w zo&{JD>B%23*OL)4BkP)S*m9KqNA6EUswC#UA|utGDpdX+xjjffI(mWe7*;nx1oCQ5 zWMTU7r1?PDVP=BT?j|;i51bFV|3mp{E_iDtTKT8I{;M9DhR*pbIw`65?C7sEKgXOk zKr8OuO^@GstY%1eP|cGbP{7VPoI0TTM)Xy#=DVroqnE9+oh&Z^Y|?gmL4m z#@PV3<%!fD`C?>97UjrEiW#TZGhQH_0K1V7qAn+Xo5hFq-IcrkqF7hoV&bx4go_-p zvvZGvGvZbrQ>5!*WQ!!;z@Z*{+P~>a-@#;gdRe`J{NTMySi^<=LV(P(Krn~G1;cfn;6=eZd=b%F+E4?H-3#TD$%ha!Ip9#=Hj(Nv-}Mp5WO)j1g7!Es&Xqx9YU zdgJbJ$E<(OO{c>j#H1)NNnhgiWtAaZgidDD%TGXguOq_VLx$?-Pv!kLbPY)KM$^?6 z#1!y^%9$PgkU#I|@$GMFR~dfxD=r&Gl)Q8sZC7#gVsNI9?$fM0I1|Nt$q4ok;lHb!U?#h^GJgPK4$2z~nbmm=!Q!hp&oVW1-2gNwzE{ zIbGr@(f!Sh{UR?6>>|LgsECq|s32KKIeg6aVdGS1W{}NoYAZQMAv;zk(NP)Ym26P- zsiUNpeya$_%7^>6NakZ3hf~)MQYP0MiA*~@+*f_JwB?3h?Jyc`!kSm31xsmCqh_#S z*v)FPMriz}zmf6OF@&jrP&;`A`(xNt#&ryX`(2RGOj%NncbETQGj~hZaNjBhEPg2t ztjs^5>>UlOnRHy>E8NwUiJgk>GNos^AYdkL?G)Y9ms`m7irlP27jT~6?qG13)AwM3 zQ__;TyaBH~)rRw|@s4cbhg%N3rjOCpysmx+FWwWn7@ct`(|5&w{Qf3>^$-7Lm$cvu zjc-a^&(VsbK2}c!=10GpVP-dU`SSsaU~tl{{I?Ix9r#>lIU7F&KXH$K55QFa8RUuy zA6gFq0u~7X literal 0 HcmV?d00001 diff --git a/docs/dli/umn/en-us_image_0000001309807469.png b/docs/dli/umn/en-us_image_0000001309807469.png new file mode 100644 index 
0000000000000000000000000000000000000000..562796e4d2870fa62b6ed8b902efbe9e003b3af4 GIT binary patch literal 17950 zcmdUXXIPV6v}F(x3yO^{;HM}eMLJ4Xq=|s^-ULCqbm=IH1pxu2NQo%DN)5e;h*E;| z5}G0e2oNL`Ap~Z>d~@%QnZGl4=JGr|3CUZ|IqxoOueA>^?r10;!F4=DtubEY(MduY>`g>KIvM1^geG=_1y!DvYz$@7mf}b-z?-$Gd?silA$mX z^UFEurJjhE$$9+?f3&YeDm~9^x(Gq?0{-;#8?%#Lfq78DtuBBe{xR|JV(eC7m zpDc&8s0e-~QRMq(^2Vkq)@=-a_Q4QCu6qYm9Dz3|OPjL+$m{a{pVaUY!hI0tgF;DP z|BJj#9{Nm+ylNhoMP430UZ#SV##2w|kv~)a>rV}x$%r@av(E~=IM+~JeWxP);4@#+ z%>9;^E3cf3avf4Mq!vZ&3rIa`S8{V5%=Fh&c)Yy+N%0=BI~}1^QnK50{Ge$+ftQ;Z`~_^x;2r}zs-o5^Fh zg|&jXF-wl?OVV(VW6Mi-jc&P&rS(vxvCbnKc5%hImzgpA zc%cV`@hR!`9Km4D7)rgaUO{iIugA%H2|J(5crxv=Gb;AGSYqq2dKRYe<&11fYo^>Z zaq({<%(1%qwXyNIY7TESMI>WICZmVa;O2rUB7WPHI(g8P0dtg0^s{J~!cJ{G zliN<~>e~Oxa114=Vjl44>C)(^PK`ytADY1pJT_%4kkr{;fF3M3;*Fn49btd5u($+2 zkvG@>{rhnL#j#Wmww9LgY7Y-h^-G~uNAcog71o71rnQ$`5|dobIWW#1CWHYR5&a$A z$h6WkGhHs|?>j6VD{EiPQ{nq@8K0$v^m{^b=(=re$+Zn5II-|H)_Q1cEM>WrB;^^` z>nxXc$JxhaDW@*T95X?K_qo60bj8E%&xgvXyG!k+t$alkF$t_SeiTExnm`tcYS=8T zNe0kc3!-6b{ouHBMz+=5B0==7TK2zH%ZXCp&0A-i<1pG1lX4|5V^xKB;acp3t ziqc<{;C;U%oHI0`6dC-}@Tz2^?j)w;8rX1I{LTW?k+~=IWZ^n9t_wAWw=vr;$xP^Yt7FCS-8o>=eUU%xXq*T^wnlm%$^pzTm6T|*mH9q z`|WHpEyP>Y{}@q{ zba!{p-<7E8PPq4pw7Jc~(~xv$*6QgEdbO=|lNTliaigQXzkH|%BD8nf}<4$>bFH@>I`AsVB!{>(x3{1498*WWBO!Fx|J2ruGGsGKe?NW7d*6ncy ztK~&z(VzHp@KO>{b)=eW#MQB9<# z-APAoJZ4}t>|RI0qPYTlGVtLRwcorpbu?wl2Llsvp4M>_M&m~WD(_TPWo3*7e)H|< zoVvT4>PKdC6Y*IZed6*^`BUi&xIPkjPCCd1e+VU$uz0xfeQB4so&i(OONQyoyu490hWDMF-=?QC8G=)O z5-{&{J2azMiE}bew2dn&+GzOO5^g#{bltS@n`@~&@(Yg+z8?IynR6?-rz|5w(WIjG zm*jdAi*5`LuSp?1pVq@sGu70ICwPxn-hwaB<#4CxnB1gwC^aTE;@NN=?m1$-jzvRJPh;BzXU=v+CC&44 z$56?Q(Ewgi6&a>o+e?~HeaD56CDhd`$BKmvB?c?ISl^C$s*I{TAAnlcb#s%48Qsy- zyOWjb=kVFA1%{qB7>D8sk{p6CJxj1?Fz%Auz0otRQ+2%k&7(IH&UM5pwYBoGXCBc z|9ypx3qxqK#lxlt}G{Gd|eh^7zAdpDS2SdTzr^&?YxC% zsU7Z84Yc@T45s66KoDHd*2_(3*p9!{r>TUH=WnE@rglRj$y4EG$;!20?_2AXRsD*l z%0_klAUgwbJke4ZYwbcjJUr2GA`31L7mr`@Q0`ciH1R!l=C2N#aFNW8={hXovs9ao 
zW|KennKmSUsPa#cDq31>7*EQpy!xJjx9wDBG{3;HZ+Mc4HOB%msq0YH&;HSmv}Zc&;*=Zl9`#=H&(n`^U(=?@YzGXB_kWIcIE*iHJ{Q?K5?o2 zQt5_Sn%*dy?y{%18kD)BGVAIGS@v=&1rH|1;GHEzIv~QEk4tDLUwCp-1 z(OPPL{)Yb5;@!gZ5q3T^m(~sci5f+S&T2JMAM3(ne5E^Sl`>Z(eto@$=$Yd9h?gWve7m}8GWEnEAIY!`|4C*?2?(L|Fw|C075r0?I22Rv<-n7vOe%IDl zG_$ha_1kp9d7YO`4(w$7?Hv%K?&@gKB^y}aVXTL7P7>i-^9mS?j0zmMh;go@1Px`a ziv?$C%Zz^yGUyzxULX$J*|_7LdFWT@wiBi6vBcpWiC8CNQ&Iv>E*8h97f6YyblW*P zG^jc)BDp&Q*)>JbKLMBT@O_>58zPKIG}Ww|sSo^AP#*_K7GljgR)}D;|7xiA3hRwP zL!}Sg7>sSr!PLz*-)pK9dAWxHhoxN9NKPLe<#~6Fbi}w~K}kLN>i6c8pLtuoVBB^! z$hglR{dwNp1n2)!Daor##%C_S6gvB3zK^LB&gZ*&PzyP)6!%up&^c;OZo1b~D&T5rcaD0ke zoSpqa%jsz-XJFv^W7rOz5^=Zrt00~8(1WZs4?*U>Ts?-%{AQVtA3x52n^)qlKCqs` zk5n3hzu0913_Clu7Dvj}{5O`g)YQU{aX9HYI_7claC0lia^nqmDcg8EhqgDSjuav5 zulIY>Ukr3jwX(5FdFSKN)Hys?t{9xXlfQ_=*og(ay194N4+|3$AI55Y9J^9w{3vUw zyA|biSu7&9vmGM4s=I%q?B;va8a^O)D}7D*qq0YpC!a9g`Jd%mTYgV(ln-=0eM%~~ z`5t2)e`r6WBOd$dZgE0?wYFBV@RFU5>UV9HFst&1`$MlkZI(KHxWC0&>bRlPV)MA9 zj&_M|v0=wZYE^1VT~eYIbM@A|qqg+9@yVh?DAbwK>gwv%EkX^$G2)L3XIz;VfpPkG za&B#{bZ7E4op46843VznG$=}(m>4)--aPlRZis=owy>6L{Fy z*QfH5^L)vJpEqEEbaiw-UQmn6&dgLQHoTo@U|`&S`t<4h#PK?q(l^85122)nGpu}I zKwnJMN#EK!)O&I3qish*+Hfg;)?}jkx?PHwxb>B=#9mEwF6ZuF<&jdmD0dGJug{0L zx%fn2(4C1Q+|$lq?-p0`I4|-u#pK)H&(cJ5V9d?UQ$PP?hn4i(81pF@Ob(E>P%@EA zo*t?a;X`*ZZM>Tx##=EvxVpx3)ot3A(lx6jG&ydq0do^m}d4hrl-RH?ys+jrJb-?OR7(Ff}%b@p!?0XV zSU728&~j?i)1e->YrmYrQlT1OGlaKz!u;pjE$VMVQ5%ykMXXiEVTgSNnDNYO9y7nkfJ!M)V% zGEye3_)$yoTy*8&SX+|%p5LW=dU?%N;Zyt927?utOa(Pv>xc6>cOMo23Sk%vAnle3 zoPM-(`O>A=U3eXs?z!maVwKXTb0ZvoW)~G12r|D}{f=6F@KAYF7*_FAMew*UUUkdn zePy4>x4H&K0n=X}g*_h>!{5Z?3H~isOWzj@ zh6Ntm)w`R|cWLG1&fD8F=L>O${xhsV6a8Ex2B;Vh+riv5LU3VZB)#Wi`QTh(&5~8- zg5i~eFoI}e|X~8D$ z^tvlP3+GzY2ezBMc$aFK8KH#ea+NuJ4fXU0;QXGu6na4uhRADJ{L{175Ne3|vs5LY%=%G$7zrGGDVyP*!md?m`sjt#)YPCnLXf}+kUVRz$IwTHU z?uRQxh>6`De#FCb^CXtJ+);HOreUP3?dPMU^REQ`xsl@bmBnu+5r2>awRN){hN;rN zL*X|r&%NR7?CiJPbX*6&PlbgaOyTUkE49o16q4MPzJ*~AU5kbnlT>H<5yTN9>*eKT 
zPuW8AoX+hx^BF7H-ZmN*I2mH~90^5!h`;ozOXQZ4&Qq3rH0^(VQQiCYrHcVvHOH)G zCtK|be;GK)G-@74=K>8B7FZO^Gz@b%O`~Sd2M_97hH2YWX`h> zQOu&qg_J7A9543|3}imzIBegMAT%s+EW{dl{2?b6mTHJSN3Lv}N6GE%Z=!B|50xHD z@P_lqYUNTD-eqs{qm^#R87D$uX&Mhg%F++qC1YkK8hmeGyLN5wt6zWj0K~(*s%yD~ zB8w0H#DZ=Xy8r>0qTpl19+fVXAG6R-`+J3 zN}2i=$yRPF1umCQ&+lZHTHuC0S%ot4Ump1KBKG7s`IA#Y3g0L_PTI)m@y?7?aChd*h+ z*zXN=1^asY-)j0m>b6_mnU=`ZWkjjaB8K+QWM!;eK`e(+lDkjVNAj`RP(|LD11~Bn zHp+s7!&gXlsxdnwtM6q08XL9ovjz`uzm%6`Sv$tF-Y$ePUe?=O+m?5Bj!tYy;bw75x3kQ4|bnl zhj{oWC_kf9@u6q@ENFKeF+pWnxWngj{?-hOc&+p{sQ3n>esCgQ9hR+yCOi3wSSp(I zxhW=Id}H9ag5Hf858^OUUPcMd?p@ky4jZx-A8*n7!ha*Hc00Q(P=6{oxCrqFxfbe> zvb{C>)ssTStJkWT`aYHuId_r;nM-qH7SS}Pf%ht}HBKsMWq-sjrH*y2s>{|X3H5_% z?}(;ZEy=m)a#pdmJ}TGw%2S#@zH4qLPd@r50~X7Fy*3Rq5hR6EY&v@mv7p;$*u>m^rC%5LN3hmw zXXygO0O1_MeLvnvj*M9J_+>4aI`-+rj@Q8`LmLnE?${0$F}>~Xr7^pX3AE2`bZ+`TZFChy%QTJx+Rx4*#tOF6^__xcSX(yMP zIma{css2g^=Du%p^Mcbvuzvxfdv+wje31It8CNko+g<3yM9aLgW8rnFZt@@1V8Xz} z_%3Pk^g)z1q=+-(F57R{mz)P?ySIH&%zl!bfl9#wCb%N6-CEBGrA>|AECET0<_y+r z5U$2r#WwVn_p$?Azn$FHd)fi)AS>La1kaG*o+#W^0%HwTJ%gYO&q_R|* zhZqw0>2KU>t6xHOWUh60Cu_snAnWwUr2DxeBjdeDdhC+fNM2v`SJTM~(ucLugVnjH znVR1fyhCdHK9~5EPr~PAIR{+%rl}PbN|0{5d``@38&^P~=insc(l*wb2HQk~2o&mf zVvgxZmUtOn)b~NLrFv}u=Q&_x=Mj3C8I?IggR*>1cI*tjvq^7rFryZ<;jh1p)?G8-Z(h)_??m36TkBMU(+T@l& zf-5b3MlE)yD>+@&wU0ow?Yixo5=jWoZV;~G$p@4;c?x%5<@vUw?VBW`8ygOFHhvi+auXJ=thI zZI)l0*ATn1E=dS3YVHW3u{@64>sv+P$N9g5FIOM8{ywzT;4_}@&l#*MF|jQdDu?8M1~axDHxi7H`2YRT%zFGhcEUB#Rvs(d}y;wt5+8If_Y2(3Y;5TT8=g z9d;s0)$u_7QlS8QL`8ZTA1dA2b5AGrOMcKR7L!ijK2TjBJo%XziQFUv$Lq);!A3W|k)meO$-57p! 
znjPhbRP~kzSv{uXAxW@#B6H{^xOOM-Hss@68`Ocjt){HEQ91iBI8O{r&@vjz?Ltoc z@x5!bU`k_WRX+SEs_rhrw<+{x)qAGyDBHwBj=0VyuIo5hC^z3ktH3JbH`%|DMWj56 zT#|Y()IusKzK9YW`ZvoUxkEjP!{Cx`-o*BdPz1Y__<5Yn(w9@BJJS{yBKfI8uJ7T~ zgGZkE%s!K=RR>7>{G!HfiqTn~e;%TinXQo||ACcNoPe9CzwR^Ug;`k%0BDJ#V@7~A z-Sq~_qU-7u=dQ}e#BK;s1JR4Wpvc%uUw4Bx+x-zuXoS}14%LfC$UN_amkQ6JoeM0 z&NJ?z6<^=P`mKI7ubD(UhxRyU zk43nkxniiC5M|u3gFpPt=YIExyj6hc-$M6MomE6`uf06#(KVmyzoacX zlJ6QwIPjnR6b-8RFyi=JNV>A`ovPj#ukrf3Y{hT3#f~`r{k?LB6-B3pT;~kJ;oQmA zkCItuoBsWK7Rpw~oo$+70Y(&^KXL$_>WpI7G%c7Q9O>okfijMoL5G zZu7u3J(;c9=8>{FH{c0+VlGVdU5F2OVU&lmJpRCzCPY?LOf09`uWsL?mdlqf$8gc? zYXOQZV`XK9Fa8+i#4+L>U_HRn9s9Do5ASCbab!R}gwo3L%Vi#;%gutiMpYLP$V33b z6>2|C>0;F`S#thG^>{H7Gj|X8xLKfGk-AGG z-po5Oj=x4rC-b*$%|n#nIV2jlK0)9%ly86&^or`%bB&FSp{6$t{1YBOODk8Y9%f}z z^BnkCIJhzG!`+Mz-Pv5gX_*DCiSZQJMIIFrx@hx2OhS2WKCj3DS5e@%)^FhT`F2akJ+I4Axyj!hJ)hGxT4vFgT z>$}7lj)YV~`Qyj?;1q+1h@*9dIN&IulGk5;oeoqbu#0!NPDW1Fk-AY;WRt!FlCljsOg2Ux{FU7}=#1I2LXiYzD}t z3Lir?AOA_+_}Xo2P6xQ8T^0ZJ1uYksLXE3GpFFu|+m)IJL>gV&&O|_|QJ59i{-<-j zSsSdf@7}#T9p0jAn1HVyuOZe;efZppa5j|HEJB&IVvdSL7$qRTY zq!|Mtd#H$0()0dVMOOWFqH8y)OD>B--l(aUbNj^=!6KqWnqjyEwE8J-jdP|@&ja}= zXj8khHlb@YynN=omT~oM8X9m5C4Ke8n7sMQbvoFa4lFQc6mWBVI_l z!2l&ca|XQ#VeF2OvvDd|%eiQkQ34L*O7xNud9VQF`;Rv9(c3AKdYEy)qzb3uOps{* z<(2nC)P>NoTnxjrh$(ZM3LJ1;TU!I~U#%*jeGexi!I$%2?oB&SU+>cKOL1KAnBK+-5Im~KfNlOJ_^Q&46T%!Yr3dOpA%`%5 zho2t^JL+?>c4mWIU>7iSt<}AfUq@H>v_!uE3iVtW!wa<9e|M4R|6Y^V-BnBHtH&=z zlB<9ajpH*CHmo7t3=FKhk!D}@Y~rJdEhq0OoxKxq zyUzzDXei7f+j029al`x%AI=fh-o0Z5Po^WBa_3`g_dbELG<0-u(DqvMgD3g)hSvZg zTIJ@23u@22P%mzt8y47yqVr`r$_Y{k*hyst9UcA$MC3{pt`GC`YlZyOK@Gu%wDc%> z2VquO;A?e3ikKEq4HTSW-%ri_!2(CFxGdbn?${(h~amg)j2D zU`l;ML&q`fcssYHW_4Hm`0Zqql(ko-w`c6!2js9*R&g!n=wSjEIVh%|bdhXVM(i@$ zrDm=dW|W;L5H6@fzedol_9X7}WYvOno1A@*emyy52FXY-I^g8tms9T+c`xhdR{I5- zX4n+sba>YInCK1^9B!;~a=nozN!U-Q{c(lI$btX-TJm#)f?O4Qr{_54lcdr_?jcJ- zW*^hhwi9stt!DER32(L3)gwS~BK$~GaAVNTh1dWbPfMW0_jPMwUV+l&^)^g)_BXLv z5Q;9Hd?A>5;OYSoit^cR1ghet8N*xpMn|1e(6Ev)o;tXY37(S_rWqI*__-R$>U^(G 
z(i=+hyO)qu(9lyL7}d%?yc4BF^= z+WYtN5JzGV2HvQh&+tJ`dg{w&ES}VolHS(devdg!+%-t@ZB&02Xom!B;`h;sO!cHK z(}`hin;>uU_u!wOpSltMRhOQdbmcdK1aAY2sFaUJ;)bsRbUQ_?kSl|ZC$`~i%br66)7qz z3ezccc6J^jjC4&v=muLtB$2OQA0!lqwZehdAm1rnujrbE$@a`+}w1$MckX(jP`(F(98<&y-q)L>Y_>vhu>^d zS$pgu&;*L=w{jgEIeRu5g2S;=MAHI`T@gJ4*KfCcQL8E&(s6u0Dhr6xS87rkI@iVn zH^x>EFMWXg0cZO9{_)bvC98NZv45-T_r2~ajH&V=j+?i?c~y5C0u0O)pPEFh&flyr zjTFaN4NQl1_fF?GJUZC;Blgd5q4uJ1Y<`K#bfKZ7aGi`qu_2$8!_P+xQmhddhs`xx zw9U;6!GZM~?qXz1vXb3zeQoPas;_tSw=l1P$OPzwUD9)QYU!X18Y9=s>993%QDFguYgX!j_=zt!=HX?0OZt{ko-!sui4`3=hoG^ z-hRMEkdCuF_o?YqwcIVfUPQOxQ!ngR;#KpP^SSPUbMMF^VZ>;O;+Pst{c66}6m z-Cz(;nes_~`SRtjt(`(+y=cMrWep2x42)Gd%A1GLaqwrevL}FT1SI^KIvT;|6Z18K zEvwjClX+Q|s#0yBWGqP+YaQCea{Qdonr`)z2C+Y(ca7|f>-_7wSsxR21-UmYnJAC+XnJ=mL@KXn}boA|{xUBo4 z?;iYQeQ^_fHb%U6{h7(`ID8~H+wFT!qkPzRq^y=I;^Tw1c2Z8&wULd-oV-`AU=G+S zfm&(nVW)^680#FSX1u?NVQUM@p-8;D4R+;VN8RA|UiR)LEP z{gNErc5N2NGe#PcAF|^S6_eOlXs&m~E0!5qta+Wvg75+wWUp+|h1eG!d0lw(E-u2- zOn%MN!&9q zd*e@&J!QN}PmeHFW3Ot1APsSIdV_pw+|kuFD(}^iBlx`H;_G2nUph~oicr!suI?}< zj(U(zT^7Q)P`MzFG?45KnDw2Y)#LK|YyROZN_mhr-sor)x7kMO7?s$(5L0+(q*O{Q zHIZAq%_iel1`d&%qmwoNAzUD?@01GC%XXQczZ*KeW4bAe(&VLAE<{5uJqBIq@J8@1 z)IbhE1svXAi1jWOQ~`6AO|8n-hiK{MQ&uh6PzihkLon)C?S zb}&O9g8`HI!?c|#xdGwLgMuoN1Zb|tt}3OFqSThu^RRZ-=_?(FJY?|61T5ZtXmG4b zWRxHxlK#c_TW4%$>FB!f#=2=_{kx&g9Hv-){27uJv51h&H0C_+ffupIpYJ2#EA=L&JSDv48geLI<{^{SNV zWBVCRymCp`I9;sQxBMcXkv6%pTOC!qy44ppY=Ji??3x&_tHl_b&}Y}lWL8utAa+b% zugkbE3eQcNwo>i^! 
zrXV!N8-(KK2wW|`KIbPEGk;*7CP7#hkpmkaNO;UJi6Xy#tCz&YU2I2bU;ks|iv`-N z@qwU2U$zqy)xYEsblCk$So8*yASctZgNlD|W%D*Fqz_^F;Nbt*A@S|uU!-RNT94%4 z5BbeT8#n-hfL?-=FCJD~pycN}BtHPo$|6VHJZF@XghHXe%_c-;W?lq?FQmVGL-VQB z8?;mpdL@y*`j-^#>z7b3^T=4baveSp3aa|CVSHq|u`K49!mTBGq5Xm@HlhJ%Bt2DP z5{}?n?zl$Gxo&oFFD#9+WNK6W;3mEJrP4Rf0WjL{t;^73^UoMXmq`&^4=*T@u;9FT5Y{UHNfQaq(<8Vr$zLlL25nx6s^` z>?G6KyFN8Dpx0z{5ik62+tZKv{RbnruXp40y5Cb0V6KQw8FL!vb5G3e@(b(0RL)U81`nvhLe2h8(uha2b;Rttn}D}N7p*-sez4Nk@3FUox+uG zudwgI3;Tk7lgw|5mOWGZ?8&At2LAv1<58Z7++Rl(ez&}59-yfC# z4rZ6vBRCk*a<{P6t3UU!W1_reF|{aXzq7#cQc*bA?O+E^-73Dv~Ir!f~t! zRNw_kTE4d z!o*3?oln!am36G6y*(U3WTp%Nt~5lYoexvbKAK(e&M?6c`~adfny>xB37Q^Y1J{5n0$;4$Hc>YfHie zn#}oaPRBzH(C^~|lQfH>L9HTG7obXbNWZX@nau&;LUFo}U8+J0IMKwlNTliM@DaQT z-WXbmz#9vpfbmiV-~k`WS|}MeAC=0Ol$zMt!5D;MUR^#Bq4YK$ul8{4%$s^Zxs`*^1~*Q0)<|NU zNUia(GX!Sp2@AR_Uho07s*r1XdNU@{T8J1|^_`$xQ(`jdj*B$nD4lLHav@c|3*A*m z0PSd3m=)dqR~pvV3Zw74VMTy&GXB0#UU-HXg<%ti(x|huyK-<8OUZHRy~gtLnoFk< zF#&H*eu^r>(iOAs_V6zL>H){r_nGZ|d^ z*&FA>SJCXufKs_Uyy>(3cV~i2bvoGvf(fTYX45L%-PlX-GOjdANT`PU!jZG)s^!Pd zI0ZFWK!+!Si&eNj|CP7gPvQzF!(g~;XZrKGsS`EFgY3T5nphL@4(3nnbi`-WC1~H( zup5x5%oWy*E~3IxZK#^_a_q(q*k^qn|2*2)C(4EP6@8PMbJtbWUcZWScG#fgs{kdU%S66e3-5*9UHslID>EIZEc@^yU52PX~wg$;74uyb1A`zp2r zsnjaZhFiWn*BX`xROC4II@687cAYaDrNc+t^P~Kx^y5V(B-#b(7*3xyyl(UP$)N%A zaH{du6v-b)Y*XeI4$xWJu&ge-LAiRNqw(|}GFf-DtXTan58;)o$x`qix&)tzP8O-# zNQd3!45$=M)xwO-J=knX4OQmQE9&TzWz$h3?NT65xp&OFjPscaSIn}nh>>jRipKic z?>0a@H>yMCfG`X}s?{t%2|u~h5ouE)Exr09bife+zQ+UKOS)`6*xaHbANC~6qE>K4 zgch>(6;*f^IGTiWdS1t(A|WCHmXO1&vWfHo)(IDIG=3AG9V^@pYUn5Sj}tx5s`t+L zrQIpeOFYep#cain051eYIV;8$gMXGl7-@YlRAUIqDGwr2zr>K>tFqRBg!ZJ~{q&w~ zO+YfKCkS2#bY0Np3}w1gCG_^FWduMRDx_vsx4kr6yEzv`qlP-Kd2gDPCba2^9b<-6 zN?uCoxDZ{lSM?b50s=wxC4&4wYL_J}&AxXfX29B(@XYtv+c(SGne}ht<6GbQo<&c* zabUm7Jna#g!Qx=ngR*Je+lx6Ya8rDk^V65BG%7u?sZohoOUioXLQ1X?sjm8mxCgJ~ zXAwn!A+{q7ldAvA9co_F~-{*?IRn*lUAR$0a|1^L zRo#Yp5?r;w3*-;UfWM#G+T!x^^QBD`X;C(JtbXn@^H7&pj^gNlYha!M#h-1BO5-O` 
zbGKf{#(v!EmZwI&rV+92xD!Y@m7BAvNx?Ok<*lzLU?mAop!D5e`JK9D_3O(Er1eKb z(muI-ywZ)^9y%NAoLHC>5fKwrmmukt>(WrC9fQ=@GXCRtL|OYmJTCybEcai21{qno zhH@+I{Os&|u`>^l#yC{^eYgyw@Igj~jlqI^LP)XkzwfI4kNq&r4Hk4sD5RWyhvK$;jtIQ$5mQ z4J7j+BF)}@gnabT<{uz)_!&tfU02;JROxAeNcQ@i`Sbydf+7b<4#J%i6aRTcpE`IN zs`REc3h?oOn)ACj4)?3Z0Qu@?bo)Q9UI8`SYsQ+#b2{(l2%l%BGj2L(lweq5 zqx#v=z#ZjVihp7Mup$5VU<{e2&@89F@T6?;=FE(#L@=S#(!91N(WV zpVZ4Luo1;H{DuYh@Bd`MISryg#?b*pwC~YfLiue^3hP3lHTG~-(UP(4Mj^mTx3STG z)m7N@rQLE@NLAMb6?T9m?V6hPKetEx4kFoB7Nc89B)bc>oCx7jYgPVIO&dxzxKHv& zt27+L>oA%l0(wvk09U8U?31M{UOtLuX(WFrMDcq1@@$BQD@fn;r<$$7!|v)L)gV$&Zhg6E%^XG`^k!*lJ}jt{iPL8}kPCue z?r2ZJ8&F*`PfK$d{nIXR<;snIQ8sEY38*mhE{xP(?=>Ji1kDMU|9roUGEbvh z{3Et9chM*tsA-^lvZ~XEb%VJXIr<8^zDjX#<57*j7UxfU_QX$Wl~AJ>L&OyW{X*5* zm(yUOMNqeI89=ijSY-G)df3D?bl`36p>Z4RhkH9FpC+{jcRr&8lNlHo0t32n zy)vF~Cq!o~^{UOzhAr)Th`8I%hre0ym$7Ka>r)5$D$GSDBx=6-c$0>-lUlkoGO{v;Rd z@Nt8p!oIH&8%NvyP6k5r_3Ly07B|SArF~EPdTCT+pk7b#lu>o-Y@Jc8O}WPYQ(p1h z38?l$S0Ff9*{s2{A8|cHYMyG`|M?$+xj)%9%F1hO^FPpoH#5%!-HL-Dp`9$9@*KlvGn0bzV z)>O=lynsq?qho(|jQyqmSWszOCusS+JjF+}E{{cqv63=1C8_2O=h;L0j>FA?TIE{g zIYJ2vMsr)^sW(Oiey4<@+scVSW%5+LXL5 z1^Zy6G!66$V&8246KWej4x35_{IGE*B&(;1%u+-DhX2r&-)q2huiKmhTe9`-7^hla zhAk|1zaNz(O2^G|A`N@;2XyrdwFH@8IkK;B2Goro|3jw0_7q$76TE93`u7@0=+=L*Rhu`!ouQie{Rsgy_d!sUXzUp=v_^J0FrYb3Wp4wX*elND#NTE?0?{ z4&0^6z)|)!ze7g2eF-9@O)or6Iih38ia#MJBO_z$eAd72aIi!ou%*(&waVvK-F*Ti z={x&<%m~2~(i5TT24&k=GDPj@cLe9{GCccW>j>P%IIU0FcM2?5@Y2%CQZhH(N~rok zMXS$a>(P;jldbmP~yt=c>z8)2gx{ZV|_#42CuSfkJ(%vr1OaObhzXC zG(uRfZl~04Y&?cTO;u;FZhS2u@~;zk7fzR4mdv^v(rHt8YSO~0utXC+&AjiWuv1sL zd5W>hd{DntEct%xO-`;)Cf_IROg$Wv*mKTe{kh7A;`krC+7qfeRa8e z=zCP_N`PMG_(Fxhf1s8gEs`+NWPQ@sjdl4(*!%sa%J2)D3zy*(e!>KyAK%#t{EX2w zE*$4gB*6ODSy=^q?Fyh&zj-v3(kk}=njS$Acko-6{a_0xpPp?0SErSDB2A2nC}y7ew( zzC7r9_J{r`rrCFPe3m!Q*sU{7TwE_`*iGk8PD?y4c9)bIKW~l5bI}laMXhQr_P>Bh z?rjz#$dShSmpjgN^Lg&otJoek+b`emzR|laQ1+*9s=3%>vn3)xUk( zXEU8@Q+&pruc|+uR67x590gO=drK^bo9mMmq2;&S35MxII_%M>w}f({64>Cb;kbOy 
z<+Q|%ufB6f$;jCq49i0c3Kdqj;|mY2Ik6(yk8bfkq*yy2PaAeyv^FiPVD~n2(*65l z<{wimAK<1MVK8<)KTNV%3zUDfW=sfp28%$v?bpt!!g8tH&^27$^N}(2o1@$!Peb#5 z$A|tIqy4QDU>ubpW8T2M$V}u_r>Bo?jHPcMY=6k~h>llJhHE`^N|ZS*KH}Z|UOE~n z@@M;AehZZrAFLD=uQ5l>5^%uKg*224wY`i?tZ}^Z-Gb-@KIH3x+!dF=&i;;xFsvWQ7Y?r(S0yY3(M`N?-55ZL3l^<_3~e?^={ zkQ3{!ETz)F)JU5K8KyPk6zO^O0^$BCcjwPFu;)n}g&_#clJ2MMZ@lq3mZTS^N7PHl zrgx~n;{`0d_Xp0AoDt6EG4i{S);FGsWKKm<)6hMfJ}#JECKB2%FS5rruArpAX%5?U zy0X*aqRYL9HJ?dk-Y%ixGUSx4P8Qi-M{0=Sh;|qeQDY&!HCJa^Ca$WUw)VZ;bVk7u zSRPumo;;fi-%NbsQ=F>?J2JS{%Wgk#=eqpe!iJ0gOD0aiMM?Unk!$^01Z8y5y@W%3 zH51zhpRd4X`M&iG*7@f&sn06J4L4r@K;*b5qRC>#NMR166v~s&8LK47P~$~)w`J>u z81Vu=w6F|=cPewqVEv2Xxg`&MZCPPZ8S=;?>ZH-34B^wIvFt0!>!#y?MgHXE1dmrs zfXaD4i6cY5Jai&;PhbyvFxx7y`M55Z)Tk5o#T~1DzX5*<~ zG*W6|!D7T@X{BGS+|qQ`DB-dhm7i5$p7PR5iwba@#{glUu>D>6)8$ zPJfYPojbvI_@|}))W1f{W={LFjDgs!5CP7h*wLk_AtfNWu=H{maj`q=&j&Xq)$&S@ z(Z&rI5$|PX`BF*ycBfYb^=tL`S&dte5ZstV1ga4qeB11=6O8K{kAFK(UHu+?qR)+) z+kj9ZZh4M#BvVr%)`8+gsBx(S^VYn4b*sG0@x2YkbQ{Hwr>Gg5BM-S4@vICw!}Y{i zt}LGlPH%tznzzGT1Y&XcuFK=$5#u!NI;;Jh3CY8=Oecc)4p_O9awdyR2`h`NI93+# zR=qk>vp|m{nB&vg7rDo#v5e99qcpPjepw!$HBn8FbvC4VectGZq93BG#M=MB37gp8 zy|*}!GrM$=y6+`?kJiO^=danb+YmLdteTwjozs}Crr;Wnkvg5w?q+rD(UNah0S8SA zv5q0p;YiUS!5H7YIA!FfVK*0oJjz$&B)Ku^{~TKv>`hbWbJJ?nmGg}*mb;f`B$2PN zIzG$W?7&MmZ%ZYDTP~cFXr^7^3jLpzkO1G(#sqOv|D)X>6730Hy4N{2!}w^i(=-US znrKwLPc5NadiaVV z;gJ1t(_hFj|0>$O>?a;7xrO%f+&4Lq9mMS%t?QDc$;2+^QDhL=n|}0G`3fXQYjqWP zROkrw;sYaCxoKG#Wz55sZ^o=*`Du}L=f*K)b?o^5#TTUqY=yQoDnHZN1LnLWZ+c{z zm7J}|NZyb!k>gTOkLCDWl#^atk?@l61na>Gs4m!thgoa9o=~=3jHcnxLnfOdUMZ#I zoUNi&C6qb2C)g%3Jt1_dDoeimVPgO-A9BQ}^PzwZ9odzkR#t+oPIFTeTl~X=kGMkf zNLTl+BE53A>zt%yt4*YX>&F9}X7O{3#G_M8n|EDav)B=da}nN{iY9gHi(Ls;(Z24U zza=D%+b`V`O1GH~vFVXvn>=^J5SQM4MC7bI*O?uY!<%_$Zk_Id(#hn~xkKI4EA)H# zF7iYp{s{5M{R|r6EfT~0n;{g`hnZCOXSpq0Fi(GoHn^|EwDOC};}2qHxi(G@zH74Z zb(e^rO=oL+c4t+QRZl_y^75S-W_ybyQvxT6sjCaK;ZrXSuvdJRWdp(|fL)D=Cd z?!{hYWEZ@R72P+vEOJHO-?U=kyZ4gh>Q^r(_H`VKh;YgB)cDddqD 
z$z}WL!d=A>e=FhN(z0w9Z0IhHO7Jw{!_`x8GzhXaZnK=)m;Ss!GT1RD=#uM-(5CYO zqo_I^&168TnPrv`32CIns+cI%Z!b)sE!hbtO# z)MN?5;xbY_Wob{4ss4EVOG3GoPXB|vZi*Qf<|jD=8{0->iR{378m$`1(DB8`DNqF5VZ(ik8#Ew~^JnR0 z{a-b7R^I}Q?wX=^StY;O%)iw{xl>$6*IHrYm5l#uwQIs;td zA~3~x^S8BqGW@1RBC?-b95knhgauT2v@$+3XyjUu6DH6gEurZ$*c@tEV&4-yAvMT= zW$p$3 zn$TtS_n!|OBrr9ZCbdf-tqxmyVN5d$HFm z#!qrpvw6N=s`>HQakP3vLfyLPaL2UPu}soo6z{X2^Mt*G$#|&MFn-hIbMp3X)5Do7O21d&dTzR~8MU_~liqj-!?Rca_NaI)gBR(g6 z4G)T>5@L6+t@&~{(KZh;(_RfLsU+L0uT^v*9Jy%3UU%d1nT6QUsiuIAIHm5*Sc`Vv zL&q2gb_EIsmQp(wn}n>_rFi4uD{o9O{01U;OE-CX^G2u2J<}qr9GQPS4~+m5d(T6; zKW>C=ofT#GB&h|U z;a$1ipTS9<6dRia$mMU}{-VJpC~R{~+V7iX@(Jz*f81A^rR@I?1YCgHPOf73FPLU0 zzsxu~@H+zQkPC5Qyv8}$PB_7+n2WHtT|AVxd^2XFHz?p&<>oU!jKY%9M6Xh1O3_q? zX*A2ZbJ5*Sql!r*0m*j3%Vfe18aop!tO=%cx2RXs{%N&QX-Q(^RjOB?n(es$igD15`j8$+Gop)Fj>-#-&4wYrE4-LJB zu{bBdw=v@eknY4O+ShO&YD7JXw{k3}BH~d<2cx4?T<0JUfc5IVha`HfV$!nwoV*5o zK8K<1W>a>G@FozgipoD*azDr4uCu}9r4y!LN*-x_m8 z_HAA$^;fIg1)JiBPMhZK?MMk#X|zbUNpm)AKHeEvcMjHG8P_4%FDDYN`!eVFo9T_~ z$%p2wr)ul9nb&*B{B3>?Q|&u{5PKaDgXwNmOp?*lY^h*6x9ByzpR5mP7Fp;pQ#2Dr zu_Rm!&qYICwD!U2<8yso(%n+9!9S%f`Ek`0GYSW*6E_13c zBJjKw9?CW~v)`tE|05Ue5zBb)q^dNno+>F6CrjHW&$yrjMjV4pI?tn(YKIA@JF2{e zm8U^Pi+}M-R$BJhyvX-&?>*GVYVE_YwclCyhZVza z&GV+)m{2l~B(u33TnM62L_@NAUTW)bI6iIu%(DlhVXY=}%u7kIS~4x=wV#@rPw`I_ z|C&9PA0ugZZdiXusxZz*i-s%>4glCfxxa*RCUhtE8L&}Q$IqG9J?FgF=U6SEcFoTD z2!M3!mRlwE;T_zoK=J4lpX;7*c9lNh@V3r%tXRRvO1h0+gdtk~;>je7XQlmfIj~EN zgGs+tAh(pHo1vIL*F&AKC7g?oq;DH9Od+R9+P(Fu9w-6rma&G3me+nOn(?iU9x-up z#j$`s??rnOx^!*gHr4fglJ{I-*#IjbbdiIrXKSU&`ifx(AjHQeY{&q^G`d(qS{KsZ zWIy_J6KgebzNuMo4;ZIFU3x0XsUkiCfggnNn}qTEFT2D}+izddEbsczOqxrlZ%A(Q zyoE;7%Khh>Oz-f^HyUKj;;pv_E*}?j-((7#u74mZnJQ92kiDMA35%FNS>f1V?~&ay ztsZ}?al+JB{Mae_0AiCb!-X9|H{q@zY_)r(HH!*%YH5!E+d7J~Ha2gYvi+D+=k>6^u=IMTU!&moyntjYPdInQHv$3gS`aLTQLFx>6y{+c& zR{Ah7cQYDqpI#~8etWx3$7euvOk-V?UJlXUXq_6;;c zJ_yL&JZJ zk@nj4mQ%u}<@tK*1kv%>pq6kM###<@{6FyCl;Xn1yyE@auR0P8vL~Fs8&JT-T!b}w ztgw7>xn#nVh-iP<{bcWi*M}^>pIghj9}rAbjM*QEvRXTZ>sf8}B}8bRxOdlYMlSi2 
zeF|*w}UY9{^?Z2Kk(@+su2!$}~2o2=j0o#D}vd#xE1 zuVYBb0=vbwbc5($ zLV!)5(7!H|oc@=_#?Zr$pHl8$ZE#dnQqnd2s|DHDQSSI!pD4!J*I#Jb zFW#e3Fe7Sjzty`vX|LqzY3U-d;j-`2;i&zQS4r=|1(Fm&0*)04Akl3{+LhR^dhQ8$ z-tK;T)T*?R|j@fZ3@xqeqEsEYyXoO4jzNM?V`No1gsU?dF~W}V0!Kf_a^ z58w=SnQd(vCl6K+11xWUFzx1z#@Q;0lA(7{gu$>~QLHxA(thUe83_Nmmz!t6eo)~;w1ls_j zR!@cvvUidEo|1I#s?VpsxY#0Uh_R#y%ADN`J_Jtp^>gz^vD!Lf6UfJ0h$zMuu71V0 zExRwJ{~?7HJR(>^N0+hFS;4HRB2+Gb%z`yK*@>A&lTW>2T{C@6+C#(I1&ezHn% z@WhhAWr<4PkTo~aT9fEcT|UwjCv@Zz6)0b^WI?cNx0|T`MIsEl^_SsT)9C}j1NU75 z5Wg&(vL&#fL`%!fMO!Gu=Kv-1V~of#TMPsfj#35g4p!o!J<+o zS|)V9BbS!+q6tn;TEK>4hefCD4Vq0btnsXdElAOS)MB!fG0mQaDKbzMS>M@Y84s6?YJei~|}$ry>9-E2s7 z2*$cyKC;mXxY|TpkI>%(5Wt{h6usRcLg0@JLD~2v$k~)CZw?F~X)1%})DyeliaCD- zlI^?wakm=)6CnQwyl1o#byiiqertAVk*Q7|xPl~=)4)VSJc2sgufyc1f7SX#WJi?A zU6AKsG;F=b3_*CZKU(+z^2+DlJlZ4rM%qt3VV@gy6|5WNo&E|D@;iW%vy2Wf=9g!7 z-k_-w{vM@3>F+_i?i$0Dg9cI4eFrs~wv`{LaCMiUnn4(5iy>PVbe#g-WhBgoV-96B zxF{K`(K48krMJK1(|m{fhEqn9PO>CFaPFX0i0w*tz$u_A=#rU8t@!g+dtJt7L*!#- zclRYOmbfSSo^F$(x8qxX+TQT^eO9dz%VROM3q>F&uRhMRrKSI~q=k76lfh#7bRPPJ zsJs{pQbPmnLy%q?3hB?QFAlOUT-U@8Rju1JXl9GR1VDH+$ZH-HayND5Pa0apC5Rr&#o!nKGqVyPl1-kRhc&*UcPK;y+K75`(iQpNeIq& zj|diC)JYhgZ%~G+t%`HR-4t*!y7QD3QPZrvo4YC=^+W>s^Z6#WU(DH$8tYNyDMtqj2C0(M|L`uTDOLv6S^OJSsrRlVpJsy7qWqW!1}GogQ+f z%!3-EY%3wO7%!X&A*YPCb7lI_JdMg|8IyT;BBFhvJUg2`7e=ss5|!VeCVp)EK&11^ z@c!?c_irBqG}-1jnSVNAZb?(UC;kMJcz?1D$6*zS;#@BPXF;8&b#3#;zqwTJC?q$u zNV6gAT522?U0aSah)B0xaUv0U`%bB(J0{1LRg%@LBJv(0-`%=^Bxm=$5!qX}HsT@53r zuoyAjbmJ@$8?EsA=Xu&gowZ9Kv_c3`lbr3Df=L;Lj5~SilEUtnBwhL!mBC^!eJovV zE^$fXW-ruB%^0tD^st^#sci@pvy$B{X_`2T-u}O6VgVIjl33d4w?{3tM#|K54ZX-Q&|_MLZ~IT8tD8!@m4^{;gOAbHp9b=%Cm6yCmKH-(16^1 zMsz3K^`g~|Rl17e*mD)6{`*oG)~ABF(~wwd$9JiZRkD!A=Os5!Ca!w?)WuW&u}5U^ zM_tc%?;ngagPwcO!m8Lpyc~iq;Jb5PTjP-an?QPbPadii<8 zLP*UA<1E(fWa_PjRNw@;knzLC0eYYC!wHd*!w1==;v4OblcFH2s^sylZ}T?aS40(h zlZdTx5r%ns8*&H|SMI20r1;2+$!g~PiZg;8Xz|A6KZ3q8$J!hN2;^sk&L08we|E$D zn&TcCz`9v_0U#;s)GXrbTkQ1-57uckhOM3X{A+3~4^QhA3)AZxhSjU_2PfN0ZPtKi 
zqKQI`#IxjAToJ#Ek)n%I*}c=8NXqNItx?xL=S0hO$41eOJ#n(nA~+Kj(X_p}vs?sY zKL)=^FJ9Dlc=U0Nd@rzexw4XEh0431wXNOWnbN<8?l3q+cD+ij03u%pw>2tLUx5=_ z1PlRGZ&tWs|2L$%FonGLS~GRnYifIBu-GZ5TM%cnvBmc0aHXFUYl>Lqjtws_gYN<@ zf-hVu|A&l%LN!RKwif0I<^*`1=MX{A>SA-k9&^FMELmE!Lxh@1oO$9?@cB4^#83y| z0u8~(@`x!xR+0@OJ^{*I`a!Btd#jOSnd(S2zTFvcTZ2veDksi6Od)RR8h&-s!=*Mg#c~vc-DaMrC~2jqu;-v_wh}jt z!$i1Np`vWX2enF`^nZj1ruAP0xV@!xm*pyf!>f#}>vPYsn^|i?if-0I!(*FOU`bk< zbUby*vKOs`R>P_g&&_8QxmXysi8MVvY5TDVE+nnhGDxK_toe*DftL2Y%YREI5to`f z1AD*O=HHRPnh4pYmDERMXc3bZ3Y_!kt6`|AlLEM{^iU(yx4@S~g6|JY-&{>0MndFR zurGd7{SLLZC42PO&7*m#gD{)p-C_QTZKBE9{~LQ2)}g!jX8CR`?bM({fn?R<-Nj92 zl|C?QsjCv!+`7qff`!Xp^&N6|MD|T5n&f%?~ zI`M(aFYKx|cIY(6($pw^9mHHZq8XiNH@8y)4~kChwR_24f7`5oddQk2I4Y*L&*4gu zuOB~$EE6bry;{*{3bV30*gVc@8mM>Qsx1U{Lw;>NeA9IE0*5y)+15+8v*X&5(UAhG2jH$0$o*9{;*ZfM|i zaJQsUy(!ZMm0Vh#&~Y@dcA`4K)bC-LTq>iR?5qv13Y|)j6!o9yAez*s(QF_I6AcHgYYL=KLkJcu#EJh`k6R9&WY1wmgd+MlbuU2}_+q z7F8uZ&YI{wOV?q2PmQqqOP0fm$Ca*xJ@P{AcC_kKEi85&atqPU=OY4+lIUeI6%`pv zh|iRbWh`QQ*%(LNba9J4UC}lCgg(?;c&anmna7aq(Fpqnmd5u)0d+n?WDKOjZPNO5 z;@mfxV`Qz5v+vw9bG{WK!BENAcu`et-XNo; zPJ@q|=5F`*Rh88^CzTi;KF6sOubC3A#ZNfkSQ<|HeAyi(Xjc=Y0Uh-qB3+_+PLL*i z=STb^i5LxasAMpFdZv9epkMdRgNh3^1W2$~oZ;s|?t5`hR6&2Lw)AT2VNa!JF+2De zyJ%6rI&^x~!hOFmvHzF*^_3k&siUIr`8A;V6(?rZR&zTCxj_nY;*Yf-u;09}Npv>A ze?C~Hia}pksr>p>6R7d>6Smk?{@0;fjx4^mU$jY=lI=#Fe!LD=OyKO>S169Vbc@}2 zMI(CbfN8v(WY)_(TgzseW;e}J+}0p9vy3HwQ!n?4#z#-{N3FWcy20^}Tw0#-B`q26 z$9kG7cnwpTze;g#X@=-hwTKnse3n?&)5&i@yHrhtIsYbWxPoPBx~PYAE+aX~vya{T zcU14>7pm>95SFY_mE7|c;duD{)G|}dY7zInUDnGr#XprN)c;@|KWjd! 
z89mm!gi5UIrDr%=`f}@CZ?2mtO7=QzUa0k6iGwX?^RKDpKIc|Q`TF&@1z|@*m$uza zOeTI`s64NrcnrLa|HXZIMytCC2Ubj<8ed<uk zVUQQi#nfl0a{OYV>E@4a`Mg7(0$A>n-#07sR4C zZ+B0I_AX#QNL$t25<^{kMe+rsTp2wEis@QidxEHU-^1Sjd&8 z%P0_~Tm%6=KXku=3tUGCxT6p(;D}E%yW;_4h$7Dw00O8FtY&BIIwoO=6oG;`DP@)e z)tx|FgA8}>knb*F?#O$;0mmim74cq)D}$7X8WGpEirlBsKcWD&6$G{3Y`5qYU1RnA zO+cwpD?n(gg|RxjC;)~=zr8QZ_UFLZUsaEu+avLu2A6$xsYV5n~EBJrHLmL>{ zwzYeilLr7h?!3k3E?kYuomi^YrBNDdv@atvdUYhuG4BCeC|*yTy-v^8nU9NW`yIQK zO>nfV@2aId4CSlI8C}-Ek5BosYzS>&WC5F%@Et;u27W$SQheb>_<-?qz+SIX0--_y zw(Y?z>;E)H{@wy(q@SJrf@}uyXm?&b^V3CGZ9D;Yo*ZOReDwx+RouRG!1nz43!75X zPCj(8QYU$ghWvG2>J&}<-1^i10h}ILGYacRM!w%1xs3A2wN^!-EI&O=yq8D0nfQ+m*8UIvzv6=J^IFyF9CKipU+Q>VfwWPWaN3{@96k(0 zNpMb@BkUYI3Fc14o=;oAW(uN_m~UQres9~b+-R94rE8r1jRP3@&_r#rM(BClpLFCiIqM|L6Kj|^a{02g4z1>KoR+i zyO^!$pco&UrF);;SRU<14Jt=eh-7*H9L#oOM*ZDW^1!r&G}y`ZWUz!owcEiD0j$2) z=OY^gkoP?-U7Hw;FcVs9&L;BP0F#Ygch>eP@K-11O`@Ak+mOZrD698hpZ?To8B-V{ z17FN!%&YOx|4HWtY5e1Cd?8z=B;I}lesSG3J|K@%(G5OzzA*<7+`V{m?Zx22C~u`h zCPQF~E!YUkL_C_a1tq>u)kpAv(!m_Qx^)Iv@z7eFF+TWE*NNpYZ}Or=bJT?J>Fj5) zhJgREV)$VUD$VN$qRSD3@!+?v;UnPaf$Emc^aj?)#%}*u*5PE4e{e8U`?7@nkfOGB z(Tt1m>8&3kk)HdnxmXH`FV=!86b(mwYmxAg9vzb|E6c&p;iEs4Vaz3e-qsWnd_jhU z%A-mMZHU~?xzRB%ku*Q4Kdm68nD1gKfePOiK^^*`pkM5b`54(B-5Y0itSN&hrn8a~RkSMSqhO#ka3CO2 zS$%bO4iL|^e<(?9NxBohamTG-$bw`Gn^Jk>I?j|3 z=bjEmhvUkkg~sEeBn}Q50&P@(Z={bWL`Zhw)S`hiW6BtXl;OG&PU>tQbF82E7oL z0B@*lOF4DnOh8rMj%x#hcYD)TI4JpOm|FsSJ4M!pWF8a(s7c>+J&TR73K+xh$YmNI z?s`caMwxBw!qf4V315$`Mbh2ZRi`1hB=ueTilW^A zfy2T{bdrlwZ_L~W8ulJx0t&?f(}3o_YX8}g%|HICh|E|i#f9du~dzpbl)NG3$ z4t{HEGk)Kyon=IX5R)EqtH#R>et7_uE!E%lB7H-$sN%})NdcYcqLys0!#GD?@R45% z0`HzS*#7vNAsm1NYhBnJCH9&OHaI5`tOI(XxdyA-jDwaK%nHEQy3G8}JI$6}x4|ZO z5CB}+r$w5h$B$cRqsM>c#L8ge4}he5_e1P724f5%hb`2QkSE@)=GUte)S+mXPY9~u z0)6QFmsL9jr=@P8RbbnljK-UQ*s5npiN5u3+tcFzbv_%iw5iJvk@S5QZ{qHZO&BSK5Hb*>zaI39qtM6S8f&1CtQ?gFeGd% zK7RB#;jx3GG5FEmN{>@u#1L2bBJ&r|>TDmZuLUSnN|S_p~zOKh^;IuiSFLC3Yd(&5nulf=C-H0E z4v!Y3`s{iu-w$kHPmI+|6`LB=Wk$B_`zn^_LVmb)uJH*v`m8%g4Jz$dkSPA-^05TN 
zpG|!d+m6?;b@xHb#JV(3W1pn#_I_NcZANNQ<(5dcTiB%^H<1uq92QQM_u=Gg666#=ip^wN@!-%Nt*pZEy=(# z0IO=yG@CBJSTUuT@S%sNOwr&h0hrJe+4G$TE3UuGlArQuwJz$KMDV)5m9HU6$ot)T zBok+27c3rdYf1ZUMyCCk)Pf|HIUzGG>!t=<>dlm3Swz?Jw+BNlSnoM*oauX@K2}QBiSm%lt-_RpgP=+D&i$>P}46SGU=j%y?3lPlV)q zoXWoMyXyD}7DaE&ZcVwI@aLK6E`l-5y+l+I*WD0t9`Ek3X%M~lz zg*h^Lrm`vxYbp_WMbe1r&&O%TyM292xQsrBC7JDgqgj`IX$&T6+V(wh9rmM-W2Y0H z!TN_Lp`}!H=TX^lx~?wc1D!f+y4XKg|@pI7?qgWXx`ZiqcYkiW@PPo}!C=havl zf?)o)zgU(Uk9q6l^A?ZQFVPEf!TlV3(^5e=q9P5&J~bv}F1_qwE~Q-i*2{|n5@1_F z&MU7xPW5tmY!L_A3eglyew=2VEy!1Y^4VC;?VB<1mN)lTDi7nKHmb!XJU8CBui}qu zgpl>xkBSYY{}amAeDfwgT3ieXQ6;aMXVg5grh)D1iu|+Zs9I$;FUfP@J5Wogcq1?3LJ{v1xjfb}?j6TqEU#kpx{P2j^HrRYuI-ikF z@H|^IGGrl(Z2L?b_|{D6+a#Z)|E>M9ut7@4KZ!7C3KUWXISTp`+fk{5(2$8xZQ1jy zod7Q3;Qblf!5l*uWP~B3$m9Knp@WMd;x4SKA^(0Z`ZqMeJ}n-woGxUb4E7 z^5tglXrX;{@Tyn)a6HS0Qe6NXm<=ltL#K1pD>6?T#L|;cgm-G;iF46##>?dLB?H*1W?9MS6{CY6u>~r+H4SE z0S5$aq0+IRV;9sMZ=&Q@jPrN4{%v%G1}E3*3AaZ2P324wtuI?PqXI69Hwr82Cia=B zhC{v&tN2WX+mIHMKUEC#<$gxP{T40Lwo1^a;!f!>zuZ1bN1{ z$ohWNIk3R-M%Hc@8KUDut;P@4)4AT2(v%lyBd8qt!|{rL`Y-L>2o)Cla#5K$9@NSS ze3yC&;Pm45;g^6&!N66V8?tl00H7I^vG12qH`kgBfD@tKVNwMB^X=fu^vi?7iCO^Y zI+HPbRxa9VH;^i}rDx&3(g#L9M#jNl)eFAC(F@QRb1Bi9vn~{)%o`6i zaYgrATgciCO)m&dJ0ey_iup1<5Zr-23&Yi@^)+qOeV;CHid4Q zex9?Gv7cf+F^{`hz(<)_lxowdUN#tBZF2)%sY{)M9A^fNWg}WuVUEDYe5WDIkRXns z*qah{$8KT=3A#}Y!dn0U2}oF0+oK(1J>~Q1JHU{5tZ!f?c6{ zDe8i7pmLwhK}2)1rA-LGG&A-!dGJNYWF07p_Myfq==DrBa6w5iusw?P>$SScX``l7 zV6)z*+NbT_j-JunRIW>L^D_1T77*M`A?}B8XpXRHkVW*4J7DQ|Uq4`7pZ4HGe+Sy0 zZXec^Ou=TL&E|Vi^T!Z_5b{S{0Jwlgc|dr(nRJo+Oi7}9X|uOEEovI}y?I=`lT=I0 zJ}wy86;yVbtS^UM3Cz|*_v7}P@r+D3mFKXDMNLUj?hBRH3Eetg7NDkycc$I70&};4 z!hHJ17*thx8hX{EhJv&+FxTr6M%f*~a<@X8yaL7+NO z^0QwGrVmxQ{IrP;>w(OGZJ|T} zs+eURjq18u$rTatPIz{haht@FB0pmM-sS}ap%p}*3lQUEEY;1GLbb^FSoQmyZK<*3 zi%joGL*(36a>T=)!JHPL7p}b3WKB0Lo{YGr^P$7N>*9+?cEJ(CZeFp;F)mT8&YD@O zuID<6t3oRIEqdk`UcIIrRE!jQv}^DE8kNiUW>N*cN>?Z7YfWnuF!l~L3V{4g60byJ z{(K3G`o%J~Qf$#k-T30U-1e`W3iB|BeIl3II6P)nXzke(w~=2gBNbCD{bnQG;a#~Z 
zELk^+WeSAaBiibT6)N^MQZ5z^aM2>p_Y-bvaA{%$sXaH`MC5kM8KDi`b+(>!&+FH9 zUj3aL(3z`eC^tW>yTlQ+mX#M6r)03 zT5{@{#<%LLj;A^8MISR3MGl6oJ14zJSD<;Y{f>=i}cr?6m{7YJR;(H#pFK)k7=Vx+^_jEar1kL4V5)%`f)mR~H?&(HSp zE~*HR)aRHtk8*RjZ0DBjEEAeu>h1{hNMer`j8p^kWo|5kC@K~|Bn)8xu}xKB{Atg{ zcE0tG*-v4xybAxJRU=d-G@!=~*e(&Ig)>?^+5-Fh?bkoyDLm`rm)eB9HK zb-S5oW7Eg^HVXQMF6Tb3bclyetZPLK78|_CH#XL=1-#(i>=V&nPCP&W?N-N#mS56*dLus%Z)A*Hckeuv>dy$<|p1| zP1Pq*T#VdV>LUt2o7G3)fXb?iZWHPPfMaRe(*+7zFl$9ICZmQx9FmL|Co=T?bb zA0#!?YW(BX7(=xL7ILckv+iA;v^}W3p%$t6;kZ^iH#IGxU#DELmc3{1c3ij8z^F&J z1pExvTq=$Q{vqWXpjv2IaCI)(7n;1O9QxO5^w+oBv@#_yND#waB?hRK_GB2gJ(NI| zb;La3uD#7acC+$=&1@QmuHi;(6g);i1?l{n3#AWq0cbu=PGVy-NeYZ;B$Qtf6uKXG=2oI+t!cNAZB(0DtAr~pgQtcn# z7wSo?AE4%j!Zhs~k?37-=%Kivjz{fu5Fn!vJec?3*%i!ma}H;PAa!2@QVMf{OG|`u z3H%vvc0B12)XD$;Pzwj-zXAV9a+W2yW4B8444vfG^-$3t!uC}VE}+^1%fQ2k0}1Fu zu}~ve@$p3KAin)CPygdfD=|iWY2eI)T-e!E1k^1nJYiwEF;1L?N1J->ao&&`cQ)?9AN>Z-;UMUELq64=H_3aYa@k+zv0F6%a?y}Xn2*JPRtV> zBpN?NImyh=(N)dpBNy~Y(b{2|7*|&%d0S#29sL~RlbeA}E z9d+?W@_=(1RkfgAyKXGa5myZY7*4b-npGga^NMA_`6x&|l!M1|8HZ4H!OyGn24~|3 z60yjHi*tT$J$aB-X{x{&XseeB^t=#Q@@7702(VHhvW7mFfg=y^xqR>f_|d)^7+0{Z ziK1E74%Q3!ua)4D0sqN_PmTY{gwF1J;8N>cTNe12Zl5S4k8y1BWeH3bkl1~%hCVqU zO&g5gemO@TKyFMx5;(l{BRJamA@pzpoJ zy53MpOjf?}F>yhH7`J~VdS9&?;TR*lI)2C&ML6JL;>Z)&6*&2WsS_=sJO!JawSNUVUrZrsRahcVHT*cVr?A&wX%^^%P1_R3wZMe8W? 
zG9q3M?Jnuwq`^4nJH&M{?DVU;!b-%Z1={j7ua|9gQwDJY>4`VqeKI*C z1x;7?{G%H?=I$W{1j|mW@!?I>{hi%C0cMJLOri*Mj1KHb)jK}PTEQ(UDvq-TM`U9S zKN4IpP@79|KwtAV$6uQ)f-jQMBDy%<6(p9co zZ8_t)DpkLF)z8()1!y<6Foe9x>khG)%dXBuf!V^Vr|v5=9{FC&Eg!CMVUm`AhpSWt zDvXp#YxUe8#|=H^4V!zwt{~$U(o-ej##SeVnC|YtEWf zH}8aP{w^@H!*Db@bTQq##&zpuOcv#s1hhngIVqHN3)mz-*Mw!8)vxvkmq{8vs0DM| zdWm?#a~Z4+rd;h0zloVt&_7!$cRqN<5cAo4g)}hB{L38IW=HE;c)~-uoyKy~@~LYo zUhLx+iSs1M=EqY-us)vOX7$jMPsk7;f2K~493E8TX$deDM?`!NHS zK%?)N=Qe%9eyI-?9p`Qo_0j6*L;FPNKicVX`6m8^-NE{1p2~w zI1!Qc9;-XPVfF$LuOa^(nC#9gTH07H91izz@AsC(Y=1wjNV{}A3c^|82H06etP zfBGuaR8cznwXVQ#sL-Mod@gDQ>E>DJ-r<2odg=-38_kYX_1fG|_)c5>T+ZOXWHpe_ z2G=2Puew1i5w{0;Wbi;g^YejY6Y98b`Cl7t+?Q)|V;XzMN)yr6LDvs3HrQ2K)brgAq zJ?n&}zmLTmTRGeL*FD_hCv%-Uso~NNle=wHgBi(u|Ci{k&MGCEm5xN(#_tZ3vDoC5 z(kYs8d%q{{%-1(0O3GP36sm`L9M@J+P{RLhKn&k5+w}xaaTS@Wf>D^HP3&1 zVJ@4-L(jlDGnEx+I0(z|_5bMQT*IME!#MuV5Zdg9X_KhYq_J6QorW?>nPN5>(v_r< za!4DE!_GJ#mafIN4MXJ+Mkw?;HIB{Dq((>@3Zn}%Wr`eT21DeKeeCYFU-rv>+7Iui z_kFJS!~NX%b6@}G{{M{9;+|1$0#m2avLQGwL(GeHXQ-r7JyzO4f~dPuUWF?Et#pfD z55Q=IX2m`Y1h|N?%9=Go0BDI!EwHCuqa3Kh%=L0YT2(V=m51#*aJSdZc|wRI?}LU>~*)emD_td1Jj{me-(k+_G8`p)&D=Y=7FFhV<9IjO-Q+C0HeyXA(x=OMIF zo&0iNn&`*0Tp5Mdc;LK~$ifuBOadnor~&i^1~JtNcwd>3^9FXqMSI-25~q9iU`MJk zxLG|z2vE?NO}4^qV1i3^KgTs$|H8_rIqHSg95geBAH5S<;vq|&CX)ABpV5IgVu4B9 z1vZrkI#Id89qKoFfpm>19~ZSSSzmyAT~O+%L$5=WyOIUle&AAsD&LqWN&4Ush5dJh zJA(MLqzUT;Oz9Ni37!Ep7HVtGjN26~8+FtSq|{#G>sP+HeKj%350eRk88@MO;hEv3 zB-Nli2h|}ZfDULr@x>m15wcs!yLN)$s^RCaheyXJ=#V!@EFQW(?km|)Un`29sr^*_ z2!Bx!F4bDd-Bw*OV@0o;wDnWQr-b z1nI=2dimzI15wrD_ym0H)>Ec1TwlIh{Gn;|cd;2HLvs8Brp&bTFxiIKxl9P^4>_gy zbO?UI6=i;0lpso1*m9eG77-u*+*rl;_@Zy2U@pgecZQx7<(DwL!*VI5z%kQ;^?1nC zFJ{pGHrofqZL~-$CXGoIm6Ju+^I#r{ge9@uZO+kS@B|^hmieIx2P=cUiUWI9?#grd z^4mI0>T^#$1^A7pnW;}FCcm@;;*H)J7+wOeW8vE)sF8g_gRa*bH$};B*l+$R1c(DJ z+&7-Kj~R_6$^dATj18_1Vaq{)Jpf;1;2R${y65AIZd?#a82R!+!2EqiMzO%k+u^(S zmotQxeOPIAL?AOtazb<;rByH_18PI8GSn)IDjG5-5X3$p^C5b(*UZhm?E|44A69;h zfxzNvWz;#vpojdffNndZr3#<)5I55)Knn^7I(X3RMVS9iYW*2Z_Ib8@MI`cYD+`LG 
zUE8}no<6`ikJvv)@~oW&vK}A#XXq$qqC}<6cOLdhZ&1!#rgvnwki`4#G(z8PtrB{l zd1paNE;<%%odb{<%g(@BF;&%jbE?NXt3I#HBCS^8*)>jcF@f;NWDIowFL?ZQzs6L7 z!b|959SMaPMR~U8F?C4bSk%WNyuUBZ>P}7aW1R=G?|01^ap=<=-dYEc zyU=S8|K}kejaaBcHS|!xL2`7j!O0q^jEr=cgKKhOic@!3HD`W`D2)QjEnx?oJHr&dIz4a#+$GeH|GX zHuiJ>Nn{1`ISV`@iy0XB4ude`@%$AjK*3|4E{-7)?r&!}ay1(WxK^KDSJiyaCn{#e z+g0f+A4KgrxaIOP>7Ry1J|!8?4#wB0oDoiT;7^g5KKW5m(1JUgBiHZKlLpy%U@kq?;dgR=@N-)%2kX7IT5#>THpBv^bv!ntDnm{r-UW|Bj$XS literal 0 HcmV?d00001 diff --git a/docs/dli/umn/en-us_image_0000001323141682.png b/docs/dli/umn/en-us_image_0000001323141682.png new file mode 100644 index 0000000000000000000000000000000000000000..9e676539a6fe296fd77a5441e7007469793d4760 GIT binary patch literal 350 zcmV-k0iphhP)4N`v$HYiXsI#C%Sod;;L5dI3|8hw z41$9EG_`y)OCQDyCcKvH*bkjD=H#Ptg*y9z~0uJXhTT>1D*y3+yO*K!NR3$ zNlJuq(LvO800RR90|Sw5m;VgJw{eJeKzwvC(T0)&20RT6xC4lef`f-oGQ58CjwrRF zB0@x$8$>%`+s^$&=K^^-Y3e(mv!#Mqr6hp?PXnV47=+;n`3}OO4^!$#Zn;n!y0$i0_^Q0D7cPcF?|P#Q*>R07*qoM6N<$f@1%aQ2+n{ literal 0 HcmV?d00001 diff --git a/docs/dli/umn/en-us_image_0000001372847466.png b/docs/dli/umn/en-us_image_0000001372847466.png deleted file mode 100644 index 35e283157d90b580b45cbcc3b31333059f7d0a14..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 221 zcmV<303!d1P))A!d`p{mhWH(cfkaN;|*`JRBUvDVNu`svMmcfNsCv5FX4$qG`_=o@D@ypyum z`f~E{FC$-VF)ADVK`FrE6GY9Z(G(huT0Eg;nY$O)Uik##afsQvtCMOpHNmLS^fMX& Xr}`S=9Wp_P00000NkvXXu0mjfd{$y^ diff --git a/docs/dli/umn/en-us_image_0000001373007370.png b/docs/dli/umn/en-us_image_0000001373007370.png deleted file mode 100644 index 1909444d23d92a3a6566bca91ce065d14dbda919..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1004 zcmeAS@N?(olHy`uVBq!ia0vp^Vj#@H3?x5i&EaHVU}gyL32_B-i2;l_022maMg=TL zfE5PVkN`UjaKHd37;wP=HyH51052HufdM}l2!Meg7zlxZFc^q{fvBjc7#N6yfrNyF zBp676fwZ)=3>e7D%F2O(yu7@Ef`TF#C@Co^gMo^QimIxrnwpwA7-(o{XliO|X=!PL 
zfsT%juCA`0o}RwGz5y5*8X6iI85tWJo0yoGnwpxKnVFlLTUc0FT3T9JSy@|K+kkCcvmzTGP*6~CaBxUSNN8wiSXfv@L_}m{WK>jCbaZq~OiWx{Tzq_dLPA1fVq$V~a!N`{ zYHDg)T3SX%MrLMaR#sMac6M%VZeCtqetv#oVPR2GQAtTjX=!O$Sy@FzMP+4WRaI3@ zO-*fWZGC-xLqkJTQ&V$ub4yE0TU%RudwXYRXIEEOPft&8Z*PBp|AYw>CQX_&dGh3` zQ>RX!K7GcF8M9{1nmv2=+_`h-&!4|w!GcAL7A;=9cQ>uk5sDH4e!pN zM?2c%!f)$Moqn;JbAR{#-LH2<-aWd<6M=_Uy)Vz z>Gb1s+(~DB#NsS6_)g4`cRj>@b~4MGr%TsH2#HlrdJFVdQ&MBb@08(^UA^-pY diff --git a/docs/dli/umn/en-us_image_0000001377545298.png b/docs/dli/umn/en-us_image_0000001377545298.png deleted file mode 100644 index 4f1d1cad0dc1bd881ca8d400f2869ce0d79babde..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24201 zcmbTeRaE6p^rnq94UN0IySux)9<*_Hhl9JjTjLImJ2Z{!!QGw4-C_9sXXd+{i|-;y ztyE?2)Jj%W?PtG9q_UzE!VlaZU|?VfGScFzU|}00a zU29&JR3hqX6CvOl`|@4>)!B<_=%+a|u?P&hu$>>Nc?Vw=Enihf<%?D(V8vMAR0dr_ zB~KLQx4g)VO4$zy6~ay-X7GTAr11jks6y&7kBsXExT|KUh{-Z%(6>b7fROjvt>0B)1I|#Xa!ShiF{wUl-ex-broODD!gNjD{En*mNV=^SDQ(n%fKV{~UDlri}l&e7k;@ zJw$`ul*_AVVHZ;o*y$QylBknXYob3eSEPrRL8XF9g5ON@g1QA_aeSvV;R&Q*+c%@I z*c1|B=NfFzejN2gv1Nz{tc+&YHKSpIp|)!OxrYe))6ZCU)KBytvN#Rl|Flro2CPc! 
z=;c*Z*I%UG@=6khUS6mHzReL$qw5@Qee7u6Ax=7zAKYAB=u4&pNk?%N^N#;$6Yj5W z(CFzaFF0rH2Yt5QdJpq4c}jLyEM3eoo)o z{ORbNhdR9T5i3=_hgNHUcwh9L`PSvmb<2VmEkI*5lkIy#VE7*QQ=M&RY+)BZQ*UF5 zxvJ)x2!~uuY&@GFPdHxf!B0Em#xa#3WEcf0u=Zys^IH-fzN~td2u%*>GfTeFgl0ZN zniYEljG(tM_*RDv3X`Gort~(ylI+Mxky?Y^_(W;sQ3dB)10*%H_~Vmoay@-9ukt_2 zhC*!PRTFnx7C&7h#m9`hjszi-qho}+Msu&(EB>j@nh3zhOzx1jtoS7% z^v>afyj7oHQpLQHuA6XvU3H_pg>F9JB-?1k&eE5P$~{=;W`a>54iV;#)_dRkM(9P+ zv@o3UuYL5q55?QTpv8F13by53Lq*&v>vPrC3ysem!QZh>M)2bHw_ExT{XJuYeejyeDR5bbOo-+T{c z71i-%4BLhd+Dj8K(iWrCAs@@sP#JGr&5#ryKB^&eW=avOpdgMpm+>`lKuk8!L@rLx zr)>9?u%s=qTEbP4tDd_ywf>ckIdGU(B!Q^lwWIN8A%{@lLJ2jq-|CWc+&1|r<>{Ly zD$U51MMFHLpJWu(kApOWjS?`qLxk;+{Wroi#nP;|Kfj{5DtCC?&i8lV>_>~yFFU`T z(~h2JJ+jQoUnM<3cileK(c}ySQqnqtQ>*0pIoYODF29!$4$~I_fbs36bmQMn2GkRX zwl3O9tBoYWVtk>gYb$dcorT( zDT~TH;%sxCb2Xhra$~#dIHsKNxMR&O8cIw+shUk@%LlhF|@C&ENF!01ycnGj50C z$lP$5eb4OlN!4AlSM_RvRZHJz^}=rqh*pfWGqy~4Y*G?gq ztEj4>#9;-8sn*=&W*J@RI|XLumvXbo2&#itkd#nb;6tDkxL}9T)R*R!4RJ{ml7#CH zg>@m3(kYMgCXBdi;JW?R0!i3tM2MgIQWzAcH2vCkNWlrDp~em+#)4N~wRs9598I z;%TS-X^`Uq9+D%O_Y`+}#9TJJs(Rz~l*CZ6GhaX^PRCKZ zPPDtb;Dzae(e)sSRkC7FKPSGX@>)UtBSOo}bo|dGtAL=4LA2VEvW#|mHN!-MUQ>A; z!|*JT#}P~y1l(M|JR>Huc}ho^H*NCFWbhhQW7nUFN+o?U1QoTlaYsHy1U!<%0ue zuVlmAQ;MW&5^Vy?)n_rkml+gM);}JN6Y^6gWwfy0t%imUsmEd|Gt?E>5K?8_UdwKB zubRnpW#cFQLEKg){d=+g+cEz438sow($|ineUN47&}CrE+On2T3Ox;nFpE4`q_QKg zG>Usx-=f+-CWg7f-nM|(C|Otcr!6q9&`u5YIAwm(m@k?h!_K;r4%SMr$^}h+q6S4= z*2=1yiiDLPH`|zluA;o6sD(F@5gej{?slRYdVW#Ee#rph$v&$hP$iAOyxgX+ls*m< zJu@HQ+>fB7F2t)m-!MTZDl2OyS__SciQT6eYhLu1F4gu#NPN&Kuazb)OATj)=+?b^9u9)9Y1ec&%k79haxyFXZR?zxT1OPW-;1$jQ>jm33A@T%u8D!ZWTz+W6$IAuCM!LhJ79*X! 
ziIy;P)7QBWD|#qIzeQCFTLO`ZyGaLyhdH?n9AJUzdXz|-5z-@~V!Q2(gt#p<)WwbH zZ(x(ZBP7LOP7+7=6SE{_*h6~CCcs#)obZV@SZ7RyNwQ0@Eyd+Xv1h5$L3^IyqL$bl zGTgYU1|C~3j=(@W981xF6wf>{VpsWhrxnO8itH(rW4r@ac9Cr?fD#t6W`yO)&@{QC zpj_>-NoaqFU9_)k^raVf-ax~|H*69~&UbsHB3MBHoYPAq*CHwC4rhVfTw{^vIdJ zC(O=@I}{=heR+r4NUom&le#TQnm(q$>b1J)=0+O0)SnD7&f4?P2n!@yx@WOP(=S(2 zRWYD^KN9+U1{qqznhJXxrQ9C%Zr?PEP-^MX+Ki*D;4lJTm~uJ={@!JY@tZUAOvj?t zkP>pXDzVRwvj9J7{d2+|#$axPxt{w&v)4PfhGT66&_ywdCpc>JUSq}|&97^qFkquA zUJY+R`CFw9rdhV-IWOwy8z7d&_itQB2>53;&E63zm-ti#? z9#0I@Qglv%&q9I+vY*V2bb@9YPMFa4HxBt*g&shZ#_FJ(^6u-<-tJAoDz$#Sp80r# zyr5JR#sLDlxE#5(ru^vY!$fQ^UADX?Kcq(XwaDhEvf< zB{lsKb=s)+VaL_3oTF-z>*6sSL7tQn;_eTC@=Lt_=)41!j)G#Gc6Jdue7xI$=Ol`S zxGruXpq{po<^n)W5<12;GpQp^?`=S*uN?WmaTva6pw|a05+OsfLC*ecS$ew=zZ5b< z3G1%Q<%eUT!w=N2}3rXSy-uB@dasCZ{~%Qw~k^U-kTh~rvQi>##p9F>c$|I+Uh z&+nZWRAG{<9ZJJ!*aND$2%k`vO%@z4^Vd6E=F6R5(nMXtdOq{eLg6@9ABVkwm+4ro zg4KO_Xt}+iTmRUjdOIZ=vhu6RZDSjXl2*p0b-w)zA4`=L5l+8*P2K2m$(C+?luOpU zk156NQIOq}=d-TUz~fdPnNfj)zJvq3qIiT(&m_t<*Nj%Ft#R5h>>=0HRA{@3c%`E} zuZQn_A$pAPm`9#?WJ42Yp=5Hh7oT0woCI!xJ&P0Eo3Xgot1JS&!?KU+oKF>zxWe%} z5SPTqc|N7U1-Fwn>|)VUXo-i8x#qsxvyB#OL3R*1br(68@s-8LDHv zOHfi`n;@w@mz@<7+B5{hw3G29Y+_^yxBq(F2#mUFsKCA)9ocpFZTNQ)o=JL2nUoeYK zfnqZ3C8jz-@A#4bktrmoS9Gn7=4_uKhgHX#7}(H2&dLVrK0Rt*ZViY?{r$EL@p+rh z1w(NBs{rU?sDXX}vZhP$>?uXSNzl(Lc-<6Rk5W|h;6U}7l0+qO9kW5Y(&E6Jd82+O zi#DAF%(nAD6EYf~JYFew%egi7txn+HRGU8M%#1s$aJ(p869kJQwnJovKSqVY?|7i_umxE>0Oneb5F@qRo^oT}Gh z!@uuA-A*|r&BRvyq!BO=f5E|~Rn{?xs?{aOQ@qto9vT}VsMXIMY~6Dus4%rVW{uY| z*y~1$M;Vj9?cDkZL}^9!5r`#vBsA23Ltx4_F=dWk+?~G7R*L+%$0=rr&`gj!Oe+%%~v_5=sCPFGb^y;pG7+-|x!l-?_LAx*~9mth{ zpbDD+IbBC9ogi69RM+gLa>Ic7X~ZjP9ZXj|#fPpFEsL%3xR;?(6o|>W;rj2a?^Cc! zGhGGzoay^6Kb~pZqtFKz*h!5ND{V@h$J&) z`Q}4Rn($Bl=*0X2S0oGqj`6#vM8x8FcaO(?4{+H*)o4!&X%N1dsDT#JW?h)TDDCunsUF+k? 
z$M1cRqvz?&rS?HS!6o>JE)ZE;Gon1Lq}L}xxNh6hhCijzwEFjpJ#;FeZ^$ubgcX9*%-G6RdiEJv#RwWAk#fER9=2<0OlKd z1JBuw+84cYP;-dCd39+8AknWP5$-DMqpG1vZ<(3dXMJoMVLDTu>(?|ju^VHhW4G!& zXA^Z#R_xg=mntqIaX)fz|d@sr3G)>!Cn7AF=Qrr2`+cK*oR$S-6d><46T z8x#cfzKjAk{Trh#$wj@~=m<(|-cFfne>D>n7tzYfSW!qB@(C-j4w!qu#t;uia%~>$ zT@v(R>zZF}x@nH-JdYv??!GVK$_$aK%Ff=Z#l+Rjt*nUfqmo=;WOzDe=WT*79${XG zNVtuYbgWv#S`o)0HzUNd<8uq!n8jFXd?R|l2V|`qeK?*7>Hyesb8FZa)sdh7dAl6o z-Q>_iB7Mj4NXF+?5o4|&mUb)z99PF&LvwumIBuQ4MPR|i5jr7 zJ>GY;fLxvBCVRK4?2J=2VPxZE>C-8VAavMo?$6CRwk@sS0n3f8VP7g^)))^f>{8$Q zO-MrA%R1N&5yC@vPE8Qi*NlX6q5?>@4$L>mK zX+9A8`>&Ia@h3yV$xqfMQo|-o4@QKzSc;?-QZc*wYHdN{JkcY?lLb@9E|?vAr>WPB z2A%HkG8%{=)7c!>eC}~4miO5j=Z-3AX+Hv06Nht2eW}KaI=R}GyTykyxhfXiLMS8u5rZgG`aQwAE-6Lz zu!1Kvjt#B>JyMMWVOYo2k%^Z46aMdp$!{NqABl4DifSs|18tXco9dx4giW9$S7$VW4C1F;)4u@Xay0Q-MkUr9my^;LeJ`x(5`pZl> zKi57#3<1aOBPF`QZ)fKo+^hb1a)$1EsM8tFC%qoAo$nHH1oTFuT#>_MA&bV#Veyyf zG{-2r^~?tJL6rE=optp;xvm~Rk5Hifu90#sPYa*r1;Pr1dS0-84(z538Hbp{v>qo- z9g?6IlH*enHM6jN#Z&ZCiuIyo3>Ye+<`HaVh&S0Tt?fK}s+P7?81*vYwCP?1pXBjR$Y$~q`i zwujRu6e*Ol5i6(YH;fYPWt1Y)>|$a_u0e&SW~O1;TD53ScK^_5IuI8dJTD;f-|N%k z)Eu1HDW*fhdR9&}zlh2a9u#WQ-+%B&La#Bpw2Z3BfS`g;n%_V^j2pfBNQa&;K_aw?v~^-LPly{=1Pm^zfDSSms&#MAoMC zdM=e#`L?5|#eoW}L)R-O9a(FA;UvBwFl0%o3zGGx)fu7CodqB-Q(QLk3QnHCZ6Ye2 z6lW~ipp^y*Htle6W)4#$5;?0BjftUb@nmPSU+`>D3O$-2=c#Y_jky|aKb`0EG&=?f zX+~?8l~Z;5Pb7K!0);tdx=d{eP3`hiN9+LuTkr3Y8)HnO^xc0@JCOQ7(@wo^v$>h& zsgsu0ryV5{YI@?b4=SXKJJtGm(3>|ppI^k`x|jWiUj)lD4A)Xn+N@)P4gCa&B{xvi zzG_7R^(l!sK^jL{8>hg+F(4<$w7WW_6_Kdmv(e~+Kc-=pZ&$ z?TP0Q4w|8ClZ^cuOsc9BSk+&X0woe4Zr_y-PhAbV{_PKyX#LZzTv$zlj)DYI2*z{D zo9%$kvO~dFAMLUo(*eKE^e3~J)$-4*pPh>ZZLG4F!bFiPu(LG>0Ar{$Y5n2W1CVDxay1+j$p;nX^ zauPDTUqYn6ELHHlQ98J(8?{;8(4EFimGZppJaTL13uA3#HdLR`#4VOF7xx8%%lPaiV4a{Vyi_I|O2`VuyF-%4vq zLWHFw)h1M#)$q^D``xkHz5O$ktq8(bc`d0MPXs)YJJD|EiG(IcgWcMq#d3OACSzv) zAZ_x<*v+nv$XSZZ+8iHS7M0WnX_F5YB!8DaVHCh}aB^`Bidquf?oWeh=Ax-0^pcigbsC!#?)FT3@ErJ0)h z`Ms6Lt2~$Xu-&yPJykEM(PHaYJO9A+0pB{I-mi}*ye_Y%z=-br@ 
zZ>$1i%MFv+4!wi*Et0ZPWb180p+uDhdr#p1l3Ai`qKk16mD8gNM!`JHa`={NS$8#^ zAG+t*Q>A90+MXfu%+=Nm6tL4ZmVQ-~{@@WBT)5@ZhS9p3;w!6A$S{RtJkYqGPsspz zG*Q2zW+}?Zq}<&hYttTQGPsUN=}*l6qYrD*bNh8UZqng%jlzbkF~F5?G%U z5&}CZ%git+E<7b}cF*!zvS%SkalZ4!!dT^fWHK!FU)Y|mm}}Au)6O9%L9Y1KQ>xD4 zKs_e+dtpUYy1g~-`UD6n(7yWuw(!2giF6dTYJaYwc1H5n@O$G@JnaMJH;szOaSI$X z5=vAiN=AKfHMvCx=C7i>^bB{O!3(r)I}gRY5dnr|);KT__)Gm*&_=gTv7yOI$u6;z zPUP&xx<3U@iQ%)g2R7|zVY)~ae;MiW)n$;+dVJY3k{fo3Hf@loPKd3@#)d|p#leSi z=awH~|HIUf#}H(Mh9JcAFeq6M#2*&pQgB7MIiXi1uK%k-+7fr^<5@e<#5EkXjNeL< z%!WexQ*t4rN+N@SU{9kK?dNqm1RWINgy%I&T;{dHx0tOo;%s zt3S-lZq&Stfm+4Kr{ZKN^kUn5JhMDV{Y}hvkZN<+x%{)sPt{O(CZVQ{Jitj(R78hK zP1T+Ms|DE1qJ!lpT1w7niA)fb01Y$Cha4qL9`-dwh>ISJZg(IaW2SWgeRqmexI^)u zgW@q@(OTg7mne{NFJ7p7p9=HSruK4d86}V!)&(BFJz7KsKta{3!(IAbqGMoY6d;G) zjqKk@_fz5`7N8}Jn>xwqPy@wY*3nJ1CwpABvcGy3_m`gJ8JzK|3ifb*(+_z z8MC+;KK>uuETjf0 z{6CPQ+W+N_|F1@DLBg?8CI5pnZ&?=SGSl^;{|8(46v2Hh(vUp}`~ zDjQ4we|g_C!`ki~c=UB{#}|bOL1yaY{*R*iGv^(l{0RMR_iBb^mi>dg2YlE`8vr1| zj5f-$CT9ANs)B5MeF?T6>*-HZ+ka^U4saEp^3GV^rA9%2;n$R2psK8M`}#QJ5SlvC z|ADSwcK+kBUu5=`(=H}X;f(KgOvioe&Xm8!!5d&@ymfc7^6WduhtzVOT}$n6UsO~C zc4iw0U%bu{j~qk{D>ZK}YQ~ z54Qpjnv^0T6xsr=16yiJOJB^YvkJSC{I2~!4Fmp>6;Qt>Ws3x_Q`sGz=31$a#Xfs5 zcHRR)MK^Y^%W>x|d1o*W5J6N}&v6-7bzaW&`!-+`}jR z?R5{f^uucISMdV;=k}yw0#k0o^qFCbVpOL5*)#Sr7A8K1*6K*RqeJ zuY*i}1z!|;1vHfv#_*ckl?TJfiJET(I9MZI7W@uZuTMwbQYKLAwEPOJDup0H77ap& z;SBCi|B&c(A$rLY7Hyyl@Wmex#F@UxX))~a_j^s>Q4;VoSoenJ+ne0fgZQVXJbFT4 z3doUzkrK9dHQnS_BN+O^UkZ@K-=PHpD_a_}`Nn-D)AFP(f$+enxZpZ~IQ@HeK6J^z zqri{6cq^TZtcL}@+LSNiyqUb57Fr=RY`{p7ce>kwN@rVO1##lD&vuGlEJ2UiH``l^ z4&K1A%bw2bZ5W0jss={)C$C9TR1NKUQ-;lJ6zPrCjUIG4UjSr5UQymo(Jnob% zRCtcqQQ$|WS?xxb$fb2ma5KeIJ4UVEOU_!RDsL63XKnP+__=#a?BnCL9?}*POz~Hr zI~n7Sf)lD0@2}SIx{+%ND+i#-dyaNI@h%x)VD^U?@wpkJ=X~EDreNGgaj~L}uRJ0V?Z4>e`w;y}osO%>&D{5;f ztAo)Y^riW?h985^VFY1-K+y0#uy}fU87AzjKOqBddR<^4uGqfIVxSZyUpM%z`>WuZ zn`gXR85t4*(BPN)Vk+-8_Aa~;%*TER#?CscWFn+mPEXqG5Dy5DMjSCw+U(5 z6@)r2iiZ3|SbK^F1unV<^Rx9qe-h;U`S5K@$(vm(6=jx!?Y$Le61#X;#Z>v{b~GM; 
zhI@Ing6f2M4`;$M20g#xat@;hm&5PIlfCPvSfPh5KNbJ8?8imka{=*(2eH-ck7|Fu z*1p?iraFtuM{@V`bX^eFl$+9`vF&6doA3$`m@#Lz#FE?Joh!M^E{5Gne4qFCcN_A> zxmW+0?-w6|;wO}|Fw3*{JFWkDO@l83j&tBB4`WK&Pw-(wUfSz3}P{H}*R3M~8ye(< zlG(_LOk6&<=ve;Y#XGpOPEv4g?(r17*gM-kzKDbkT4<-obD>tz^?QG@-sr%4=laPb zk4s_Px+&79Aiq>sZSG#Ik@s-vl?x@oaGY{3Ga&QW!qPZ0^?==c13YdXC*}bm5rJ#g zfaaJ8-zb!NI1c~Mq8mJi*3F9y!?p{(TiJ zEW#Ws%P8CQdqUt21?3W^jQe6EFV{`OBjo{x+wHnnDVrB9FRxWpzbYo~shYe3Ljpk0 z)Z8?oo+7_v6}Mxxa#S9PtwEWb*)}fMGd5|0egjEz6vt^5DaGY+k)vbu>SM+owI#3= zFC%Q~uQt_u=&1EZ0!cUl67bRZ!#IV6?7>%DGq7ZS;yMyaW8x%=^o z%H~+uZBDU1VpZBL{rh@pKZlBrhQP+-25|Iu)uV&Wve)<=?ft^=4E2nanF{3Crp4%h zJX(?p4Vf39OO(&w&=yXi)rmjf$1I=*xiZgs6p<^$#ubs@w(-X2a^>I3R4AYxev)Rty^!9)TA%_L{$l5;xw#E8jn*_N&jjdOb06r`4HTwxM=#?`kN zdVp<;YIaE$@s!g^CETHJlF4DrJACV%hR+W`rxQv) zYVnfH0Ty!{?Ru8K1nX@}!OZNeC!~3K1&@+Q<7{&iN{_n~c(Be@)kP@zXQe*$Qhm2L zdc2a_))^0n%EPQbOD`!mUViKxsFX@0drBnWdEoJHl0Dj_EeqK75*shwS-=xN%>NPU zEYpPX^=;frUU*YteB9T^kEe@wueEWflZX`eSW@(d%M8T(F@U((E3U*G>j zgj+?@$A+S!f!ohKwzhTZDwqrL*CePhk8T@tPCnkO;*__a?=p{AqmV_Y>rV$!!NpIw zNi&%-)dTh>4&_AJzUe=4!&XA`l4^hdx)9tT9*aVP_Y?$9N2Dw|9rj<|&h!P0F3^9? 
z4^o?c@g7;$j^<^VIe8RGmW4GXc}e9`#H8%_LW3)F!{yFEQVutEjU`nAP8Nn_bzjI_ zJG+#%Z<4p85 z=%1nW9dZtOyIc|$?q~r@sOXLIv1Y%kPA$(Zi4CM1*~?t^?>(MZ41Gs-;wY_gvn*-k zP&vPe&eb;ulvI3U^(GVg_{he$qLtw4C$H@3=P(&-{r6OFjv5K`krjXG1!Gc_A5Y1- z%Ki7Cx$Ac4dlx`g(a#&(HWz0Q7h zIq`i?uKV)ke9N`okgflg%i>Hoi|U1wA7|co;pNS2v8^085=#m~l`O5gu;`X}!3j?# zr|5GB%tSFJDhhk0Fq$X>N|@M9wsM>X5Yk{86;z>yJI=<+(jVN(%+@N~_U5$cH$##> zX(BF*eL*06tKXHRO4<@?FG*d8IKw9}@W(q(k5jU+HmH0;C6B)URu%^5l>3BZHj_Uv zW{2n3xa`;-U?|PJqJpWPAiubiYTS`=yq=i2wL|)I+BXO%dUlApy@`05%U-2bltLZg z27^=7Fd+gxCKpgw2huwBSEK05ng z;-A*mVc%QH=SZ?!B1E8x6ym2*XEJdANlB*Bz=pzpV+MTrJ8f%WiJHJ#?dHCy`wD8WSefXBoU6>S$p?z>PWG3 zy#b-yIh6POsn=5(ygFs2bUNMQk(qnT&rnzA(_3a2Y?0ez*rP<>s3a7gOOri09e=dk zgWziOJp6nHj6_}@g&LKQ%Z&7TGmS`H0K9a-O)$}`sHiEpn#HjP`wJ$K_0O12SM)6SNDU3YK|`d|PJ@k46`kl0lbSz-sC` z2n^{iVo^5f1&(PH1TQm)69%Fij&D>{Bu?j{vNd`NRgi ztwV9$ckCrJ}nATBvxuM3pETPylc zOXfy2zI58eRF`?a=OQLa0nH}^3aMbjjoBADOor7=r7m17e&$)a(VZV~W**3dP_KLj zZ{m?(-50@iRc|gCVzixcodY0pvD(+8Gd@gT=7tiNow7I4_N4g{sR#1-AKcrT;BZ*m z18Q5>c-)CcS6{vz933>Ghunz?5YBazqNZbwvQ`wb77;Ax*2 zv}cm>6@c|KpgV!79Up(0_Ib(+pH~nq71{Rrg<~o5rNo~mA2dcGLKtsBao&qV5rf=< zn}$bNx}SzeQ^S(R+4e!5oF=}aM%QQW|H}%M$Mlun5JP)X4D372ck-baTC*@H<3;(Z z={YLGp~M9b6-6PUW`w^UXSz`Hef$2lul=L!c)JJ%am^A69ZrJ30_6u_yR%KTzXcu& z6@?}X#|@$Vn_&fctRpJckU2O z&$dlTzc6H|p6K{6M)FW&l#nBO?VmB*y_h^fP&&qN6h zrq}+Xj&68w`*Q7!tp2e!x@LI8Ll1`C?kE>~HEszH)Oyk8;~XMqw?u%a8^7b|>9cWo zbi|^NJUcs^zKL_2-rE~kd)f&ZiKYMT)MvBjq@&(nVq|lRb-Cel8O-0$5J$R8ob^gO zJKxk382E?2sH7xLeRtux=EXr4TCn}zY_?W}Hg&-JA2(*w+bXFE98cxHQ<<twmR^jj#TBzLCNMmtfB=S3=LZu(7}4r`v~H9_z3g zpG8kc;K1T-9TK_wcX%ael-1hj)-97^!KcdasFhZGJTbAc=X0axmm;rCQ{12yu}iMQ z7jua4jt+M!E9lXG=8BqJLTa0JyagxX$r$3jeu{r1fABtj+O-MQz$@_yhc4f*Ap~ta zdcY7bvw<}IKzup1H@2LTNb4_!^V1>X_Ch@w!nC=9<;(JLS+6v9S#>Vz@4!KR=p)Zgq3> zx~y(it`s8j==T;e4HIvIvO#THT3Wh3l*Gz`grV4@mTRzx9P$O^n=uoJ?fMiAjGvWR9XU z+*2a%-8>NFAhyV|X@0Gd*y=a%w+d@6t81p9*0Y-;E017ag(#ADm|$OP#oQ4bVx{%{ zK;a+}?*AcG@AhuPs7{f$mjLowrZAHCbs71}oTqsAxrrQW7ci&Q?qD`@*Y>h_S6ZlL 
zMW8l7^=O$gT#~Fq*qq|v4@-sY&TFKiwm=Qup|V9=L8yo-*hNKjdrgc6h}!3_JZt%=(A5*6qbX_N4`d)XJi?;+siL6~w}Y;op-W6fPg|)P;zaEYF3R@1AB7!M zmf!*N^@)sb$X~o7t`8oWlSB{(m!$gw!e0_N2FDRhfnrk=|9&$sr@&NOVSa$31i51PfIf$W|vzOtQSn4Pw8Y zGKcrnA+G28>A4z_n(cL=O;Atg_=9Sw!psbfVfGGy;Z%gG%jke?i>XxaghviQo5e@c`@`)_2?Yd_4&1Sz)ws zE|21mpsoB~;SpZo4L^1;)mW!i;z7>U?WhjEfmQx(vbBZDjSgvq2hJ2uzCk|Dk*GWOq{{+i=07iRGtAUCJ@b(5dKB3?xo7lHCuA5)lEqw1rS3xWZ*6(|QPZS$mdCF4 zqm92#+`33L1P?bQMF&3d}GhXcm6 zo~Mv&DV#U$oG<7hSJpWW5+ooif@;4CezY7_yuEG)L`8`|UN_!JG@fDF9M4EXE9OXT zjk0jni`SpcEN}H+yVR6}nti{=YdjDg#ii{J_huelmIeABash9o%|W+Fmv};rJ+6+O z#-ABiIjD_D=F1bj;;553_!p$gfc=Mc!WO1P+f$M@Hqb_elr0wjgd?ApP}_u~559&V zH|2xB|JEa+gO=aJ`nF8b#p`#hP^zQu&!>z#Un0}zQKzG4>G(@GBERr5L7|-P+dDx| zpcx-`Jkl!6=k>FPa^1ujKhlL3_hT{~JN5U|v$DTJ)N>wo?r9dIBP`y$Ytz2*lbLH1 zrvLDXy5(stiNO^)Ks7c?o@V$Y5U_rLKymh$Bpl;jQE_4?^ba*0Bt)IO;70>@y1i4v zuJA9R9G(&`mM8Gni=d`gd^p zP!yCyQ)-4mXa7ccHhG7VlUq~h?gM{Q&MyIc;UwVM7-J)w2TuMn87H^2oeM-~K`2T0 z*xf9zz5zf}Yt)&%K z=Uwmp$!#Z2fYpsyFLEOm~S`@q^TQr4J{~xQm93GJK0cYGS z)5SrrKuFKr_Kb5}A{S0;tC&jp@DOXs^S6#9BZZ|<{@%_6w+9xjD|~*55KPN?@S)m- zMaJ493w?SAWEPI_*d=p!Vj0`c?e+&g4xi#ct&#Z4^UQ0%da2-UbS9vB5O+KjN^7$- z%3!jgLATJjJ%pY5td>Pk>*w}5&*4xy;LM76cC&9S_z2MXjwBzuMRv|deJv6;LC?m8 zC6g2F{1EP7e96Ci(eH!ZPhi;-e*VIp6cQDG-oD(1{FOc7KLzvR`ZoL4GE$P! zuv?nBbwntp#&`g&I1CyGQAa*zgG~R2^TRB$8y5YV7+tE2l3hYfT6w!0KRIW586bD9 z@b*rkwh7wP^%ku*Ua3$cs~pp};}uoD2J2Y9dAl0K;_eeEe2T0!)zonBm^K(BN3LD^ zzOG@c7?#>KbiIjAGH7JyoOZH6Z>|Ox8^}OOSPJWww{s(nm!16e`fQdsJ+V8)xF-6u z4=#V{1@)=g97(4x#-JlN8KX+*TTp^BITr2W#pQxlr|{`If0S)?$Qw=^UNrO3TbRbh z$2mWEvPE4QVX*OceFRWg1gpDn4aZB2q#%T?$Y;=O& z{y_&GD#i)e3-N>Y8bN}GbstNgxt}}!ABKpaPAm^kL5`l?Uj)3)#Mc~xZf_^Y6W6o-{DPY6e^E<%gOZ^DC@iL3;{tk!TpU!RbPue~;w1O|DtcpFv z*IaY4BGtUT7fxmF;9*18Hs&aET+Bspc+#AjY0~li0m=t=;!Kkz+y76bDZa#lFa)T! 
zp1*ZOl0wn2Do(X|!~ut$@UoPmsc*tK?*4$HMUz2CwOf4tJ=~XID)Et|leb^V{~=&s znhh@>lyrGsq^f;bglD9)WC1AWRj)clhY(bVPNwHZP29yi?c;@OOtw9ckCM!i@{uWw ziql#(y)nS-=bl;*b}M8+NxR%ARdpG;v~NhKQ_5uj;Fo@7=b*N&mak7;+1ZJci|!e$0o68QEFTAE4kU%2`#MMW4kR=F$XeMQZ)(75kLtOE z`g&iH9%1h}8n^@|-7CLWwTM~qOW)0qr7=VoHb>$m!wP!-2`WaO2w#p7_-W*x<4P&6 ztZU)anjOMaAj?50FAyS){k+NDZu!5v*CyNI!4<7ys#=n!RJ8IG;splyf8%a3cc5%yz? zFF{m!JHy=ErY7-E_HpTiZ+isXcpqO{^}AWqZ{qE38pJe$XU7xr7b}BVT~lUlOirC~ z(}i+tnuoVPv!3`M)yFVrmeBlb1`9pGi2JU<$%#etJ=s&JSJKN7y*JO?eW{cN{inNG zs;zh6b*g}E-B4Chf^_$IeMrpF(sJ$oNh+UqURJTHbU_Nn-+Sbx;TEqlq$_;Z6EC^= zV_7gTLg@eg3vhS}NQ=V3gkzP_FVz#U494egom$Qfe*>OTe!f*O!lFobb%>HCcygdf z5Rkc0Bj12d-RHo3Za+36S z%{za6%*fj)thLG!Tv4F4CtYx-FIL`c^46^%Qr=U+jMObNr#8of^eGu|=L z$_1ltOy5%Ua%vg^X6D2@P82P_g{P|;S`h;(qThocH8l!_TPG^7dDCe1N^?jRMvT=p zHK-_$w;sOMQPR1>SY zR2m1V(|EXRQQ@zwa0|Uy$*Je)3+lG`mL|E%@OijSJSDa0cT>~gY}qUE!o+$YPHcLM z{Nri&pRz;^`3gL9;!Nh^LvFRBCkIPu%s8esb)u~vHMZ0{Vv^{yIT-T^Aoh=h_?b4> zmTF&O=T8r-*x|q6P!2mfj6TpP7Ws-3Lq#!aC;bN?j)MRd;U16JZ@z(!Ei}?`wSL;q?7n4^z_ji$ti8G+InVE{msHRHK5bV{Yo|RHbwo{07)`uI4D5~l+Xcfie0jR1#&gZ6AhQXXTR{h-vr~dr;GR#?q^M~z zgqXZwC4W&T!nC9ymgaZd>`+Sw;Dg(?q@QzgUM(4L*_WUH10|)#e>PaOv_VjL#bjAv zK_u^YyBQ@*r9^DqUn-ZL_G&V3y*GVyV{!jFnv>^la5EIDK*D|G6in8uR~b2U`c#T& zkZjFa0~R$Zm#unY7dy7Pql1;b(&VD88GEuwVx))ch1>pmiq(mlHWp$}^Y%eI zUhTuZXFxBkN(Eluf;( z?oSu-ZgjCwnlV5TTH4<&daMaR5zpYv^^AiLu0#?#oeY$2psNgn@s>uDNzkXdNiQ9> z5Ut%l@Sq>0_b$0|(v;vcY;(?n-qi;)3mnt~a7{LAFbbXRpZd6GO_vx!XlE&82MzBr z#`G#FtaLRDi7*V=`N|gDoV3+m8f5$fz-!>)5NETLQ)~0%NRp$f@$aN=Qt?|ki{vO% z{P3N}mwEXYng$RX<_)Y{ef^Yww9Leiw_WT>X3tb$~z;xegU$YKy$@k&rz5`?GzI7MY% zV<${uwv^+2DIyjJjax$F_~N8pGNjBQ8(94J?*oeM>pJ)({X7qVe*04cDX>x}U^*Q* z-$AD|G$dIrW2ElGSKghY350*S196FuQV47S=BGuZey!%P9I>9o`$;k7t~Ju5d@4VQ z@pB9@221vyu(>AP7;oVSB)cjOcqCmhl|L1}W+VwA&Z%S_nY<(|{oCY0tG#)STT%VH zoWs5fR!o`=8CNGmbT0XSpPrK4dXlw;*^G`a$#o1XyJU+xzh0?iNa%O>{Gl$mLP@>q zA(IgeHgYruGo$i^jm6@INJ2nc)8GD+efl#wL?6ev^Kd)5JT+G7if~89|_i8e$U*l zqs7Z>zP_=_FyY9?GlXoi0dqYfLAs;)S 
z8eYq(q&vUb-fB_j#0yFf%bMEH{1}{#{qKWmqoSO0JS{4zAz%?qUi88>6RvNM2*2V> zZXL3rx0O`_PVG?a0Zcp}E{;vzHxMy|lXQha`;$Vr@X>y28JLs2M$RhtQ9XS`-{b{m zYW8W5-$Ty`cHKK~qcvilI{~QWeg6p~Ts#Str%7@lk{v0;Ls|m*>c5#E+_Hom`RfCnSk zrVEIJ@TjIHkZokBzVKw}$7-6wE?fitrW*|ZJo=+4U7d2d!|RsLEwR9Ga2Ur70bO3% z-~D?b+te-Ix~mw z6#Ll)&5=+hh?^hFA)Gwv+7gu(h6=(c(CwKKcD{0$MKAy7iLvcG4RA=48D#1`Dhiwey~!=$Cj;9stgCX(KxQbo>G;@0t)=E^^3x)cND zhF{Q%qE&@cgRPP^qzn3{Ujb<8BMvsca+O<;Pfvv8lYqED@mLI9B)s5da#QYBk*P>l z_w1NlZDRwD(7CaZos@V~X^-i;{ajD9M(4VWFz6?(I19O2$5Ox8jYG7zUlxq7u0|I^ z8a(sk=Z|n2tE5K$ur+Drm{$cWSNGyx00@WcU(P>nz#M6TRPk{_%|7I@9sH!iT8 zX{O#9OA~~bQ;5ss#z%m2)i?^*-t~m?qBAfu_jp234XmspvuC>U#(+^hDB}C7Px=3Hc5 zv2*P4eGjj^09OU76fzq;U z2&1V&DATBn@Jpf@*H{;F**TW6Yck6zOqBszJRGA!f?B1NRFnV(bmbt&#=t|#otN0M z6DRvBj3%Vr=y;gL?$O^X(9-(^cke0cHvO{?3htSciv~!tRyQ`iDGO{jsMkzXHH!9g z=jX#L*~@hbblt{e$a@?VphSI6zxiLwIx-}eG<+1zrjN5yWYWsq3l^rIw1ir6RoN0? zGN?~sCVes5Q5Y^g%@BA9Zrs$#E>eD&c^>j2vx0tBD|U|%0_*!924-hk+)wZh?6N&= z2*`}NU!g4XqzK2;d9QRcCKZA8Ljr#k%jaol9YKOSs6l-Nnm~gr!(yJ0tW|Zo{-hXN zG>p_Y6ZAgXJy}Qo28HsnepjbId1_NbO-TwdXI)5VlG(5h?fdhNy(X=Q7*$AdVfKKq za`(wSijb`JDQ2le9!;vSBB4BJ_R)*DV0?6fQIq0Lb|6pt?`W+ug<3K`%6B@S$S{uz zcq@`;XXmr>xhIz1iO07)k?$qJEY0uz=K2|{j@R$l8G)OQMDokevc@af3RSVtUkanh zJ~s))ToDEYn0pPEjCsY8<@>8djrWv(e@Dz_?1drA_Z8n(9sSxcw+joTQ7w;z%j6N?QtGt)e&Xn=Ih0{;7Q;&4pr`PZtK@0dUXO?=`?VZfF?jmx z3wBpqC6wUAHa@sqFL8hC`E3mJ#&^Df!|5HVvSCJtjrmcb5B72xz52~=K0_+{XJyYUYXzYpzM zh}U2YH9O%b8;>7B>~5~#ehd3>4I#TCl9-Em;Bi*i$4|%<9PTRxDNnW1G-zfF@2o?AQIyZK&A3mX^0reUK zH^5+=AM!A|8!qxeDX%7A#p0kE@Z?hU)Q1wGTM!+bZ>7hZ(G{6WwQEB8>W1P{6Fzmp zmDnUvla&HBBHPH@t-toLg)PB!S`^~8+)olc2z50I^nP#qA?&IS+Dp4SfB$u=v>}xr z8&&yU(NA+W?NcX3m@Jvpnfq+ej26}nNAM}m>E2fx}-yh0p%L8}(56UdZo4udl? 
zR_^u^W4vp<%FaiwE6do^NWZ@7LyH^xGc&vmAK1LvM?6_QZP`7A_?@ym6?B$`3$CBu zuLx0{Y9TJQ^bpG{6N17~@SBj~41wpt{UBfovL2Hwt^3+%etw=+Hr}&!_~-~&tBKvo zYHnjAmSiH8N6_5d?4B;co&tCuxpx1}%ggH>=4(||RdWlAb`UmG$=|Il`tp5($Zj0m zd}}UgWR2ehC^apO4GU$U>NiuQBp@)m<*qMagfd3Y!<}O7#%GK%4nR$ew#CgP?Ku_t zA})#|Y;5M)^R~vMJGU`sBW-5)!-30ue1+zfbWdPuCh5vUUwp*_i~^CQ5}&lJtJ!us z9bNZK2cb`p8Oz`s@0U>D?JjAn0zia0I-RdV^t^c2>_5)9c$}=ZD}P=PqdL_=BS7GY zQk^n8l7%8}A`!EGr*ZAySehD7RSLEg<(84M03X@L2<>b+ALb42tXSDW4?Nj7@$w>t z#JO+(aucPZeid#9LkF~p)k?w5*^<0;`iqZM2*5^toLhZ^fzT zy)=|OT$0cGN7Y>QKD3l4B70R5lUhV1383xu>|6TJ6C5YSn(3GGZY$kNxvbedHM-pQ z*YN2h+dCK1sMQ(7=Bcp@=N zMQ2~0Eid>(9kYTri$pi5Yi4A}4(?}N$p;eAQ%T~EjkLJR_k-+|Ysp`&G2}lT@}EL) zF4Y(l8$2%=5!YUbo={3(_nW7I`OF|KpnSoc?_J`4?YzE>-DIQWv@THZ19S7&ZIks?}JTF!26Myd2w z07Yio(_MK1vjO?joNEaS(r&Z$2-oc>?mQaz$#=rZ(uUiVCDxax zQx#KIdM&tpJ}Nu8^^Q(&+rZ*%^(<{~<0+};7_=+ac6ahu7xXGqp19u2O*TuJop#@s zq3okNuw`~URD{(JV^5Gh?)Ch+@5cW2OFcX98mnM|W_Awyi2n!uSMTt)t&c2zO^x!L zh&~CwYvMdff#YZM;E>EGPuWPb${KLDGq8)7w)KZA?0fUM_3?CUZ?(1A4w+8|_dSWO zfF!=J(dG5^#LP@+Hp9cy@$i zXD3kGl5Zh7{uFO(vw${>2rVzZ4-%q89wPv%B9^qt!d$|D83$Mc4;w5w>j*H-IQ;Bv zvPea<|JPJh?CHL6t?xC$;`O_=VBd7%%AoPh%Es-Zt3rLj06)XV%zyGo^iB0^PnX$D zhUd0P``tuE+t#0s?T*cg3G%B@znf5oh>nMjSNuzdgUCj02=cUl56!=k^I>Px6sOP=F~UC z_Lqh%LOLCl5rguJt9OGG;cH*C4<=`4i$22Dh_2dxORHmN7+NYT5902 zozvTGZgr+NU_XllW0&Ms&mV2BDMN`S({G8A>YT;rWsg>x2XdCMq?K^DI5 zBUG-ecUHcldOd{L=bk30Oc7Aff3alsu)tg0^1P7q&$Q=eZW3oG$P7GT^}DW0x~e2p z(>JE+6ZJ7Od$mRmeVTToC*0m6C{1tC1gMo8N49o`fxC46Ir+;!Coy4Whg3r``kA-d z2Nr_7dpi*`!Sy}R~$4Xmg`ElhUsM7#1G%a zYw57Ha54F?{zKyZ{?^R>!sEDPZ$uz**sJfyP3$io#wZ{0jf{+3szp?Co6dbIDj0mje+!OPVVA-yC`R{$0 z{gKhd6*m|oA8+S4*6}i12t_u@!6aO!PU>@Zd~d<6yq{Zqe7K6K8y)m(PpDq7q_=&5 zsR)gJh-6J2evR7g&J+8~>VWTBZgX1P3Js%uptvj^>RPhc1M@jEoHIVexW_lN z^IBi{(3hRq&QCBAKq$>MIyWUmQ>;|Rgl9?^X`6_;ls;WB65w%2I4JRi)r;5Fe)Eo#t z4P1VZI6l{lu8A+3mc+?e3*El2EFqp|Pkj6Y^aNf$24jE|nZCRw(hQMza=ie<6?Ry6 zVf!9kq-kHh)=)!$m5r6r6(NoGy97b`fm#<~bv2_uqB48^k7?h~{^aut;U3Nq)zj2} 
zIDbu_Pr-)xNnhV4-FQUvnWS$^wW&!f_KVjJ9BtYCp2p{8Z<$%q9M1BFty+(RA}IbT z{u9`>^!t)?>Qv3(pB;uC`AY+md%rb?Vkr=QP9+_Lh9avnD@DAqYfBZs=bnPhK9K6+ zF9AQn3ISyOw6ma%^68A_nPBAnBRM(UJ1oVb7gd3++(Wb59JzAotJc;;eGU#jl*gwV zwTDvC*;yMXmzprmC84(rg~hL`0!hNy+|QVN)I#C>$w_tS#i-?`<8D`sZGMqj*9abg3aHU%0om_c%cwOy`*tJUf!1x`{(l45MELKvM_<2c(N`}mhTPCgpE#)L#W0E z=7y=nK1nS?aY8jc+JfJsD}FK7(2%RvaX_cbnXDp47V#7wA7qM>#sURDDLY5W~z(_>%CF4p}?pQMo8-S!u32&CmmsHSC9Xc!etg}y?KwBOPINV`mnZggPI z9gh+H)4|A7lyoxiloxT%8!T+_Q!1+by}8Wy-Q5wm?h3%PE>7yNVgjD{pRVNW**QAL z031Wu=eS*!Nx`=|Fl^LU20U84kUD3DE&A)W2KkoSV63=%h{F zJ+#hZOD?ediieE!ImIns^end2iRNw-9RDO=zk^|XTT&+Ga|(wme2)=mZ10i^ODeb3 zBqcx{1V6&rxdk$eg;dS$c*O;k1+T)nYR2;{QBm-lw6u2ith*svQA@-Ub|hQt-(4@7 zHTI@3%il!cXB8B{HZS-l%7;e1C|Tdlu5TbHw~t)?qv6t_#3C?-sG|gF3HdkTk0gZk zM}$_ytNoiergobt?qJ!f&th)HM9wklAb)C3cJmN(m+3|qv-bQ z*Fk72J&ulAawtm{hSQmcpEI(syifEAUkDe17`H{WQl7!8dzW;u~0T z_aGL~mGOL=!NJbnq4zkY=WumEtX&|Nca&wtIAs{1R4d2&v3vASvbQm$L#Q(|g7yt7 zAXK<0f~ka|NoW*WG|2>7>MD6ZJD`doP6#lhuVPxerZz=CY#2dne(iMYeM1cD{EQ!+&CK%|7t5iLp|nBkMBHm@aNyK)T=Kh2#Os_DF`Amsgr3pu_99RRTcZvB?#zhu)y&TS zxC1~(P`6bC1UJ;4GApSh`rTGM5v6Y{GA1&-yE3x}fk4RJmhZ<6nHJ4l6AE&2a#9bl zPg!2F{&EG8&=G`_<<9|=kvB;x_7u?xkoAJ8ogMDTIXYU9XpSNP!n2rOT->$cfYte@ z&SvkDr(OT#izV=(KNfIdPa7nHEjeBiZ}^`qv4>HF&zz|66Cly1fhUn?K8J9;*6jZX zDzX%G0ICokst}kRUlK{92ihCc#3E^=nd?RRI{&dc;NgP8|FME;2?LON0?Cq~e}(bi zD79J|74;Hp*VoC#sT|(A8$sUL>$0pX39O}PDgE3Exddm#L0AogJrcSY|m?A0uThkhEPVow2pss-t^Z$K2MMm*f725iAt*6q(5l994 x;d$iw5#h7j>Ua0%`HZ(P9LZCAbJ+EaYZ2J|GMg?Fglr@K0wN|SS}v>~_&@LzN1^}# diff --git a/docs/dli/umn/en-us_image_0000001427744557.png b/docs/dli/umn/en-us_image_0000001427744557.png deleted file mode 100644 index 19ff953ab5186e43e70edf778f5c4bba37cc0efe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35291 zcma%jbyOV9w{0KnE-5FqTcPF?zy!pQ0z4yKK*8AhF zH8Wj3tGeq{b@e%C@7mpwDoWBw@c8hbK7B%xm61^U^a+v~Tuy+62LG~PZQX!3a85G1 zuAe?35B&3mNM%4K0RM^RCaL45?r7=eY3gF}N!`K5$<)=&!VqV2{nIDXPqGrC8eTc4 zow81Z!&!$32&FsdpKQAVOPY{|s^Cp@Wr& 
z+Sb_RoFTjCDIp=yn2~2^v5}3p9&O~D6uJS#k0nn^_C&b7JUlev@Np<~0yvcviSkz( z3=XvDVVrZ~^T3bS8=0%8GucS7d=*+u_(A!gdvSta{}e;TYK>EzRR1kq&HM_9{3w;?~lu)#SE=OsT7}SXMCXvu*-4{OO1mDt)2IWO}17;kh z(wm?(TFfXCbh7(JY|cbwYZP!xK;?SH7H2CT;5|{FWD~EcTYsd;r3?J0`64>ZO(8Ks zXF+FDqREQaSPtJ4Vk9jU4df6nc2%GjsHL}i zz{zwgNuF>&F>`Lfz2Og)rZ)XYDTRNp*n%`N~bo&abLN6$z zeeBf$de!t(Qd70~t)~QPBjF84GGkSyOuwSGCr|G5r?8UXKch9{9NdXToY^}iljvY4%P@{P>|WSbilP4{$MYf>7aTG*=C8AuZG{rLYA!OvYv~y zM%e@!gBYeIWh}h}o5U1n(R3csI|+}KwzxG;N*~!w67@5!{QNSr`oCiQA`p$s;guM% z2>MiY6~hTY$>%aEH*p-AoCEZS!9r#|e;pfg)1;V`YIr3hu&LjFS|_}HZJ;xgOZW(8?H1T(i=p2M~d>UEr zbRynGnEAOmdhJ@U9;k`F?%bjDS&=7`=s9K6pEYJ0dq6>W59oW-Kq4%+q)OG8L|Lyj zc$P3K%EgW%T$+bWCDXdWG z=z)c3qyBTR7eULQu>O)G62KZmfj+>h-Rk;AysyI(zAH?BLYJndOWzc?y8o#{hQ3vd z+gQT2#R(sJhQ7WnhccHTXf{^8p0=_^y%s9WvMz7J)P~=KYuJZbnp^u6I|o$^uB9cPw7Kg_d5)fzm-(UMRk0P z6OpSnL7a5v+B!-WWjg%A&F0RJ9PiuNy8OsQWPd$rOQXfEa%4*un_u7hd<;1IY8{xn15$|@`ep(4y@u4Z!X8VrG!sC7s5m!@~4%9X- z1U1UNUm?oN<6YN6E;_D}>&{Hapw_(YLB8G+e5ddauBPaWLW#@;}aZ2L} zaQCd%+_GOkS*4(twzAVcK6-i{d)*-IEU$Kan8bQXqjVawl_Ts{?p2s8_GZd!AE&#% zdRt?g${B1kZ@eyJp%Y85tB^Sjx3~J;(c7oOy#p^CS(_U|Sn9_n^1Yq1H2jG=?uIM` zzbE^-o(O;M`(@!P5kPgW2Ja~Kf&BKvQBZpM|A z*|-}c5bs{wn|dyxBWVg+kUF_k?drvZo0VLg zGt19=e^y3;r|=SWu_C zE`|@)Nb;K>-O3E8oc}_7pp}8wDnEnh@;-yt#}1yDW*|{I^wl(zM;in_y3$#VuJ~6? 
z+Af%oh7nNg&9=yEkjPh^y%zr)WJ&E9_0L3=AViL?1kt7=#MR2Wt~3j(%o4Mc$C2y(SbYs&rJMVc z(kZQ>);FgRUmiye$L5;_Wurpr>_gycJe|&q|3@=8$Pjn@A;$Tn%__IKjoVyG;=DM9 zM$#ge4H_)BNX!iwkR~~BHgkEs){JYhZ1D}u)SD;=tYxe^5PgBOQP;~km_-pG|C*Vn zZDhr&cF?(pp1Ff-0f_B)s?F2=#?>lkjaKt&uN>+#nOPz?yC{xw^ist|W34TE{t3&UEDc)Zh$*K=s-p|!>mwjc%1hid znlqaR2XL&9|3R8?uAJX=LVU3YO-w%C+v`C%Gf8xcu}&)$v1u6Yi7KGI@2?(ma|xH( zOcuyf$ct+04&%0U=-VL-6kzLI#ZHyBOWDYJyMWK{ z_V(80EgL0c&iB!A6C8AU(DC!dX&$4EyF~|1`i@=55RjkyQSb}}F&Euw>G zr6V2!+`Bm{AlXd368$*+*jq(RQqIu`TDQ@cQR^mvz}9V-3_ce!)c zJacH9#myF~#HEQ}`jmZB)A$v6Atk$TZ94jTG`X@#N@>6RPZ-)xW3dp?SJ}(7zH2OK z;VNrdpIi*r7)(Zyz@f7WLd4~CD`+!KICgdPuRry>4faV0ycu}iq0gI#IeEgSA-wLT zcMlMLM4?Dacz)U!gS77vW&Lzv; zIF$=y1$`aL7H>#Pdt?uia+bTxiGbL?ahmwI_7ylUf?;WJ62)zBznxi}4nF6Vuo4Vm zwp)8At+j^j28Dy{=cs~ek{1J>v5hy{KoeNK=@y>d)Pzc0 z8qBdIHFYR8YZ&s){?Xs44N}@kdF`M+{$_5WoR^=+a5U?ja&94P@wg`+hILHd?`pbz zEt${!6w%R`c;M{^6OJZr9#Zh^ZYh)bH)|tgL%&p41QJ#rw*IYF`5e8Mq*ddMf$gZP znEHGTV_2^fY`<-ly8%*+HA( zpp)wC^YgvGt2I~0ht1xOO=0a1nCZ&71M9mbuk)xEog(39BwJ^T%G+`qB~0d9QzI5!e}SI_ztmzR1Os-l<=D%xblqejD}`j&{*5&aq;H&=Wl; zH@nW~24#X?y_U<5 zm?i5^88!mu7*^3X_14Wcs;0{mq$b81F^N%p1(^Y*WwAspv~UefXR%>IofKQU(wE@X zli^*Lh0<()$dmV}^T%RdmM*{a>iyS6mSNXpLXc{e9esP@W*5tw9qum2B( zi8>5IX9i`5l`hq#X#9&_cDxGsZy(20EQiYHK(Y|`M(~*|A_yKmO^*Ra954f z&5!>2Q~VdcP(Zw^FZr)B_FOsnzo-L~1xfB7)4@Aly=bwXt*})80G59Y5~TefYkI#* z9@77(lB^0bxZ!`=_&<)MLp{AdSi^y{(Hf~_Oc=Kb=~vxhBhkTPFSF2cm zt;yN}VG!13u>-6=7}Y>p=|GnI&cIlLu%!0OMoBCYHu2X_1KY&;q+|(cOn`i84zR8z zshFAzvesker-PfKNqT){HZ$bL^QQY}sz#T*)O}3}Qn3!EpkT>bm0#t2SM5+02tacn zHL`M0TpV|Zrn$2-)+%06sZb1XshWr?O_!?MXb|htY>E1Kkjf?PXIvhyk}g@Mf-0>( z>^k^@V|3u+j#lDBriaR^szQt3auIUhlxV;UQT)GQM%`%0fZEMbBR9c(Q? 
zmM+WRDyH;&bRMzeY75)Ls7afuxJHJ8Y*Sa4L~T3|`;2&<$u-s_#XGGu=a>L28pZw+ zIf-jjT1~BNOj^`XwkJO~b@jAvt;|RmsavMPnBmnYOsFiZ1HQ&ZM}aSnAM4$Y>r<;} z@4*VoQ7PDEt>Ld%Ab`{10tNB7MFh#;p%KNK(#?12YLwE^6uZ8FSZN-S;-eGlQ?@Bi zqF}|UgacswdygXx7c)^*0{%xBiU`Th#v6>m%Mrs^7csd@p68}-d`{S(JX@JIIgs;=s%C#qyB8VmxMZPvqlVUV>E)97Skr*lzET z9H^+yCN%>d0^fr5<)h7Aen_;ZN3v`jHj`{xxw#}IiHJJR#ZS-l$N|AXf;#ZyCA-B} zz;vIPq~irk{9!1A$;%o=tHpbl-*}zqaPsitW*IX_C=jumtSnW2T)3qdKzzXK^80g5 zzG?a*F41#{G@d9YW*;l3ITNUmVA1Pi#V~BB^v@5rwpZ-t&5wx9K<9MndRf0|-i3Z+$6U96T;dI?#2{;jZ5sJTuwYVedHjM4MuoJQ|D6IKv+WX&-WD9%Y1 zTj=YQgi4SW$J*;=q)E&+5*4V9!S4b#cY5t2@B85hn`9?A6Ja^40=jG#SnVTA;MmnM z{NB$gAZdKy_*2An^B>tRo%|paXQw!WsT&S2oD z*Y*W^KTvxTv$zfd$N-MJV>O#QI)b7ymMsDTlR9d<2q+sAMER*r~O zS-T57#+Rd2FPPAiXv0gV8L!9o;UgmPz@}LA0l>HvTvFwZ0jI z5l?feW%cMsIh<bdA-Qv@aCtjA;kSV$%8%|ovh5=bDxCn zM5*!$C%Uf3K=(U-7C^NOwd*&@xx_MCZbM`e*oFeBhl1lb#n6fZQ0j6-Q zF5TtP0+MhC*W@yQxa3b>-(p~1^9H}1m2bj649PVL5An=hay=6<-QbO=nx6i_{h_gw zZ^OveNzK*4GHraAqw9%j+QvIEXXlunZQ8sA%wgaa2QyJ@1FW~KH`Cw*i||(VtLcjJ z&?O^^>svP3?fLBMb*c8{LFv=!3KucfH-ms(_UdVZ*Int^46Atb&be@D8=TM`>0wv@ z%HjqYkl^0M$N9zMx)q8H*0|OCzD_>g0Q#GO(1tiCVNbEI6MmAIPw;AUDR_o6^hFdmF%JM0K&`vUxWla7mgx3D(6?2G3cO}jWeu!M1@-wHm`0F-EfNzwU34P zd%gz#2x1VY&)&J3Kolo2-hJqPiP=8Zv|E=1ZAk_M>~pdO0xx*srVJt4(4G1D1+LOc zpDt%jJI>%Bkuhxa%nwOiJ)ejJpJ#my2=J*4*+&V;_|ebY1~D`a({;)+31nBaQkP*o zWe--5&aA~34MN&MF1%=z>3StcZo4ktkrJy@GrJkNpI`SG-gJ)Xs{P$Cm@vssAxw3% zE#XDIt9truQ)R3O%2-59ttC-U5LCvQRV(HCAV{je61?Sdx8BlQ0A;g5FqT-xlu}k! 
z0!E&3)5VOMv(P=|l}=|LFDWS;qE=@ zIldrw`UF#)InZZ`l5WUqJEk<&T4>S#@cMuQlNXXcuMShW4U)T${O!l_EhCJZhtHj$ zr`?xD+&tr9qBxAR(_a-FN>gP4r6=BI*%s6e0fV`<=;6T}|~A{cwDO1Zqm_Jenn0|5q#&aYr_5uSxN?jbG1B8YV(%{n85Sng&`50Uu9R+N{H4 z3NbCtEkXkn)EM#F5t4o(xsdT95`fa&_G;AxQnZ%OZyg>_>_6kEHeS{lcg>L70~3sm zItS`#CMJGJtR&4Be^Ha?#;LhSyTm?blk>aZa7?&@&#Z%)*eWaoZQ=ORS-9z$zr9N{G z@qX9c8Y1MIE)p0J`|#y;e|}a%5T-MwC`pb*@}8YOswSBbGC(kcaL&qljLwz4Wo{BU+YH2WVozClQj0-utHsZ*T4H zPbhzWgzxK*LkjrFVX+M%4qI?gkyLJCMl>E}!tphD$cQ#(nb=L5B6I;qSnp#bL$&86 z93ChWSbKj#A4yGvoW*WPplye?9}sGZv|3a0K0EjTzfKu3lI`h!!qnd6+i-WWTH$~@ z(8rgL$Lx=A0*({6v^3phuF2aF=Oai%9@d16G^DiHfgU1Q)Mlc`HwW7alX=QC&B6?2 zbsfN7v|S?4N47DVQ8(~HxKo~0e`nT$?2fj)xcW8;i{@Nd%lPa&3|ZzNI@lHLS-Ib;6@>bO+>q zXJ+a4dBe>kl2^NzTHz`QufBO7#z1_3Mdyg8o;nkjixywY;5uf7H+M(DL6-LBOx)eB z(%&Pj&{((P&6$FL6BBoo(35K)aGC78Y{qbb<=ifKo1&}Ud`VGWhh$)vS<=~ki$CAG zY1P%wqITM*H&yP}r*w0J$l5Nr7|?xI3i>l9XiycMDWgpyE%K2d;;%*}!V~LUl)U@` zU`vJDp%5=w>reePv|D2}5@v`=k4-aqH@)f_^DR6MLPZzlrW*-+WIXmNpL=q&3$<(; z)RO47{g5K7&Ck2OZRuYeRX8Ozw=0IhpKlHG*(~DwqVTYO7Q`-LxKa+;!E~*Bg#k%T zLLTmcyq1nq*yAw>MyJy~yItH9b`Pg_UA&_``~8K&?`DDsUY#MNt&L5al!NarnsaQ{in9^mNlmUwmvK=Rf>sg8pfa z@G6g^#SIK2%A8~VgtVmH2GhSO{PlUX$z zYf>^@pWwlhkDb(X_H9diQiSE~m(}n186h2XUR+p+bSO$rVxR5D+%H!qSM^7VUz1{H zeWG@fG;}#UrplYw`w40CLCPATZ3@T*-`BtI86Xz8WQekXhOZPDQyBRvtKm0qPKa;Y zk%McY4Sui_6a|OuOw82+h}o%J+giAvWM)#tnZ9r;QuWJqguUw$=kGX!;1I5^Sk=DL4@iQ-??}+geg?Al&EX+t# z2Wk(+S> zM@$@;Ws5#2YY-8qW{H2ZC`xU?w_FJzEod>^c0M6%gKp8{tre8URa~jPUUC@-PcDjC zboP3W$j>iHyS;V^*CTmDBW+3+zu;^Jq|Uyv5$dfkJSVw`FkrYw445+K@%QFaoHcmz zW@hP8dOULCKtuBidEqM4O*|@OT@C`SwQ*i9IsG294#-z93YkuOVW}>48TKT29J@Ud z->W_NJ9Viofs}DMJL9bsl&{2`P-p9J5}zFi+k%xbGYPVPW$_NKYJ*U3+EEKsjq+K~ z*3k^Y!p`BRHrK`7kr+Y$tp(_*zIyD%9h%9+1fOc}o@u)j{c(_ylR=xbrw z;O{(7&0&}Roxcf3&MQxmp(9}SskPVk2b2f_L+aNKuB+lF_lJg_H%BJu!0XTKX32H) zl#%8q@3F2tldByE_o|Nz4SM!=OVhTF6rDs(!enn%i%A48o+Y*IOzlL?HWUK)ql)79 z9S@zeYcE;K^Z=#@Mk*I+;WG!&P*a*~)9Pi>^$L@Sq`akrvijBQ(g_}I)x`C=_ADXa z6a1mis|EsHm*0nB7&bk@@XIRlO?2Qrb@-1sDIza1qII=&kD3~A2D!g`vGIm)xX9*K 
z_5Ng_=KrI@P|$Q!7=5GhYB_NOgFm+_{M=)rEqGr!*ya<2h zv_88&)87wWL#$t--rzCW2zJJm+lYV@irDK|2Cy0eO3@P)k(c{@0SPBD&VgLymm=8C zL1`vjjsIva&{ZBv=>LZu6#a*a{J$7M8RLeA`8WdG&nrS&Atr>mM z6&355f^;2T>GV9#nS$>1UK(F?&EhZG?H!6P>uhWM*Zv|G1^tYecw??hUKPd?VZK*K zd5@fjn0~w<9XXJjyNLL?a%;F+AE+k~^Z7DI(J=~RwW-#g000W#y!g=KK$L5F4dTSI zuF?|!M@yp(?5cv?dqHKcvJrYZGMvP&vg$eo%V+!o;U`>HYMg9Jx-S~My>Ha6%4!_6s<#LN zC5?i9_{vUkBE=8amx(=)0M~x3d92K0EGd2#da8U@RsK%?*YL?x_Z`NYMJ1O8*it1O z^%*NjqJU@%zHS;;Sjd0+`a*QMhAZ2m@#C6*Y5E1FP(Y<@%tz?8+BWmo^f}2E{!5eb znBsKZ{%L8>P_Mx4McU8#I{(sEZNeTSYQhdaUR;=p%gs)q7ZYE{KdWf%D60CtkoK!r z6K5b3WqE~Y`$gY`i>2LbyW&4tM(ggrl=$oJFGioWYNh+F^x8cEah+n;Mn#96q7?{L zc$?&?v1Q>U6G>I63Z&W2V*2N!8y2hT)!oAnrMXvbL?1hm3qNCo_JkRtt>eN_QrfK@SM4EDK+pLz&q6SW=iSUtb)A+c`IJbpwfBc)A>kn|PF46VKYSA1L%! z&(>EiJC)Wp4@zfzj@M`TA_$+cq5Zz+O)J%=0_I+CbWd$k>Pu|0{d1$7pRT3|W8jtu z)!^a!sxMNWE!UTKL>;_My1pge-y&ap!F4(9Qy+duFPhVX_VbO^%gU`@vYQ zN;uM9zVw-nU>7quVM5hF=$q&FIH*;tI)DmnU1C80GI^05-Eox_cMZM%MqmS7T%P&6 z=c={dLKEQgvV6iwr-&yp2;1`nBXq<;;C{tvs&XkG&68yriEL&jj_1MAoBUxW$JapG zPT5JGd^O?JZ{vY(po_sHt1TKrpD@-8c-S?mO&HO}nHcLeYL6wsEj3uM{La~V46Sx# za?;?&EdUDC%I*qH-{x$Cv?uIbqTMJB^#~!1H?p3cQlG(she<`DkPlBrOPpE8AO6x z%fYpK;fw`~g^4$GgkY@;AIY1JHmr%V7%R15Cja#?X}AQl?>eTPpVLM;2lprWFh@uke((5x1lMQL4bTTIvzjQkdv>N1 z>E|Uo*(m-hUf(F-Y0_Rj=vr^OkZx%ZBzT!GVp*ATy6N)N+r-=y@a!?_9cr?_sCZkR zF=)`5z)oyfMvjd2(6r+x;I`!#D}YPz_!cz9m~LNCcgMHl07x;BUuGYHn!vtuBd}@q zQRc~OPjhQ(R-XVZoCuwHrx)PaOx+FvV_#}67PCk-FzSRn!?!&g4(KvRaa1THvh-;NPp^`8(A-!wEk(xtQla{Meb!-^&=5AU? 
zJWpMk*&x&6Uiw`X8)mvI`dz_~Q^M-4)w-yA6TYR2%f33Hd@-sjJaU2V4-!j~;@g){ zbj}KA|F^FwUn@*}XR`5%E0z6^+BVE!?)*^E)#ttx{cLke@SxzK)Dbf?qp+Le>U8*+ zW5Y?JGEolsEhhezN=M65*D7I42Yw zq$$3>rSRzf=uBG|uKKz9ZeS-8VC6G$jpS#-KIXvi)Q^9ll11W?5NbDCd zY?_-C|0k~MMNeax)qE3G6KcfJXXd|E4xM=DOy55dVA;HIG$eXX2qfJVcOP+E>+KdD znEc9)*5FgjlhS zjlhVB7B;%Xrr6F2kt@m<08HQwPhkDk+mjO3s1Fr5#4}c%c9s9up+9R|Z?J85!OPI8s7v ztvXoXZTCh(*4uQ_nHWEW1cIJze+b*+j8KAj&_%of+V^XVfn^;cqjA z(6pobit7%8R+mSEG+3`I=3YyK?`^W2l&TSB&K{~;VZRq@iaW6DTrhovVegCme&G1A z_$+kd^LFD+?I0h?MAaxX5M~w#n?(Ty1|$0$mrxk$(WFdkO0vpUG0CM$J2zkO-#VbV zqEj`pY>Q{SzH;r<;wq)PQp_s`e=2r~ZC$o7aNt=tJUa?gqHZ6%oH0ATc&6XJ%1NV?Vz6elxX9t(?Oll9Ir=BrYCa zzE+vj&Vcg@xhW^5nC+d*!WV0FGK>Dub%W zOEgWX8l@U-r#BLN_T4yNnYd+SinZymqZ)=Wz#<FvJKVYmNin&y#ws)0kbHVWloEfrj?MmQj5JJ$8!*7=8CVy!gAICC|+ zvKyOMIiTFIP_5P`e%QJFR?1rJO{;-v-yl$lX|<1&$+i)GSs|@>&?0IOQcMPXqCvC7 zWE$m}g@vZ?xn6mK+S@aXh;}E(FbsJz{*<4d)IwiShBA!hX4mnCr+S(c8G+QsI>l;W z+zOMvD&A^94q!}GAPq3)1Yt3VfGUyk&(^UG4b{eU2T?%l@$2)G9Que1yC@gJWf(=maN6=id?BQ*`+n6f4k@G{!c^0_}m_5 zlLcC&loNmMIEdxXA68{fu!53Vm08=_XU*%g5hoHSfo(DT$slCgVz^PINv5W)*v_rD zuA#xfb16>*(Rsne6j|Le7vpT-;mwYRFL;63UYC)D>E^QcvsgZA`14C`9iN>%`mHyc z+fTJxv5Cqp4}32J2gcO`gL0LsObrE$(tgRQvtCZ7s8sk8vddP(j3@YcTdd~Q*b*B| z_%A<#Q5(x;Yjp-VzdycqJriWwgLi(ZmBlX%4Gm@F=I)NA0&j2K)GOv`Q{`~Of^D)e z;rY_ilLd(b6?ifG&aTYleY2;Q3_Iqm>h=yE4!Z1rSX4yVX!(JCq-QRUVaut*!{k{z zLV9trWLsO?R7h9tL=zJj=YVY}1%_51r^rb%t`32vS#wB?vja}Ju!<_bQ*MFLOY(-y_MRC=({0gJRj>*P^`GgB8?-n<)PSOa z`zuE*uC0Wl=1FF?Bd@Q5StZlc)4}$uDRNW?hljO{2=T-HFR;Xl>N zLQo+ApyBIl_J@njZ~j8knl)&!VG(#(X@@hFRW%$OJj$_<$c`z+=6M`6Zs@h2gaH_e zt}221Qg3B8$inpV@EjaBF^KVG#jzHdIwg|x<)k44$8Nz>zrpJWog5B_-roe9i?XHQ zBRl<AzFlCjlby+pM&h z@^iM-O%{ntrQW=P3YPAvI*dch^;Ttb^lRNO2?}ju)P_AL?qtQXRrIjeCH-A$ZBvw{XAr}_v)>>WT2SX)Mld8`h_yq(qb23b9Y$Ba#2d;L9 zGl>D_ZeAgi=MLzGxz;jZe}dcg5OjZ!yM6_=JBwhhEibW3PDpS>l|3;%jYoxf?|hR@ zmZQW|SXd}ScYJh22>`Ki35Di=C*<=<=?#W>4G?KHh^wu2wgVrAT;J6-`T9!OR^qz7 zLlrfMzKgL-sht$kZ>p3h7;3XXiyh6##Kivm$aXx>m@gBeZPfWyn>?5~UyCsjcu2Rx 
z&Ep_aq(Q4PRPeGyHYSfpZDq;lvyHj$l9`|1ZqA|G*4EA&_%4_z6Qb5SzBflXx(C0$ zzOKMj&C1Ci*zXU&@ z*y&o?A8%*34{mJtCK(#^l2%qUG|GAAOzl#vs6`s|VDXB$ODkbG%7g3uI3D~Ux@VX} z@`PpN;LiB?Bs&L(xUw=%qy!*3Ni8O)$_Qxgj*_(Bna}+4Z}Mv21;SXLPdHN&M(vA z$omVCU-}isC+c_sC7n!Co>o7uBKg>t-IWk;>(h*_-2Bjib-b>Bp}>A}aFB}qn9{Tx zKoK7Qr>QFrs&1KamV;w_avU8Vn$PtfF&vqw&*>EzztvZR#H_nz_d=g*YMvQkpS(~FDpthkie(bEeHL)ymv zgEKSe;0RS&U0v7F5R;PByZck4LNmeXbcqeV388sWk{5OF4&Y&Gi+$1gKx28B4kxva zD3WJo$|o*OK9Z`9JUctRoPlo({bz1azDny`fJ5+b{Lt~qMeoU7ko-;qQIYnndELoT zn<{|l<@|OG8U)&%iC9BJ=Tr>*yB)Q@%y>Jbrf;J@f}?9-(A&hgQ;Yg@*oQV{kv0{I zSUKfs5KU@$Jw#m9r4`bs1}7hmI4*|u+B&3qQVBafoz(!DJrj}i=*+%U&FJgm2}EvN zH3s5y#PByLNQi?299E|2`#5B5>@-|o!QR2>L$syw$;m_|i=*>%(*V=3(NQ?|WyaBc zOo(p4XrxUM6 zt36J~!31#IJI))b(SRg=ihZC|p3xy=mC9voollq6W-7krD!6$4thlFgtW| z#l|5p2);4$ouK4b8U_nU?T3bjop^~fY&E5Z!J#{2_yUEUjW0|Dtw;#J-xOhCVNu6O zcL2+Fcls1jXq||Z>mWV3q1`{k3=RCa%&dBS3edvr3O9LHJ*3Z6ET0s)zvtox4H2f6 zF1~?XjwR<=;JCT*=(pf^{q$x%VdC<4@wXNO zc@xS(}G^CH*?GZ|-%A)?}Y7S9;ZA;APKJA017(!PJc)zHlXU^~-RtGW4wi9kTurcva7=S`Y zRZ)TGa`L5bn_G~WC(WXk6)?b>15U85zke5(lPR46M?JCpsUb{7UQ9D*=ftv6x(;EM zP_f|5OgwhU%QnLh6p~(3T*h(c!tcV6_7G#1Ld2ZJcgdp%cmSnow3D7~YlR-J!{X4m zZLqf4hu5P4P~q4q)xrcDq|1Rv>^;!=f>M^kaQDmNZ$A7c6^QFDHr*31u9T#5DYfKt z3!8`JtO8VQ4?S>*ovOtC+X_~BNs(G@LP6Mol&!nF-7*s~ z4j){fPwCys9EG{__PwEN(n_Ms(H$`tvjr&kV|}H z1B0YAs#?20_OV$O`FQweL}xhyzF~PA#MhyOgJux3oTKLK8uS*NNI6O{T8)rtU$T1K zU=hXpi;o-JDS4m9Y@lJ{(43!a;s=iGE=rg%^VO;ntd*VK`@A80Nm~t&SDT!85y_{m zh2nHS*gb zp}@U}xjOc`L$;hSP?|wrAzqWUDCBo6Lp$R*%k{DqfK`M?v3p%Ci?y2*)Y;01-w2(0 z<=pjpEniYna&+$l3(f$3-zf1^QJfZdTXTQ+cH_3pPK^(u=BMNNn!_2(g0v&p@ zENV45L~(Q|;X9w7%bAkGZ?~>szv!f{%YOV?!^0OdUm|7e$qNG?myyfhCvB*V&ow53j7Lq9~HBIYgtgZd%d!&P9%O%tVeEuvPd; zT#gXfb0dgYwxk0%OjhR3$jhTnl}iL2*4m%>X5`SfmFHdE9%G0ZUtMm0X2(mntRLOC zNHxos;m)4#Q@o3^0s+WEO#J@1&iNXBV#SnL1VwRCp@Q>{#6+@z7FT`3`!12R zh@*S1bKkjEs*oFfl9if&?&^eG*(B1=P>XIp!1f1uR8%zvSw`1h}HH>_~bcfO^W;*uhXVusG0aH-J|h|?Mj_gxV1hOzPb&>f3U z70iNEFYc|~vUtUI^thWW&>@=ahtse+bH8Okt74KV$}$6J&Y(4h#v~HS(?`t&usZ|N 
zZO@Vdx|O>(7l%8fn^8mV)m;h0J=(<^^$Mr$%=D2>A9?lk ze1^z7boMZ^_&7?rl0#dqz3PW8h6+HPI(CcnL#eqS3zdNXyxKH!cvE{vDOt>3lIt!i zuv~b^Eh^K#3@PZd&fIl@5e6ZqV?FF6S*W^y#S`D!R_^-o{<);I6cu;wyQ0r}kXdbn z#j-{PSJ(T=x9V~nL7&G6x#B)B=x=FjGqdzaQ?cQRRbY;`$TY9lC0s08ws!V#lDa!e z=|a>AXpzcCMVC_K&yz0HEnRd>_8h`}Y>ZigNDWdE{dLC5bLk>RI^fJ0)k?UpCi<}0 z6a1W>4Hzk||Lt^nFcCej;1f<#N=? zroL9;huQiWVWWq}eqm=vg-a_IPvBP!{O7#@uJyBor$pBuZ_OV{&BpzTRysyq!w2`w zFVf+(pl==y*!?MU-`$x(EE#}Im|N}wPE0zUiSaqez}Hei2!s!aFXIOQ_wA1`1>0^)LYd1IVM&e zvMg<`78)=o11)q7jken_y7nJ0gCZm82x%lcwRW$se~gZfF722ljhbg>Wb7#iK68BB zDu2NC{4bi$Dxj^d>B22g+@TO$in|qecQ5Y2-Cc^iyHm8h6nBSWMN^=-6Wrb9Ki|cl zdoHqb&YqdIo;4gT6E^Fmt9@Wh{*Jb`8m@N1{kP}5!ROl1eZ-}e>~?jf;;DUo*HXAO z8l@NAD_}A=cI9mS%>DW3c4Zc5T(@}9)o|H?kF#j21JNuO0TB_{)itHp{}#WaJE`6^ zd?=hxPsR9RzqUv0qZqE)3Z4+8WaXSwfUke|TmET2Gm|NY2xsusGjSg2b_ei|CSH## z{8mkFbO^(J5c^a3_JXfo-Vcobg)P?{z4OAKyBg^d60enps?DCSpdZX!Yd3#5EyvXT1H-5af$ zsk@K8@8`x_rQ*eGrFzvfw$0XDgil;0G0w9rIH@>!#AMg+z+t=2A73~-gf5TL2R%L6 zNzza1_V&-XMw4S}>RY|M)dm>Q4vam)Wso4alN_pj1RX&C%F{n}3!f_EhfyNtJQzjB z9lHjBL!(EL6rq`3pC6^j-#N2SSd&kCeLCm?6aUb?AW_8UyMsVhxdez_aop0!C*MeB z#=#>y%UAS=>c1&9(=ZCOQu;smjitnqr>73XBO|){h6DA?rAeB#{J~pqmuLU_RgNWdcV{Qg@BXa5p&`WP=tCkfelIN%oT3uxDaJt>Iy#j3^G&aZ z@W5b;Gm(zOjeFH#&x|>vJv@b2#)nzFZ2JTRyWU{u6e!dU58q1HD7#|X`tszECc-pn zau1NFek?F1k6UKt=9*Zx05=b-s=Vv_TL5x6C2~^ByKRf4>Lla9%^k|+}$Sz2YKzrt3VM{$X_gJ%jrMIfM(3hfXDR; z#E@EHoIS`vrOYzEbAy}8nJOvo;^Q-Vbi{D&wwx&`6$8|{L{cHME_G8|%Lp^}#B&6} zu3qKJq!NP(;C1L{gI`vrNwxH4P-@SzurZAX&SToPIjR_O!J|FsU04m*#ovG8-whW% zPdEitwqnHG+_z%#V!bM0dYPD*;7XU*eXcS1rfX;j2r24xd(EFz=$U|5(T9MZ8W1-} zTzL8TerPhV^YK+q_POHn+8xBP;`*lYO3&k7%EA4RjC&!D6{P&ghYQ=A@$vT|WapM| zVt0h){0n4_+HlR)u#4sqBG%i(@Zigu$Ad!WW!U>V`J1g+s}|}bl(~h>G3#>dd zO-)@m-;%~##-=f67Zd;&o-!*7h=@^*K2x@xUh9n6*~LF3SH8ZdMY?uDf_@T~^ALAK zfblX zGorFt`6LwvEGdxv4?i{K{ofud-2FUI2dMIuIeB@P3+CFH?i&0)r!20p5g zIr;3{xz8OHnN5b zXX(Qz5D#5-tJNzNH!Tee7Dxa+-Cbvd5pxjT1@4a=EJtXR*`q^XryMP{-|4 z(8JZZe(gpy)iYy?`qPVFv)+)0;Z^v{EfiDk1Je&fj}lpl7E?(o=T`}4UihEdE_%su 
z=pdLmDfaCWiXG7^vO5{v%yiU_!DALa?26uSu|ELir_%c#+}McH(9YER_`)+Wr2@_# z=&&2oN99dhFHbPD%bQ1sL&g$trM-nM`}j|_h>8K*n$D4o8U`1S5?(RJrEI%Q*b$AQ z=4&BnRU0;m{?kvke=EG=-{r~w^I&8sGn3T6xzjS!!5pLxMsE`BvaU)Nlw8fRW%CJF zU)EKkzKzQ@fy1X2|8gsjwb^{XwEhzOM{l!#7(T;v^;D1^E?+s$_z7_6Y$VjR^{ zdP!iAUhmG363mR)gP8U(Y)M_dP!ErYQ`EzMoreIcSHd{N2w-_&sijam`21CR&+8xk zV@2M_d2W0IP$iC_tI|;^bqyVZ6=Xq@^u{r*`NsLt@-~9ibTNQxe%?g$PWHZ_-Anvo z4dKa#OXshdMm``-SbyL9iBB~8fexwr!8)Eh*_TS5y56&jfiAVOd-a9vSYhCK-$kG%{7>17=sdS&w z$Fwi0(9{sIhQY?gnv%FO@amM3^g+%}?gxyfs7>!%M}LamU^Dm4e`u9Fw<9P2`)1=- zn=mOR_(LLWhDaJ&jP5sYwWXs(UaU68e+(26Wprp~RMAmzE`vXR#G`#o$TjtG2>p&M zPAG{qKzu19hZmUkyTL(ATMMWOC0VSt!7k)nNTS%I2ZZP-a%<_zU!+^SWF%;)cKbcH z-a|KCUuO{dckgaQw49S80Ev{cjJT+-2ir|C9`RiqbLhERTuSycrJ1`mwbZZ7(JUuP zcA2OFU8-fXIA~CzthOBs)dl#y34{pw0)EpfgU{R z#|GKH?(2OLWrXroqZmBO_D7;5&aG5*jQJz_d3?+-qJT3--EFC?{sE5PN zJn7F<{`ct|wVBR#xYhw+@7I>|68GFN_fEk}p3+?K!JVIzm*Mm8GIghmbWq)>4!tZ> zGZiuptdx^UCh4z{FP~^m38?Eguq{q~#cH_@t*J)DVv-ZQ%g*6bMjYDL*pN^Tsz9@+NEM}uarKZFF`zUXTNvCZU}E-DU9pI3psy1 zCZdPmU-sxYsO8UOVWnl+k6R|<6marcq1*&9oMs0if#8n{Fe)@}Am3-X1p|uaJ8Tpt zSPJvfoQ3CoUjo-s)^Bl$wu3&_!bN7*@0t_t2=))@o|uCT`GlXhG>PLh&_Lr!xQmU2_ZSClWvXvJSO?_6@@ zJ`D(ae>(KM=yB5C=%ftUFq}FK`D1uBZ#{X|6O>!z>NmdTdD}cad22!%_b7P@!;bw) zZ~vF{I{0Vj;rkBx?$0_(GiqtW&i|zK-scFTkMrqqlfr(p!{cy{iWQ1>UEaZV%b}V_ z2N$?&EP3yaU$jOY*A~w1O1UsmX6rb~cuzLpk=%6F*to}s(hWAlt9mU!t;#X*GL5qf zgaeRKX?G|847Uap({;T1+QYP4%4>RYwk{lXh)8|vNk!R6e?i}S0WaiqY6m|&k+9yw z)4a3{ZRB5kWe$|dmwrBbGNXPVd#`xs{|&|dNVT+*sDRyJh{kk-S3-Qgs}iGx-Mk`vm0^5 zwev4bY{J*7n&!$oF}0u8yjlov;F7ImC!JoIEil5oBSF#jspWRV4H*`rm&8WCcOx?U zB16g-3x?9R@<1a-I~}s*m)b%K81Z87PW_?f>0FGDN77y8R|eO;#65Q#F~W!}NZlw- z&jg1fgKlk{!hck%Cm>R_P^ng<9%w+|3Cf!88xhA#a>)2bOigR5aD8d(H7-MpSzI)) zeA5Stn`B3!CY)Z{$zNdOtsi**MJRgzqwsPEjKrG5e=Y>#h?=-q(pWQpWyS^8(1EEs zy}=^@9IO`_VpCNZmu->aRLpTeC1Q7ea?Bjx-&}s9sQppVQ4{O1(s2rHQ5vo~+lpelKtP|Lltu|A&mAAh zV3Ptx>e;le(Dh-6&p0BFcplt7u?I#)O_zOYIo;&t-?Xk>p^kG8+g7fS*hb7%uAs+7 z*2$1-Tq@S8#)7AC1e#>c-^xoJ0mV_4Rg*rq$n?~YW*>P{%V+IviE$`EL<*^{OnOR* zw${vbtf 
z6cX!HP0wR{Xz<4RY0|_xF~y-Op~|%$kw$&~z}}#a@*{K<**$5N%19M64A|$SIR5+C z@8(IUn2tQgA2YTJDFDAGa9k8>@A#k9xz?FJ3X2nNnj_W2H1;$^qBXt7u`*RyOYpxk zqlK8-8kWkqbUD4cxl`;X;TAAhk2?@^>-HJ+`|n4xP*rGi7GCf<7QC<{f4IZ(hwPr( z;0~tQ7y=R4I{sNM7sE_C)WbVpNe0Sv*LuYOxLzZty7t?U&`?<6_ZK&s3LstEp_5T| z$(trE!HtTeSbZ9e_|mr@Nmg+6dcScET)_e}hg|Ph6+v4sJ86f9y29_<|7(igo-)sW z8qH;A=Wg>928|64ns-Hw=()o&pkBOwC%;IJ2*yrPrB|d4x5&N;NDOQZwE^)~;))}7 z0Kyag!O*k^;>&l()=10{cba(2&09b301VTk@A(n`)FJ~|`?!?iNo@(ejYMl#hFX+} zX+N`4!Hjclvsds63GJ?oY$PjN)sd`WwEGt{&wUjyW9MKRr`%nX6cepj7{E+&B?|P7-#~>9pFbRN?=0Y%ho?f}j zZ7!y1j|Kg`$;xnzUmxwz(Gwmb-ygiZ0}`)e-84(Hva6#J#0GA7g4dNGml7Qkl56-Oe z;Pd-KuTDgpB+uEbaTYA#S;1OT!iPmzhKOG5n^ zXvrYKs5Lf*@&vR1APqK%FKlpB<-||0p54OQ%^E$Ad-}ujZvIb)3{go&wUMX1y!5$j zGhp4!s_@m7YU}gFzc95p27%NlgS|YyjIraQl{h@C-FH|Xr}(S5RD;cat(lM97Hfvl zUVc{&)heyslg9-w^qodtc)YGq&z;bftLU$#D`9E+4qL4PThyoS{@#NRI?;4DeEi)z zOH;snnhHoG<(B(=!hF!}mpJ)K$-k$wPR&_A)9TkyM`UE&m(eWo_5f9H&%`USarraj zxWa0#{@>{yKOs-!vj)F9846=Sk;~Vs9=@Jrrlio(HL~z@&3o-OG_vp6dANyktQ{H~ z1K@tZ@c_@*M5gar089X&HHr6pw`+58V*oV6hqKF8`}32FgK5Iay-%+_Ls!c}|7k~d z2@0Cp`p3v5p3T~{E-kI?@7%;)Ub3SOP{OkeAKw6?+wRF#h+|_Y1+#3~K?Lde__(6> z@M1uFHCVlj)85|R1e8*t5>#a5#3uB0a{IU7{yzT6*{P{69tK%J7*y7iyU@+WGop!8 z@M(vWSpVu|iPN^3UwO;|Y!NNa0*Rd@nVM3jOU%?Y(j&0{l%`<8v@(eC{#Y>L)v&$87w{%!(#MUkB_l$sh#ivTq z*y!eeb3l=-?tguX<7sr3qP&!sS8%;K{e^}RoL6Y*To?4`-yqjvlLF?`jw{RiqsTif zZH2bI)2SL$T2^*}nWC=;G=v8Py*eNv6TDx9EQv z5_`?5IBRBM6CHgiS8#sj>RhKnkqfqUg--%7PY+H&%YA(0%Xh?|ZE{xM^d=SC^~W zCqbgj!0cDj{=-e2eNW%uB1jTzA3sxYW2xLoWNY_ML1)U%7ufje968)k1c!q_IhNB{idX2pu@3mLtsogL8z z#^Qzu6*6qOf=_`%cb}*lQ^$!D$<}Q^SlIjBf>WDu_4J}hNiH7K63t9fnCZ{ZmeaFh z)thHSWfc|gy8+IQ;KZf||7Y_;EocaB;(J(X4o$l2;hk-l;9~KH>!1BAe&-Tk?TwT0 zmt9_vI_i4!%4tJToyx|wY=r^hbCbS@I+PlZNB*W)XuoL5;?^`=QFHUzi45u|ggX0g zMdIg!e|FJ=?e)Xuf&pToT<4$p#{D)%yN6=AAllLZ&nXPnf@D^F-fmtQU~AF2o45Di zzkj%5AQxx%-*0cE^7OdO6nOqk&}k5;WpxjjqQH2!qA59_E;N%wt2o> zRwLCvR(yhj@TSgAf#@p@2(O>RNrh^$5GHdi9Ru+``;ZIX-Fxyab{4Z~6`%VM_pE~7 
z556a;O2*@9XKMcvd5N+TIoVvq%Qk=?Z&_Rt+I%?3Qk*w?cg7fR(KXNk%6gR9uVOY- zP$99EnX6eHNqy&Y%0P)kjji^-V-JG`%gc3`71qGbqmwY`0F!Jg`OI_7tyZMEMIHhM znJU9h_RopG_T0%4RFs30I=>v4un6(gtet(lrOI^7JS;wjZy!9sl@={rvpb}Q5Bb>6 z;QZm*y$7%4N$6wubiHW@;wKUr{iz;ypuq{hwjm?GvURKUMdPERP}Ys+0cc zYeMzy7IqvS;<&{#X@#*etb{3cP-kZjFn&Zx5$b$5J2aGL8o3oOrv|>E3Vs7WS?obeirthMukH z?N@%h^l~L?1NEqi1ndGr11~plkf4VIfPe&S^}GAT_SQPtN{->tpm*5YMh1cb%k6dI zRE=4k7Ny<==*aSk#-zIrFC&#c=xJ!D(JQF9c3*r z(uvdy_yP7_=t_0yrmYKReri5EKobcV(-D~QA}C~JWd0p_(9qHzdUf0g6=sl4H!4zE z6v4=;gdu8(8)JEss)1(i#fw#gk$%YLF^Cbe33CpGjtS-LqYY=^*@QE9%yCZrCvx$! zq!ELILsu)Yfs&PzJ#-zAfS#yhsFVs(G0DM0cz%WpHgmB+Bamz&`l_sgzO-V;*8I1K zta)@7#sBF**x{YXH*oOq5Nku&XKYa?+p|+?<6gI?6)@hVkR?Wb+8WH9^3dZ}$aS(f zF2h$^b3^z<1ZXlVFLG5xq_!IGPd;@A7DgJsMAXf1uZ*bh^78gy3kg4-2-EXORZs6c z>?8r!y#~W)V7TG+Ot@`+#=U+7!Cmt2MMk|k62?=G(2f9pjhnasAYitoLZMv`TS%vt z`O9+zOZFKu_uAP7+0dnHW+wfSF>LXqsa)PemwG|5ohfvM2AG4>{BN{e8(xgg($s;^Fo12!F8C4@?)KXP#8}q+_VpRIkeOKRc zrYqIoRGF+77h7;O)>yZhSy{j8+8?v3#yJam7k_K;3r6lg+W%sPhR0;G0*9IdwzmyQ z%ip%X2eD#Lu&hbQi8bm%cOH@^^TFyvE-&m#fq_lQyYAn#@Y~i4VJgab8HA>L?~H(W)_8RZ7)*)PUOl_3O~i3o)ekIpH!IDMMT9F{t-R z=vEJQN&BXRl(|>s^eCq4`petf8?3PLTb5QUWa{kWq+WdHB#Gi|sm@=-&Qr(8@uSaB z=x5Su$d;ebltke*O5*4?F3~-H<=ds5=Nb|CziuSjzC4#5%P2Y_6Mu=Lb-~m$=)DtjZudOperjcC(?tFxiHjZa@vupPU*E*6 z>n91J270FUH+z|L-IbQUNgYsmv_+OqL@-#lO@3RF|2ud}ZVNO3g{rxRh}pv9!C@uC z%r?~#!`eJ$%?#IH>tu+5bV23e$cn>^Qh0c06uV4^?t~*Pk;3}*Zs*g}SjGXjRoD7(AXZqat$6N-tPon7)%G0x+haZ1XZT7O)80>w&3&+^-H*gcUE}mIm-_|DE{Zzgj@vTF#cL|{ZKI@9wG7JDi$(oVaU|_oy-ihTP7U?9>jlo>Fo(q;Vp17&A;=tr;F`hOPSk5+@^!O!W0fs-39 zI`lP;?#Sv<=C`z`V^gl~}`vp}D)X>Hjnc^Bn z{KEJQyDmkR=N4pG2CS;Il@NP4E*Jg-Q&ub1H+o1<;?0v?82UWX4gmj5Q z>k2g@%yKne0@P%MAMqd*0RsTLPfogaMpG-8Se%k_vo<65tptPgq{84)a6X|MIo9ht zrEn{~1Sa~oi&xB+SNrD~B&4w=Mf5q_GntaZ;$ZR1Hbopqp=#jV^V*#!E0-PI>v`Y8 z-)bRXd8b|R5l(VvX5s@wGGJRQ zCl6=%!l8*iw_2%M;Yu|g1L9~KLFq>z=;F_!a#|dD{@nIELe@mz<|>@SZ>2&o@FhzR z3Zo$Rn{HU6-JYuQn(c1;Z_aB6(NLR_hDYvqU*HPJ=yE=b(}^fZ_@1O0a` z`1C5A07VUu0zkINnljmlT{6i5CaPQ>Cwm2FMz^vDUfIXQG!sjN?RvGSg7&Q 
zELE9kY?ldW&zOl33CQ}%NM2!TH%ex0la)Jp6C|(;23x4;R|Yv|7CrI8f=xs68|dQW zIXRDU=u+8G51v1EBFJEdE8<;+7FCHg#!{nk*x*Cb;0ja@P&f`bYW#xJgm=}$V`p*R zo*w+rF;sN23Qb*I^C~Ol)GIYtH;~8wUVK!S69fpT1){k7FcG2%_NZn}hIlMFvA^j% zGrH&)=s&+60brRH&%BL6g!h=lUoewU^r7B~Q$V7vcheKiwT7;ww^;Y5@GIhoCROpI z(Na09nUk|~iC)N7VC0E;wxY6&NBh7+Vr^Sjhk4*7X^Vr4$G3qd#hg=gH4u8(@O^gN z`)<(d#@qcgN+ZtlDwhHAdPRP9ke_C?7Q)Ei^*ssuqkyVJ#h&rt_N(T?T*xrlV+|Qr zP-)j2R%=iShf_J(KY_PZA7i6;)T{LK2Q04u=@seqhd{5+o_e90=1GTpDbzQw*T+)- zGD`U}7$A2dU&hPVv3YQlRxh93yh}Y}lWA5j%AZPu`ek?w9H)|C7ytN@cU^@7z(Lms zZ~T>M5;ngig;VEL57h^}y*fyMQvsB^_;|EoMldlcDfZ15Vc!R=@hY6G43nUHxY;@# z0|Rp~7%jMeejZP3v%aH4oI4i~gu%Laz%qp}^3+iv3MN*mS+2v-*wQm~vcnZvtMYo3 zHxy35J+CFF&mH;vOngSBngg;52#Bvr#Zk7!#~qe8FGVtyB>%jm9&N^u7y}2M{cMlp zN}n4(^r=H^aBAEzj7`VTuq+Ew-`fj0yrk#koMmERnz80yT3;V_AqMm!Fhdzk{PO2& zqr%He0zCXZx_FC40sif!JB6pfzTqoMJB8RVvA(l2QX8J3?x9Zs*kvoUsgq_|*x@qW zYP&}bGVNT_+{ff;eO!v_H45G9+kp5YP5~K+Oa$~MV$u_^Y$$8*OGdZ%!baewGgSzJK@Z%2i#_Yv- zir*;WOteGh)QOsS?FQjy^s&3LBoj?s!*BiU_}X9h_ALRRW$)!BrTdg5W{8;acwEw` zud#ylAW}F{I8d^TL%LC~!l+7vu!@b7XTZH|V(weI6>E!4Yv-zd-u~7rTA1mjzgsFD zs;;r2WidyD%>no+-!s>?ImYIwJsXil3^Pf&0zj|+j%(rd;T2z%r$ot!Y75U6QG+ij;T6U>vGAWW-gmnC@Rr zjm?&i&{IBogv0hSe6qTGH)4pY6Q?eJj8pGQKL_)wJ$!MLQH4Y)ZpPvFq!Zw-UpOyt z4g6N3$#j8X?Eic9hRPK4@2UH(n#Ra51`HMUif0lBrMko2o$=MpC_4&@g1tSH86jQ~ zBGQ3}(9@c5%`wo-z^RE=tcfEA=PKJ(9Fi9=oPG)G<8T0?ch}E19VAQuDL5-@lM+UD z8&JrGTJPXj$DEJYBZhfUa+VQpp^q637mOjRI4~$n{&~Aiub|f2t0T-*bc`Lid?kM* z?AU`slv`sLFjOT$8Np4&h~h9JA3mF|aA(`Y<2LCrms0#OH-O7PFCz4T`?zhtc<1i94KMSrGdWH9PUSjO|C-(~j2Crs>e18CCK zE!pD@a79rDI8(+@{$`UTyju3gkPsqOnw3NhMGR*Z{Ui*WH6~o!5^;t_$rI^M;U6|w z1bAAwGso_?T6MX5S0=e@llZuqWOW@~F<(9vsQRL?hZyxHo_o}jpbRL3ksZ2^Zl8D$ zQ6o|*W7z#edJ=Oz<=PJi--Iq4P9ky9;LP%65&3vsinf2YsSl~{QO3ODguw?$bYo9@xu3*F!e;7=ie*O*(k1>}0^gX5N_lxGMy!vL0x3*%P)st_<;QnS(XfN>WiVLoYVzd2 zxXN(}HE7_SQ(e|lRk_p;R57IC7!dh7|8sSAmBy4-)ErY((X3;FjdvydlE)C>YqeQM z@rAr-#m+5oc~Hp$M8;|=8%-%5jYuhnU8g}U%qAatY!d`a5E=isNBXI43k^@`uXb^N 
zIGiGj&3nJ9>=9kcF@s}JcK~y`w4>66T|ZScucCjkl78^5SNzzoXKqdTRs!+RD>33+ zlz0=ee)7L$a&*YALf8XUMHhmFeG4mw>SJvxHv(ZUo071fMJjHPvOv^?u#{L%v(|3S zEp*6IRA1tFY4qU2#AWQc5vqSljcIj{_4l&}h?xD-G;?hTf$~1c>x)kgYSYK;=cvoN zkjy@?9EXS_z+f^Y7_mmZw!lf3-UOpo3R%WsN$<&ERUh%aDn}|>AoJ`VajO#UYQy~g z+ZUc3^Fac^>oY&dA}s220iw` zNY>4HclkFc^Xv9(89qNVh+2*driOU0LWl=CWU%4INdT|5V`i#U1Z%v0vcKb1Iw)Gl zrbuhV0u@vRxzc3;O9{9bSP@OxbXhd$Gb1X^w&MD?$(*PYxI3`PQCBq!9>Xb-#+*8q~V-Mte8B_Na)~DuY<~loM}|~_Sp#!wE9Gow7ydS4IpnZ zDuc9-vXK&1NNdbTCg`)ov}$Yiu#;jK!t2y%t(`}2F7u1+jhORex{a@I4p0b>JAmC( z+F_tJeLV#DJTa=@3MYO)(lVM5-ardW)=c5op6az7J)+d9CaS&KDU972UEg|OhS;1- zB2*K-cL51`pK5>}PouQCHQ3{M87dyxLiQ+x%cuF@<_u zYM?dRE0>cj7;z#H$c?3g05ieN#ARdQUdmL$LhHwL?R+>`TdiQucI78D)*r~9E)?unyp8wm>UB*st|WDcJ)uOSM`Ul zI@UE``xy;|nvx8RIiLU_okVm>5-!<8b6P~Q;e(hMS5jKcqvW)Zy6$i0tw#O&lhHlE z$KT?vM|{ERovpSsUdB&n=#+k_S>6yQ*0J`v4v*vQ-nVw}9T}Rwl|RTMA2FEtP3`O# zv`)Qtl`PKFrTez_nCeU8*97sxx*@FFz4@mXyCOiK^pmdLY&Fn7kS?D?qt-*JZYDJQ zi9}t25@G2LkK^nf+@_=>PpWEm)2>FORO}P4oI8DZK_eimE>othUAB$*i^s@`Dz~2s zKN^pCgP1B~z#-W@fFV=v65SNi8jbm?r^?9fR?aYY zln;jv7gW%3T#_AA`n>!{Q+e*ChfV4Pn0#QNQN?gXd;MZ=#>pdlys|Wq2Ylzf9bsyRF30lt$>0bbJ z6Ds|YeaGF=e_|FqbbKpDk81Os0=&;UiiW|6izy8(ZN;}kff;XLs}>MVlZjPmLJ5xe z8MHR`%6(3tZ`o8kh**FYg2;|Ih>v!m_DQYuz}FC1H=8?OB#bQfH}PkyK+|)#w=tRh zN>SQ)azk~0$N|E#>2$qA6g$dGK}G%^F=vEg5T^{`S1j6IV=P!R1UMl7fqHsF2foo^ zy-Wo1EZeyg z|D0Ci3rGfjmrNep&o<6mKuwK+Lp5dN`#Sot!)vthMICvh z9x6qeWDTc4k>p!KLQNf^>SAufhW2H?^kZ4;515US24=-sN%~V0{0NFlP2^zmI2crk zVq9;r?|@sGVq`2s)IyA)As<3c#f6I<9apq;cCm&5$tEPTvN26yi$i?DT&cJA5#BgG zyE3=RwXGK8&NK`B?|0_?IcLG>aBT0;;UU3Xl|s3dCD3}x&YGR?QUDWNQ%96DRkx9o zbC@Q5KnD#X%t>0PX^x~=6qXacPoU@~0w^PzO-T{1;2io)+H(5${C3gzGDMLAa&kiK zIQC=VyuQ6T)-;ESdBT(szS|QpgPiTH>e!xuX=|ND9;B$EG%WL(8b+%3JQ!wWx`G}X zU+y#g3;91<4@KAkZc!w05pq=6Qj4K}EQu#ES=@&_Qj)8+MiNU)2EmTr7;;frjY4h; zDn!Z`wn46K#DBpvzstk^-m@|JIdcqMKT9SLwnUt7Jq8$^k+^sU4vk>&(4}8B=_@It zCtA1oh5gw6Z@oy9{`xZ1gQEsVK!zq&Umyyd@)hRXyxJQ!2=ukPm%URzwYz%PDoe3w zy<@`bMn>!w>%W1E*}T<_{s;9DYAJ>7rpzZ_3< 
zY8iT^lbzmdL0RVgHsE%ojrKnWIUU8l!@+fl3m<=kf=oGPkno*O4aPv))6vi z8FHjwg*k2Wxk|;{Zt?u)`%5p`wMhWdiZb9lj*i--m-(eKu_$wj@JTm%%H1A>%y?&q z%V#%+N)!|pQk0py_@rBwj5bzm)VDS&uo#=YtuxTdWWt$dUS_+npksLVv`e*XZch&f zU|zh=TK%g%nqZz{=M+(>U863PgT1hK%LNSjNB@MOaG$Go)LhmUJ^?-pS6}#%mWP4! zAeBmWnoP)6`EaEX_d{k-V1g+@TZTlH8raS+GCCUROvG2TM@N(#F(j#aP3V6KfZDiv zUV)MAZNXdL$bEf;uwXL_pPa`f7sXj}WTCr67H=PKv64B9;*|aAWnzvr!Jzt~(K%|I z##yFGh>NT1)O9N30Z%*cs%7K+zDUMQX$#L1Uk6n#0rc$%`?qy{vzKBml- z)!e6A__XQ$@@4JTchEwQ^2JBMWQ29=Xq-ZT4O%u7!Th3Z%AO^89FBybC<1o7TJ0 zl9Q|o1h@%i-&bKvTxWlGS=o%*-gSyXh58^&@-WC)w~zWE>PRPRw7(SYAvCXBwLZXm zcx&lXfXL>`zJAudb`+)9#xc&1pFAt}4yP`@z7ej9Q#EhzcT-L64vY!|kWZo~RDIw6 zIgV@nqRMyoVp#ju`1(?reV9|AOgy61^{0W^?u(TADn6!{@b_HYB^T_@FGKf_o0Ldc zZ90lwy!Uzc{}5+vev=QVvje!%yC^8ao_W9Wn@ZbhTi7Aj?J(ax*~#`Ice-tQqi?GG zZJcy>fAt(kvtB-3!e6fZM%(yHy1mUgHM6muvI8{!Rp1{{aQR6Pzccpucn?k=(u<3> zu0Ruy^-lsDA@j!PGh~UCq?$-f2=H>|dzXz8=opX#4C%are85l8Cz5J%EZlR+J#F?l zOB?sUzJLE;jEfkZKqMf*E6fJsV)6o0jbFfy>BRmE-I=L z0@bDWgfj??l#l;RE9mz`JU~o+mYxBG0|S+5EU*&6Q-mJqiHXx;MLz42$fN7moE_a* z;-b%;ECpaSh6q%dFQh^Nqavi;MQIRcy;^7%%o0!soC*^_ZZo!hCWHO63#+wWi|eSe z1TZ4GWgF*LYpFqOb>hlIhED@-_V0NJi5R-94yGd&#QuH}7S%Jyd8Ru25;Aq#Ra@2AZ;2tRz1#GXM>~r_r&Uv>^xWy%DD%E@r2u&)>h(}k z8Agk3^CzZKVPPM{nl6$FViV<291Eue6#UQ-#82E6JKHnO6IZ`T4pD z)C_15QoQCEP2qU`G8wSK&pXgrvUNq}2@(e`R+B85-~6QqU7s@KSVDcj?cGqVESSvG zmzj>z>khV~YMD;}tK>xJy88+BYy^a4%Wo}mN7Mk`>*LXJ?P9=1RCfg=x!&AwpCvap zH%?~rnI*x_MbAg6bIz=+OP+$L?%BVvGgoeK+v&b{|NEDxQ_^W~#R`$44HD;89TT@| zN$wk7!T*4i4;>y1=~>TqlkZf1yxF#Vk02TFuLpZU@Dr0=HfuaAE2D5EcYxd zaNpwdaDc#n$`Tnl0z3`#V~qclbK**(6qCS}T@g@kM2dQnR9C(}7p5aCX6gFL-q&$s z@5`%C6?25G9RVB!Cu((M!4bYs*M=3KYcR zda4V9X*dZ!|(;&;G@&g0hh|!J-t5{{F!M z0rBp^jeAd5qq3Vx!?R?{49*7HKto;3JQbPDUxZ>c#qMcS%?Uv9uGz)d&kfqC-7@(c z~0K zE{uNq7<%?yz8lz_SY>X2q9p&DO@@2o>fw-7zm;2;CDSI4M~;BsG^w8rH7A}X^zD1~ zDKkel7YsA|TaM<^n4&iOsp$vfaCN4azI>WDC#yq*iAkzzn>8w*0ZUU(m~OZ5sf=UhW%v>jUq(v&|Cvt_fu2s@9}iHQ=(jdd@jP=r&yX#9iK_ zhq15*j3xngBwxx(DpJf%D5xTSP`68p|2;!bt|w1bhAyL@$`i2@2PegSDxJW!VO8Qr 
z{Ogf_Z5ex{8wxF6nEr~U zu`s$n%WIR`Qe%op!Ur<5BQ#y!_cysF%+%3?J4Eft{V{v*5{iE4FWaa$c*peeD-n8l z;|FG--S0XgZcIyXF^s&_!Q}ky#>B>v%9J!7re#5-v6 zw~}V{LjAc1o?N4~BS$qS{*8RfcAs-7N4+aJ)esV?m^-`}U1NrSrAR*lZ1#GIDkJD- zX~=$1ObzvDTZRJ=V!w4FvR-!h%~k|-#X+uPuIwr$>*U6NX3sHrg(@#LJ0lUfT*C#k z{d0PVOKgg4U`9MI3^MT%Mu@d-B}<~*DU?o2M^Zz8WL1W4uVL)!1nYaQ>)g*K0ej7wZxo1zOVdBh=`_F zjkl{Fz25iR%fNLBRa$=F-ar&{1S?;;a9|d`(=XE9SHh9i3TcQzIf=%=7+4u2pDl=R zyWfY17SB$o4hz9J_=oYh_2{OYL(YY^Fv>=l9S8oXD9B7x_SID#t2CY~uW$Em?OO&L zVgLeP#tuquldp>R!x8(MY2nl#OORC4GKsx}2~qUtfCeW+!hI6qHNAc>W;3&4YY|Uu zHF4=54?op@?=G|FkE;B~!({mP7=ur^+JuO(YM0V?jMgG6t6ZrvOX7fE7LCyYU^SZc za<`m603<`dE4@n9oY*XvRwm#c!OMsWD*sxJEfdTppG+O``{ySFwDSJ0&v+y0oc#GL zj0)7tEgvJ4g)@~D2S2P~{;Ex1TWRDxPL!?SY7Lyd|fk~$|Y%Wa%u>8Mh8#%KFWXifFvp>DW++2 z(dHS!9;ZwH`OmI|R>t+BxD~j&&vdLv&o24W&|V;%2CnA3u0fA7R}Na?9#cjw-A!6> zy1934e(mFXig$ZNkg@BwzfJ?42Cc{?8;{MNdHyW;#?Fj0;vy=~VD%LwH>RKfHi;FrIl}Y=DvsO)n9tN}$nC2ifg+Fv=T%hkwDL*w2)T_Lf zbQn#JuxgEa20fTtðVI}wrS*NQahT;2V}Z+}g>womLAI^sllkctTHl+Kt0G)GzF za7OAG4mK@2+@ah4%0TgNX_Q-~AAnZ;J34gZJ)G0^#a57v;6^MZ=$v zkQS}zdY-NSZiwK@fmQqxzGXqrGUDAM=Rz;(#wEA_?(UMV%(4`mYV7QU=$0w__{87N za0o5!J-^Bf)=%xZ%8teHRV|GAu_&8R+X^MvG(EdVq-ffz&*8CZMzLlHL5`+2y0K+{ zsJ;ZU)jS)_k3`F^>Ib{iY&PcmF@3&n%a8+{(_%FiIsb*dR0sSp(OFa`js3GJq2+Gj zz)cD`-ttlWl~!G3Sao2Jq5eY;N(tSinV1l`gtnj&V^7S z9cgldy1B?Z_JDgzUg}CKVnAZQwPa-$t+SB90=U^zS$_6UsnAOw^-I;HH!lsU;Mfpx zyk(~SrrLznU-x(e_VLVuSO;JM#Z>RxNbl@nlZzn6%B^fReCzyzb!+EAhg?lt7_k&0 zQo{abJ6^>cat63jlOY6d7da(|a5T@Tf$~ z$=o)r6152#(;Q7P$t7%ZiO5{Gm0Pj1C6*$&bZBHXxrE7aDJpwVQAZP*5yCT)<%}k@ zR?1W^$<8mT@86&2`+UC7^Zh=b?_clNA3V~9Z1#?6;R9ar(xD8f5fIR$w8j3R%Bsd!rMHQI%fqk3)RT_dG^5#TjC>+=&mE8E+9r%%FS zPbZK2jtaFq_uE$b@M8A4q$;+jy*6v9u}%pEFSwQ!qD#nNY4A(LP*3E^SGLOOCnkY3 zSWOqWIWbWh?}F(fhbe_y;)T<1H+t;>VRn6QPJ|ekIJPvq_^I_2SC#cLg=O*0{cQw* zg}s+1pPwSJcpU7%ZFueRf$?67QIS7-M^l8vKXP2IvSSJH6qF;+%{?H~EJC-ELatd@ zYkRGG5w3GzsTH!Q_vVenf_EAO7_9haX0nk;ioHL2Lz_Pnqw=Ke(LuL1c!>1Qpmg!m94fgljZpFg$QQUU z>Gqz_l0Xc{8?g*#m^mBM;DTl`cuy*vEW-j5z=aoJHK 
zdaD-_Nz6xhDn}cJ@?)t@qQlcrK9%N%g#HKD@^P+DJQhcLfekBp77}wOrJ?^`Bo~j- zIkedTzwvCcLROGPuPS4&YdFE{X`b(Mz8&D@!SdL0$pa3w$*!a!@D_s6M6Ndf<{T9^ zQ!ICo>tG({uhQ_J@AwBK_4^#b1a-GoNQ;};A}9pQsykm%5bz$f8~*^FpK~#l`g&aZWX>=`Ub$@%?K4e+3 zj{d^RRyoP+&26k4^Dr}_!#w?<757r!TGgcBcS?oBlAAR09sC;AZ3sn+A^#AM8b9Yx zxRa>8F%YZt6PtLRhC|m8P!L9P%M9$0`<0Oz8DJA4P!0S{KX_e$=e_Sg9W(77Q5OSb zk?WXj{oWx9H-8jaKKHap-AdS64-bU)+cN{9Z@ksLflIal_`_tOh>B89_#A2+CgAJaW`hFa9q zl2r{h1ZRLj0v7?ZJJnBrf&oA!e7>qC5b^4pZi#DXH0_gtj>Hbkj?~f5cwO%F z{>bqAC?TAfvC5Xz?-aIeI#d`=PQz zO%^{q!)dSyHBIOiY7AKj0%)Gf)?4<>?9-dSNCoWatYjF1kid4E>#hGXvT$>zp4(ws zyC=u;@zUM7QSw~+mrmnAe5nTO)tAN6PO%t5XeFTv;)q%l*f_-`iB*H=J+r6bUE4zx z@|>gB+bo`sO0}a#KBNN7%iLo<(cA=?Oq_(I_G|491-O$jhaV;kHB*fef7_i9nqUO+ zm`K*x?$%#M%P{m88AAnntkTz#EiiEf za>*kbY4({dxR)y?^~hr3J%Plz4pxk5SoVGX5a2BKlp0# zY>HaaHf5&gQiKAT!2MoTFo7*DJ5asM%rpgwa=Gpp