From 04d4597cf33a9d3f256015c72dc8dfbc68bb2c59 Mon Sep 17 00:00:00 2001
From: "Su, Xiaomeng"
Date: Thu, 2 Nov 2023 14:34:08 +0000
Subject: [PATCH] dli_sqlreference_0511_version

Reviewed-by: Pruthi, Vineet
Co-authored-by: Su, Xiaomeng
Co-committed-by: Su, Xiaomeng
---
 docs/dli/sqlreference/ALL_META.TXT.json | 3822 +++++++++++++++++ docs/dli/sqlreference/CLASS.TXT.json | 3440 +++++++++++++++ docs/dli/sqlreference/PARAMETERS.txt | 3 + docs/dli/sqlreference/dli_08_00005.html | 20 + docs/dli/sqlreference/dli_08_0001.html | 97 + docs/dli/sqlreference/dli_08_0002.html | 14 + docs/dli/sqlreference/dli_08_0003.html | 14 + docs/dli/sqlreference/dli_08_0004.html | 93 + docs/dli/sqlreference/dli_08_0005.html | 14 + docs/dli/sqlreference/dli_08_0006.html | 43 + docs/dli/sqlreference/dli_08_0007.html | 14 + docs/dli/sqlreference/dli_08_0009.html | 20 + docs/dli/sqlreference/dli_08_0010.html | 14 + docs/dli/sqlreference/dli_08_0011.html | 14 + docs/dli/sqlreference/dli_08_0012.html | 20 + docs/dli/sqlreference/dli_08_0013.html | 123 + docs/dli/sqlreference/dli_08_0014.html | 58 + docs/dli/sqlreference/dli_08_0015.html | 14 + docs/dli/sqlreference/dli_08_0016.html | 14 + docs/dli/sqlreference/dli_08_0017.html | 14 + docs/dli/sqlreference/dli_08_0018.html | 14 + docs/dli/sqlreference/dli_08_0019.html | 20 + docs/dli/sqlreference/dli_08_0020.html | 18 + docs/dli/sqlreference/dli_08_0021.html | 14 + docs/dli/sqlreference/dli_08_0022.html | 14 + docs/dli/sqlreference/dli_08_0023.html | 14 + docs/dli/sqlreference/dli_08_0024.html | 93 + docs/dli/sqlreference/dli_08_0026.html | 20 + docs/dli/sqlreference/dli_08_0029.html | 88 + docs/dli/sqlreference/dli_08_0030.html | 14 + docs/dli/sqlreference/dli_08_0031.html | 14 + docs/dli/sqlreference/dli_08_0034.html | 14 + docs/dli/sqlreference/dli_08_0035.html | 14 + docs/dli/sqlreference/dli_08_0036.html | 14 + docs/dli/sqlreference/dli_08_0037.html | 14 + docs/dli/sqlreference/dli_08_0038.html | 14 + docs/dli/sqlreference/dli_08_0039.html | 14 + docs/dli/sqlreference/dli_08_0040.html | 20 + docs/dli/sqlreference/dli_08_0042.html | 14 + docs/dli/sqlreference/dli_08_0043.html | 14 + docs/dli/sqlreference/dli_08_0045.html | 14 + docs/dli/sqlreference/dli_08_0046.html | 14 + docs/dli/sqlreference/dli_08_0047.html | 14 + docs/dli/sqlreference/dli_08_0048.html | 14 + docs/dli/sqlreference/dli_08_0049.html | 14 + docs/dli/sqlreference/dli_08_0050.html | 14 + docs/dli/sqlreference/dli_08_0053.html | 20 + docs/dli/sqlreference/dli_08_0054.html | 137 + docs/dli/sqlreference/dli_08_0055.html | 14 + docs/dli/sqlreference/dli_08_0056.html | 19 + docs/dli/sqlreference/dli_08_0057.html | 12 + docs/dli/sqlreference/dli_08_0058.html | 266 ++ docs/dli/sqlreference/dli_08_0059.html | 100 + docs/dli/sqlreference/dli_08_0060.html | 15 + docs/dli/sqlreference/dli_08_0061.html | 144 + docs/dli/sqlreference/dli_08_0062.html | 86 + docs/dli/sqlreference/dli_08_0063.html | 80 + docs/dli/sqlreference/dli_08_0064.html | 23 + docs/dli/sqlreference/dli_08_0065.html | 309 ++ docs/dli/sqlreference/dli_08_0066.html | 207 + docs/dli/sqlreference/dli_08_0067.html | 235 + docs/dli/sqlreference/dli_08_0068.html | 123 + docs/dli/sqlreference/dli_08_0069.html | 79 + docs/dli/sqlreference/dli_08_0070.html | 21 + docs/dli/sqlreference/dli_08_0071.html | 64 + docs/dli/sqlreference/dli_08_0072.html | 43 + docs/dli/sqlreference/dli_08_0073.html | 43 + docs/dli/sqlreference/dli_08_0074.html | 48 + docs/dli/sqlreference/dli_08_0075.html | 121 + docs/dli/sqlreference/dli_08_0076.html | 258 ++
docs/dli/sqlreference/dli_08_0077.html | 175 + docs/dli/sqlreference/dli_08_0079.html | 57 + docs/dli/sqlreference/dli_08_0080.html | 27 + docs/dli/sqlreference/dli_08_0081.html | 95 + docs/dli/sqlreference/dli_08_0082.html | 56 + docs/dli/sqlreference/dli_08_0083.html | 61 + docs/dli/sqlreference/dli_08_0084.html | 61 + docs/dli/sqlreference/dli_08_0086.html | 27 + docs/dli/sqlreference/dli_08_0087.html | 48 + docs/dli/sqlreference/dli_08_0088.html | 122 + docs/dli/sqlreference/dli_08_0089.html | 25 + docs/dli/sqlreference/dli_08_0090.html | 52 + docs/dli/sqlreference/dli_08_0091.html | 44 + docs/dli/sqlreference/dli_08_0092.html | 48 + docs/dli/sqlreference/dli_08_0093.html | 48 + docs/dli/sqlreference/dli_08_0094.html | 59 + docs/dli/sqlreference/dli_08_0095.html | 105 + docs/dli/sqlreference/dli_08_0096.html | 735 ++++ docs/dli/sqlreference/dli_08_0097.html | 166 + docs/dli/sqlreference/dli_08_0098.html | 127 + docs/dli/sqlreference/dli_08_0099.html | 196 + docs/dli/sqlreference/dli_08_0100.html | 233 + docs/dli/sqlreference/dli_08_0101.html | 69 + docs/dli/sqlreference/dli_08_0102.html | 194 + docs/dli/sqlreference/dli_08_0103.html | 77 + docs/dli/sqlreference/dli_08_0104.html | 433 ++ docs/dli/sqlreference/dli_08_0105.html | 48 + docs/dli/sqlreference/dli_08_0106.html | 146 + docs/dli/sqlreference/dli_08_0107.html | 186 + docs/dli/sqlreference/dli_08_0108.html | 223 + docs/dli/sqlreference/dli_08_0109.html | 21 + docs/dli/sqlreference/dli_08_0110.html | 115 + docs/dli/sqlreference/dli_08_0111.html | 138 + docs/dli/sqlreference/dli_08_0112.html | 202 + docs/dli/sqlreference/dli_08_0118.html | 19 + docs/dli/sqlreference/dli_08_0119.html | 120 + docs/dli/sqlreference/dli_08_0120.html | 99 + docs/dli/sqlreference/dli_08_0121.html | 72 + docs/dli/sqlreference/dli_08_0122.html | 71 + docs/dli/sqlreference/dli_08_0123.html | 51 + docs/dli/sqlreference/dli_08_0124.html | 28 + docs/dli/sqlreference/dli_08_0125.html | 63 + docs/dli/sqlreference/dli_08_0129.html | 17 + docs/dli/sqlreference/dli_08_0130.html | 27 + docs/dli/sqlreference/dli_08_0131.html | 27 + docs/dli/sqlreference/dli_08_0138.html | 28 + docs/dli/sqlreference/dli_08_0139.html | 33 + docs/dli/sqlreference/dli_08_0140.html | 203 + docs/dli/sqlreference/dli_08_0141.html | 26 + docs/dli/sqlreference/dli_08_0142.html | 26 + docs/dli/sqlreference/dli_08_0143.html | 37 + docs/dli/sqlreference/dli_08_0144.html | 94 + docs/dli/sqlreference/dli_08_0145.html | 34 + docs/dli/sqlreference/dli_08_0146.html | 39 + docs/dli/sqlreference/dli_08_0147.html | 27 + docs/dli/sqlreference/dli_08_0148.html | 26 + docs/dli/sqlreference/dli_08_0149.html | 26 + docs/dli/sqlreference/dli_08_0150.html | 112 + docs/dli/sqlreference/dli_08_0151.html | 17 + docs/dli/sqlreference/dli_08_0152.html | 31 + docs/dli/sqlreference/dli_08_0153.html | 37 + docs/dli/sqlreference/dli_08_0154.html | 21 + docs/dli/sqlreference/dli_08_0155.html | 33 + docs/dli/sqlreference/dli_08_0156.html | 33 + docs/dli/sqlreference/dli_08_0157.html | 31 + docs/dli/sqlreference/dli_08_0158.html | 31 + docs/dli/sqlreference/dli_08_0159.html | 23 + docs/dli/sqlreference/dli_08_0160.html | 32 + docs/dli/sqlreference/dli_08_0161.html | 31 + docs/dli/sqlreference/dli_08_0162.html | 37 + docs/dli/sqlreference/dli_08_0163.html | 58 + docs/dli/sqlreference/dli_08_0164.html | 78 + docs/dli/sqlreference/dli_08_0165.html | 29 + docs/dli/sqlreference/dli_08_0166.html | 31 + docs/dli/sqlreference/dli_08_0167.html | 31 + docs/dli/sqlreference/dli_08_0168.html | 31 + 
docs/dli/sqlreference/dli_08_0169.html | 31 + docs/dli/sqlreference/dli_08_0170.html | 31 + docs/dli/sqlreference/dli_08_0171.html | 31 + docs/dli/sqlreference/dli_08_0172.html | 31 + docs/dli/sqlreference/dli_08_0173.html | 31 + docs/dli/sqlreference/dli_08_0174.html | 21 + docs/dli/sqlreference/dli_08_0175.html | 32 + docs/dli/sqlreference/dli_08_0176.html | 29 + docs/dli/sqlreference/dli_08_0177.html | 36 + docs/dli/sqlreference/dli_08_0178.html | 27 + docs/dli/sqlreference/dli_08_0179.html | 17 + docs/dli/sqlreference/dli_08_0180.html | 31 + docs/dli/sqlreference/dli_08_0181.html | 27 + docs/dli/sqlreference/dli_08_0182.html | 19 + docs/dli/sqlreference/dli_08_0183.html | 27 + docs/dli/sqlreference/dli_08_0184.html | 27 + docs/dli/sqlreference/dli_08_0185.html | 27 + docs/dli/sqlreference/dli_08_0186.html | 27 + docs/dli/sqlreference/dli_08_0187.html | 17 + docs/dli/sqlreference/dli_08_0188.html | 28 + docs/dli/sqlreference/dli_08_0189.html | 27 + docs/dli/sqlreference/dli_08_0190.html | 57 + docs/dli/sqlreference/dli_08_0191.html | 445 ++ docs/dli/sqlreference/dli_08_0192.html | 19 + docs/dli/sqlreference/dli_08_0193.html | 146 + docs/dli/sqlreference/dli_08_0194.html | 99 + docs/dli/sqlreference/dli_08_0195.html | 26 + docs/dli/sqlreference/dli_08_0196.html | 19 + docs/dli/sqlreference/dli_08_0197.html | 174 + docs/dli/sqlreference/dli_08_0198.html | 99 + docs/dli/sqlreference/dli_08_0199.html | 26 + docs/dli/sqlreference/dli_08_0200.html | 19 + docs/dli/sqlreference/dli_08_0201.html | 128 + docs/dli/sqlreference/dli_08_0202.html | 99 + docs/dli/sqlreference/dli_08_0203.html | 26 + docs/dli/sqlreference/dli_08_0204.html | 138 + docs/dli/sqlreference/dli_08_0205.html | 64 + docs/dli/sqlreference/dli_08_0206.html | 34 + docs/dli/sqlreference/dli_08_0207.html | 368 ++ docs/dli/sqlreference/dli_08_0209.html | 359 ++ docs/dli/sqlreference/dli_08_0216.html | 76 + docs/dli/sqlreference/dli_08_0217.html | 45 + docs/dli/sqlreference/dli_08_0218.html | 222 + docs/dli/sqlreference/dli_08_0219.html | 269 ++ docs/dli/sqlreference/dli_08_0220.html | 20 + docs/dli/sqlreference/dli_08_0221.html | 86 + docs/dli/sqlreference/dli_08_0223.html | 18 + docs/dli/sqlreference/dli_08_0224.html | 18 + docs/dli/sqlreference/dli_08_0225.html | 19 + docs/dli/sqlreference/dli_08_0226.html | 180 + docs/dli/sqlreference/dli_08_0227.html | 128 + docs/dli/sqlreference/dli_08_0228.html | 24 + docs/dli/sqlreference/dli_08_0229.html | 19 + docs/dli/sqlreference/dli_08_0230.html | 103 + docs/dli/sqlreference/dli_08_0231.html | 116 + docs/dli/sqlreference/dli_08_0232.html | 26 + docs/dli/sqlreference/dli_08_0233.html | 52 + docs/dli/sqlreference/dli_08_0234.html | 26 + docs/dli/sqlreference/dli_08_0235.html | 387 ++ docs/dli/sqlreference/dli_08_0236.html | 181 + docs/dli/sqlreference/dli_08_0237.html | 119 + docs/dli/sqlreference/dli_08_0238.html | 211 + docs/dli/sqlreference/dli_08_0239.html | 220 + docs/dli/sqlreference/dli_08_0240.html | 48 + docs/dli/sqlreference/dli_08_0241.html | 198 + docs/dli/sqlreference/dli_08_0242.html | 284 ++ docs/dli/sqlreference/dli_08_0243.html | 147 + docs/dli/sqlreference/dli_08_0244.html | 158 + docs/dli/sqlreference/dli_08_0245.html | 164 + docs/dli/sqlreference/dli_08_0247.html | 180 + docs/dli/sqlreference/dli_08_0248.html | 290 ++ docs/dli/sqlreference/dli_08_0249.html | 129 + docs/dli/sqlreference/dli_08_0251.html | 147 + docs/dli/sqlreference/dli_08_0252.html | 177 + docs/dli/sqlreference/dli_08_0253.html | 131 + docs/dli/sqlreference/dli_08_0254.html | 167 + 
docs/dli/sqlreference/dli_08_0255.html | 163 + docs/dli/sqlreference/dli_08_0257.html | 133 + docs/dli/sqlreference/dli_08_0258.html | 22 + docs/dli/sqlreference/dli_08_0259.html | 18 + docs/dli/sqlreference/dli_08_0260.html | 111 + docs/dli/sqlreference/dli_08_0261.html | 218 + docs/dli/sqlreference/dli_08_0262.html | 18 + docs/dli/sqlreference/dli_08_0263.html | 62 + docs/dli/sqlreference/dli_08_0266.html | 68 + docs/dli/sqlreference/dli_08_0267.html | 195 + docs/dli/sqlreference/dli_08_0270.html | 12 + docs/dli/sqlreference/dli_08_0271.html | 12 + docs/dli/sqlreference/dli_08_0272.html | 21 + docs/dli/sqlreference/dli_08_0273.html | 101 + docs/dli/sqlreference/dli_08_0274.html | 89 + docs/dli/sqlreference/dli_08_0275.html | 132 + docs/dli/sqlreference/dli_08_0281.html | 27 + docs/dli/sqlreference/dli_08_0282.html | 25 + docs/dli/sqlreference/dli_08_0283.html | 37 + docs/dli/sqlreference/dli_08_0284.html | 27 + docs/dli/sqlreference/dli_08_0285.html | 57 + docs/dli/sqlreference/dli_08_0286.html | 148 + docs/dli/sqlreference/dli_08_0289.html | 20 + docs/dli/sqlreference/dli_08_0290.html | 17 + docs/dli/sqlreference/dli_08_0291.html | 11 + docs/dli/sqlreference/dli_08_0292.html | 17 + docs/dli/sqlreference/dli_08_0293.html | 19 + docs/dli/sqlreference/dli_08_0294.html | 75 + docs/dli/sqlreference/dli_08_0295.html | 22 + docs/dli/sqlreference/dli_08_0296.html | 24 + docs/dli/sqlreference/dli_08_0297.html | 138 + docs/dli/sqlreference/dli_08_0298.html | 107 + docs/dli/sqlreference/dli_08_0299.html | 19 + docs/dli/sqlreference/dli_08_0300.html | 27 + docs/dli/sqlreference/dli_08_0301.html | 251 ++ docs/dli/sqlreference/dli_08_0302.html | 180 + docs/dli/sqlreference/dli_08_0303.html | 145 + docs/dli/sqlreference/dli_08_0304.html | 200 + docs/dli/sqlreference/dli_08_0305.html | 123 + docs/dli/sqlreference/dli_08_0306.html | 98 + docs/dli/sqlreference/dli_08_0307.html | 41 + docs/dli/sqlreference/dli_08_0308.html | 157 + docs/dli/sqlreference/dli_08_0309.html | 142 + docs/dli/sqlreference/dli_08_0310.html | 139 + docs/dli/sqlreference/dli_08_0311.html | 160 + docs/dli/sqlreference/dli_08_0312.html | 198 + docs/dli/sqlreference/dli_08_0313.html | 235 + docs/dli/sqlreference/dli_08_0314.html | 100 + docs/dli/sqlreference/dli_08_0315.html | 119 + docs/dli/sqlreference/dli_08_0316.html | 188 + docs/dli/sqlreference/dli_08_0317.html | 19 + docs/dli/sqlreference/dli_08_0318.html | 255 ++ docs/dli/sqlreference/dli_08_0319.html | 259 ++ docs/dli/sqlreference/dli_08_0320.html | 114 + docs/dli/sqlreference/dli_08_0321.html | 27 + docs/dli/sqlreference/dli_08_0322.html | 197 + docs/dli/sqlreference/dli_08_0323.html | 56 + docs/dli/sqlreference/dli_08_0324.html | 293 ++ docs/dli/sqlreference/dli_08_0325.html | 102 + docs/dli/sqlreference/dli_08_0326.html | 30 + docs/dli/sqlreference/dli_08_0327.html | 32 + docs/dli/sqlreference/dli_08_0328.html | 32 + docs/dli/sqlreference/dli_08_0329.html | 17 + docs/dli/sqlreference/dli_08_0330.html | 187 + docs/dli/sqlreference/dli_08_0331.html | 35 + docs/dli/sqlreference/dli_08_0332.html | 573 +++ docs/dli/sqlreference/dli_08_0333.html | 439 ++ docs/dli/sqlreference/dli_08_0334.html | 1704 ++++++++ docs/dli/sqlreference/dli_08_0335.html | 77 + docs/dli/sqlreference/dli_08_0336.html | 131 + docs/dli/sqlreference/dli_08_0337.html | 48 + docs/dli/sqlreference/dli_08_0338.html | 38 + docs/dli/sqlreference/dli_08_0339.html | 31 + docs/dli/sqlreference/dli_08_0340.html | 64 + docs/dli/sqlreference/dli_08_0341.html | 124 + docs/dli/sqlreference/dli_08_0342.html | 17 + 
docs/dli/sqlreference/dli_08_0343.html | 76 + docs/dli/sqlreference/dli_08_0344.html | 221 + docs/dli/sqlreference/dli_08_0345.html | 132 + docs/dli/sqlreference/dli_08_0346.html | 161 + docs/dli/sqlreference/dli_08_0347.html | 98 + docs/dli/sqlreference/dli_08_0348.html | 125 + docs/dli/sqlreference/dli_08_0349.html | 27 + docs/dli/sqlreference/dli_08_0350.html | 58 + docs/dli/sqlreference/dli_08_0351.html | 63 + docs/dli/sqlreference/dli_08_0352.html | 68 + docs/dli/sqlreference/dli_08_0353.html | 59 + docs/dli/sqlreference/dli_08_0354.html | 66 + docs/dli/sqlreference/dli_08_0355.html | 49 + docs/dli/sqlreference/dli_08_0356.html | 141 + docs/dli/sqlreference/dli_08_0357.html | 34 + docs/dli/sqlreference/dli_08_0358.html | 83 + docs/dli/sqlreference/dli_08_0359.html | 50 + docs/dli/sqlreference/dli_08_0370.html | 20 + docs/dli/sqlreference/dli_08_0371.html | 17 + docs/dli/sqlreference/dli_08_0372.html | 11 + docs/dli/sqlreference/dli_08_0373.html | 17 + docs/dli/sqlreference/dli_08_0374.html | 19 + docs/dli/sqlreference/dli_08_0375.html | 75 + docs/dli/sqlreference/dli_08_0376.html | 22 + docs/dli/sqlreference/dli_08_0377.html | 25 + docs/dli/sqlreference/dli_08_0378.html | 138 + docs/dli/sqlreference/dli_08_0379.html | 124 + docs/dli/sqlreference/dli_08_0380.html | 21 + docs/dli/sqlreference/dli_08_0381.html | 31 + docs/dli/sqlreference/dli_08_0382.html | 163 + docs/dli/sqlreference/dli_08_0383.html | 286 ++ docs/dli/sqlreference/dli_08_0384.html | 267 ++ docs/dli/sqlreference/dli_08_0385.html | 403 ++ docs/dli/sqlreference/dli_08_0386.html | 696 +++ docs/dli/sqlreference/dli_08_0387.html | 236 + docs/dli/sqlreference/dli_08_0388.html | 250 ++ docs/dli/sqlreference/dli_08_0389.html | 415 ++ docs/dli/sqlreference/dli_08_0390.html | 213 + docs/dli/sqlreference/dli_08_0391.html | 33 + docs/dli/sqlreference/dli_08_0392.html | 69 + docs/dli/sqlreference/dli_08_0393.html | 223 + docs/dli/sqlreference/dli_08_0394.html | 309 ++ docs/dli/sqlreference/dli_08_0395.html | 380 ++ docs/dli/sqlreference/dli_08_0396.html | 359 ++ docs/dli/sqlreference/dli_08_0397.html | 379 ++ docs/dli/sqlreference/dli_08_0398.html | 495 +++ docs/dli/sqlreference/dli_08_0399.html | 154 + docs/dli/sqlreference/dli_08_0400.html | 564 +++ docs/dli/sqlreference/dli_08_0401.html | 224 + docs/dli/sqlreference/dli_08_0402.html | 21 + docs/dli/sqlreference/dli_08_0403.html | 358 ++ docs/dli/sqlreference/dli_08_0404.html | 337 ++ docs/dli/sqlreference/dli_08_0405.html | 469 ++ docs/dli/sqlreference/dli_08_0406.html | 384 ++ docs/dli/sqlreference/dli_08_0407.html | 29 + docs/dli/sqlreference/dli_08_0408.html | 231 + docs/dli/sqlreference/dli_08_0409.html | 182 + docs/dli/sqlreference/dli_08_0410.html | 118 + docs/dli/sqlreference/dli_08_0411.html | 180 + docs/dli/sqlreference/dli_08_0412.html | 159 + docs/dli/sqlreference/dli_08_0413.html | 146 + docs/dli/sqlreference/dli_08_0414.html | 143 + docs/dli/sqlreference/dli_08_0415.html | 89 + docs/dli/sqlreference/dli_08_0416.html | 27 + docs/dli/sqlreference/dli_08_0417.html | 197 + docs/dli/sqlreference/dli_08_0418.html | 56 + docs/dli/sqlreference/dli_08_0419.html | 419 ++ docs/dli/sqlreference/dli_08_0420.html | 102 + docs/dli/sqlreference/dli_08_0421.html | 30 + docs/dli/sqlreference/dli_08_0422.html | 32 + docs/dli/sqlreference/dli_08_0423.html | 32 + docs/dli/sqlreference/dli_08_0424.html | 17 + docs/dli/sqlreference/dli_08_0425.html | 181 + docs/dli/sqlreference/dli_08_0426.html | 35 + docs/dli/sqlreference/dli_08_0427.html | 573 +++ docs/dli/sqlreference/dli_08_0428.html | 
437 ++ docs/dli/sqlreference/dli_08_0429.html | 1680 ++++++++ docs/dli/sqlreference/dli_08_0430.html | 77 + docs/dli/sqlreference/dli_08_0431.html | 139 + docs/dli/sqlreference/dli_08_0432.html | 48 + docs/dli/sqlreference/dli_08_0433.html | 38 + docs/dli/sqlreference/dli_08_0434.html | 31 + docs/dli/sqlreference/dli_08_0435.html | 64 + docs/dli/sqlreference/dli_08_0436.html | 124 + docs/dli/sqlreference/dli_08_0437.html | 15 + docs/dli/sqlreference/dli_08_0438.html | 154 + docs/dli/sqlreference/dli_08_0450.html | 12 + .../en-us_image_0000001238321520.png | Bin 0 -> 32910 bytes .../en-us_image_0000001282578329.png | Bin 0 -> 64890 bytes .../en-us_image_0000001282578421.png | Bin 0 -> 32910 bytes .../en-us_image_0000001282841453.png | Bin 0 -> 64890 bytes .../sqlreference/en-us_image_0206796795.png | Bin 0 -> 34286 bytes .../sqlreference/en-us_image_0206796804.png | Bin 0 -> 150180 bytes .../sqlreference/en-us_image_0206796813.png | Bin 0 -> 45366 bytes .../sqlreference/en-us_image_0206796876.png | Bin 0 -> 17792 bytes .../sqlreference/en-us_image_0206797025.png | Bin 0 -> 77842 bytes .../sqlreference/en-us_image_0206797032.png | Bin 0 -> 145844 bytes .../sqlreference/en-us_image_0206797033.png | Bin 0 -> 73039 bytes .../sqlreference/en-us_image_0223994226.png | Bin 0 -> 5070 bytes .../sqlreference/en-us_image_0223994227.png | Bin 0 -> 8006 bytes .../sqlreference/en-us_image_0223994228.png | Bin 0 -> 7352 bytes .../sqlreference/en-us_image_0223994229.png | Bin 0 -> 14913 bytes .../caution_3.0-en-us.png | Bin 0 -> 2752 bytes .../public_sys-resources/danger_3.0-en-us.png | Bin 0 -> 2771 bytes .../public_sys-resources/delta.gif | Bin 0 -> 836 bytes .../public_sys-resources/deltaend.gif | Bin 0 -> 834 bytes .../public_sys-resources/icon-arrowdn.gif | Bin 0 -> 1887 bytes .../public_sys-resources/icon-arrowrt.gif | Bin 0 -> 1890 bytes .../public_sys-resources/icon-caution.gif | Bin 0 -> 1086 bytes .../public_sys-resources/icon-danger.gif | Bin 0 -> 1086 bytes .../public_sys-resources/icon-huawei.gif | Bin 0 -> 2767 bytes .../public_sys-resources/icon-note.gif | Bin 0 -> 983 bytes .../public_sys-resources/icon-notice.gif | Bin 0 -> 962 bytes .../public_sys-resources/icon-tip.gif | Bin 0 -> 879 bytes .../public_sys-resources/icon-warning.gif | Bin 0 -> 1086 bytes .../public_sys-resources/note_3.0-en-us.png | Bin 0 -> 1080 bytes .../public_sys-resources/notice_3.0-en-us.png | Bin 0 -> 1672 bytes .../warning_3.0-en-us.png | Bin 0 -> 3164 bytes 416 files changed, 49998 insertions(+) create mode 100644 docs/dli/sqlreference/ALL_META.TXT.json create mode 100644 docs/dli/sqlreference/CLASS.TXT.json create mode 100644 docs/dli/sqlreference/PARAMETERS.txt create mode 100644 docs/dli/sqlreference/dli_08_00005.html create mode 100644 docs/dli/sqlreference/dli_08_0001.html create mode 100644 docs/dli/sqlreference/dli_08_0002.html create mode 100644 docs/dli/sqlreference/dli_08_0003.html create mode 100644 docs/dli/sqlreference/dli_08_0004.html create mode 100644 docs/dli/sqlreference/dli_08_0005.html create mode 100644 docs/dli/sqlreference/dli_08_0006.html create mode 100644 docs/dli/sqlreference/dli_08_0007.html create mode 100644 docs/dli/sqlreference/dli_08_0009.html create mode 100644 docs/dli/sqlreference/dli_08_0010.html create mode 100644 docs/dli/sqlreference/dli_08_0011.html create mode 100644 docs/dli/sqlreference/dli_08_0012.html create mode 100644 docs/dli/sqlreference/dli_08_0013.html create mode 100644 docs/dli/sqlreference/dli_08_0014.html create mode 100644 
docs/dli/sqlreference/dli_08_0015.html create mode 100644 docs/dli/sqlreference/dli_08_0016.html create mode 100644 docs/dli/sqlreference/dli_08_0017.html create mode 100644 docs/dli/sqlreference/dli_08_0018.html create mode 100644 docs/dli/sqlreference/dli_08_0019.html create mode 100644 docs/dli/sqlreference/dli_08_0020.html create mode 100644 docs/dli/sqlreference/dli_08_0021.html create mode 100644 docs/dli/sqlreference/dli_08_0022.html create mode 100644 docs/dli/sqlreference/dli_08_0023.html create mode 100644 docs/dli/sqlreference/dli_08_0024.html create mode 100644 docs/dli/sqlreference/dli_08_0026.html create mode 100644 docs/dli/sqlreference/dli_08_0029.html create mode 100644 docs/dli/sqlreference/dli_08_0030.html create mode 100644 docs/dli/sqlreference/dli_08_0031.html create mode 100644 docs/dli/sqlreference/dli_08_0034.html create mode 100644 docs/dli/sqlreference/dli_08_0035.html create mode 100644 docs/dli/sqlreference/dli_08_0036.html create mode 100644 docs/dli/sqlreference/dli_08_0037.html create mode 100644 docs/dli/sqlreference/dli_08_0038.html create mode 100644 docs/dli/sqlreference/dli_08_0039.html create mode 100644 docs/dli/sqlreference/dli_08_0040.html create mode 100644 docs/dli/sqlreference/dli_08_0042.html create mode 100644 docs/dli/sqlreference/dli_08_0043.html create mode 100644 docs/dli/sqlreference/dli_08_0045.html create mode 100644 docs/dli/sqlreference/dli_08_0046.html create mode 100644 docs/dli/sqlreference/dli_08_0047.html create mode 100644 docs/dli/sqlreference/dli_08_0048.html create mode 100644 docs/dli/sqlreference/dli_08_0049.html create mode 100644 docs/dli/sqlreference/dli_08_0050.html create mode 100644 docs/dli/sqlreference/dli_08_0053.html create mode 100644 docs/dli/sqlreference/dli_08_0054.html create mode 100644 docs/dli/sqlreference/dli_08_0055.html create mode 100644 docs/dli/sqlreference/dli_08_0056.html create mode 100644 docs/dli/sqlreference/dli_08_0057.html create mode 100644 docs/dli/sqlreference/dli_08_0058.html create mode 100644 docs/dli/sqlreference/dli_08_0059.html create mode 100644 docs/dli/sqlreference/dli_08_0060.html create mode 100644 docs/dli/sqlreference/dli_08_0061.html create mode 100644 docs/dli/sqlreference/dli_08_0062.html create mode 100644 docs/dli/sqlreference/dli_08_0063.html create mode 100644 docs/dli/sqlreference/dli_08_0064.html create mode 100644 docs/dli/sqlreference/dli_08_0065.html create mode 100644 docs/dli/sqlreference/dli_08_0066.html create mode 100644 docs/dli/sqlreference/dli_08_0067.html create mode 100644 docs/dli/sqlreference/dli_08_0068.html create mode 100644 docs/dli/sqlreference/dli_08_0069.html create mode 100644 docs/dli/sqlreference/dli_08_0070.html create mode 100644 docs/dli/sqlreference/dli_08_0071.html create mode 100644 docs/dli/sqlreference/dli_08_0072.html create mode 100644 docs/dli/sqlreference/dli_08_0073.html create mode 100644 docs/dli/sqlreference/dli_08_0074.html create mode 100644 docs/dli/sqlreference/dli_08_0075.html create mode 100644 docs/dli/sqlreference/dli_08_0076.html create mode 100644 docs/dli/sqlreference/dli_08_0077.html create mode 100644 docs/dli/sqlreference/dli_08_0079.html create mode 100644 docs/dli/sqlreference/dli_08_0080.html create mode 100644 docs/dli/sqlreference/dli_08_0081.html create mode 100644 docs/dli/sqlreference/dli_08_0082.html create mode 100644 docs/dli/sqlreference/dli_08_0083.html create mode 100644 docs/dli/sqlreference/dli_08_0084.html create mode 100644 docs/dli/sqlreference/dli_08_0086.html create mode 100644 
docs/dli/sqlreference/dli_08_0087.html create mode 100644 docs/dli/sqlreference/dli_08_0088.html create mode 100644 docs/dli/sqlreference/dli_08_0089.html create mode 100644 docs/dli/sqlreference/dli_08_0090.html create mode 100644 docs/dli/sqlreference/dli_08_0091.html create mode 100644 docs/dli/sqlreference/dli_08_0092.html create mode 100644 docs/dli/sqlreference/dli_08_0093.html create mode 100644 docs/dli/sqlreference/dli_08_0094.html create mode 100644 docs/dli/sqlreference/dli_08_0095.html create mode 100644 docs/dli/sqlreference/dli_08_0096.html create mode 100644 docs/dli/sqlreference/dli_08_0097.html create mode 100644 docs/dli/sqlreference/dli_08_0098.html create mode 100644 docs/dli/sqlreference/dli_08_0099.html create mode 100644 docs/dli/sqlreference/dli_08_0100.html create mode 100644 docs/dli/sqlreference/dli_08_0101.html create mode 100644 docs/dli/sqlreference/dli_08_0102.html create mode 100644 docs/dli/sqlreference/dli_08_0103.html create mode 100644 docs/dli/sqlreference/dli_08_0104.html create mode 100644 docs/dli/sqlreference/dli_08_0105.html create mode 100644 docs/dli/sqlreference/dli_08_0106.html create mode 100644 docs/dli/sqlreference/dli_08_0107.html create mode 100644 docs/dli/sqlreference/dli_08_0108.html create mode 100644 docs/dli/sqlreference/dli_08_0109.html create mode 100644 docs/dli/sqlreference/dli_08_0110.html create mode 100644 docs/dli/sqlreference/dli_08_0111.html create mode 100644 docs/dli/sqlreference/dli_08_0112.html create mode 100644 docs/dli/sqlreference/dli_08_0118.html create mode 100644 docs/dli/sqlreference/dli_08_0119.html create mode 100644 docs/dli/sqlreference/dli_08_0120.html create mode 100644 docs/dli/sqlreference/dli_08_0121.html create mode 100644 docs/dli/sqlreference/dli_08_0122.html create mode 100644 docs/dli/sqlreference/dli_08_0123.html create mode 100644 docs/dli/sqlreference/dli_08_0124.html create mode 100644 docs/dli/sqlreference/dli_08_0125.html create mode 100644 docs/dli/sqlreference/dli_08_0129.html create mode 100644 docs/dli/sqlreference/dli_08_0130.html create mode 100644 docs/dli/sqlreference/dli_08_0131.html create mode 100644 docs/dli/sqlreference/dli_08_0138.html create mode 100644 docs/dli/sqlreference/dli_08_0139.html create mode 100644 docs/dli/sqlreference/dli_08_0140.html create mode 100644 docs/dli/sqlreference/dli_08_0141.html create mode 100644 docs/dli/sqlreference/dli_08_0142.html create mode 100644 docs/dli/sqlreference/dli_08_0143.html create mode 100644 docs/dli/sqlreference/dli_08_0144.html create mode 100644 docs/dli/sqlreference/dli_08_0145.html create mode 100644 docs/dli/sqlreference/dli_08_0146.html create mode 100644 docs/dli/sqlreference/dli_08_0147.html create mode 100644 docs/dli/sqlreference/dli_08_0148.html create mode 100644 docs/dli/sqlreference/dli_08_0149.html create mode 100644 docs/dli/sqlreference/dli_08_0150.html create mode 100644 docs/dli/sqlreference/dli_08_0151.html create mode 100644 docs/dli/sqlreference/dli_08_0152.html create mode 100644 docs/dli/sqlreference/dli_08_0153.html create mode 100644 docs/dli/sqlreference/dli_08_0154.html create mode 100644 docs/dli/sqlreference/dli_08_0155.html create mode 100644 docs/dli/sqlreference/dli_08_0156.html create mode 100644 docs/dli/sqlreference/dli_08_0157.html create mode 100644 docs/dli/sqlreference/dli_08_0158.html create mode 100644 docs/dli/sqlreference/dli_08_0159.html create mode 100644 docs/dli/sqlreference/dli_08_0160.html create mode 100644 docs/dli/sqlreference/dli_08_0161.html create mode 100644 
docs/dli/sqlreference/dli_08_0162.html create mode 100644 docs/dli/sqlreference/dli_08_0163.html create mode 100644 docs/dli/sqlreference/dli_08_0164.html create mode 100644 docs/dli/sqlreference/dli_08_0165.html create mode 100644 docs/dli/sqlreference/dli_08_0166.html create mode 100644 docs/dli/sqlreference/dli_08_0167.html create mode 100644 docs/dli/sqlreference/dli_08_0168.html create mode 100644 docs/dli/sqlreference/dli_08_0169.html create mode 100644 docs/dli/sqlreference/dli_08_0170.html create mode 100644 docs/dli/sqlreference/dli_08_0171.html create mode 100644 docs/dli/sqlreference/dli_08_0172.html create mode 100644 docs/dli/sqlreference/dli_08_0173.html create mode 100644 docs/dli/sqlreference/dli_08_0174.html create mode 100644 docs/dli/sqlreference/dli_08_0175.html create mode 100644 docs/dli/sqlreference/dli_08_0176.html create mode 100644 docs/dli/sqlreference/dli_08_0177.html create mode 100644 docs/dli/sqlreference/dli_08_0178.html create mode 100644 docs/dli/sqlreference/dli_08_0179.html create mode 100644 docs/dli/sqlreference/dli_08_0180.html create mode 100644 docs/dli/sqlreference/dli_08_0181.html create mode 100644 docs/dli/sqlreference/dli_08_0182.html create mode 100644 docs/dli/sqlreference/dli_08_0183.html create mode 100644 docs/dli/sqlreference/dli_08_0184.html create mode 100644 docs/dli/sqlreference/dli_08_0185.html create mode 100644 docs/dli/sqlreference/dli_08_0186.html create mode 100644 docs/dli/sqlreference/dli_08_0187.html create mode 100644 docs/dli/sqlreference/dli_08_0188.html create mode 100644 docs/dli/sqlreference/dli_08_0189.html create mode 100644 docs/dli/sqlreference/dli_08_0190.html create mode 100644 docs/dli/sqlreference/dli_08_0191.html create mode 100644 docs/dli/sqlreference/dli_08_0192.html create mode 100644 docs/dli/sqlreference/dli_08_0193.html create mode 100644 docs/dli/sqlreference/dli_08_0194.html create mode 100644 docs/dli/sqlreference/dli_08_0195.html create mode 100644 docs/dli/sqlreference/dli_08_0196.html create mode 100644 docs/dli/sqlreference/dli_08_0197.html create mode 100644 docs/dli/sqlreference/dli_08_0198.html create mode 100644 docs/dli/sqlreference/dli_08_0199.html create mode 100644 docs/dli/sqlreference/dli_08_0200.html create mode 100644 docs/dli/sqlreference/dli_08_0201.html create mode 100644 docs/dli/sqlreference/dli_08_0202.html create mode 100644 docs/dli/sqlreference/dli_08_0203.html create mode 100644 docs/dli/sqlreference/dli_08_0204.html create mode 100644 docs/dli/sqlreference/dli_08_0205.html create mode 100644 docs/dli/sqlreference/dli_08_0206.html create mode 100644 docs/dli/sqlreference/dli_08_0207.html create mode 100644 docs/dli/sqlreference/dli_08_0209.html create mode 100644 docs/dli/sqlreference/dli_08_0216.html create mode 100644 docs/dli/sqlreference/dli_08_0217.html create mode 100644 docs/dli/sqlreference/dli_08_0218.html create mode 100644 docs/dli/sqlreference/dli_08_0219.html create mode 100644 docs/dli/sqlreference/dli_08_0220.html create mode 100644 docs/dli/sqlreference/dli_08_0221.html create mode 100644 docs/dli/sqlreference/dli_08_0223.html create mode 100644 docs/dli/sqlreference/dli_08_0224.html create mode 100644 docs/dli/sqlreference/dli_08_0225.html create mode 100644 docs/dli/sqlreference/dli_08_0226.html create mode 100644 docs/dli/sqlreference/dli_08_0227.html create mode 100644 docs/dli/sqlreference/dli_08_0228.html create mode 100644 docs/dli/sqlreference/dli_08_0229.html create mode 100644 docs/dli/sqlreference/dli_08_0230.html create mode 100644 
docs/dli/sqlreference/dli_08_0231.html create mode 100644 docs/dli/sqlreference/dli_08_0232.html create mode 100644 docs/dli/sqlreference/dli_08_0233.html create mode 100644 docs/dli/sqlreference/dli_08_0234.html create mode 100644 docs/dli/sqlreference/dli_08_0235.html create mode 100644 docs/dli/sqlreference/dli_08_0236.html create mode 100644 docs/dli/sqlreference/dli_08_0237.html create mode 100644 docs/dli/sqlreference/dli_08_0238.html create mode 100644 docs/dli/sqlreference/dli_08_0239.html create mode 100644 docs/dli/sqlreference/dli_08_0240.html create mode 100644 docs/dli/sqlreference/dli_08_0241.html create mode 100644 docs/dli/sqlreference/dli_08_0242.html create mode 100644 docs/dli/sqlreference/dli_08_0243.html create mode 100644 docs/dli/sqlreference/dli_08_0244.html create mode 100644 docs/dli/sqlreference/dli_08_0245.html create mode 100644 docs/dli/sqlreference/dli_08_0247.html create mode 100644 docs/dli/sqlreference/dli_08_0248.html create mode 100644 docs/dli/sqlreference/dli_08_0249.html create mode 100644 docs/dli/sqlreference/dli_08_0251.html create mode 100644 docs/dli/sqlreference/dli_08_0252.html create mode 100644 docs/dli/sqlreference/dli_08_0253.html create mode 100644 docs/dli/sqlreference/dli_08_0254.html create mode 100644 docs/dli/sqlreference/dli_08_0255.html create mode 100644 docs/dli/sqlreference/dli_08_0257.html create mode 100644 docs/dli/sqlreference/dli_08_0258.html create mode 100644 docs/dli/sqlreference/dli_08_0259.html create mode 100644 docs/dli/sqlreference/dli_08_0260.html create mode 100644 docs/dli/sqlreference/dli_08_0261.html create mode 100644 docs/dli/sqlreference/dli_08_0262.html create mode 100644 docs/dli/sqlreference/dli_08_0263.html create mode 100644 docs/dli/sqlreference/dli_08_0266.html create mode 100644 docs/dli/sqlreference/dli_08_0267.html create mode 100644 docs/dli/sqlreference/dli_08_0270.html create mode 100644 docs/dli/sqlreference/dli_08_0271.html create mode 100644 docs/dli/sqlreference/dli_08_0272.html create mode 100644 docs/dli/sqlreference/dli_08_0273.html create mode 100644 docs/dli/sqlreference/dli_08_0274.html create mode 100644 docs/dli/sqlreference/dli_08_0275.html create mode 100644 docs/dli/sqlreference/dli_08_0281.html create mode 100644 docs/dli/sqlreference/dli_08_0282.html create mode 100644 docs/dli/sqlreference/dli_08_0283.html create mode 100644 docs/dli/sqlreference/dli_08_0284.html create mode 100644 docs/dli/sqlreference/dli_08_0285.html create mode 100644 docs/dli/sqlreference/dli_08_0286.html create mode 100644 docs/dli/sqlreference/dli_08_0289.html create mode 100644 docs/dli/sqlreference/dli_08_0290.html create mode 100644 docs/dli/sqlreference/dli_08_0291.html create mode 100644 docs/dli/sqlreference/dli_08_0292.html create mode 100644 docs/dli/sqlreference/dli_08_0293.html create mode 100644 docs/dli/sqlreference/dli_08_0294.html create mode 100644 docs/dli/sqlreference/dli_08_0295.html create mode 100644 docs/dli/sqlreference/dli_08_0296.html create mode 100644 docs/dli/sqlreference/dli_08_0297.html create mode 100644 docs/dli/sqlreference/dli_08_0298.html create mode 100644 docs/dli/sqlreference/dli_08_0299.html create mode 100644 docs/dli/sqlreference/dli_08_0300.html create mode 100644 docs/dli/sqlreference/dli_08_0301.html create mode 100644 docs/dli/sqlreference/dli_08_0302.html create mode 100644 docs/dli/sqlreference/dli_08_0303.html create mode 100644 docs/dli/sqlreference/dli_08_0304.html create mode 100644 docs/dli/sqlreference/dli_08_0305.html create mode 100644 
docs/dli/sqlreference/dli_08_0306.html create mode 100644 docs/dli/sqlreference/dli_08_0307.html create mode 100644 docs/dli/sqlreference/dli_08_0308.html create mode 100644 docs/dli/sqlreference/dli_08_0309.html create mode 100644 docs/dli/sqlreference/dli_08_0310.html create mode 100644 docs/dli/sqlreference/dli_08_0311.html create mode 100644 docs/dli/sqlreference/dli_08_0312.html create mode 100644 docs/dli/sqlreference/dli_08_0313.html create mode 100644 docs/dli/sqlreference/dli_08_0314.html create mode 100644 docs/dli/sqlreference/dli_08_0315.html create mode 100644 docs/dli/sqlreference/dli_08_0316.html create mode 100644 docs/dli/sqlreference/dli_08_0317.html create mode 100644 docs/dli/sqlreference/dli_08_0318.html create mode 100644 docs/dli/sqlreference/dli_08_0319.html create mode 100644 docs/dli/sqlreference/dli_08_0320.html create mode 100644 docs/dli/sqlreference/dli_08_0321.html create mode 100644 docs/dli/sqlreference/dli_08_0322.html create mode 100644 docs/dli/sqlreference/dli_08_0323.html create mode 100644 docs/dli/sqlreference/dli_08_0324.html create mode 100644 docs/dli/sqlreference/dli_08_0325.html create mode 100644 docs/dli/sqlreference/dli_08_0326.html create mode 100644 docs/dli/sqlreference/dli_08_0327.html create mode 100644 docs/dli/sqlreference/dli_08_0328.html create mode 100644 docs/dli/sqlreference/dli_08_0329.html create mode 100644 docs/dli/sqlreference/dli_08_0330.html create mode 100644 docs/dli/sqlreference/dli_08_0331.html create mode 100644 docs/dli/sqlreference/dli_08_0332.html create mode 100644 docs/dli/sqlreference/dli_08_0333.html create mode 100644 docs/dli/sqlreference/dli_08_0334.html create mode 100644 docs/dli/sqlreference/dli_08_0335.html create mode 100644 docs/dli/sqlreference/dli_08_0336.html create mode 100644 docs/dli/sqlreference/dli_08_0337.html create mode 100644 docs/dli/sqlreference/dli_08_0338.html create mode 100644 docs/dli/sqlreference/dli_08_0339.html create mode 100644 docs/dli/sqlreference/dli_08_0340.html create mode 100644 docs/dli/sqlreference/dli_08_0341.html create mode 100644 docs/dli/sqlreference/dli_08_0342.html create mode 100644 docs/dli/sqlreference/dli_08_0343.html create mode 100644 docs/dli/sqlreference/dli_08_0344.html create mode 100644 docs/dli/sqlreference/dli_08_0345.html create mode 100644 docs/dli/sqlreference/dli_08_0346.html create mode 100644 docs/dli/sqlreference/dli_08_0347.html create mode 100644 docs/dli/sqlreference/dli_08_0348.html create mode 100644 docs/dli/sqlreference/dli_08_0349.html create mode 100644 docs/dli/sqlreference/dli_08_0350.html create mode 100644 docs/dli/sqlreference/dli_08_0351.html create mode 100644 docs/dli/sqlreference/dli_08_0352.html create mode 100644 docs/dli/sqlreference/dli_08_0353.html create mode 100644 docs/dli/sqlreference/dli_08_0354.html create mode 100644 docs/dli/sqlreference/dli_08_0355.html create mode 100644 docs/dli/sqlreference/dli_08_0356.html create mode 100644 docs/dli/sqlreference/dli_08_0357.html create mode 100644 docs/dli/sqlreference/dli_08_0358.html create mode 100644 docs/dli/sqlreference/dli_08_0359.html create mode 100644 docs/dli/sqlreference/dli_08_0370.html create mode 100644 docs/dli/sqlreference/dli_08_0371.html create mode 100644 docs/dli/sqlreference/dli_08_0372.html create mode 100644 docs/dli/sqlreference/dli_08_0373.html create mode 100644 docs/dli/sqlreference/dli_08_0374.html create mode 100644 docs/dli/sqlreference/dli_08_0375.html create mode 100644 docs/dli/sqlreference/dli_08_0376.html create mode 100644 
docs/dli/sqlreference/dli_08_0377.html create mode 100644 docs/dli/sqlreference/dli_08_0378.html create mode 100644 docs/dli/sqlreference/dli_08_0379.html create mode 100644 docs/dli/sqlreference/dli_08_0380.html create mode 100644 docs/dli/sqlreference/dli_08_0381.html create mode 100644 docs/dli/sqlreference/dli_08_0382.html create mode 100644 docs/dli/sqlreference/dli_08_0383.html create mode 100644 docs/dli/sqlreference/dli_08_0384.html create mode 100644 docs/dli/sqlreference/dli_08_0385.html create mode 100644 docs/dli/sqlreference/dli_08_0386.html create mode 100644 docs/dli/sqlreference/dli_08_0387.html create mode 100644 docs/dli/sqlreference/dli_08_0388.html create mode 100644 docs/dli/sqlreference/dli_08_0389.html create mode 100644 docs/dli/sqlreference/dli_08_0390.html create mode 100644 docs/dli/sqlreference/dli_08_0391.html create mode 100644 docs/dli/sqlreference/dli_08_0392.html create mode 100644 docs/dli/sqlreference/dli_08_0393.html create mode 100644 docs/dli/sqlreference/dli_08_0394.html create mode 100644 docs/dli/sqlreference/dli_08_0395.html create mode 100644 docs/dli/sqlreference/dli_08_0396.html create mode 100644 docs/dli/sqlreference/dli_08_0397.html create mode 100644 docs/dli/sqlreference/dli_08_0398.html create mode 100644 docs/dli/sqlreference/dli_08_0399.html create mode 100644 docs/dli/sqlreference/dli_08_0400.html create mode 100644 docs/dli/sqlreference/dli_08_0401.html create mode 100644 docs/dli/sqlreference/dli_08_0402.html create mode 100644 docs/dli/sqlreference/dli_08_0403.html create mode 100644 docs/dli/sqlreference/dli_08_0404.html create mode 100644 docs/dli/sqlreference/dli_08_0405.html create mode 100644 docs/dli/sqlreference/dli_08_0406.html create mode 100644 docs/dli/sqlreference/dli_08_0407.html create mode 100644 docs/dli/sqlreference/dli_08_0408.html create mode 100644 docs/dli/sqlreference/dli_08_0409.html create mode 100644 docs/dli/sqlreference/dli_08_0410.html create mode 100644 docs/dli/sqlreference/dli_08_0411.html create mode 100644 docs/dli/sqlreference/dli_08_0412.html create mode 100644 docs/dli/sqlreference/dli_08_0413.html create mode 100644 docs/dli/sqlreference/dli_08_0414.html create mode 100644 docs/dli/sqlreference/dli_08_0415.html create mode 100644 docs/dli/sqlreference/dli_08_0416.html create mode 100644 docs/dli/sqlreference/dli_08_0417.html create mode 100644 docs/dli/sqlreference/dli_08_0418.html create mode 100644 docs/dli/sqlreference/dli_08_0419.html create mode 100644 docs/dli/sqlreference/dli_08_0420.html create mode 100644 docs/dli/sqlreference/dli_08_0421.html create mode 100644 docs/dli/sqlreference/dli_08_0422.html create mode 100644 docs/dli/sqlreference/dli_08_0423.html create mode 100644 docs/dli/sqlreference/dli_08_0424.html create mode 100644 docs/dli/sqlreference/dli_08_0425.html create mode 100644 docs/dli/sqlreference/dli_08_0426.html create mode 100644 docs/dli/sqlreference/dli_08_0427.html create mode 100644 docs/dli/sqlreference/dli_08_0428.html create mode 100644 docs/dli/sqlreference/dli_08_0429.html create mode 100644 docs/dli/sqlreference/dli_08_0430.html create mode 100644 docs/dli/sqlreference/dli_08_0431.html create mode 100644 docs/dli/sqlreference/dli_08_0432.html create mode 100644 docs/dli/sqlreference/dli_08_0433.html create mode 100644 docs/dli/sqlreference/dli_08_0434.html create mode 100644 docs/dli/sqlreference/dli_08_0435.html create mode 100644 docs/dli/sqlreference/dli_08_0436.html create mode 100644 docs/dli/sqlreference/dli_08_0437.html create mode 100644 
docs/dli/sqlreference/dli_08_0438.html create mode 100644 docs/dli/sqlreference/dli_08_0450.html create mode 100644 docs/dli/sqlreference/en-us_image_0000001238321520.png create mode 100644 docs/dli/sqlreference/en-us_image_0000001282578329.png create mode 100644 docs/dli/sqlreference/en-us_image_0000001282578421.png create mode 100644 docs/dli/sqlreference/en-us_image_0000001282841453.png create mode 100644 docs/dli/sqlreference/en-us_image_0206796795.png create mode 100644 docs/dli/sqlreference/en-us_image_0206796804.png create mode 100644 docs/dli/sqlreference/en-us_image_0206796813.png create mode 100644 docs/dli/sqlreference/en-us_image_0206796876.png create mode 100644 docs/dli/sqlreference/en-us_image_0206797025.png create mode 100644 docs/dli/sqlreference/en-us_image_0206797032.png create mode 100644 docs/dli/sqlreference/en-us_image_0206797033.png create mode 100644 docs/dli/sqlreference/en-us_image_0223994226.png create mode 100644 docs/dli/sqlreference/en-us_image_0223994227.png create mode 100644 docs/dli/sqlreference/en-us_image_0223994228.png create mode 100644 docs/dli/sqlreference/en-us_image_0223994229.png create mode 100644 docs/dli/sqlreference/public_sys-resources/caution_3.0-en-us.png create mode 100644 docs/dli/sqlreference/public_sys-resources/danger_3.0-en-us.png create mode 100644 docs/dli/sqlreference/public_sys-resources/delta.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/deltaend.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-arrowdn.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-arrowrt.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-caution.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-danger.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-huawei.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-note.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-notice.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-tip.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/icon-warning.gif create mode 100644 docs/dli/sqlreference/public_sys-resources/note_3.0-en-us.png create mode 100644 docs/dli/sqlreference/public_sys-resources/notice_3.0-en-us.png create mode 100644 docs/dli/sqlreference/public_sys-resources/warning_3.0-en-us.png diff --git a/docs/dli/sqlreference/ALL_META.TXT.json b/docs/dli/sqlreference/ALL_META.TXT.json new file mode 100644 index 00000000..474594b3 --- /dev/null +++ b/docs/dli/sqlreference/ALL_META.TXT.json @@ -0,0 +1,3822 @@ +[ + { + "uri":"dli_08_0221.html", + "product_code":"dli", + "code":"1", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Spark SQL Syntax Reference", + "title":"Spark SQL Syntax Reference", + "githuburl":"" + }, + { + "uri":"dli_08_0266.html", + "product_code":"dli", + "code":"2", + "des":"This section describes the common configuration items of the SQL syntax for DLI batch jobs.", + "doc_type":"sqlreference", + "kw":"Common Configuration Items of Batch SQL Jobs,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Common Configuration Items of Batch SQL Jobs", + "githuburl":"" + }, + { + "uri":"dli_08_0219.html", + "product_code":"dli", + "code":"3", + "des":"This section describes the Spark SQL syntax list provided by DLI. For details about the parameters and examples, see the syntax description.", + "doc_type":"sqlreference", + "kw":"SQL Syntax Overview of Batch Jobs,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"SQL Syntax Overview of Batch Jobs", + "githuburl":"" + }, + { + "uri":"dli_08_0070.html", + "product_code":"dli", + "code":"4", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Databases", + "title":"Databases", + "githuburl":"" + }, + { + "uri":"dli_08_0071.html", + "product_code":"dli", + "code":"5", + "des":"This statement is used to create a database.IF NOT EXISTS: Prevents system errors if the database to be created exists.COMMENT: Describes a database.DBPROPERTIES: Specifi", + "doc_type":"sqlreference", + "kw":"Creating a Database,Databases,SQL Syntax Reference", + "title":"Creating a Database", + "githuburl":"" + }, + { + "uri":"dli_08_0072.html", + "product_code":"dli", + "code":"6", + "des":"This statement is used to delete a database.IF EXISTS: Prevents system errors if the database to be deleted does not exist.DATABASE and SCHEMA can be used interchangeably", + "doc_type":"sqlreference", + "kw":"Deleting a Database,Databases,SQL Syntax Reference", + "title":"Deleting a Database", + "githuburl":"" + }, + { + "uri":"dli_08_0073.html", + "product_code":"dli", + "code":"7", + "des":"This syntax is used to view the information about a specified database, including the database name and database description.EXTENDED: Displays the database properties.If", + "doc_type":"sqlreference", + "kw":"Viewing a Specified Database,Databases,SQL Syntax Reference", + "title":"Viewing a Specified Database", + "githuburl":"" + }, + { + "uri":"dli_08_0074.html", + "product_code":"dli", + "code":"8", + "des":"This syntax is used to query all current databases.NoneKeyword DATABASES is equivalent to SCHEMAS. You can use either of them in this statement.View all the current datab", + "doc_type":"sqlreference", + "kw":"Viewing All Databases,Databases,SQL Syntax Reference", + "title":"Viewing All Databases", + "githuburl":"" + }, + { + "uri":"dli_08_0223.html", + "product_code":"dli", + "code":"9", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating an OBS Table", + "title":"Creating an OBS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0076.html", + "product_code":"dli", + "code":"10", + "des":"Create an OBS table using the DataSource syntax.The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of suppor", + "doc_type":"sqlreference", + "kw":"Creating an OBS Table Using the DataSource Syntax,Creating an OBS Table,SQL Syntax Reference", + "title":"Creating an OBS Table Using the DataSource Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0077.html", + "product_code":"dli", + "code":"11", + "des":"This statement is used to create an OBS table using the Hive syntax. The main differences between the DataSource and the Hive syntax lie in the supported data formats and", + "doc_type":"sqlreference", + "kw":"Creating an OBS Table Using the Hive Syntax,Creating an OBS Table,SQL Syntax Reference", + "title":"Creating an OBS Table Using the Hive Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0224.html", + "product_code":"dli", + "code":"12", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table", + "title":"Creating a DLI Table", + "githuburl":"" + }, + { + "uri":"dli_08_0098.html", + "product_code":"dli", + "code":"13", + "des":"This DataSource syntax can be used to create a DLI table. The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table Using the DataSource Syntax,Creating a DLI Table,SQL Syntax Reference", + "title":"Creating a DLI Table Using the DataSource Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0204.html", + "product_code":"dli", + "code":"14", + "des":"This Hive syntax is used to create a DLI table. The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of suppor", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table Using the Hive Syntax,Creating a DLI Table,SQL Syntax Reference", + "title":"Creating a DLI Table Using the Hive Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0087.html", + "product_code":"dli", + "code":"15", + "des":"This statement is used to delete tables.If the table is stored in OBS, only the metadata is deleted. The data stored on OBS is not deleted.If the table is stored in DLI, ", + "doc_type":"sqlreference", + "kw":"Deleting a Table,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Deleting a Table", + "githuburl":"" + }, + { + "uri":"dli_08_0089.html", + "product_code":"dli", + "code":"16", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Viewing Tables", + "title":"Viewing Tables", + "githuburl":"" + }, + { + "uri":"dli_08_0090.html", + "product_code":"dli", + "code":"17", + "des":"This statement is used to view all tables and views in the current database.FROM/IN: followed by the name of a database whose tables and views will be displayed.NoneCreat", + "doc_type":"sqlreference", + "kw":"Viewing All Tables,Viewing Tables,SQL Syntax Reference", + "title":"Viewing All Tables", + "githuburl":"" + }, + { + "uri":"dli_08_0091.html", + "product_code":"dli", + "code":"18", + "des":"This statement is used to show the statements for creating a table.CREATE TABLE: statement for creating a tableThe table specified in this statement must exist. Otherwise", + "doc_type":"sqlreference", + "kw":"Viewing Table Creation Statements,Viewing Tables,SQL Syntax Reference", + "title":"Viewing Table Creation Statements", + "githuburl":"" + }, + { + "uri":"dli_08_0092.html", + "product_code":"dli", + "code":"19", + "des":"Check the properties of a table.TBLPROPERTIES: This statement allows you to add a key/value property to a table.property_name is case sensitive. You cannot specify multip", + "doc_type":"sqlreference", + "kw":"Viewing Table Properties,Viewing Tables,SQL Syntax Reference", + "title":"Viewing Table Properties", + "githuburl":"" + }, + { + "uri":"dli_08_0093.html", + "product_code":"dli", + "code":"20", + "des":"This statement is used to query all columns in a specified table.COLUMNS: columns in the current tableFROM/IN: followed by the name of a database whose tables and views w", + "doc_type":"sqlreference", + "kw":"Viewing All Columns in a Specified Table,Viewing Tables,SQL Syntax Reference", + "title":"Viewing All Columns in a Specified Table", + "githuburl":"" + }, + { + "uri":"dli_08_0094.html", + "product_code":"dli", + "code":"21", + "des":"This statement is used to view all partitions in a specified table.PARTITIONS: partitions in a specified tablePARTITION: a specified partitionThe table specified in this ", + "doc_type":"sqlreference", + "kw":"Viewing All Partitions in a Specified Table,Viewing Tables,SQL Syntax Reference", + "title":"Viewing All Partitions in a Specified Table", + "githuburl":"" + }, + { + "uri":"dli_08_0105.html", + "product_code":"dli", + "code":"22", + "des":"This statement is used to view the table statistics. The names and data types of all columns in a specified table will be returned.EXTENDED: displays all metadata of the ", + "doc_type":"sqlreference", + "kw":"Viewing Table Statistics,Viewing Tables,SQL Syntax Reference", + "title":"Viewing Table Statistics", + "githuburl":"" + }, + { + "uri":"dli_08_0262.html", + "product_code":"dli", + "code":"23", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Modifying a Table", + "title":"Modifying a Table", + "githuburl":"" + }, + { + "uri":"dli_08_0263.html", + "product_code":"dli", + "code":"24", + "des":"This statement is used to add one or more new columns to a table.ADD COLUMNS: columns to addCOMMENT: column descriptionDo not run this SQL statement concurrently. 
Otherwi", + "doc_type":"sqlreference", + "kw":"Adding a Column,Modifying a Table,SQL Syntax Reference", + "title":"Adding a Column", + "githuburl":"" + }, + { + "uri":"dli_08_0354.html", + "product_code":"dli", + "code":"25", + "des":"DLI controls multiple versions of backup data for restoration. After the multiversion function is enabled, the system automatically backs up table data when you delete or", + "doc_type":"sqlreference", + "kw":"Enabling or Disabling Multiversion Backup,Modifying a Table,SQL Syntax Reference", + "title":"Enabling or Disabling Multiversion Backup", + "githuburl":"" + }, + { + "uri":"dli_08_0080.html", + "product_code":"dli", + "code":"26", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Syntax for Partitioning a Table", + "title":"Syntax for Partitioning a Table", + "githuburl":"" + }, + { + "uri":"dli_08_0081.html", + "product_code":"dli", + "code":"27", + "des":"After an OBS partitioned table is created, no partition information is generated for the table. Partition information is generated only after you:Insert data to the OBS p", + "doc_type":"sqlreference", + "kw":"Adding Partition Data (Only OBS Tables Supported),Syntax for Partitioning a Table,SQL Syntax Referen", + "title":"Adding Partition Data (Only OBS Tables Supported)", + "githuburl":"" + }, + { + "uri":"dli_08_0082.html", + "product_code":"dli", + "code":"28", + "des":"This statement is used to rename partitions.PARTITION: a specified partitionRENAME: new name of the partitionThis statement is used for OBS table operations.The table and", + "doc_type":"sqlreference", + "kw":"Renaming a Partition (Only OBS Tables Supported),Syntax for Partitioning a Table,SQL Syntax Referenc", + "title":"Renaming a Partition (Only OBS Tables Supported)", + "githuburl":"" + }, + { + "uri":"dli_08_0083.html", + "product_code":"dli", + "code":"29", + "des":"Deletes one or more partitions from a partitioned table.The table in which partitions are to be deleted must exist. 
Otherwise, an error is reported.The to-be-deleted part", + "doc_type":"sqlreference", + "kw":"Deleting a Partition,Syntax for Partitioning a Table,SQL Syntax Reference", + "title":"Deleting a Partition", + "githuburl":"" + }, + { + "uri":"dli_08_0343.html", + "product_code":"dli", + "code":"30", + "des":"This statement is used to delete one or more partitions based on specified conditions.This statement is used for OBS table operations only.The table in which partitions a", + "doc_type":"sqlreference", + "kw":"Deleting Partitions by Specifying Filter Criteria (Only OBS Tables Supported),Syntax for Partitionin", + "title":"Deleting Partitions by Specifying Filter Criteria (Only OBS Tables Supported)", + "githuburl":"" + }, + { + "uri":"dli_08_0084.html", + "product_code":"dli", + "code":"31", + "des":"This statement is used to modify the positions of table partitions.PARTITION: a specified partitionLOCATION: path of the partitionFor a table partition whose position is ", + "doc_type":"sqlreference", + "kw":"Altering the Partition Location of a Table (Only OBS Tables Supported),Syntax for Partitioning a Tab", + "title":"Altering the Partition Location of a Table (Only OBS Tables Supported)", + "githuburl":"" + }, + { + "uri":"dli_08_0079.html", + "product_code":"dli", + "code":"32", + "des":"This statement is used to update the partition information about a table in the Metastore.OrPARTITIONS: partition informationSERDEPROPERTIES: Serde attributeThis statemen", + "doc_type":"sqlreference", + "kw":"Updating Partitioned Table Data (Only OBS Tables Supported),Syntax for Partitioning a Table,SQL Synt", + "title":"Updating Partitioned Table Data (Only OBS Tables Supported)", + "githuburl":"" + }, + { + "uri":"dli_08_0359.html", + "product_code":"dli", + "code":"33", + "des":"Spark caches Parquet metadata to improve performance. If you update a Parquet table, the cached metadata is not updated. Spark SQL cannot find the newly inserted data and", + "doc_type":"sqlreference", + "kw":"Updating Table Metadata with REFRESH TABLE,Syntax for Partitioning a Table,SQL Syntax Reference", + "title":"Updating Table Metadata with REFRESH TABLE", + "githuburl":"" + }, + { + "uri":"dli_08_0100.html", + "product_code":"dli", + "code":"34", + "des":"The LOAD DATA function can be used to import data in CSV, Parquet, ORC, JSON, and Avro formats. The data is converted into the Parquet data format for storage.INPATH: pat", + "doc_type":"sqlreference", + "kw":"Importing Data to the Table,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Importing Data to the Table", + "githuburl":"" + }, + { + "uri":"dli_08_0095.html", + "product_code":"dli", + "code":"35", + "des":"This statement is used to insert the SELECT query result or a certain data record into a table.Insert the SELECT query result into a table.INSERT INTO [TABLE] [db_name.]t", + "doc_type":"sqlreference", + "kw":"Inserting Data,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Inserting Data", + "githuburl":"" + }, + { + "uri":"dli_08_0217.html", + "product_code":"dli", + "code":"36", + "des":"This statement is used to delete data from the DLI or OBS table.Only data in the DLI or OBS table can be deleted.", + "doc_type":"sqlreference", + "kw":"Clearing Data,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Clearing Data", + "githuburl":"" + }, + { + "uri":"dli_08_0205.html", + "product_code":"dli", + "code":"37", + "des":"This statement is used to directly write query results to a specified directory. 
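For the partition-maintenance and data-loading entries above, a minimal Spark-SQL-style sketch; table and path names are hypothetical, and the MSCK and TRUNCATE forms are assumptions to be checked against the pages listed:

    ALTER TABLE my_obs_table DROP IF EXISTS PARTITION (dt = '2023-11-01');  -- delete one partition
    MSCK REPAIR TABLE my_obs_table;        -- update partition info in the Metastore (form assumed)
    REFRESH TABLE my_obs_table;            -- invalidate cached Parquet metadata after updates
    LOAD DATA INPATH 'obs://my-bucket/input/' INTO TABLE testdb.my_table;  -- CSV/Parquet/ORC/JSON/Avro input
    TRUNCATE TABLE testdb.my_table;        -- clear data from a DLI or OBS table (form assumed)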
The query results can be stored in CSV, Parquet, ORC, JSON, or Avro format.USING: Specifi", + "doc_type":"sqlreference", + "kw":"Exporting Search Results,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Exporting Search Results", + "githuburl":"" + }, + { + "uri":"dli_08_0349.html", + "product_code":"dli", + "code":"38", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Backing Up and Restoring Data of Multiple Versions", + "title":"Backing Up and Restoring Data of Multiple Versions", + "githuburl":"" + }, + { + "uri":"dli_08_0350.html", + "product_code":"dli", + "code":"39", + "des":"After multiversion is enabled, backup data is retained for seven days by default. You can change the retention period by setting system parameter dli.multi.version.retenti", + "doc_type":"sqlreference", + "kw":"Setting the Retention Period for Multiversion Backup Data,Backing Up and Restoring Data of Multiple ", + "title":"Setting the Retention Period for Multiversion Backup Data", + "githuburl":"" + }, + { + "uri":"dli_08_0351.html", + "product_code":"dli", + "code":"40", + "des":"After the multiversion function is enabled, you can run the SHOW HISTORY command to view the backup data of a table. For details about the syntax for enabling or disablin", + "doc_type":"sqlreference", + "kw":"Viewing Multiversion Backup Data,Backing Up and Restoring Data of Multiple Versions,SQL Syntax Refer", + "title":"Viewing Multiversion Backup Data", + "githuburl":"" + }, + { + "uri":"dli_08_0352.html", + "product_code":"dli", + "code":"41", + "des":"After the multiversion function is enabled, you can run the RESTORE TABLE statement to restore a table or partition of a specified version. For details about the syntax f", + "doc_type":"sqlreference", + "kw":"Restoring Multiversion Backup Data,Backing Up and Restoring Data of Multiple Versions,SQL Syntax Ref", + "title":"Restoring Multiversion Backup Data", + "githuburl":"" + }, + { + "uri":"dli_08_0353.html", + "product_code":"dli", + "code":"42", + "des":"After the multiversion function is enabled, expired backup data will be directly deleted by the system when the insert overwrite or truncate statement is executed. You can", + "doc_type":"sqlreference", + "kw":"Configuring the Trash Bin for Expired Multiversion Data,Backing Up and Restoring Data of Multiple Ve", + "title":"Configuring the Trash Bin for Expired Multiversion Data", + "githuburl":"" + }, + { + "uri":"dli_08_0355.html", + "product_code":"dli", + "code":"43", + "des":"The retention period of multiversion backup data takes effect each time the insert overwrite or truncate statement is executed. If neither statement is executed for the t", + "doc_type":"sqlreference", + "kw":"Deleting Multiversion Backup Data,Backing Up and Restoring Data of Multiple Versions,SQL Syntax Refe", + "title":"Deleting Multiversion Backup Data", + "githuburl":"" + }, + { + "uri":"dli_08_0118.html", + "product_code":"dli", + "code":"44", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
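Exporting query results writes them straight to a directory; a minimal sketch in the Spark SQL shape implied above (the OBS path is made up):

    INSERT OVERWRITE DIRECTORY 'obs://my-bucket/result/'
    USING parquet                           -- CSV, ORC, JSON, and Avro are also accepted
    SELECT * FROM testdb.my_table WHERE dt = '2023-11-01';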
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with an HBase Table", + "title":"Creating a Datasource Connection with an HBase Table", + "githuburl":"" + }, + { + "uri":"dli_08_0119.html", + "product_code":"dli", + "code":"45", + "des":"This statement is used to create a DLI table and associate it with an existing HBase table.Before creating a DLI table and associating it with HBase, you need to create a", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with HBase,Creating a Datasource Connection with an HBase Ta", + "title":"Creating a DLI Table and Associating It with HBase", + "githuburl":"" + }, + { + "uri":"dli_08_0120.html", + "product_code":"dli", + "code":"46", + "des":"This statement is used to insert data in a DLI table to the associated HBase table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field", + "doc_type":"sqlreference", + "kw":"Inserting Data to an HBase Table,Creating a Datasource Connection with an HBase Table,SQL Syntax Ref", + "title":"Inserting Data to an HBase Table", + "githuburl":"" + }, + { + "uri":"dli_08_0121.html", + "product_code":"dli", + "code":"47", + "des":"This statement is used to query data in an HBase table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.The table to be querie", + "doc_type":"sqlreference", + "kw":"Querying an HBase Table,Creating a Datasource Connection with an HBase Table,SQL Syntax Reference", + "title":"Querying an HBase Table", + "githuburl":"" + }, + { + "uri":"dli_08_0220.html", + "product_code":"dli", + "code":"48", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with an OpenTSDB Table", + "title":"Creating a Datasource Connection with an OpenTSDB Table", + "githuburl":"" + }, + { + "uri":"dli_08_0122.html", + "product_code":"dli", + "code":"49", + "des":"Run the CREATE TABLE statement to create the DLI table and associate it with the existing metric in OpenTSDB. 
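The insert-and-query pattern quoted in the HBase entries above recurs for every datasource table in this chapter (DWS, RDS, CSS, DCS, DDS); DLI_TABLE is the associated DLI table from those entries, and src_table is a hypothetical source:

    INSERT INTO DLI_TABLE
      SELECT field1, field2
      FROM src_table;
    SELECT * FROM DLI_TABLE LIMIT 100;      -- LIMIT accepts only an INT value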
This syntax supports the OpenTSDB of CloudTable and MRS.Befo", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with OpenTSDB,Creating a Datasource Connection with an OpenT", + "title":"Creating a DLI Table and Associating It with OpenTSDB", + "githuburl":"" + }, + { + "uri":"dli_08_0123.html", + "product_code":"dli", + "code":"50", + "des":"Run the INSERT INTO statement to insert the data in the DLI table to the associated OpenTSDB metric.If no metric exists on the OpenTSDB, a new metric is automatically cre", + "doc_type":"sqlreference", + "kw":"Inserting Data to the OpenTSDB Table,Creating a Datasource Connection with an OpenTSDB Table,SQL Syn", + "title":"Inserting Data to the OpenTSDB Table", + "githuburl":"" + }, + { + "uri":"dli_08_0124.html", + "product_code":"dli", + "code":"51", + "des":"This SELECT command is used to query data in an OpenTSDB table.If no metric exists in OpenTSDB, an error will be reported when the corresponding DLI table is queried.If t", + "doc_type":"sqlreference", + "kw":"Querying an OpenTSDB Table,Creating a Datasource Connection with an OpenTSDB Table,SQL Syntax Refere", + "title":"Querying an OpenTSDB Table", + "githuburl":"" + }, + { + "uri":"dli_08_0192.html", + "product_code":"dli", + "code":"52", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with a DWS table", + "title":"Creating a Datasource Connection with a DWS table", + "githuburl":"" + }, + { + "uri":"dli_08_0193.html", + "product_code":"dli", + "code":"53", + "des":"This statement is used to create a DLI table and associate it with an existing DWS table.Before creating a DLI table and associating it with DWS, you need to create a dat", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with DWS,Creating a Datasource Connection with a DWS table,S", + "title":"Creating a DLI Table and Associating It with DWS", + "githuburl":"" + }, + { + "uri":"dli_08_0194.html", + "product_code":"dli", + "code":"54", + "des":"This statement is used to insert data in a DLI table to the associated DWS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "doc_type":"sqlreference", + "kw":"Inserting Data to the DWS Table,Creating a Datasource Connection with a DWS table,SQL Syntax Referen", + "title":"Inserting Data to the DWS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0195.html", + "product_code":"dli", + "code":"55", + "des":"This statement is used to query data in a DWS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.The table to be queried m", + "doc_type":"sqlreference", + "kw":"Querying the DWS Table,Creating a Datasource Connection with a DWS table,SQL Syntax Reference", + "title":"Querying the DWS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0196.html", + "product_code":"dli", + "code":"56", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with an RDS Table", + "title":"Creating a Datasource Connection with an RDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0197.html", + "product_code":"dli", + "code":"57", + "des":"This statement is used to create a DLI table and associate it with an existing RDS table. This function supports access to the MySQL and PostgreSQL clusters of RDS.Before", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with RDS,Creating a Datasource Connection with an RDS Table,", + "title":"Creating a DLI Table and Associating It with RDS", + "githuburl":"" + }, + { + "uri":"dli_08_0198.html", + "product_code":"dli", + "code":"58", + "des":"This statement is used to insert data in a DLI table to the associated RDS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "doc_type":"sqlreference", + "kw":"Inserting Data to the RDS Table,Creating a Datasource Connection with an RDS Table,SQL Syntax Refere", + "title":"Inserting Data to the RDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0199.html", + "product_code":"dli", + "code":"59", + "des":"This statement is used to query data in an RDS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.The table to be queried ", + "doc_type":"sqlreference", + "kw":"Querying the RDS Table,Creating a Datasource Connection with an RDS Table,SQL Syntax Reference", + "title":"Querying the RDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0200.html", + "product_code":"dli", + "code":"60", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with a CSS Table", + "title":"Creating a Datasource Connection with a CSS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0201.html", + "product_code":"dli", + "code":"61", + "des":"This statement is used to create a DLI table and associate it with an existing CSS table.Before creating a DLI table and associating it with CSS, you need to create a dat", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with CSS,Creating a Datasource Connection with a CSS Table,S", + "title":"Creating a DLI Table and Associating It with CSS", + "githuburl":"" + }, + { + "uri":"dli_08_0202.html", + "product_code":"dli", + "code":"62", + "des":"This statement is used to insert data in a DLI table to the associated CSS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "doc_type":"sqlreference", + "kw":"Inserting Data to the CSS Table,Creating a Datasource Connection with a CSS Table,SQL Syntax Referen", + "title":"Inserting Data to the CSS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0203.html", + "product_code":"dli", + "code":"63", + "des":"This statement is used to query data in a CSS table.LIMIT is used to limit the query results. 
Only INT type is supported by the number parameter.The table to be queried m", + "doc_type":"sqlreference", + "kw":"Querying the CSS Table,Creating a Datasource Connection with a CSS Table,SQL Syntax Reference", + "title":"Querying the CSS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0225.html", + "product_code":"dli", + "code":"64", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with a DCS Table", + "title":"Creating a Datasource Connection with a DCS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0226.html", + "product_code":"dli", + "code":"65", + "des":"This statement is used to create a DLI table and associate it with an existing DCS key.Before creating a DLI table and associating it with DCS, you need to create a datas", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with DCS,Creating a Datasource Connection with a DCS Table,S", + "title":"Creating a DLI Table and Associating It with DCS", + "githuburl":"" + }, + { + "uri":"dli_08_0227.html", + "product_code":"dli", + "code":"66", + "des":"This statement is used to insert data in a DLI table to the DCS key.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2...\n [FROM DL", + "doc_type":"sqlreference", + "kw":"Inserting Data to a DCS Table,Creating a Datasource Connection with a DCS Table,SQL Syntax Reference", + "title":"Inserting Data to a DCS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0228.html", + "product_code":"dli", + "code":"67", + "des":"This statement is used to query data in a DCS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.Query data in the test_re", + "doc_type":"sqlreference", + "kw":"Querying the DCS Table,Creating a Datasource Connection with a DCS Table,SQL Syntax Reference", + "title":"Querying the DCS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0229.html", + "product_code":"dli", + "code":"68", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Datasource Connection with a DDS Table", + "title":"Creating a Datasource Connection with a DDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0230.html", + "product_code":"dli", + "code":"69", + "des":"This statement is used to create a DLI table and associate it with an existing DDS collection.Before creating a DLI table and associating it with DDS, you need to create ", + "doc_type":"sqlreference", + "kw":"Creating a DLI Table and Associating It with DDS,Creating a Datasource Connection with a DDS Table,S", + "title":"Creating a DLI Table and Associating It with DDS", + "githuburl":"" + }, + { + "uri":"dli_08_0231.html", + "product_code":"dli", + "code":"70", + "des":"This statement is used to insert data in a DLI table to the associated DDS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "doc_type":"sqlreference", + "kw":"Inserting Data to the DDS Table,Creating a Datasource Connection with a DDS Table,SQL Syntax Referen", + "title":"Inserting Data to the DDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0232.html", + "product_code":"dli", + "code":"71", + "des":"This statement is used to query data in a DDS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.If schema information is ", + "doc_type":"sqlreference", + "kw":"Querying the DDS Table,Creating a Datasource Connection with a DDS Table,SQL Syntax Reference", + "title":"Querying the DDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0129.html", + "product_code":"dli", + "code":"72", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Views", + "title":"Views", + "githuburl":"" + }, + { + "uri":"dli_08_0130.html", + "product_code":"dli", + "code":"73", + "des":"This statement is used to create views.CREATE VIEW: creates views based on the given select statement. The result of the select statement will not be written into the dis", + "doc_type":"sqlreference", + "kw":"Creating a View,Views,SQL Syntax Reference", + "title":"Creating a View", + "githuburl":"" + }, + { + "uri":"dli_08_0131.html", + "product_code":"dli", + "code":"74", + "des":"This statement is used to delete views.DROP: Deletes the metadata of a specified view. Although views and tables have many common points, the DROP TABLE statement cannot ", + "doc_type":"sqlreference", + "kw":"Deleting a View,Views,SQL Syntax Reference", + "title":"Deleting a View", + "githuburl":"" + }, + { + "uri":"dli_08_0138.html", + "product_code":"dli", + "code":"75", + "des":"This statement returns the logical plan and physical execution plan for the SQL statement.EXTENDED: After this keyword is specified, the logical and physical plans are ou", + "doc_type":"sqlreference", + "kw":"Viewing the Execution Plan,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Viewing the Execution Plan", + "githuburl":"" + }, + { + "uri":"dli_08_0139.html", + "product_code":"dli", + "code":"76", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Data Permissions Management", + "title":"Data Permissions Management", + "githuburl":"" + }, + { + "uri":"dli_08_0140.html", + "product_code":"dli", + "code":"77", + "des":"Table 1 describes the SQL statement permission matrix in DLI in terms of permissions on databases, tables, and roles.For privilege granting or revocation on databases and", + "doc_type":"sqlreference", + "kw":"Data Permissions List,Data Permissions Management,SQL Syntax Reference", + "title":"Data Permissions List", + "githuburl":"" + }, + { + "uri":"dli_08_0141.html", + "product_code":"dli", + "code":"78", + "des":"This statement is used to create a role in the current database or a specified database.Only users with the CREATE_ROLE permission on the database can create roles. For e", + "doc_type":"sqlreference", + "kw":"Creating a Role,Data Permissions Management,SQL Syntax Reference", + "title":"Creating a Role", + "githuburl":"" + }, + { + "uri":"dli_08_0148.html", + "product_code":"dli", + "code":"79", + "des":"This statement is used to delete a role in the current database or a specified database.NoneThe role_name to be deleted must exist in the current database or the specifie", + "doc_type":"sqlreference", + "kw":"Deleting a Role,Data Permissions Management,SQL Syntax Reference", + "title":"Deleting a Role", + "githuburl":"" + }, + { + "uri":"dli_08_0142.html", + "product_code":"dli", + "code":"80", + "des":"This statement is used to bind a user with a role.NoneThe role_name and username must exist. Otherwise, an error will be reported.", + "doc_type":"sqlreference", + "kw":"Binding a Role,Data Permissions Management,SQL Syntax Reference", + "title":"Binding a Role", + "githuburl":"" + }, + { + "uri":"dli_08_0147.html", + "product_code":"dli", + "code":"81", + "des":"This statement is used to unbind the user with the role.Nonerole_name and user_name must exist and user_name has been bound to role_name.To unbind the user_name1 from rol", + "doc_type":"sqlreference", + "kw":"Unbinding a Role,Data Permissions Management,SQL Syntax Reference", + "title":"Unbinding a Role", + "githuburl":"" + }, + { + "uri":"dli_08_0143.html", + "product_code":"dli", + "code":"82", + "des":"This statement is used to display all roles or roles bound to the user_name in the current database.ALL: Displays all roles.Keywords ALL and user_name cannot coexist.To d", + "doc_type":"sqlreference", + "kw":"Displaying a Role,Data Permissions Management,SQL Syntax Reference", + "title":"Displaying a Role", + "githuburl":"" + }, + { + "uri":"dli_08_0144.html", + "product_code":"dli", + "code":"83", + "des":"This statement is used to grant permissions to a user or role.ROLE: The subsequent role_name must be a role.USER: The subsequent user_name must be a user.The privilege mu", + "doc_type":"sqlreference", + "kw":"Granting a Permission,Data Permissions Management,SQL Syntax Reference", + "title":"Granting a Permission", + "githuburl":"" + }, + { + "uri":"dli_08_0146.html", + "product_code":"dli", + "code":"84", + "des":"This statement is used to revoke permissions granted to a user or role.ROLE: The subsequent role_name must be a role.USER: The subsequent user_name must be a user.The pri", + "doc_type":"sqlreference", + "kw":"Revoking a Permission,Data Permissions Management,SQL Syntax Reference", + "title":"Revoking a Permission", + "githuburl":"" + }, + { + 
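A sketch of the role workflow named in the entries above, written in Hive-style forms; the exact DLI grammar is on the pages listed, and role1 and user1 are hypothetical:

    CREATE ROLE role1;                      -- requires CREATE_ROLE on the database
    GRANT ROLE role1 TO USER user1;         -- bind the user to the role (form assumed)
    SHOW ROLES;                             -- display roles in the current database
    REVOKE ROLE role1 FROM USER user1;      -- unbind the user from the role (form assumed)
    DROP ROLE role1;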
"uri":"dli_08_0145.html", + "product_code":"dli", + "code":"85", + "des":"This statement is used to show the permissions granted to a user or role in the resource.ROLE: The subsequent role_name must be a role.USER: The subsequent user_name must", + "doc_type":"sqlreference", + "kw":"Displaying the Granted Permissions,Data Permissions Management,SQL Syntax Reference", + "title":"Displaying the Granted Permissions", + "githuburl":"" + }, + { + "uri":"dli_08_0149.html", + "product_code":"dli", + "code":"86", + "des":"This statement is used to display the binding relationship between roles and a user in the current database.NoneThe ROLE variable must exist.", + "doc_type":"sqlreference", + "kw":"Displaying the Binding Relationship Between All Roles and Users,Data Permissions Management,SQL Synt", + "title":"Displaying the Binding Relationship Between All Roles and Users", + "githuburl":"" + }, + { + "uri":"dli_08_0056.html", + "product_code":"dli", + "code":"87", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Data Types", + "title":"Data Types", + "githuburl":"" + }, + { + "uri":"dli_08_0057.html", + "product_code":"dli", + "code":"88", + "des":"Data type is a basic attribute of data. It is used to distinguish different types of data. Different data types occupy different storage space and support different opera", + "doc_type":"sqlreference", + "kw":"Overview,Data Types,SQL Syntax Reference", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dli_08_0058.html", + "product_code":"dli", + "code":"89", + "des":"Table 1 lists the primitive data types supported by DLI.VARCHAR and CHAR data is stored in STRING type on DLI. Therefore, the string that exceeds the specified length wil", + "doc_type":"sqlreference", + "kw":"Primitive Data Types,Data Types,SQL Syntax Reference", + "title":"Primitive Data Types", + "githuburl":"" + }, + { + "uri":"dli_08_0059.html", + "product_code":"dli", + "code":"90", + "des":"Spark SQL supports complex data types, as shown in Table 1.When a table containing fields of the complex data type is created, the storage format of this table cannot be ", + "doc_type":"sqlreference", + "kw":"Complex Data Types,Data Types,SQL Syntax Reference", + "title":"Complex Data Types", + "githuburl":"" + }, + { + "uri":"dli_08_0282.html", + "product_code":"dli", + "code":"91", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"User-Defined Functions", + "title":"User-Defined Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0283.html", + "product_code":"dli", + "code":"92", + "des":"DLI allows you to create and use user-defined functions (UDF) and user-defined table functions (UDTF) in Spark jobs.If a function with the same name exists in the databas", + "doc_type":"sqlreference", + "kw":"Creating a Function,User-Defined Functions,SQL Syntax Reference", + "title":"Creating a Function", + "githuburl":"" + }, + { + "uri":"dli_08_0284.html", + "product_code":"dli", + "code":"93", + "des":"This statement is used to delete functions.TEMPORARY: Indicates whether the function to be deleted is a temporary function.IF EXISTS: Used when the function to be deleted", + "doc_type":"sqlreference", + "kw":"Deleting a Function,User-Defined Functions,SQL Syntax Reference", + "title":"Deleting a Function", + "githuburl":"" + }, + { + "uri":"dli_08_0281.html", + "product_code":"dli", + "code":"94", + "des":"Displays information about a specified function.EXTENDED: displays extended usage information.The metadata (implementation class and usage) of an existing function is ret", + "doc_type":"sqlreference", + "kw":"Displaying Function Details,User-Defined Functions,SQL Syntax Reference", + "title":"Displaying Function Details", + "githuburl":"" + }, + { + "uri":"dli_08_0285.html", + "product_code":"dli", + "code":"95", + "des":"View all functions in the current project.In the preceding statement, regex is a regular expression. For details about its parameters, see Table 1.For details about other", + "doc_type":"sqlreference", + "kw":"Displaying All Functions,User-Defined Functions,SQL Syntax Reference", + "title":"Displaying All Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0064.html", + "product_code":"dli", + "code":"96", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Built-in Functions", + "title":"Built-in Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0065.html", + "product_code":"dli", + "code":"97", + "des":"Table 1 lists the mathematical functions supported in DLI.", + "doc_type":"sqlreference", + "kw":"Mathematical Functions,Built-in Functions,SQL Syntax Reference", + "title":"Mathematical Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0066.html", + "product_code":"dli", + "code":"98", + "des":"Table 1 lists the date functions supported in DLI.", + "doc_type":"sqlreference", + "kw":"Date Functions,Built-in Functions,SQL Syntax Reference", + "title":"Date Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0067.html", + "product_code":"dli", + "code":"99", + "des":"Table 1 lists the string functions supported by DLI.", + "doc_type":"sqlreference", + "kw":"String Functions,Built-in Functions,SQL Syntax Reference", + "title":"String Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0068.html", + "product_code":"dli", + "code":"100", + "des":"An aggregate function performs a calculation operation on a set of input values and returns a value. 
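The function entries above form a small lifecycle; a minimal sketch, where the function name, implementation class, and JAR path are all made up:

    CREATE FUNCTION mydb.strlen AS 'com.example.udf.StrLen'
      USING JAR 'obs://my-bucket/udf/strlen.jar';
    DESCRIBE FUNCTION EXTENDED mydb.strlen; -- implementation class and usage
    SHOW FUNCTIONS LIKE 'str*';             -- pattern-matched listing of functions
    DROP FUNCTION IF EXISTS mydb.strlen;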
For example, the COUNT function counts the number of rows retrieved b", + "doc_type":"sqlreference", + "kw":"Aggregate Functions,Built-in Functions,SQL Syntax Reference", + "title":"Aggregate Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0069.html", + "product_code":"dli", + "code":"101", + "des":"A window function performs a calculation operation on a set of values related to the current value. A window function can be an aggregate function used in the GROUP BY cl", + "doc_type":"sqlreference", + "kw":"Window Functions,Built-in Functions,SQL Syntax Reference", + "title":"Window Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0150.html", + "product_code":"dli", + "code":"102", + "des":"This statement is a basic query statement and is used to return the query results.The table to be queried must exist. Otherwise, an error is reported.To filter the record", + "doc_type":"sqlreference", + "kw":"Basic SELECT Statements,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"Basic SELECT Statements", + "githuburl":"" + }, + { + "uri":"dli_08_0151.html", + "product_code":"dli", + "code":"103", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Filtering", + "title":"Filtering", + "githuburl":"" + }, + { + "uri":"dli_08_0152.html", + "product_code":"dli", + "code":"104", + "des":"This statement is used to filter the query results using the WHERE clause.All is used to return repeated rows. By default, all repeated rows are returned. It is followed ", + "doc_type":"sqlreference", + "kw":"WHERE Filtering Clause,Filtering,SQL Syntax Reference", + "title":"WHERE Filtering Clause", + "githuburl":"" + }, + { + "uri":"dli_08_0153.html", + "product_code":"dli", + "code":"105", + "des":"This statement is used to filter the query results using the HAVING clause.All is used to return repeated rows. By default, all repeated rows are returned. It is followed", + "doc_type":"sqlreference", + "kw":"HAVING Filtering Clause,Filtering,SQL Syntax Reference", + "title":"HAVING Filtering Clause", + "githuburl":"" + }, + { + "uri":"dli_08_0154.html", + "product_code":"dli", + "code":"106", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Sorting", + "title":"Sorting", + "githuburl":"" + }, + { + "uri":"dli_08_0155.html", + "product_code":"dli", + "code":"107", + "des":"This statement is used to order the result set of a query by the specified field.ASC/DESC: ASC sorts from the lowest value to the highest value. DESC sorts from the highe", + "doc_type":"sqlreference", + "kw":"ORDER BY,Sorting,SQL Syntax Reference", + "title":"ORDER BY", + "githuburl":"" + }, + { + "uri":"dli_08_0156.html", + "product_code":"dli", + "code":"108", + "des":"This statement is used to achieve the partial sorting of tables according to fields.ASC/DESC: ASC sorts from the lowest value to the highest value. 
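Pulling the aggregate, filtering, and sorting entries above together into one query (employees is a hypothetical table):

    SELECT dept, COUNT(*) AS cnt, AVG(salary) AS avg_sal
    FROM employees
    WHERE hire_date >= '2020-01-01'         -- WHERE filters rows before grouping
    GROUP BY dept
    HAVING COUNT(*) > 10                    -- HAVING filters groups after aggregation
    ORDER BY cnt DESC;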
DESC sorts from the hi", + "doc_type":"sqlreference", + "kw":"SORT BY,Sorting,SQL Syntax Reference", + "title":"SORT BY", + "githuburl":"" + }, + { + "uri":"dli_08_0157.html", + "product_code":"dli", + "code":"109", + "des":"This statement is used to bucket a table and sort the table within buckets.CLUSTER BY: Buckets are created based on specified fields. Single fields and multiple fields ar", + "doc_type":"sqlreference", + "kw":"CLUSTER BY,Sorting,SQL Syntax Reference", + "title":"CLUSTER BY", + "githuburl":"" + }, + { + "uri":"dli_08_0158.html", + "product_code":"dli", + "code":"110", + "des":"This statement is used to bucket a table according to the field.DISTRIBUTE BY: Buckets are created based on specified fields. A single field or multiple fields are suppor", + "doc_type":"sqlreference", + "kw":"DISTRIBUTE BY,Sorting,SQL Syntax Reference", + "title":"DISTRIBUTE BY", + "githuburl":"" + }, + { + "uri":"dli_08_0159.html", + "product_code":"dli", + "code":"111", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Grouping", + "title":"Grouping", + "githuburl":"" + }, + { + "uri":"dli_08_0160.html", + "product_code":"dli", + "code":"112", + "des":"This statement is used to group a table based on columns.Column-based GROUP BY can be categorized into single-column GROUP BY and multi-column GROUP BY.Single-column GROU", + "doc_type":"sqlreference", + "kw":"Column-Based GROUP BY,Grouping,SQL Syntax Reference", + "title":"Column-Based GROUP BY", + "githuburl":"" + }, + { + "uri":"dli_08_0161.html", + "product_code":"dli", + "code":"113", + "des":"This statement is used to group a table according to expressions.The groupby_expression can contain a single field or multiple fields, and also can call aggregate functio", + "doc_type":"sqlreference", + "kw":"Expression-Based GROUP BY,Grouping,SQL Syntax Reference", + "title":"Expression-Based GROUP BY", + "githuburl":"" + }, + { + "uri":"dli_08_0162.html", + "product_code":"dli", + "code":"114", + "des":"This statement filters a table after grouping it using the HAVING clause.The groupby_expression can contain a single field or multiple fields, and can also call aggregate", + "doc_type":"sqlreference", + "kw":"GROUP BY Using HAVING,Grouping,SQL Syntax Reference", + "title":"GROUP BY Using HAVING", + "githuburl":"" + }, + { + "uri":"dli_08_0163.html", + "product_code":"dli", + "code":"115", + "des":"This statement is used to generate the aggregate row, super-aggregate row, and the total row. The statement can achieve multi-layer statistics from right to left and disp", + "doc_type":"sqlreference", + "kw":"ROLLUP,Grouping,SQL Syntax Reference", + "title":"ROLLUP", + "githuburl":"" + }, + { + "uri":"dli_08_0164.html", + "product_code":"dli", + "code":"116", + "des":"This statement is used to generate the cross-table row and achieve the cross-statistics of the GROUP BY field.GROUPING SETS is the expansion of GROUP BY. For example:SELE", + "doc_type":"sqlreference", + "kw":"GROUPING SETS,Grouping,SQL Syntax Reference", + "title":"GROUPING SETS", + "githuburl":"" + }, + { + "uri":"dli_08_0165.html", + "product_code":"dli", + "code":"117", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
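The bucket-and-sort entries and the GROUP BY extensions above can be sketched briefly; logs and employees are hypothetical tables:

    SELECT * FROM logs DISTRIBUTE BY user_id SORT BY event_time;  -- partial, per-bucket ordering
    SELECT * FROM logs CLUSTER BY user_id;  -- shorthand for DISTRIBUTE BY + SORT BY on the same field

    SELECT dept, job, SUM(salary)
    FROM employees
    GROUP BY dept, job GROUPING SETS ((dept), (dept, job));  -- explicit subtotal combinations

    SELECT dept, job, SUM(salary)
    FROM employees
    GROUP BY dept, job WITH ROLLUP;         -- layered totals from right to left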
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"JOIN", + "title":"JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0166.html", + "product_code":"dli", + "code":"118", + "des":"This statement is used to join and return the rows that meet the JOIN conditions from two tables as the result set.JOIN/INNER JOIN: Only the records that meet the JOIN co", + "doc_type":"sqlreference", + "kw":"INNER JOIN,JOIN,SQL Syntax Reference", + "title":"INNER JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0167.html", + "product_code":"dli", + "code":"119", + "des":"Join the left table with the right table and return all joined records of the left table. If no joined record is found, NULL will be returned.LEFT OUTER JOIN: Returns all", + "doc_type":"sqlreference", + "kw":"LEFT OUTER JOIN,JOIN,SQL Syntax Reference", + "title":"LEFT OUTER JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0168.html", + "product_code":"dli", + "code":"120", + "des":"Match the right table with the left table and return all matched records of the right table. If no matched record is found, NULL will be returned.RIGHT OUTER JOIN: Return", + "doc_type":"sqlreference", + "kw":"RIGHT OUTER JOIN,JOIN,SQL Syntax Reference", + "title":"RIGHT OUTER JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0169.html", + "product_code":"dli", + "code":"121", + "des":"Join all records from the right table and the left table and return all joined records. If no joined record is found, NULL will be returned.FULL OUTER JOIN: Matches all r", + "doc_type":"sqlreference", + "kw":"FULL OUTER JOIN,JOIN,SQL Syntax Reference", + "title":"FULL OUTER JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0170.html", + "product_code":"dli", + "code":"122", + "des":"This statement has the same function as INNER JOIN, that is, the result set that meets the WHERE condition is returned. However, IMPLICIT JOIN does not use the condition s", + "doc_type":"sqlreference", + "kw":"IMPLICIT JOIN,JOIN,SQL Syntax Reference", + "title":"IMPLICIT JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0171.html", + "product_code":"dli", + "code":"123", + "des":"Cartesian JOIN joins each record of table A with all records in table B. For example, if there are m records in table A and n records in table B, m x n records will be ge", + "doc_type":"sqlreference", + "kw":"Cartesian JOIN,JOIN,SQL Syntax Reference", + "title":"Cartesian JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0172.html", + "product_code":"dli", + "code":"124", + "des":"This statement is used to query the records that meet the JOIN condition from the left table.LEFT SEMI JOIN: Returns only the records from the left table. LEF", + "doc_type":"sqlreference", + "kw":"LEFT SEMI JOIN,JOIN,SQL Syntax Reference", + "title":"LEFT SEMI JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0173.html", + "product_code":"dli", + "code":"125", + "des":"This statement is used to join multiple tables using unequal values and return the result set that meets the condition.The non_equi_join_condition is similar to join_condi", + "doc_type":"sqlreference", + "kw":"NON-EQUIJOIN,JOIN,SQL Syntax Reference", + "title":"NON-EQUIJOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0174.html", + "product_code":"dli", + "code":"126", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
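A compact sketch of the join variants described above (orders and customers are hypothetical tables):

    SELECT o.id, c.name
    FROM orders o LEFT OUTER JOIN customers c ON o.cust_id = c.id;  -- NULL for unmatched left rows

    SELECT o.id
    FROM orders o LEFT SEMI JOIN customers c ON o.cust_id = c.id;   -- existence filter; no right-table columns

    SELECT o.id, c.name
    FROM orders o JOIN customers c ON o.amount > c.credit_limit;    -- NON-EQUIJOIN condition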
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Subquery", + "title":"Subquery", + "githuburl":"" + }, + { + "uri":"dli_08_0175.html", + "product_code":"dli", + "code":"127", + "des":"Subqueries are nested in the WHERE clause, and the subquery result is used as the filtering condition.All is used to return repeated rows. By default, all repeated rows a", + "doc_type":"sqlreference", + "kw":"Subquery Nested by WHERE,Subquery,SQL Syntax Reference", + "title":"Subquery Nested by WHERE", + "githuburl":"" + }, + { + "uri":"dli_08_0176.html", + "product_code":"dli", + "code":"128", + "des":"This statement is used to nest subquery by FROM and use the subquery results as the data source of the external SELECT statement.All is used to return repeated rows. By d", + "doc_type":"sqlreference", + "kw":"Subquery Nested by FROM,Subquery,SQL Syntax Reference", + "title":"Subquery Nested by FROM", + "githuburl":"" + }, + { + "uri":"dli_08_0177.html", + "product_code":"dli", + "code":"129", + "des":"This statement is used to embed a subquery in the HAVING clause. The subquery result is used as a part of the HAVING clause.All is used to return repeated rows. By defaul", + "doc_type":"sqlreference", + "kw":"Subquery Nested by HAVING,Subquery,SQL Syntax Reference", + "title":"Subquery Nested by HAVING", + "githuburl":"" + }, + { + "uri":"dli_08_0178.html", + "product_code":"dli", + "code":"130", + "des":"This statement is used to nest queries in the subquery.All is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) on", + "doc_type":"sqlreference", + "kw":"Multi-Layer Nested Subquery,Subquery,SQL Syntax Reference", + "title":"Multi-Layer Nested Subquery", + "githuburl":"" + }, + { + "uri":"dli_08_0179.html", + "product_code":"dli", + "code":"131", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Alias", + "title":"Alias", + "githuburl":"" + }, + { + "uri":"dli_08_0180.html", + "product_code":"dli", + "code":"132", + "des":"This statement is used to specify an alias for a table or the subquery result.table_reference: Can be a table, view, or subquery.As: Is used to connect to table_reference", + "doc_type":"sqlreference", + "kw":"AS for Table,Alias,SQL Syntax Reference", + "title":"AS for Table", + "githuburl":"" + }, + { + "uri":"dli_08_0181.html", + "product_code":"dli", + "code":"133", + "des":"This statement is used to specify an alias for a column.alias: gives an alias for the attr_expr field.AS: Whether to add AS does not affect the result.The to-be-queried t", + "doc_type":"sqlreference", + "kw":"AS for Column,Alias,SQL Syntax Reference", + "title":"AS for Column", + "githuburl":"" + }, + { + "uri":"dli_08_0182.html", + "product_code":"dli", + "code":"134", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
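The subquery and alias entries above reduce to two common shapes; employees and departments are hypothetical tables:

    SELECT name FROM employees
    WHERE dept_id IN (SELECT id FROM departments WHERE region = 'east');  -- subquery nested by WHERE

    SELECT d.dept_id, d.avg_sal
    FROM (SELECT dept_id, AVG(salary) AS avg_sal
          FROM employees GROUP BY dept_id) AS d;   -- subquery nested by FROM, with a table alias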
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Set Operations", + "title":"Set Operations", + "githuburl":"" + }, + { + "uri":"dli_08_0183.html", + "product_code":"dli", + "code":"135", + "des":"This statement is used to return the union set of multiple query results.UNION: The set operation is used to join the head and tail of a table based on certain conditions", + "doc_type":"sqlreference", + "kw":"UNION,Set Operations,SQL Syntax Reference", + "title":"UNION", + "githuburl":"" + }, + { + "uri":"dli_08_0184.html", + "product_code":"dli", + "code":"136", + "des":"This statement is used to return the intersection set of multiple query results.INTERSECT returns the intersection of multiple query results. The number of columns return", + "doc_type":"sqlreference", + "kw":"INTERSECT,Set Operations,SQL Syntax Reference", + "title":"INTERSECT", + "githuburl":"" + }, + { + "uri":"dli_08_0185.html", + "product_code":"dli", + "code":"137", + "des":"This statement is used to return the difference set of two query results.EXCEPT subtracts one result set from another. A EXCEPT B removes the records that exist in both A and B from", + "doc_type":"sqlreference", + "kw":"EXCEPT,Set Operations,SQL Syntax Reference", + "title":"EXCEPT", + "githuburl":"" + }, + { + "uri":"dli_08_0186.html", + "product_code":"dli", + "code":"138", + "des":"This statement is used to define the common table expression (CTE) using WITH...AS to simplify the query and make the result easier to read and maintain.cte_name: Name of", + "doc_type":"sqlreference", + "kw":"WITH...AS,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"WITH...AS", + "githuburl":"" + }, + { + "uri":"dli_08_0187.html", + "product_code":"dli", + "code":"139", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"CASE...WHEN", + "title":"CASE...WHEN", + "githuburl":"" + }, + { + "uri":"dli_08_0188.html", + "product_code":"dli", + "code":"140", + "des":"This statement is used to display result_expression according to the joined results of input_expression and when_expression.CASE: Subquery is supported in basic CASE stat", + "doc_type":"sqlreference", + "kw":"Basic CASE Statement,CASE...WHEN,SQL Syntax Reference", + "title":"Basic CASE Statement", + "githuburl":"" + }, + { + "uri":"dli_08_0189.html", + "product_code":"dli", + "code":"141", + "des":"This statement is used to obtain the value of boolean_expression for each WHEN statement in a specified order. Then return the first result_expression with the value TRUE", + "doc_type":"sqlreference", + "kw":"CASE Query Statement,CASE...WHEN,SQL Syntax Reference", + "title":"CASE Query Statement", + "githuburl":"" + }, + { + "uri":"dli_08_0190.html", + "product_code":"dli", + "code":"142", + "des":"This statement is used together with the window function. The OVER statement is used to group data and sort the data within the group. 
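The set-operation, CTE, CASE, and OVER entries above combine naturally; a minimal sketch over hypothetical tables t1, t2, and exam_results:

    SELECT id FROM t1 UNION SELECT id FROM t2;    -- union of two result sets
    SELECT id FROM t1 EXCEPT SELECT id FROM t2;   -- rows of t1 that are absent from t2

    WITH graded AS (
      SELECT name, dept, score,
             CASE WHEN score >= 60 THEN 'pass' ELSE 'fail' END AS grade
      FROM exam_results
    )
    SELECT name, grade,
           RANK() OVER (PARTITION BY dept ORDER BY score DESC) AS rk
    FROM graded;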
The window function is used to gene", + "doc_type":"sqlreference", + "kw":"OVER Clause,Spark SQL Syntax Reference,SQL Syntax Reference", + "title":"OVER Clause", + "githuburl":"" + }, + { + "uri":"dli_08_0370.html", + "product_code":"dli", + "code":"143", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Flink Opensource SQL 1.12 Syntax Reference", + "title":"Flink Opensource SQL 1.12 Syntax Reference", + "githuburl":"" + }, + { + "uri":"dli_08_0371.html", + "product_code":"dli", + "code":"144", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Constraints and Definitions", + "title":"Constraints and Definitions", + "githuburl":"" + }, + { + "uri":"dli_08_0372.html", + "product_code":"dli", + "code":"145", + "des":"STRING, BOOLEAN, BYTES, DECIMAL, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE, INTERVAL, ARRAY, MULTISET, MAP,", + "doc_type":"sqlreference", + "kw":"Supported Data Types,Constraints and Definitions,SQL Syntax Reference", + "title":"Supported Data Types", + "githuburl":"" + }, + { + "uri":"dli_08_0373.html", + "product_code":"dli", + "code":"146", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Syntax", + "title":"Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0374.html", + "product_code":"dli", + "code":"147", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Data Definition Language (DDL)", + "title":"Data Definition Language (DDL)", + "githuburl":"" + }, + { + "uri":"dli_08_0375.html", + "product_code":"dli", + "code":"148", + "des":"Create a table with a specified name.COMPUTED COLUMNA computed column is a virtual column generated using column_name AS computed_column_expression. 
A computed column eva", + "doc_type":"sqlreference", + "kw":"CREATE TABLE,Data Definition Language (DDL),SQL Syntax Reference", + "title":"CREATE TABLE", + "githuburl":"" + }, + { + "uri":"dli_08_0376.html", + "product_code":"dli", + "code":"149", + "des":"Create a view with multiple layers nested in it to simplify the development process.IF NOT EXISTSIf the view already exists, nothing happens.Create a view named viewName.", + "doc_type":"sqlreference", + "kw":"CREATE VIEW,Data Definition Language (DDL),SQL Syntax Reference", + "title":"CREATE VIEW", + "githuburl":"" + }, + { + "uri":"dli_08_0377.html", + "product_code":"dli", + "code":"150", + "des":"Create a user-defined function.For details about how to create a user-defined function, see User-Defined Functions (UDFs).IF NOT EXISTSIf the function already exists, not", + "doc_type":"sqlreference", + "kw":"CREATE FUNCTION,Data Definition Language (DDL),SQL Syntax Reference", + "title":"CREATE FUNCTION", + "githuburl":"" + }, + { + "uri":"dli_08_0378.html", + "product_code":"dli", + "code":"151", + "des":"SyntaxPrecautionsFlink SQL uses a lexical policy for identifier (table, attribute, function names) similar to Java:The case of identifiers is preserved whether or not the", + "doc_type":"sqlreference", + "kw":"Data Manipulation Language (DML),Syntax,SQL Syntax Reference", + "title":"Data Manipulation Language (DML)", + "githuburl":"" + }, + { + "uri":"dli_08_0379.html", + "product_code":"dli", + "code":"152", + "des":"This section describes the Flink open source SQL 1.12 syntax supported by DLI. For details about the parameters and examples, see the syntax description.", + "doc_type":"sqlreference", + "kw":"Overview,Flink Opensource SQL 1.12 Syntax Reference,SQL Syntax Reference", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"dli_08_0380.html", + "product_code":"dli", + "code":"153", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"DDL Syntax", + "title":"DDL Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0381.html", + "product_code":"dli", + "code":"154", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating Source Tables", + "title":"Creating Source Tables", + "githuburl":"" + }, + { + "uri":"dli_08_0382.html", + "product_code":"dli", + "code":"155", + "des":"DataGen is used to generate random data for debugging and testing.NoneWhen you create a DataGen table, the table field type cannot be Array, Map, or Row. You can use COMP", + "doc_type":"sqlreference", + "kw":"DataGen Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"DataGen Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0383.html", + "product_code":"dli", + "code":"156", + "des":"DLI reads data of Flink jobs from GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. 
The PostgreSQL database can store data of more complex types an", + "doc_type":"sqlreference", + "kw":"GaussDB(DWS) Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"GaussDB(DWS) Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0384.html", + "product_code":"dli", + "code":"157", + "des":"Create a source stream to obtain data from HBase as input for jobs. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excell", + "doc_type":"sqlreference", + "kw":"HBase Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"HBase Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0385.html", + "product_code":"dli", + "code":"158", + "des":"The JDBC connector is a Flink's built-in connector to read data from a database.An enhanced datasource connection with the instances has been established, so that you can", + "doc_type":"sqlreference", + "kw":"JDBC Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"JDBC Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0386.html", + "product_code":"dli", + "code":"159", + "des":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "doc_type":"sqlreference", + "kw":"Kafka Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"Kafka Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0387.html", + "product_code":"dli", + "code":"160", + "des":"The MySQL CDC source table, that is, the MySQL streaming source table, reads all historical data in the database first and then smoothly switches data read to the Binlog ", + "doc_type":"sqlreference", + "kw":"MySQL CDC Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"MySQL CDC Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0388.html", + "product_code":"dli", + "code":"161", + "des":"The Postgres CDC source table, that is, Postgres streaming source table, is used to read the full snapshot data and changed data of the PostgreSQL database in sequence. T", + "doc_type":"sqlreference", + "kw":"Postgres CDC Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"Postgres CDC Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0389.html", + "product_code":"dli", + "code":"162", + "des":"Create a source stream to obtain data from Redis as input for jobs.An enhanced datasource connection has been created for DLI to connect to the Redis database, so that yo", + "doc_type":"sqlreference", + "kw":"Redis Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"Redis Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0390.html", + "product_code":"dli", + "code":"163", + "des":"Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provi", + "doc_type":"sqlreference", + "kw":"Upsert Kafka Source Table,Creating Source Tables,SQL Syntax Reference", + "title":"Upsert Kafka Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0391.html", + "product_code":"dli", + "code":"164", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
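A minimal Flink 1.12 sketch of the Kafka source table described above (the topic and broker address are made up):

    CREATE TABLE kafka_src (
      user_id STRING,
      event_time TIMESTAMP(3)
    ) WITH (
      'connector' = 'kafka',
      'topic' = 'my_topic',
      'properties.bootstrap.servers' = 'kafka01:9092',
      'properties.group.id' = 'dli-demo',
      'scan.startup.mode' = 'latest-offset',
      'format' = 'json'
    );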
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating Result Tables", + "title":"Creating Result Tables", + "githuburl":"" + }, + { + "uri":"dli_08_0392.html", + "product_code":"dli", + "code":"165", + "des":"The BlackHole connector allows for swallowing all input records. It is designed for high-performance testing and UDF output. It is not a substantive sink. The BlackHole r", + "doc_type":"sqlreference", + "kw":"BlackHole Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"BlackHole Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0393.html", + "product_code":"dli", + "code":"166", + "des":"DLI can output Flink job data to the ClickHouse database. ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and prov", + "doc_type":"sqlreference", + "kw":"ClickHouse Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"ClickHouse Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0394.html", + "product_code":"dli", + "code":"167", + "des":"DLI outputs the Flink job output data to GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex t", + "doc_type":"sqlreference", + "kw":"GaussDB(DWS) Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"GaussDB(DWS) Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0395.html", + "product_code":"dli", + "code":"168", + "des":"DLI outputs Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides th", + "doc_type":"sqlreference", + "kw":"Elasticsearch Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"Elasticsearch Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0396.html", + "product_code":"dli", + "code":"169", + "des":"DLI outputs the job data to HBase. 
HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scal", + "doc_type":"sqlreference", + "kw":"HBase Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"HBase Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0397.html", + "product_code":"dli", + "code":"170", + "des":"DLI outputs the Flink job output data to RDS through the JDBC result table.An enhanced datasource connection with the instances has been established, so that you can conf", + "doc_type":"sqlreference", + "kw":"JDBC Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"JDBC Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0398.html", + "product_code":"dli", + "code":"171", + "des":"DLI outputs the Flink job output data to Kafka through the Kafka result table.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subs", + "doc_type":"sqlreference", + "kw":"Kafka Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"Kafka Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0399.html", + "product_code":"dli", + "code":"172", + "des":"The Print connector is used to print output data to the error file or TaskManager file, making it easier for you to view the result in code debugging.NoneThe Print result", + "doc_type":"sqlreference", + "kw":"Print Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"Print Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0400.html", + "product_code":"dli", + "code":"173", + "des":"DLI outputs the Flink job output data to Redis. Redis is a key-value storage system that supports multiple types of data structures. It can be used in scenarios such as c", + "doc_type":"sqlreference", + "kw":"Redis Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"Redis Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0401.html", + "product_code":"dli", + "code":"174", + "des":"Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provi", + "doc_type":"sqlreference", + "kw":"Upsert Kafka Result Table,Creating Result Tables,SQL Syntax Reference", + "title":"Upsert Kafka Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0402.html", + "product_code":"dli", + "code":"175", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating Dimension Tables", + "title":"Creating Dimension Tables", + "githuburl":"" + }, + { + "uri":"dli_08_0403.html", + "product_code":"dli", + "code":"176", + "des":"Create a GaussDB(DWS) table to connect to source streams for wide table generation.Ensure that you have created a GaussDB(DWS) cluster using your account.A DWS database t", + "doc_type":"sqlreference", + "kw":"GaussDB(DWS) Dimension Table,Creating Dimension Tables,SQL Syntax Reference", + "title":"GaussDB(DWS) Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0404.html", + "product_code":"dli", + "code":"177", + "des":"Create an HBase dimension table to connect to the source streams for wide table generation.An enhanced datasource connection has been created for DLI to connect to HBase, ", + "doc_type":"sqlreference", + "kw":"HBase Dimension Table,Creating Dimension Tables,SQL Syntax Reference", + "title":"HBase Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0405.html", + "product_code":"dli", + "code":"178", + "des":"Create a JDBC dimension table to connect to the source stream.You have created a JDBC instance for your account.When you create a Flink OpenSource SQL job, set Flink Vers", + "doc_type":"sqlreference", + "kw":"JDBC Dimension Table,Creating Dimension Tables,SQL Syntax Reference", + "title":"JDBC Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0406.html", + "product_code":"dli", + "code":"179", + "des":"Create a Redis table to connect to source streams for wide table generation.An enhanced datasource connection with Redis has been established, so that you can configure s", + "doc_type":"sqlreference", + "kw":"Redis Dimension Table,Creating Dimension Tables,SQL Syntax Reference", + "title":"Redis Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0407.html", + "product_code":"dli", + "code":"180", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Format", + "title":"Format", + "githuburl":"" + }, + { + "uri":"dli_08_0408.html", + "product_code":"dli", + "code":"181", + "des":"Flink supports reading and writing Avro data based on an Avro schema. The Avro schema is derived from the table schema.KafkaUpsert KafkaCurrent", + "doc_type":"sqlreference", + "kw":"Avro,Format,SQL Syntax Reference", + "title":"Avro", + "githuburl":"" + }, + { + "uri":"dli_08_0409.html", + "product_code":"dli", + "code":"182", + "des":"Canal is a Changelog Data Capture (CDC) tool that can stream changes in real-time from MySQL into other systems. 
Canal provides a unified format schema for changelog and ", + "doc_type":"sqlreference", + "kw":"Canal,Format,SQL Syntax Reference", + "title":"Canal", + "githuburl":"" + }, + { + "uri":"dli_08_0410.html", + "product_code":"dli", + "code":"183", + "des":"The Avro Schema Registry (avro-confluent) format allows you to read records that were serialized by the io.confluent.kafka.serializers.KafkaAvroSerializer and to write re", + "doc_type":"sqlreference", + "kw":"Confluent Avro,Format,SQL Syntax Reference", + "title":"Confluent Avro", + "githuburl":"" + }, + { + "uri":"dli_08_0411.html", + "product_code":"dli", + "code":"184", + "des":"The CSV format allows you to read and write CSV data based on a CSV schema. Currently, the CSV schema is derived from the table schema.KafkaUpsert KafkaUse Kafka to send data", + "doc_type":"sqlreference", + "kw":"CSV,Format,SQL Syntax Reference", + "title":"CSV", + "githuburl":"" + }, + { + "uri":"dli_08_0412.html", + "product_code":"dli", + "code":"185", + "des":"Debezium is a Changelog Data Capture (CDC) tool that can stream changes in real-time from other databases into Kafka. Debezium provides a unified format schema for change", + "doc_type":"sqlreference", + "kw":"Debezium,Format,SQL Syntax Reference", + "title":"Debezium", + "githuburl":"" + }, + { + "uri":"dli_08_0413.html", + "product_code":"dli", + "code":"186", + "des":"The JSON format allows you to read and write JSON data based on a JSON schema. Currently, the JSON schema is derived from the table schema.KafkaUpsert KafkaElasticsearchIn th", + "doc_type":"sqlreference", + "kw":"JSON,Format,SQL Syntax Reference", + "title":"JSON", + "githuburl":"" + }, + { + "uri":"dli_08_0414.html", + "product_code":"dli", + "code":"187", + "des":"Flink supports interpreting Maxwell JSON messages as INSERT/UPDATE/DELETE messages in the Flink SQL system. This feature is useful in many cases, such as: Sy", + "doc_type":"sqlreference", + "kw":"Maxwell,Format,SQL Syntax Reference", + "title":"Maxwell", + "githuburl":"" + }, + { + "uri":"dli_08_0415.html", + "product_code":"dli", + "code":"188", + "des":"The raw format allows you to read and write raw (byte-based) values as a single column.Note: This format encodes null values as null of the byte[] type. This may have lim", + "doc_type":"sqlreference", + "kw":"Raw,Format,SQL Syntax Reference", + "title":"Raw", + "githuburl":"" + }, + { + "uri":"dli_08_0416.html", + "product_code":"dli", + "code":"189", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"DML Syntax", + "title":"DML Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0417.html", + "product_code":"dli", + "code":"190", + "des":"SyntaxDescriptionSELECT is used to select data from a table.ALL indicates that all results are returned.DISTINCT indicates that the duplicated results are removed.Precaut", + "doc_type":"sqlreference", + "kw":"SELECT,DML Syntax,SQL Syntax Reference", + "title":"SELECT", + "githuburl":"" + }, + { + "uri":"dli_08_0418.html", + "product_code":"dli", + "code":"191", + "des":"SyntaxDescriptionUNION is used to return the union set of multiple query results.INTERSECT is used to return the intersection of multiple query results.EXCEPT is used to ", + "doc_type":"sqlreference", + "kw":"Set Operations,DML Syntax,SQL Syntax Reference", + "title":"Set Operations", + "githuburl":"" + }, + { + "uri":"dli_08_0419.html", + "product_code":"dli", + "code":"192", + "des":"DescriptionGroup Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:Array functionsArray functionsGroup", + "doc_type":"sqlreference", + "kw":"Window,DML Syntax,SQL Syntax Reference", + "title":"Window", + "githuburl":"" + }, + { + "uri":"dli_08_0420.html", + "product_code":"dli", + "code":"193", + "des":"SyntaxPrecautionsCurrently, only equi-joins are supported, that is, joins that have at least one conjunctive condition with an equality predicate. Arbitrary cross or ", + "doc_type":"sqlreference", + "kw":"JOIN,DML Syntax,SQL Syntax Reference", + "title":"JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0421.html", + "product_code":"dli", + "code":"194", + "des":"FunctionThis clause is used to sort data in ascending order on a time attribute.PrecautionsCurrently, only sorting by time attribute is supported.ExampleSort data in asce", + "doc_type":"sqlreference", + "kw":"OrderBy & Limit,DML Syntax,SQL Syntax Reference", + "title":"OrderBy & Limit", + "githuburl":"" + }, + { + "uri":"dli_08_0422.html", + "product_code":"dli", + "code":"195", + "des":"Top-N queries ask for the N smallest or largest values ordered by columns. Both smallest and largest values sets are considered Top-N queries. Top-N queries are useful in", + "doc_type":"sqlreference", + "kw":"Top-N,DML Syntax,SQL Syntax Reference", + "title":"Top-N", + "githuburl":"" + }, + { + "uri":"dli_08_0423.html", + "product_code":"dli", + "code":"196", + "des":"Deduplication removes rows that duplicate over a set of columns, keeping only the first one or the last one.ROW_NUMBER(): Assigns a unique, sequential number to each row,", + "doc_type":"sqlreference", + "kw":"Deduplication,DML Syntax,SQL Syntax Reference", + "title":"Deduplication", + "githuburl":"" + }, + { + "uri":"dli_08_0424.html", + "product_code":"dli", + "code":"197", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Functions", + "title":"Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0425.html", + "product_code":"dli", + "code":"198", + "des":"DLI supports the following three types of user-defined functions (UDFs):Regular UDF: takes in one or more input parameters and returns a single result.User-defined table-", + "doc_type":"sqlreference", + "kw":"User-Defined Functions (UDFs),Functions,SQL Syntax Reference", + "title":"User-Defined Functions (UDFs)", + "githuburl":"" + }, + { + "uri":"dli_08_0426.html", + "product_code":"dli", + "code":"199", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Built-In Functions", + "title":"Built-In Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0427.html", + "product_code":"dli", + "code":"200", + "des":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relational operators are binary operators. Two compared data ", + "doc_type":"sqlreference", + "kw":"Mathematical Operation Functions,Built-In Functions,SQL Syntax Reference", + "title":"Mathematical Operation Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0428.html", + "product_code":"dli", + "code":"201", + "des":"SyntaxExampleTest input data.Test the data source kafka. The message content is as follows:{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}}\n{name:", + "doc_type":"sqlreference", + "kw":"String Functions,Built-In Functions,SQL Syntax Reference", + "title":"String Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0429.html", + "product_code":"dli", + "code":"202", + "des":"Table 1 lists the time functions supported by Flink OpenSource SQL.FunctionReturns a SQL date parsed from string in the form of yyyy-MM-dd.Returns a SQL date parsed from stri", + "doc_type":"sqlreference", + "kw":"Temporal Functions,Built-In Functions,SQL Syntax Reference", + "title":"Temporal Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0430.html", + "product_code":"dli", + "code":"203", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Conditional Functions,Built-In Functions,SQL Syntax Reference", + "title":"Conditional Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0431.html", + "product_code":"dli", + "code":"204", + "des":"This function is used to forcibly convert types.If the input is NULL, NULL is returned.The following example converts the amount value to an integer.Flink jobs do not sup", + "doc_type":"sqlreference", + "kw":"Type Conversion Functions,Built-In Functions,SQL Syntax Reference", + "title":"Type Conversion Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0432.html", + "product_code":"dli", + "code":"205", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Collection Functions,Built-In Functions,SQL Syntax Reference", + "title":"Collection Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0433.html", + "product_code":"dli", + "code":"206", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Value Construction Functions,Built-In Functions,SQL Syntax Reference", + "title":"Value Construction Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0434.html", + "product_code":"dli", + "code":"207", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Value Access Functions,Built-In Functions,SQL Syntax Reference", + "title":"Value Access Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0435.html", + "product_code":"dli", + "code":"208", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Hash Functions,Built-In Functions,SQL Syntax Reference", + "title":"Hash Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0436.html", + "product_code":"dli", + "code":"209", + "des":"An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved b", + "doc_type":"sqlreference", + "kw":"Aggregate Functions,Built-In Functions,SQL Syntax Reference", + "title":"Aggregate Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0437.html", + "product_code":"dli", + "code":"210", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Table-Valued Functions", + "title":"Table-Valued Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0438.html", + "product_code":"dli", + "code":"211", + "des":"The string_split function splits a target string into substrings based on the specified separator and returns a substring list.Create a Flink OpenSource SQL job by referr", + "doc_type":"sqlreference", + "kw":"string_split,Table-Valued Functions,SQL Syntax Reference", + "title":"string_split", + "githuburl":"" + }, + { + "uri":"dli_08_0289.html", + "product_code":"dli", + "code":"212", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Flink Opensource SQL 1.10 Syntax Reference", + "title":"Flink Opensource SQL 1.10 Syntax Reference", + "githuburl":"" + }, + { + "uri":"dli_08_0290.html", + "product_code":"dli", + "code":"213", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Constraints and Definitions", + "title":"Constraints and Definitions", + "githuburl":"" + }, + { + "uri":"dli_08_0291.html", + "product_code":"dli", + "code":"214", + "des":"STRING, BOOLEAN, BYTES, DECIMAL, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE, INTERVAL, ARRAY, MULTISET, MAP,", + "doc_type":"sqlreference", + "kw":"Supported Data Types,Constraints and Definitions,SQL Syntax Reference", + "title":"Supported Data Types", + "githuburl":"" + }, + { + "uri":"dli_08_0292.html", + "product_code":"dli", + "code":"215", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Syntax Definition", + "title":"Syntax Definition", + "githuburl":"" + }, + { + "uri":"dli_08_0293.html", + "product_code":"dli", + "code":"216", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Data Definition Language (DDL)", + "title":"Data Definition Language (DDL)", + "githuburl":"" + }, + { + "uri":"dli_08_0294.html", + "product_code":"dli", + "code":"217", + "des":"This clause is used to create a table with a specified name.COMPUTED COLUMNA computed column is a virtual column generated using column_name AS computed_column_expression", + "doc_type":"sqlreference", + "kw":"CREATE TABLE,Data Definition Language (DDL),SQL Syntax Reference", + "title":"CREATE TABLE", + "githuburl":"" + }, + { + "uri":"dli_08_0295.html", + "product_code":"dli", + "code":"218", + "des":"Create a view with multiple layers nested in it to simplify the development process.IF NOT EXISTSIf the view already exists, nothing happens.Create a view named viewName.", + "doc_type":"sqlreference", + "kw":"CREATE VIEW,Data Definition Language (DDL),SQL Syntax Reference", + "title":"CREATE VIEW", + "githuburl":"" + }, + { + "uri":"dli_08_0296.html", + "product_code":"dli", + "code":"219", + "des":"Create a user-defined function.IF NOT EXISTSIf the function already exists, nothing happens.LANGUAGE JAVA|SCALALanguage tag is used to instruct Flink runtime how to execu", + "doc_type":"sqlreference", + "kw":"CREATE FUNCTION,Data Definition Language (DDL),SQL Syntax Reference", + "title":"CREATE FUNCTION", + "githuburl":"" + }, + { + "uri":"dli_08_0297.html", + "product_code":"dli", + "code":"220", + "des":"SyntaxPrecautionsFlink SQL uses a lexical policy for identifier (table, attribute, function names) similar to Java:The case of identifiers is preserved whether they are q", + "doc_type":"sqlreference", + "kw":"Data Manipulation Language (DML),Syntax Definition,SQL Syntax Reference", + "title":"Data Manipulation Language (DML)", + "githuburl":"" + }, + { + "uri":"dli_08_0298.html", + "product_code":"dli", + "code":"221", + "des":"This section describes the Flink OpenSource SQL syntax supported by DLI. For details about the parameters and examples, see the syntax description.", + "doc_type":"sqlreference", + "kw":"Flink OpenSource SQL 1.10 Syntax,Flink Opensource SQL 1.10 Syntax Reference,SQL Syntax Reference", + "title":"Flink OpenSource SQL 1.10 Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0299.html", + "product_code":"dli", + "code":"222", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Data Definition Language (DDL)", + "title":"Data Definition Language (DDL)", + "githuburl":"" + }, + { + "uri":"dli_08_0300.html", + "product_code":"dli", + "code":"223", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Source Table", + "title":"Creating a Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0301.html", + "product_code":"dli", + "code":"224", + "des":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "doc_type":"sqlreference", + "kw":"Kafka Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"Kafka Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0302.html", + "product_code":"dli", + "code":"225", + "des":"Create a source stream to read data from DIS. DIS accesses user data and the Flink job reads data from the DIS stream as input data for jobs. Flink jobs can quickly remove da", + "doc_type":"sqlreference", + "kw":"DIS Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"DIS Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0303.html", + "product_code":"dli", + "code":"226", + "des":"The JDBC connector is Flink's built-in connector to read data from a database.An enhanced datasource connection with the database has been established, so that you can ", + "doc_type":"sqlreference", + "kw":"JDBC Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"JDBC Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0304.html", + "product_code":"dli", + "code":"227", + "des":"DLI reads data of Flink jobs from GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex types an", + "doc_type":"sqlreference", + "kw":"GaussDB(DWS) Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"GaussDB(DWS) Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0305.html", + "product_code":"dli", + "code":"228", + "des":"Create a source stream to obtain data from Redis as input for jobs.An enhanced datasource connection with Redis has been established, so that you can configure security g", + "doc_type":"sqlreference", + "kw":"Redis Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"Redis Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0306.html", + "product_code":"dli", + "code":"229", + "des":"Create a source stream to obtain data from HBase as input for jobs. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excell", + "doc_type":"sqlreference", + "kw":"HBase Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"HBase Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0358.html", + "product_code":"dli", + "code":"230", + "des":"You can call APIs to obtain data from the cloud ecosystem or an open source ecosystem and use the obtained data as input of Flink jobs.The customized source class needs t", + "doc_type":"sqlreference", + "kw":"userDefined Source Table,Creating a Source Table,SQL Syntax Reference", + "title":"userDefined Source Table", + "githuburl":"" + }, + { + "uri":"dli_08_0307.html", + "product_code":"dli", + "code":"231", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Result Table", + "title":"Creating a Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0344.html", + "product_code":"dli", + "code":"232", + "des":"DLI exports Flink job data to ClickHouse result tables.ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and provide", + "doc_type":"sqlreference", + "kw":"ClickHouse Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"ClickHouse Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0308.html", + "product_code":"dli", + "code":"233", + "des":"DLI exports the output data of the Flink job to Kafka.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It deli", + "doc_type":"sqlreference", + "kw":"Kafka Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"Kafka Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0309.html", + "product_code":"dli", + "code":"234", + "des":"DLI exports the output data of the Flink job to Kafka in upsert mode.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription ", + "doc_type":"sqlreference", + "kw":"Upsert Kafka Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"Upsert Kafka Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0310.html", + "product_code":"dli", + "code":"235", + "des":"DLI writes the Flink job output data into DIS. The data is filtered and imported to the DIS stream for future processing.DIS addresses the challenge of transmitting data ", + "doc_type":"sqlreference", + "kw":"DIS Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"DIS Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0311.html", + "product_code":"dli", + "code":"236", + "des":"DLI exports the output data of the Flink job to RDS.An enhanced datasource connection with the database has been established, so that you can configure security group rul", + "doc_type":"sqlreference", + "kw":"JDBC Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"JDBC Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0312.html", + "product_code":"dli", + "code":"237", + "des":"DLI outputs the Flink job output data to GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex t", + "doc_type":"sqlreference", + "kw":"GaussDB(DWS) Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"GaussDB(DWS) Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0313.html", + "product_code":"dli", + "code":"238", + "des":"DLI exports the output data of the Flink job to Redis. Redis is a storage system that supports multiple types of data structures such as key-value. It can be used in scen", + "doc_type":"sqlreference", + "kw":"Redis Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"Redis Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0314.html", + "product_code":"dli", + "code":"239", + "des":"DLI exports Flink job output data to SMN.SMN provides reliable and flexible large-scale message notification services to DLI. 
It significantly simplifies system coupling ", + "doc_type":"sqlreference", + "kw":"SMN Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"SMN Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0315.html", + "product_code":"dli", + "code":"240", + "des":"DLI outputs the job data to HBase. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scal", + "doc_type":"sqlreference", + "kw":"HBase Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"HBase Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0316.html", + "product_code":"dli", + "code":"241", + "des":"DLI exports Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides th", + "doc_type":"sqlreference", + "kw":"Elasticsearch Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"Elasticsearch Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0348.html", + "product_code":"dli", + "code":"242", + "des":"OpenTSDB is a distributed, scalable time series database based on HBase. OpenTSDB is designed to collect monitoring information of a large-scale cluster and query data in", + "doc_type":"sqlreference", + "kw":"OpenTSDB Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"OpenTSDB Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0347.html", + "product_code":"dli", + "code":"243", + "des":"Write your Java code to insert the processed data into a specified database supported by your cloud service.Implement the custom sink class: The custom sink class is inhe", + "doc_type":"sqlreference", + "kw":"User-defined Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"User-defined Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0345.html", + "product_code":"dli", + "code":"244", + "des":"The print connector exports your output data to the error file or the out file of TaskManager. It is mainly used for code debugging and output viewing.Read data from Kafk", + "doc_type":"sqlreference", + "kw":"Print Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"Print Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0346.html", + "product_code":"dli", + "code":"245", + "des":"You can create a file system result table to export data to a file system such as HDFS or OBS. After the data is generated, a non-DLI table can be created directly accord", + "doc_type":"sqlreference", + "kw":"File System Result Table,Creating a Result Table,SQL Syntax Reference", + "title":"File System Result Table", + "githuburl":"" + }, + { + "uri":"dli_08_0317.html", + "product_code":"dli", + "code":"246", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Dimension Table", + "title":"Creating a Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0318.html", + "product_code":"dli", + "code":"247", + "des":"Create a JDBC dimension table to connect to the source stream.You have created a JDBC instance for your account.The RDS table is used to connect to the source stream.CREA", + "doc_type":"sqlreference", + "kw":"JDBC Dimension Table,Creating a Dimension Table,SQL Syntax Reference", + "title":"JDBC Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0319.html", + "product_code":"dli", + "code":"248", + "des":"Create a GaussDB(DWS) dimension table to connect to the input stream.You have created a GaussDB(DWS) instance for your account.Use an RDS table to connect to the source s", + "doc_type":"sqlreference", + "kw":"GaussDB(DWS) Dimension Table,Creating a Dimension Table,SQL Syntax Reference", + "title":"GaussDB(DWS) Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0320.html", + "product_code":"dli", + "code":"249", + "des":"Create an HBase dimension table to connect to the source stream.An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the", + "doc_type":"sqlreference", + "kw":"HBase Dimension Table,Creating a Dimension Table,SQL Syntax Reference", + "title":"HBase Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0321.html", + "product_code":"dli", + "code":"250", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Data Manipulation Language (DML)", + "title":"Data Manipulation Language (DML)", + "githuburl":"" + }, + { + "uri":"dli_08_0322.html", + "product_code":"dli", + "code":"251", + "des":"SyntaxDescriptionThis clause is used to select data from a table.ALL indicates that all results are returned.DISTINCT indicates that the duplicated results are removed.Pr", + "doc_type":"sqlreference", + "kw":"SELECT,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"SELECT", + "githuburl":"" + }, + { + "uri":"dli_08_0323.html", + "product_code":"dli", + "code":"252", + "des":"SyntaxDescriptionUNION is used to return the union set of multiple query results.INTERSECT is used to return the intersection of multiple query results.EXCEPT is used to ", + "doc_type":"sqlreference", + "kw":"Set Operations,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"Set Operations", + "githuburl":"" + }, + { + "uri":"dli_08_0324.html", + "product_code":"dli", + "code":"253", + "des":"DescriptionGroup Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:Array functionsArray functionsGroup", + "doc_type":"sqlreference", + "kw":"Window,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"Window", + "githuburl":"" + }, + { + "uri":"dli_08_0325.html", + "product_code":"dli", + "code":"254", + "des":"SyntaxPrecautionsCurrently, only equi-joins are supported, that is, joins that have at least one conjunctive condition with an equality predicate. 
Arbitrary cross or ", + "doc_type":"sqlreference", + "kw":"JOIN,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"JOIN", + "githuburl":"" + }, + { + "uri":"dli_08_0326.html", + "product_code":"dli", + "code":"255", + "des":"FunctionThis clause is used to sort data in ascending order on a time attribute.PrecautionsCurrently, only sorting by time attribute is supported.ExampleSort data in asce", + "doc_type":"sqlreference", + "kw":"OrderBy & Limit,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"OrderBy & Limit", + "githuburl":"" + }, + { + "uri":"dli_08_0327.html", + "product_code":"dli", + "code":"256", + "des":"Top-N queries ask for the N smallest or largest values ordered by columns. Both smallest and largest values sets are considered Top-N queries. Top-N queries are useful in", + "doc_type":"sqlreference", + "kw":"Top-N,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"Top-N", + "githuburl":"" + }, + { + "uri":"dli_08_0328.html", + "product_code":"dli", + "code":"257", + "des":"Deduplication removes rows that duplicate over a set of columns, keeping only the first one or the last one.ROW_NUMBER(): Assigns a unique, sequential number to each row,", + "doc_type":"sqlreference", + "kw":"Deduplication,Data Manipulation Language (DML),SQL Syntax Reference", + "title":"Deduplication", + "githuburl":"" + }, + { + "uri":"dli_08_0329.html", + "product_code":"dli", + "code":"258", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Functions", + "title":"Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0330.html", + "product_code":"dli", + "code":"259", + "des":"DLI supports the following three types of user-defined functions (UDFs):Regular UDF: takes in one or more input parameters and returns a single result.User-defined table-", + "doc_type":"sqlreference", + "kw":"User-Defined Functions,Functions,SQL Syntax Reference", + "title":"User-Defined Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0331.html", + "product_code":"dli", + "code":"260", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Built-In Functions", + "title":"Built-In Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0332.html", + "product_code":"dli", + "code":"261", + "des":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relational operators are binary operators. Two compared data ", + "doc_type":"sqlreference", + "kw":"Mathematical Operation Functions,Built-In Functions,SQL Syntax Reference", + "title":"Mathematical Operation Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0333.html", + "product_code":"dli", + "code":"262", + "des":"SyntaxExampleTest input data.Test the data source kafka. 
The message content is as follows:\"{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}}\"\n\"{na", + "doc_type":"sqlreference", + "kw":"String Functions,Built-In Functions,SQL Syntax Reference", + "title":"String Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0334.html", + "product_code":"dli", + "code":"263", + "des":"Table 1 lists the temporal functions supported by Flink OpenSource SQL.FunctionReturns a date parsed from string in form of yyyy-MM-dd.Returns a date parsed from string i", + "doc_type":"sqlreference", + "kw":"Temporal Functions,Built-In Functions,SQL Syntax Reference", + "title":"Temporal Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0335.html", + "product_code":"dli", + "code":"264", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Conditional Functions,Built-In Functions,SQL Syntax Reference", + "title":"Conditional Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0336.html", + "product_code":"dli", + "code":"265", + "des":"This function is used to forcibly convert types.If the input is NULL, NULL is returned.The following example converts the amount value to an integer.Flink jobs do not sup", + "doc_type":"sqlreference", + "kw":"Type Conversion Function,Built-In Functions,SQL Syntax Reference", + "title":"Type Conversion Function", + "githuburl":"" + }, + { + "uri":"dli_08_0337.html", + "product_code":"dli", + "code":"266", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Collection Functions,Built-In Functions,SQL Syntax Reference", + "title":"Collection Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0338.html", + "product_code":"dli", + "code":"267", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Value Construction Functions,Built-In Functions,SQL Syntax Reference", + "title":"Value Construction Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0339.html", + "product_code":"dli", + "code":"268", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Value Access Functions,Built-In Functions,SQL Syntax Reference", + "title":"Value Access Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0340.html", + "product_code":"dli", + "code":"269", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Hash Functions,Built-In Functions,SQL Syntax Reference", + "title":"Hash Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0341.html", + "product_code":"dli", + "code":"270", + "des":"An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved b", + "doc_type":"sqlreference", + "kw":"Aggregate Function,Built-In Functions,SQL Syntax Reference", + "title":"Aggregate Function", + "githuburl":"" + }, + { + "uri":"dli_08_0342.html", + "product_code":"dli", + "code":"271", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Table-Valued Functions", + "title":"Table-Valued Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0357.html", + "product_code":"dli", + "code":"272", + "des":"The split_cursor function can convert one row of records into multiple rows or convert one column of records into multiple columns. Table-valued functions can only be use", + "doc_type":"sqlreference", + "kw":"split_cursor,Table-Valued Functions,SQL Syntax Reference", + "title":"split_cursor", + "githuburl":"" + }, + { + "uri":"dli_08_0356.html", + "product_code":"dli", + "code":"273", + "des":"The string_split function splits a target string into substrings based on the specified separator and returns a substring list.Prepare test input data.Source table disSou", + "doc_type":"sqlreference", + "kw":"string_split,Table-Valued Functions,SQL Syntax Reference", + "title":"string_split", + "githuburl":"" + }, + { + "uri":"dli_08_0450.html", + "product_code":"dli", + "code":"274", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Historical Versions", + "title":"Historical Versions", + "githuburl":"" + }, + { + "uri":"dli_08_0233.html", + "product_code":"dli", + "code":"275", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Flink SQL Syntax", + "title":"Flink SQL Syntax", + "githuburl":"" + }, + { + "uri":"dli_08_0075.html", + "product_code":"dli", + "code":"276", + "des":"Currently, Flink SQL only supports the following operations: SELECT, FROM, WHERE, UNION, aggregation, window, JOIN between stream and table data, and JOIN between streams", + "doc_type":"sqlreference", + "kw":"SQL Syntax Constraints and Definitions,Flink SQL Syntax,SQL Syntax Reference", + "title":"SQL Syntax Constraints and Definitions", + "githuburl":"" + }, + { + "uri":"dli_08_0275.html", + "product_code":"dli", + "code":"277", + "des":"This section describes the Flink SQL syntax list provided by DLI. 
For details about the parameters and examples, see the syntax description.", + "doc_type":"sqlreference", + "kw":"SQL Syntax Overview of Stream Jobs,Flink SQL Syntax,SQL Syntax Reference", + "title":"SQL Syntax Overview of Stream Jobs", + "githuburl":"" + }, + { + "uri":"dli_08_0234.html", + "product_code":"dli", + "code":"278", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Source Stream", + "title":"Creating a Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0237.html", + "product_code":"dli", + "code":"279", + "des":"Create a source stream to obtain data from HBase of CloudTable as input data of the job. HBase is a column-oriented distributed cloud storage system that features enhance", + "doc_type":"sqlreference", + "kw":"CloudTable HBase Source Stream,Creating a Source Stream,SQL Syntax Reference", + "title":"CloudTable HBase Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0235.html", + "product_code":"dli", + "code":"280", + "des":"Create a source stream to read data from DIS. DIS accesses user data and the Flink job reads data from the DIS stream as input data for jobs. Flink jobs can quickly remove da", + "doc_type":"sqlreference", + "kw":"DIS Source Stream,Creating a Source Stream,SQL Syntax Reference", + "title":"DIS Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0270.html", + "product_code":"dli", + "code":"281", + "des":"DMS (Distributed Message Service) is a message middleware service based on distributed, high-availability clustering technology. It provides reliable, scalable, fully man", + "doc_type":"sqlreference", + "kw":"DMS Source Stream,Creating a Source Stream,SQL Syntax Reference", + "title":"DMS Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0238.html", + "product_code":"dli", + "code":"282", + "des":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "doc_type":"sqlreference", + "kw":"MRS Kafka Source Stream,Creating a Source Stream,SQL Syntax Reference", + "title":"MRS Kafka Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0239.html", + "product_code":"dli", + "code":"283", + "des":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "doc_type":"sqlreference", + "kw":"Open-Source Kafka Source Stream,Creating a Source Stream,SQL Syntax Reference", + "title":"Open-Source Kafka Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0236.html", + "product_code":"dli", + "code":"284", + "des":"Create a source stream to obtain data from OBS. DLI reads data stored by users in OBS as input data for jobs. OBS applies to various scenarios, such as big data analysis,", + "doc_type":"sqlreference", + "kw":"OBS Source Stream,Creating a Source Stream,SQL Syntax Reference", + "title":"OBS Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0240.html", + "product_code":"dli", + "code":"285", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Sink Stream", + "title":"Creating a Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0243.html", + "product_code":"dli", + "code":"286", + "des":"DLI exports the job output data to HBase of CloudTable. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performa", + "doc_type":"sqlreference", + "kw":"CloudTable HBase Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"CloudTable HBase Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0244.html", + "product_code":"dli", + "code":"287", + "des":"DLI exports the job output data to OpenTSDB of CloudTable. OpenTSDB is a distributed, scalable time series database based on HBase. It stores time series data. Time serie", + "doc_type":"sqlreference", + "kw":"CloudTable OpenTSDB Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"CloudTable OpenTSDB Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0286.html", + "product_code":"dli", + "code":"288", + "des":"DLI exports the output data of the Flink job to OpenTSDB of MRS.OpenTSDB has been installed in the MRS cluster.In this scenario, jobs must run on the dedicated queue of D", + "doc_type":"sqlreference", + "kw":"MRS OpenTSDB Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"MRS OpenTSDB Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0252.html", + "product_code":"dli", + "code":"289", + "des":"DLI exports Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides th", + "doc_type":"sqlreference", + "kw":"CSS Elasticsearch Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"CSS Elasticsearch Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0253.html", + "product_code":"dli", + "code":"290", + "des":"DLI exports the Flink job output data to Redis of DCS. Redis is a storage system that supports multiple types of data structures such as key-value. It can be used in scen", + "doc_type":"sqlreference", + "kw":"DCS Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"DCS Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0249.html", + "product_code":"dli", + "code":"291", + "des":"DLI outputs the job output data to Document Database Service (DDS).DDS is compatible with the MongoDB protocol and is secure, highly available, reliable, scalable, and ea", + "doc_type":"sqlreference", + "kw":"DDS Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"DDS Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0241.html", + "product_code":"dli", + "code":"292", + "des":"DLI writes the Flink job output data into DIS. This cloud ecosystem is applicable to scenarios where data is filtered and imported to the DIS stream for future processing", + "doc_type":"sqlreference", + "kw":"DIS Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"DIS Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0271.html", + "product_code":"dli", + "code":"293", + "des":"DMS (Distributed Message Service) is a message middleware service based on distributed, high-availability clustering technology. 
It provides reliable, scalable, fully man", + "doc_type":"sqlreference", + "kw":"DMS Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"DMS Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0247.html", + "product_code":"dli", + "code":"294", + "des":"DLI outputs the Flink job output data to Data Warehouse Service (DWS). DWS database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more co", + "doc_type":"sqlreference", + "kw":"DWS Sink Stream (JDBC Mode),Creating a Sink Stream,SQL Syntax Reference", + "title":"DWS Sink Stream (JDBC Mode)", + "githuburl":"" + }, + { + "uri":"dli_08_0248.html", + "product_code":"dli", + "code":"295", + "des":"Create a sink stream to export Flink job data to DWS through OBS-based dumping, specifically, output Flink job data to OBS and then import data from OBS to DWS. For detai", + "doc_type":"sqlreference", + "kw":"DWS Sink Stream (OBS-based Dumping),Creating a Sink Stream,SQL Syntax Reference", + "title":"DWS Sink Stream (OBS-based Dumping)", + "githuburl":"" + }, + { + "uri":"dli_08_0255.html", + "product_code":"dli", + "code":"296", + "des":"DLI exports the output data of the Flink job to HBase of MRS.An MRS cluster has been created by using your account. DLI can interconnect with HBase clusters with Kerberos", + "doc_type":"sqlreference", + "kw":"MRS HBase Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"MRS HBase Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0254.html", + "product_code":"dli", + "code":"297", + "des":"DLI exports the output data of the Flink job to Kafka.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It deli", + "doc_type":"sqlreference", + "kw":"MRS Kafka Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"MRS Kafka Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0257.html", + "product_code":"dli", + "code":"298", + "des":"DLI exports the output data of the Flink job to Kafka.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It deli", + "doc_type":"sqlreference", + "kw":"Open-Source Kafka Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"Open-Source Kafka Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0267.html", + "product_code":"dli", + "code":"299", + "des":"You can create a sink stream to export data to a file system such as HDFS or OBS. After the data is generated, a non-DLI table can be created directly according to the ge", + "doc_type":"sqlreference", + "kw":"File System Sink Stream (Recommended),Creating a Sink Stream,SQL Syntax Reference", + "title":"File System Sink Stream (Recommended)", + "githuburl":"" + }, + { + "uri":"dli_08_0242.html", + "product_code":"dli", + "code":"300", + "des":"Create a sink stream to export DLI data to OBS. DLI can export the job analysis results to OBS. OBS applies to various scenarios, such as big data analysis, cloud-native ", + "doc_type":"sqlreference", + "kw":"OBS Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"OBS Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0245.html", + "product_code":"dli", + "code":"301", + "des":"DLI outputs the Flink job output data to RDS. Currently, PostgreSQL and MySQL databases are supported. 
The PostgreSQL database can store data of more complex types and de", + "doc_type":"sqlreference", + "kw":"RDS Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"RDS Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0251.html", + "product_code":"dli", + "code":"302", + "des":"DLI exports Flink job output data to SMN.SMN provides reliable and flexible large-scale message notification services to DLI. It significantly simplifies system coupling ", + "doc_type":"sqlreference", + "kw":"SMN Sink Stream,Creating a Sink Stream,SQL Syntax Reference", + "title":"SMN Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0258.html", + "product_code":"dli", + "code":"303", + "des":"The temporary stream is used to simplify SQL logic. If the SQL logic is complex, you can write SQL statements concatenated by temporary streams. The temporary stream is ju", + "doc_type":"sqlreference", + "kw":"Creating a Temporary Stream,Flink SQL Syntax,SQL Syntax Reference", + "title":"Creating a Temporary Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0259.html", + "product_code":"dli", + "code":"304", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Creating a Dimension Table", + "title":"Creating a Dimension Table", + "githuburl":"" + }, + { + "uri":"dli_08_0260.html", + "product_code":"dli", + "code":"305", + "des":"Create a Redis table to connect to the source stream.For details about the JOIN syntax, see JOIN Between Stream Data and Table Data.Redis clusters are not supported.Ensur", + "doc_type":"sqlreference", + "kw":"Creating a Redis Table,Creating a Dimension Table,SQL Syntax Reference", + "title":"Creating a Redis Table", + "githuburl":"" + }, + { + "uri":"dli_08_0261.html", + "product_code":"dli", + "code":"306", + "des":"Create an RDS/DWS table to connect to the source stream.For details about the JOIN syntax, see JOIN.Ensure that you have created a PostgreSQL or MySQL RDS instance in RDS", + "doc_type":"sqlreference", + "kw":"Creating an RDS Table,Creating a Dimension Table,SQL Syntax Reference", + "title":"Creating an RDS Table", + "githuburl":"" + }, + { + "uri":"dli_08_0272.html", + "product_code":"dli", + "code":"307", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Custom Stream Ecosystem", + "title":"Custom Stream Ecosystem", + "githuburl":"" + }, + { + "uri":"dli_08_0273.html", + "product_code":"dli", + "code":"308", + "des":"Compile code to obtain data from the desired cloud ecosystem or open-source ecosystem as the input data of Flink jobs.The user-defined source class needs to inherit the R", + "doc_type":"sqlreference", + "kw":"Custom Source Stream,Custom Stream Ecosystem,SQL Syntax Reference", + "title":"Custom Source Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0274.html", + "product_code":"dli", + "code":"309", + "des":"Compile code to write the data processed by DLI to a specified cloud ecosystem or open-source ecosystem.The user-defined sink class needs to inherit the RichSinkFunction ", + "doc_type":"sqlreference", + "kw":"Custom Sink Stream,Custom Stream Ecosystem,SQL Syntax Reference", + "title":"Custom Sink Stream", + "githuburl":"" + }, + { + "uri":"dli_08_0207.html", + "product_code":"dli", + "code":"310", + "des":"Data type is a basic attribute of data and is used to distinguish different types of data. Different data types occupy different storage space and support different operatio", + "doc_type":"sqlreference", + "kw":"Data Type,Flink SQL Syntax,SQL Syntax Reference", + "title":"Data Type", + "githuburl":"" + }, + { + "uri":"dli_08_0086.html", + "product_code":"dli", + "code":"311", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Built-In Functions", + "title":"Built-In Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0191.html", + "product_code":"dli", + "code":"312", + "des":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relational operators are binary operators. Two compared data ", + "doc_type":"sqlreference", + "kw":"Mathematical Operation Functions,Built-In Functions,SQL Syntax Reference", + "title":"Mathematical Operation Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0096.html", + "product_code":"dli", + "code":"313", + "des":"The common character string functions of DLI are as follows:FunctionConcatenates two character strings.Concatenates two character strings.SyntaxVARCHAR VARCHAR a || VARCH", + "doc_type":"sqlreference", + "kw":"String Functions,Built-In Functions,SQL Syntax Reference", + "title":"String Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0097.html", + "product_code":"dli", + "code":"314", + "des":"Table 1 lists the time functions supported by Flink SQL.None", + "doc_type":"sqlreference", + "kw":"Temporal Functions,Built-In Functions,SQL Syntax Reference", + "title":"Temporal Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0112.html", + "product_code":"dli", + "code":"315", + "des":"This function is used to forcibly convert types.If the input is NULL, NULL is returned.Flink jobs do not support the conversion of bigint to timestamp using CAST. 
You can", + "doc_type":"sqlreference", + "kw":"Type Conversion Functions,Built-In Functions,SQL Syntax Reference", + "title":"Type Conversion Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0104.html", + "product_code":"dli", + "code":"316", + "des":"An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved b", + "doc_type":"sqlreference", + "kw":"Aggregate Functions,Built-In Functions,SQL Syntax Reference", + "title":"Aggregate Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0206.html", + "product_code":"dli", + "code":"317", + "des":"Table-valued functions can convert one row of records into multiple rows or convert one column of records into multiple columns. Table-valued functions can only be used i", + "doc_type":"sqlreference", + "kw":"Table-Valued Functions,Built-In Functions,SQL Syntax Reference", + "title":"Table-Valued Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0101.html", + "product_code":"dli", + "code":"318", + "des":"Example:The returned number of elements in the array is 3.HELLO WORLD is returned.", + "doc_type":"sqlreference", + "kw":"Other Functions,Built-In Functions,SQL Syntax Reference", + "title":"Other Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0099.html", + "product_code":"dli", + "code":"319", + "des":"DLI supports the following three types of user-defined functions (UDFs):Regular UDF: takes in one or more input parameters and returns a single result.User-defined table-", + "doc_type":"sqlreference", + "kw":"User-Defined Functions,Flink SQL Syntax,SQL Syntax Reference", + "title":"User-Defined Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0209.html", + "product_code":"dli", + "code":"320", + "des":"Table 1 describes the basic geospatial geometric elements.You can build complex geospatial geometries based on basic geospatial geometric elements. Table 2 describes the ", + "doc_type":"sqlreference", + "kw":"Geographical Functions,Flink SQL Syntax,SQL Syntax Reference", + "title":"Geographical Functions", + "githuburl":"" + }, + { + "uri":"dli_08_0102.html", + "product_code":"dli", + "code":"321", + "des":"SyntaxDescriptionThe SELECT statement is used to select data from a table or insert constant data into a table.PrecautionsThe table to be queried must exist. Otherwise, a", + "doc_type":"sqlreference", + "kw":"SELECT,Flink SQL Syntax,SQL Syntax Reference", + "title":"SELECT", + "githuburl":"" + }, + { + "uri":"dli_08_0103.html", + "product_code":"dli", + "code":"322", + "des":"SyntaxorDescriptionIf the value of value is value1, result1 is returned. If the value is not any of the values listed in the clause, resultZ is returned. If no else state", + "doc_type":"sqlreference", + "kw":"Condition Expression,Flink SQL Syntax,SQL Syntax Reference", + "title":"Condition Expression", + "githuburl":"" + }, + { + "uri":"dli_08_0218.html", + "product_code":"dli", + "code":"323", + "des":"DescriptionGroup Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:time_attr can be processing-time or", + "doc_type":"sqlreference", + "kw":"Window,Flink SQL Syntax,SQL Syntax Reference", + "title":"Window", + "githuburl":"" + }, + { + "uri":"dli_08_0106.html", + "product_code":"dli", + "code":"324", + "des":"The JOIN operation allows you to query data from a table and write the query result to the sink stream. Currently, only RDSs and DCS Redis tables are supported. 
The ON ke", + "doc_type":"sqlreference", + "kw":"JOIN Between Stream Data and Table Data,Flink SQL Syntax,SQL Syntax Reference", + "title":"JOIN Between Stream Data and Table Data", + "githuburl":"" + }, + { + "uri":"dli_08_0107.html", + "product_code":"dli", + "code":"325", + "des":"Flink provides two time models: processing time and event time.DLI allows you to specify the time model during creation of the source stream and temporary stream.Processi", + "doc_type":"sqlreference", + "kw":"Configuring Time Models,Flink SQL Syntax,SQL Syntax Reference", + "title":"Configuring Time Models", + "githuburl":"" + }, + { + "uri":"dli_08_0108.html", + "product_code":"dli", + "code":"326", + "des":"Complex event processing (CEP) is used to detect complex patterns in endless data streams so as to identify and search patterns in various data rows. Pattern matching is ", + "doc_type":"sqlreference", + "kw":"Pattern Matching,Flink SQL Syntax,SQL Syntax Reference", + "title":"Pattern Matching", + "githuburl":"" + }, + { + "uri":"dli_08_0109.html", + "product_code":"dli", + "code":"327", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"StreamingML", + "title":"StreamingML", + "githuburl":"" + }, + { + "uri":"dli_08_0110.html", + "product_code":"dli", + "code":"328", + "des":"Anomaly detection applies to various scenarios, including intrusion detection, financial fraud detection, sensor data monitoring, medical diagnosis, natural data detectio", + "doc_type":"sqlreference", + "kw":"Anomaly Detection,StreamingML,SQL Syntax Reference", + "title":"Anomaly Detection", + "githuburl":"" + }, + { + "uri":"dli_08_0111.html", + "product_code":"dli", + "code":"329", + "des":"Modeling and forecasting time series is a common task in many business verticals. Modeling is used to extract meaningful statistics and other characteristics of the data.", + "doc_type":"sqlreference", + "kw":"Time Series Forecasting,StreamingML,SQL Syntax Reference", + "title":"Time Series Forecasting", + "githuburl":"" + }, + { + "uri":"dli_08_0216.html", + "product_code":"dli", + "code":"330", + "des":"Clustering algorithms belong to unsupervised algorithms. K-Means, a clustering algorithm, partitions data points into related clusters by calculating the distance between", + "doc_type":"sqlreference", + "kw":"Real-Time Clustering,StreamingML,SQL Syntax Reference", + "title":"Real-Time Clustering", + "githuburl":"" + }, + { + "uri":"dli_08_0088.html", + "product_code":"dli", + "code":"331", + "des":"Deep learning has a wide range of applications in many industries, such as image classification, image recognition, and speech recognition. DLI provides several functions", + "doc_type":"sqlreference", + "kw":"Deep Learning Model Prediction,StreamingML,SQL Syntax Reference", + "title":"Deep Learning Model Prediction", + "githuburl":"" + }, + { + "uri":"dli_08_0125.html", + "product_code":"dli", + "code":"332", + "des":"Flink SQL reserves some strings as keywords. 
If you want to use the following character strings as field names, ensure that they are enclosed by back quotes, for example,", + "doc_type":"sqlreference", + "kw":"Reserved Keywords,Flink SQL Syntax,SQL Syntax Reference", + "title":"Reserved Keywords", + "githuburl":"" + }, + { + "uri":"dli_08_0001.html", + "product_code":"dli", + "code":"333", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Identifiers", + "title":"Identifiers", + "githuburl":"" + }, + { + "uri":"dli_08_0002.html", + "product_code":"dli", + "code":"334", + "des":"None.Aggregate function.", + "doc_type":"sqlreference", + "kw":"aggregate_func,Identifiers,SQL Syntax Reference", + "title":"aggregate_func", + "githuburl":"" + }, + { + "uri":"dli_08_0003.html", + "product_code":"dli", + "code":"335", + "des":"None.Alias, which must be STRING type. It can be assigned to a field, table, view, or subquery.", + "doc_type":"sqlreference", + "kw":"alias,Identifiers,SQL Syntax Reference", + "title":"alias", + "githuburl":"" + }, + { + "uri":"dli_08_0004.html", + "product_code":"dli", + "code":"336", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"attr_expr,Identifiers,SQL Syntax Reference", + "title":"attr_expr", + "githuburl":"" + }, + { + "uri":"dli_08_0005.html", + "product_code":"dli", + "code":"337", + "des":"None.List of attr_expr, which is separated by commas (,).", + "doc_type":"sqlreference", + "kw":"attr_expr_list,Identifiers,SQL Syntax Reference", + "title":"attr_expr_list", + "githuburl":"" + }, + { + "uri":"dli_08_0006.html", + "product_code":"dli", + "code":"338", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"attrs_value_set_expr,Identifiers,SQL Syntax Reference", + "title":"attrs_value_set_expr", + "githuburl":"" + }, + { + "uri":"dli_08_0007.html", + "product_code":"dli", + "code":"339", + "des":"None.Return a boolean expression.", + "doc_type":"sqlreference", + "kw":"boolean_expression,Identifiers,SQL Syntax Reference", + "title":"boolean_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0009.html", + "product_code":"dli", + "code":"340", + "des":"None.Formal parameter for function call. 
It is usually a field name, which is the same as col_name.", + "doc_type":"sqlreference", + "kw":"col,Identifiers,SQL Syntax Reference", + "title":"col", + "githuburl":"" + }, + { + "uri":"dli_08_0010.html", + "product_code":"dli", + "code":"341", + "des":"None.Column (field) description, which must be STRING type and cannot exceed 256 bytes.", + "doc_type":"sqlreference", + "kw":"col_comment,Identifiers,SQL Syntax Reference", + "title":"col_comment", + "githuburl":"" + }, + { + "uri":"dli_08_0011.html", + "product_code":"dli", + "code":"342", + "des":"None.Column name, which must be STRING type and cannot exceed 128 bytes.", + "doc_type":"sqlreference", + "kw":"col_name,Identifiers,SQL Syntax Reference", + "title":"col_name", + "githuburl":"" + }, + { + "uri":"dli_08_0012.html", + "product_code":"dli", + "code":"343", + "des":"None.Field list, which consists of one col_name or more. If there is more than one col_name, separate them by using a comma (,).", + "doc_type":"sqlreference", + "kw":"col_name_list,Identifiers,SQL Syntax Reference", + "title":"col_name_list", + "githuburl":"" + }, + { + "uri":"dli_08_0013.html", + "product_code":"dli", + "code":"344", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"condition,Identifiers,SQL Syntax Reference", + "title":"condition", + "githuburl":"" + }, + { + "uri":"dli_08_0014.html", + "product_code":"dli", + "code":"345", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"condition_list,Identifiers,SQL Syntax Reference", + "title":"condition_list", + "githuburl":"" + }, + { + "uri":"dli_08_0015.html", + "product_code":"dli", + "code":"346", + "des":"None.Common expression name.", + "doc_type":"sqlreference", + "kw":"cte_name,Identifiers,SQL Syntax Reference", + "title":"cte_name", + "githuburl":"" + }, + { + "uri":"dli_08_0016.html", + "product_code":"dli", + "code":"347", + "des":"None.Data type. 
Currently, only the primitive data types are supported.", + "doc_type":"sqlreference", + "kw":"data_type,Identifiers,SQL Syntax Reference", + "title":"data_type", + "githuburl":"" + }, + { + "uri":"dli_08_0017.html", + "product_code":"dli", + "code":"348", + "des":"None.Database description, which must be STRING type and cannot exceed 256 characters.", + "doc_type":"sqlreference", + "kw":"db_comment,Identifiers,SQL Syntax Reference", + "title":"db_comment", + "githuburl":"" + }, + { + "uri":"dli_08_0018.html", + "product_code":"dli", + "code":"349", + "des":"None.Database name, which must be STRING type and cannot exceed 128 bytes.", + "doc_type":"sqlreference", + "kw":"db_name,Identifiers,SQL Syntax Reference", + "title":"db_name", + "githuburl":"" + }, + { + "uri":"dli_08_0019.html", + "product_code":"dli", + "code":"350", + "des":"None.Returned result for the ELSE clause of the CASE WHEN statement.", + "doc_type":"sqlreference", + "kw":"else_result_expression,Identifiers,SQL Syntax Reference", + "title":"else_result_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0020.html", + "product_code":"dli", + "code":"351", + "des":"| AVRO| CSV| JSON| ORC| PARQUETCurrently, the preceding formats are supported.Both USING and STORED AS can be used for specifying the data format. You can specify the pre", + "doc_type":"sqlreference", + "kw":"file_format,Identifiers,SQL Syntax Reference", + "title":"file_format", + "githuburl":"" + }, + { + "uri":"dli_08_0021.html", + "product_code":"dli", + "code":"352", + "des":"None.File path, which is the OBS path", + "doc_type":"sqlreference", + "kw":"file_path,Identifiers,SQL Syntax Reference", + "title":"file_path", + "githuburl":"" + }, + { + "uri":"dli_08_0022.html", + "product_code":"dli", + "code":"353", + "des":"None.Function name, which must be STRING type.", + "doc_type":"sqlreference", + "kw":"function_name,Identifiers,SQL Syntax Reference", + "title":"function_name", + "githuburl":"" + }, + { + "uri":"dli_08_0023.html", + "product_code":"dli", + "code":"354", + "des":"None.Expression that includes GROUP BY.", + "doc_type":"sqlreference", + "kw":"groupby_expression,Identifiers,SQL Syntax Reference", + "title":"groupby_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0024.html", + "product_code":"dli", + "code":"355", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"having_condition,Identifiers,SQL Syntax Reference", + "title":"having_condition", + "githuburl":"" + }, + { + "uri":"dli_08_0026.html", + "product_code":"dli", + "code":"356", + "des":"None.Input expression of the CASE WHEN statement.", + "doc_type":"sqlreference", + "kw":"input_expression,Identifiers,SQL Syntax Reference", + "title":"input_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0029.html", + "product_code":"dli", + "code":"357", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"join_condition,Identifiers,SQL Syntax Reference", + "title":"join_condition", + "githuburl":"" + }, + { + "uri":"dli_08_0030.html", + "product_code":"dli", + "code":"358", + "des":"None.The condition of an inequality join.", + "doc_type":"sqlreference", + "kw":"non_equi_join_condition,Identifiers,SQL Syntax Reference", + "title":"non_equi_join_condition", + "githuburl":"" + }, + { + "uri":"dli_08_0031.html", + "product_code":"dli", + "code":"359", + "des":"None.Maximum number of output lines specified by LIMIT, which must be INT type.", + "doc_type":"sqlreference", + "kw":"number,Identifiers,SQL Syntax Reference", + "title":"number", + "githuburl":"" + }, + { + "uri":"dli_08_0034.html", + "product_code":"dli", + "code":"360", + "des":"None.Partition column name, that is, partition field name, which must be STRING type.", + "doc_type":"sqlreference", + "kw":"partition_col_name,Identifiers,SQL Syntax Reference", + "title":"partition_col_name", + "githuburl":"" + }, + { + "uri":"dli_08_0035.html", + "product_code":"dli", + "code":"361", + "des":"None.Partition column value, that is, partition field value.", + "doc_type":"sqlreference", + "kw":"partition_col_value,Identifiers,SQL Syntax Reference", + "title":"partition_col_value", + "githuburl":"" + }, + { + "uri":"dli_08_0036.html", + "product_code":"dli", + "code":"362", + "des":"partition_specs : (partition_col_name = partition_col_value, partition_col_name = partition_col_value, ...);Table partition list, which is expressed by using key=value pa", + "doc_type":"sqlreference", + "kw":"partition_specs,Identifiers,SQL Syntax Reference", + "title":"partition_specs", + "githuburl":"" + }, + { + "uri":"dli_08_0037.html", + "product_code":"dli", + "code":"363", + "des":"None.Property name, which must be STRING type.", + "doc_type":"sqlreference", + "kw":"property_name,Identifiers,SQL Syntax Reference", + "title":"property_name", + "githuburl":"" + }, + { + "uri":"dli_08_0038.html", + "product_code":"dli", + "code":"364", + "des":"None.Property value, which must be STRING type.", + "doc_type":"sqlreference", + "kw":"property_value,Identifiers,SQL Syntax Reference", + "title":"property_value", + "githuburl":"" + }, + { + "uri":"dli_08_0039.html", + "product_code":"dli", + "code":"365", + "des":"None.Pattern matching string, which supports wildcard matching.", + "doc_type":"sqlreference", + "kw":"regex_expression,Identifiers,SQL Syntax Reference", + "title":"regex_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0040.html", + "product_code":"dli", + "code":"366", + "des":"None.Returned result for the THEN clause of the CASE WHEN statement.", + "doc_type":"sqlreference", + "kw":"result_expression,Identifiers,SQL Syntax Reference", + "title":"result_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0042.html", + "product_code":"dli", + "code":"367", + "des":"None.Query clause for the basic SELECT statement.", + "doc_type":"sqlreference", + "kw":"select_statement,Identifiers,SQL Syntax Reference", + "title":"select_statement", + "githuburl":"" + }, + { + "uri":"dli_08_0043.html", + "product_code":"dli", + "code":"368", + "des":"None.Separator, which can be customized by users, for example, comma (,), semicolon (;), and colon (:). 
It must be CHAR type.", + "doc_type":"sqlreference", + "kw":"separator,Identifiers,SQL Syntax Reference", + "title":"separator", + "githuburl":"" + }, + { + "uri":"dli_08_0045.html", + "product_code":"dli", + "code":"369", + "des":"None.SQL statement containing the common expression defined by cte_name.", + "doc_type":"sqlreference", + "kw":"sql_containing_cte_name,Identifiers,SQL Syntax Reference", + "title":"sql_containing_cte_name", + "githuburl":"" + }, + { + "uri":"dli_08_0046.html", + "product_code":"dli", + "code":"370", + "des":"None.Subquery.", + "doc_type":"sqlreference", + "kw":"sub_query,Identifiers,SQL Syntax Reference", + "title":"sub_query", + "githuburl":"" + }, + { + "uri":"dli_08_0047.html", + "product_code":"dli", + "code":"371", + "des":"None.Table description, which must be STRING type and cannot exceed 256 bytes.", + "doc_type":"sqlreference", + "kw":"table_comment,Identifiers,SQL Syntax Reference", + "title":"table_comment", + "githuburl":"" + }, + { + "uri":"dli_08_0048.html", + "product_code":"dli", + "code":"372", + "des":"None.Table name, which cannot exceed 128 bytes. The string type and \"$\" symbol are supported.", + "doc_type":"sqlreference", + "kw":"table_name,Identifiers,SQL Syntax Reference", + "title":"table_name", + "githuburl":"" + }, + { + "uri":"dli_08_0049.html", + "product_code":"dli", + "code":"373", + "des":"None.Table property list, which is expressed by using key=value pairs. key represents property_name, and value represents property_value. If there is more than one key=va", + "doc_type":"sqlreference", + "kw":"table_properties,Identifiers,SQL Syntax Reference", + "title":"table_properties", + "githuburl":"" + }, + { + "uri":"dli_08_0050.html", + "product_code":"dli", + "code":"374", + "des":"None.Table or view name, which must be STRING type. It can also be a subquery. If it is a subquery, an alias must also be provided.", + "doc_type":"sqlreference", + "kw":"table_reference,Identifiers,SQL Syntax Reference", + "title":"table_reference", + "githuburl":"" + }, + { + "uri":"dli_08_0053.html", + "product_code":"dli", + "code":"375", + "des":"None.When expression of the CASE WHEN statement. It is used for matching with the input expression.", + "doc_type":"sqlreference", + "kw":"when_expression,Identifiers,SQL Syntax Reference", + "title":"when_expression", + "githuburl":"" + }, + { + "uri":"dli_08_0054.html", + "product_code":"dli", + "code":"376", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"where_condition,Identifiers,SQL Syntax Reference", + "title":"where_condition", + "githuburl":"" + }, + { + "uri":"dli_08_0055.html", + "product_code":"dli", + "code":"377", + "des":"None.Analysis window function. For details, see Window Functions.", + "doc_type":"sqlreference", + "kw":"window_function,Identifiers,SQL Syntax Reference", + "title":"window_function", + "githuburl":"" + }, + { + "uri":"dli_08_0060.html", + "product_code":"dli", + "code":"378", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Operators", + "title":"Operators", + "githuburl":"" + }, + { + "uri":"dli_08_0061.html", + "product_code":"dli", + "code":"379", + "des":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relational operators are binary operators. Two compared data ", + "doc_type":"sqlreference", + "kw":"Relational Operators,Operators,SQL Syntax Reference", + "title":"Relational Operators", + "githuburl":"" + }, + { + "uri":"dli_08_0062.html", + "product_code":"dli", + "code":"380", + "des":"Arithmetic operators include binary operators and unary operators. For both types of operators, the returned results are numbers. Table 1 lists the arithmetic operators s", + "doc_type":"sqlreference", + "kw":"Arithmetic Operators,Operators,SQL Syntax Reference", + "title":"Arithmetic Operators", + "githuburl":"" + }, + { + "uri":"dli_08_0063.html", + "product_code":"dli", + "code":"381", + "des":"Common logical operators include AND, OR, and NOT. The operation result can be TRUE, FALSE, or NULL (which means unknown). The priorities of the operators are as follows:", + "doc_type":"sqlreference", + "kw":"Logical Operators,Operators,SQL Syntax Reference", + "title":"Logical Operators", + "githuburl":"" + }, + { + "uri":"dli_08_00005.html", + "product_code":"dli", + "code":"382", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"sqlreference", + "kw":"Change History,SQL Syntax Reference", + "title":"Change History", + "githuburl":"" + } +] \ No newline at end of file diff --git a/docs/dli/sqlreference/CLASS.TXT.json b/docs/dli/sqlreference/CLASS.TXT.json new file mode 100644 index 00000000..2925ad5d --- /dev/null +++ b/docs/dli/sqlreference/CLASS.TXT.json @@ -0,0 +1,3440 @@ +[ + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Spark SQL Syntax Reference", + "uri":"dli_08_0221.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"1" + }, + { + "desc":"This section describes the common configuration items of the SQL syntax for DLI batch jobs.", + "product_code":"dli", + "title":"Common Configuration Items of Batch SQL Jobs", + "uri":"dli_08_0266.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"2" + }, + { + "desc":"This section describes the Spark SQL syntax list provided by DLI. For details about the parameters and examples, see the syntax description.", + "product_code":"dli", + "title":"SQL Syntax Overview of Batch Jobs", + "uri":"dli_08_0219.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"3" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Databases", + "uri":"dli_08_0070.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"4" + }, + { + "desc":"This statement is used to create a database.IF NOT EXISTS: Prevents system errors if the database to be created exists.COMMENT: Describes a database.DBPROPERTIES: Specifi", + "product_code":"dli", + "title":"Creating a Database", + "uri":"dli_08_0071.html", + "doc_type":"sqlreference", + "p_code":"4", + "code":"5" + }, + { + "desc":"This statement is used to delete a database.IF EXISTS: Prevents system errors if the database to be deleted does not exist.DATABASE and SCHEMA can be used interchangeably", + "product_code":"dli", + "title":"Deleting a Database", + "uri":"dli_08_0072.html", + "doc_type":"sqlreference", + "p_code":"4", + "code":"6" + }, + { + "desc":"This syntax is used to view the information about a specified database, including the database name and database description.EXTENDED: Displays the database properties.If", + "product_code":"dli", + "title":"Viewing a Specified Database", + "uri":"dli_08_0073.html", + "doc_type":"sqlreference", + "p_code":"4", + "code":"7" + }, + { + "desc":"This syntax is used to query all current databases.NoneKeyword DATABASES is equivalent to SCHEMAS. You can use either of them in this statement.View all the current datab", + "product_code":"dli", + "title":"Viewing All Databases", + "uri":"dli_08_0074.html", + "doc_type":"sqlreference", + "p_code":"4", + "code":"8" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating an OBS Table", + "uri":"dli_08_0223.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"9" + }, + { + "desc":"Create an OBS table using the DataSource syntax.The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of suppor", + "product_code":"dli", + "title":"Creating an OBS Table Using the DataSource Syntax", + "uri":"dli_08_0076.html", + "doc_type":"sqlreference", + "p_code":"9", + "code":"10" + }, + { + "desc":"This statement is used to create an OBS table using the Hive syntax. The main differences between the DataSource and the Hive syntax lie in the supported data formats and", + "product_code":"dli", + "title":"Creating an OBS Table Using the Hive Syntax", + "uri":"dli_08_0077.html", + "doc_type":"sqlreference", + "p_code":"9", + "code":"11" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a DLI Table", + "uri":"dli_08_0224.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"12" + }, + { + "desc":"This DataSource syntax can be used to create a DLI table. 
The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number", + "product_code":"dli", + "title":"Creating a DLI Table Using the DataSource Syntax", + "uri":"dli_08_0098.html", + "doc_type":"sqlreference", + "p_code":"12", + "code":"13" + }, + { + "desc":"This Hive syntax is used to create a DLI table. The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of suppor", + "product_code":"dli", + "title":"Creating a DLI Table Using the Hive Syntax", + "uri":"dli_08_0204.html", + "doc_type":"sqlreference", + "p_code":"12", + "code":"14" + }, + { + "desc":"This statement is used to delete tables.If the table is stored in OBS, only the metadata is deleted. The data stored on OBS is not deleted.If the table is stored in DLI, ", + "product_code":"dli", + "title":"Deleting a Table", + "uri":"dli_08_0087.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"15" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Viewing Tables", + "uri":"dli_08_0089.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"16" + }, + { + "desc":"This statement is used to view all tables and views in the current database.FROM/IN: followed by the name of a database whose tables and views will be displayed.NoneCreat", + "product_code":"dli", + "title":"Viewing All Tables", + "uri":"dli_08_0090.html", + "doc_type":"sqlreference", + "p_code":"16", + "code":"17" + }, + { + "desc":"This statement is used to show the statements for creating a table.CREATE TABLE: statement for creating a tableThe table specified in this statement must exist. Otherwise", + "product_code":"dli", + "title":"Viewing Table Creation Statements", + "uri":"dli_08_0091.html", + "doc_type":"sqlreference", + "p_code":"16", + "code":"18" + }, + { + "desc":"Check the properties of a table.TBLPROPERTIES: This statement allows you to add a key/value property to a table.property_name is case sensitive. You cannot specify multip", + "product_code":"dli", + "title":"Viewing Table Properties", + "uri":"dli_08_0092.html", + "doc_type":"sqlreference", + "p_code":"16", + "code":"19" + }, + { + "desc":"This statement is used to query all columns in a specified table.COLUMNS: columns in the current tableFROM/IN: followed by the name of a database whose tables and views w", + "product_code":"dli", + "title":"Viewing All Columns in a Specified Table", + "uri":"dli_08_0093.html", + "doc_type":"sqlreference", + "p_code":"16", + "code":"20" + }, + { + "desc":"This statement is used to view all partitions in a specified table.PARTITIONS: partitions in a specified tablePARTITION: a specified partitionThe table specified in this ", + "product_code":"dli", + "title":"Viewing All Partitions in a Specified Table", + "uri":"dli_08_0094.html", + "doc_type":"sqlreference", + "p_code":"16", + "code":"21" + }, + { + "desc":"This statement is used to view the table statistics. 
The names and data types of all columns in a specified table will be returned.EXTENDED: displays all metadata of the ", + "product_code":"dli", + "title":"Viewing Table Statistics", + "uri":"dli_08_0105.html", + "doc_type":"sqlreference", + "p_code":"16", + "code":"22" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Modifying a Table", + "uri":"dli_08_0262.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"23" + }, + { + "desc":"This statement is used to add one or more new columns to a table.ADD COLUMNS: columns to addCOMMENT: column descriptionDo not run this SQL statement concurrently. Otherwi", + "product_code":"dli", + "title":"Adding a Column", + "uri":"dli_08_0263.html", + "doc_type":"sqlreference", + "p_code":"23", + "code":"24" + }, + { + "desc":"DLI controls multiple versions of backup data for restoration. After the multiversion function is enabled, the system automatically backs up table data when you delete or", + "product_code":"dli", + "title":"Enabling or Disabling Multiversion Backup", + "uri":"dli_08_0354.html", + "doc_type":"sqlreference", + "p_code":"23", + "code":"25" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Syntax for Partitioning a Table", + "uri":"dli_08_0080.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"26" + }, + { + "desc":"After an OBS partitioned table is created, no partition information is generated for the table. Partition information is generated only after you:Insert data to the OBS p", + "product_code":"dli", + "title":"Adding Partition Data (Only OBS Tables Supported)", + "uri":"dli_08_0081.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"27" + }, + { + "desc":"This statement is used to rename partitions.PARTITION: a specified partitionRENAME: new name of the partitionThis statement is used for OBS table operations.The table and", + "product_code":"dli", + "title":"Renaming a Partition (Only OBS Tables Supported)", + "uri":"dli_08_0082.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"28" + }, + { + "desc":"Deletes one or more partitions from a partitioned table.The table in which partitions are to be deleted must exist. 
Otherwise, an error is reported.The to-be-deleted part", + "product_code":"dli", + "title":"Deleting a Partition", + "uri":"dli_08_0083.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"29" + }, + { + "desc":"This statement is used to delete one or more partitions based on specified conditions.This statement is used for OBS table operations only.The table in which partitions a", + "product_code":"dli", + "title":"Deleting Partitions by Specifying Filter Criteria (Only OBS Tables Supported)", + "uri":"dli_08_0343.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"30" + }, + { + "desc":"This statement is used to modify the positions of table partitions.PARTITION: a specified partitionLOCATION: path of the partitionFor a table partition whose position is ", + "product_code":"dli", + "title":"Altering the Partition Location of a Table (Only OBS Tables Supported)", + "uri":"dli_08_0084.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"31" + }, + { + "desc":"This statement is used to update the partition information about a table in the Metastore.OrPARTITIONS: partition informationSERDEPROPERTIES: Serde attributeThis statemen", + "product_code":"dli", + "title":"Updating Partitioned Table Data (Only OBS Tables Supported)", + "uri":"dli_08_0079.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"32" + }, + { + "desc":"Spark caches Parquet metadata to improve performance. If you update a Parquet table, the cached metadata is not updated. Spark SQL cannot find the newly inserted data and", + "product_code":"dli", + "title":"Updating Table Metadata with REFRESH TABLE", + "uri":"dli_08_0359.html", + "doc_type":"sqlreference", + "p_code":"26", + "code":"33" + }, + { + "desc":"The LOAD DATA function can be used to import data in CSV, Parquet, ORC, JSON, and Avro formats. The data is converted into the Parquet data format for storage.INPATH: pat", + "product_code":"dli", + "title":"Importing Data to the Table", + "uri":"dli_08_0100.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"34" + }, + { + "desc":"This statement is used to insert the SELECT query result or a certain data record into a table.Insert the SELECT query result into a table.INSERT INTO [TABLE] [db_name.]t", + "product_code":"dli", + "title":"Inserting Data", + "uri":"dli_08_0095.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"35" + }, + { + "desc":"This statement is used to delete data from the DLI or OBS table.Only data in the DLI or OBS table can be deleted.", + "product_code":"dli", + "title":"Clearing Data", + "uri":"dli_08_0217.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"36" + }, + { + "desc":"This statement is used to directly write query results to a specified directory. The query results can be stored in CSV, Parquet, ORC, JSON, or Avro format.USING: Specifi", + "product_code":"dli", + "title":"Exporting Search Results", + "uri":"dli_08_0205.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"37" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Backing Up and Restoring Data of Multiple Versions", + "uri":"dli_08_0349.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"38" + }, + { + "desc":"After multiversion is enabled, backup data is retained for seven days by default. You can change the retention period by setting system parameterdli.multi.version.retenti", + "product_code":"dli", + "title":"Setting the Retention Period for Multiversion Backup Data", + "uri":"dli_08_0350.html", + "doc_type":"sqlreference", + "p_code":"38", + "code":"39" + }, + { + "desc":"After the multiversion function is enabled, you can run the SHOW HISTORY command to view the backup data of a table. For details about the syntax for enabling or disablin", + "product_code":"dli", + "title":"Viewing Multiversion Backup Data", + "uri":"dli_08_0351.html", + "doc_type":"sqlreference", + "p_code":"38", + "code":"40" + }, + { + "desc":"After the multiversion function is enabled, you can run the RESTORE TABLE statement to restore a table or partition of a specified version. For details about the syntax f", + "product_code":"dli", + "title":"Restoring Multiversion Backup Data", + "uri":"dli_08_0352.html", + "doc_type":"sqlreference", + "p_code":"38", + "code":"41" + }, + { + "desc":"After the multiversion function is enabled, expired backup data will be directly deleted by the system when theinsert overwrite or truncate statement is executed. You can", + "product_code":"dli", + "title":"Configuring the Trash Bin for Expired Multiversion Data", + "uri":"dli_08_0353.html", + "doc_type":"sqlreference", + "p_code":"38", + "code":"42" + }, + { + "desc":"The retention period of multiversion backup data takes effect each time the insert overwrite or truncate statement is executed. If neither statement is executed for the t", + "product_code":"dli", + "title":"Deleting Multiversion Backup Data", + "uri":"dli_08_0355.html", + "doc_type":"sqlreference", + "p_code":"38", + "code":"43" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with an HBase Table", + "uri":"dli_08_0118.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"44" + }, + { + "desc":"This statement is used to create a DLI table and associate it with an existing HBase table.Before creating a DLI table and associating it with HBase, you need to create a", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with HBase", + "uri":"dli_08_0119.html", + "doc_type":"sqlreference", + "p_code":"44", + "code":"45" + }, + { + "desc":"This statement is used to insert data in a DLI table to the associated HBase table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field", + "product_code":"dli", + "title":"Inserting Data to an HBase Table", + "uri":"dli_08_0120.html", + "doc_type":"sqlreference", + "p_code":"44", + "code":"46" + }, + { + "desc":"This statement is used to query data in an HBase table.LIMIT is used to limit the query results. 
Only INT type is supported by the number parameter.The table to be querie", + "product_code":"dli", + "title":"Querying an HBase Table", + "uri":"dli_08_0121.html", + "doc_type":"sqlreference", + "p_code":"44", + "code":"47" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with an OpenTSDB Table", + "uri":"dli_08_0220.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"48" + }, + { + "desc":"Run the CREATE TABLE statement to create the DLI table and associate it with the existing metric in OpenTSDB. This syntax supports the OpenTSDB of CloudTable and MRS.Befo", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with OpenTSDB", + "uri":"dli_08_0122.html", + "doc_type":"sqlreference", + "p_code":"48", + "code":"49" + }, + { + "desc":"Run the INSERT INTO statement to insert the data in the DLI table to the associated OpenTSDB metric.If no metric exists on the OpenTSDB, a new metric is automatically cre", + "product_code":"dli", + "title":"Inserting Data to the OpenTSDB Table", + "uri":"dli_08_0123.html", + "doc_type":"sqlreference", + "p_code":"48", + "code":"50" + }, + { + "desc":"This SELECT command is used to query data in an OpenTSDB table.If no metric exists in OpenTSDB, an error will be reported when the corresponding DLI table is queried.If t", + "product_code":"dli", + "title":"Querying an OpenTSDB Table", + "uri":"dli_08_0124.html", + "doc_type":"sqlreference", + "p_code":"48", + "code":"51" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with a DWS table", + "uri":"dli_08_0192.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"52" + }, + { + "desc":"This statement is used to create a DLI table and associate it with an existing DWS table.Before creating a DLI table and associating it with DWS, you need to create a dat", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with DWS", + "uri":"dli_08_0193.html", + "doc_type":"sqlreference", + "p_code":"52", + "code":"53" + }, + { + "desc":"This statement is used to insert data in a DLI table to the associated DWS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "product_code":"dli", + "title":"Inserting Data to the DWS Table", + "uri":"dli_08_0194.html", + "doc_type":"sqlreference", + "p_code":"52", + "code":"54" + }, + { + "desc":"This statement is used to query data in a DWS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.The table to be queried m", + "product_code":"dli", + "title":"Querying the DWS Table", + "uri":"dli_08_0195.html", + "doc_type":"sqlreference", + "p_code":"52", + "code":"55" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with an RDS Table", + "uri":"dli_08_0196.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"56" + }, + { + "desc":"This statement is used to create a DLI table and associate it with an existing RDS table. This function supports access to the MySQL and PostgreSQL clusters of RDS.Before", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with RDS", + "uri":"dli_08_0197.html", + "doc_type":"sqlreference", + "p_code":"56", + "code":"57" + }, + { + "desc":"This statement is used to insert data in a DLI table to the associated RDS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "product_code":"dli", + "title":"Inserting Data to the RDS Table", + "uri":"dli_08_0198.html", + "doc_type":"sqlreference", + "p_code":"56", + "code":"58" + }, + { + "desc":"This statement is used to query data in an RDS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.The table to be queried ", + "product_code":"dli", + "title":"Querying the RDS Table", + "uri":"dli_08_0199.html", + "doc_type":"sqlreference", + "p_code":"56", + "code":"59" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with a CSS Table", + "uri":"dli_08_0200.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"60" + }, + { + "desc":"This statement is used to create a DLI table and associate it with an existing CSS table.Before creating a DLI table and associating it with CSS, you need to create a dat", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with CSS", + "uri":"dli_08_0201.html", + "doc_type":"sqlreference", + "p_code":"60", + "code":"61" + }, + { + "desc":"This statement is used to insert data in a DLI table to the associated CSS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "product_code":"dli", + "title":"Inserting Data to the CSS Table", + "uri":"dli_08_0202.html", + "doc_type":"sqlreference", + "p_code":"60", + "code":"62" + }, + { + "desc":"This statement is used to query data in a CSS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.The table to be queried m", + "product_code":"dli", + "title":"Querying the CSS Table", + "uri":"dli_08_0203.html", + "doc_type":"sqlreference", + "p_code":"60", + "code":"63" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with a DCS Table", + "uri":"dli_08_0225.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"64" + }, + { + "desc":"This statement is used to create a DLI table and associate it with an existing DCS key.Before creating a DLI table and associating it with DCS, you need to create a datas", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with DCS", + "uri":"dli_08_0226.html", + "doc_type":"sqlreference", + "p_code":"64", + "code":"65" + }, + { + "desc":"This statement is used to insert data in a DLI table to the DCS key.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2...\n [FROM DL", + "product_code":"dli", + "title":"Inserting Data to a DCS Table", + "uri":"dli_08_0227.html", + "doc_type":"sqlreference", + "p_code":"64", + "code":"66" + }, + { + "desc":"This statement is used to query data in a DCS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.Query data in the test_re", + "product_code":"dli", + "title":"Querying the DCS Table", + "uri":"dli_08_0228.html", + "doc_type":"sqlreference", + "p_code":"64", + "code":"67" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Datasource Connection with a DDS Table", + "uri":"dli_08_0229.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"68" + }, + { + "desc":"This statement is used to create a DLI table and associate it with an existing DDS collection.Before creating a DLI table and associating it with DDS, you need to create ", + "product_code":"dli", + "title":"Creating a DLI Table and Associating It with DDS", + "uri":"dli_08_0230.html", + "doc_type":"sqlreference", + "p_code":"68", + "code":"69" + }, + { + "desc":"This statement is used to insert data in a DLI table to the associated DDS table.Insert the SELECT query result into a table.INSERT INTO DLI_TABLE\n SELECT field1,field2.", + "product_code":"dli", + "title":"Inserting Data to the DDS Table", + "uri":"dli_08_0231.html", + "doc_type":"sqlreference", + "p_code":"68", + "code":"70" + }, + { + "desc":"This statement is used to query data in a DDS table.LIMIT is used to limit the query results. Only INT type is supported by the number parameter.If schema information is ", + "product_code":"dli", + "title":"Querying the DDS Table", + "uri":"dli_08_0232.html", + "doc_type":"sqlreference", + "p_code":"68", + "code":"71" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Views", + "uri":"dli_08_0129.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"72" + }, + { + "desc":"This statement is used to create views.CREATE VIEW: creates views based on the given select statement. 
The result of the select statement will not be written into the dis", + "product_code":"dli", + "title":"Creating a View", + "uri":"dli_08_0130.html", + "doc_type":"sqlreference", + "p_code":"72", + "code":"73" + }, + { + "desc":"This statement is used to delete views.DROP: Deletes the metadata of a specified view. Although views and tables have many common points, the DROP TABLE statement cannot ", + "product_code":"dli", + "title":"Deleting a View", + "uri":"dli_08_0131.html", + "doc_type":"sqlreference", + "p_code":"72", + "code":"74" + }, + { + "desc":"This statement returns the logical plan and physical execution plan for the SQL statement.EXTENDED: After this keyword is specified, the logical and physical plans are ou", + "product_code":"dli", + "title":"Viewing the Execution Plan", + "uri":"dli_08_0138.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"75" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Data Permissions Management", + "uri":"dli_08_0139.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"76" + }, + { + "desc":"Table 1 describes the SQL statement permission matrix in DLI in terms of permissions on databases, tables, and roles.For privilege granting or revocation on databases and", + "product_code":"dli", + "title":"Data Permissions List", + "uri":"dli_08_0140.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"77" + }, + { + "desc":"This statement is used to create a role in the current database or a specified database.Only users with the CREATE_ROLE permission on the database can create roles. For e", + "product_code":"dli", + "title":"Creating a Role", + "uri":"dli_08_0141.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"78" + }, + { + "desc":"This statement is used to delete a role in the current database or a specified database.NoneThe role_name to be deleted must exist in the current database or the specifie", + "product_code":"dli", + "title":"Deleting a Role", + "uri":"dli_08_0148.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"79" + }, + { + "desc":"This statement is used to bind a user with a role.NoneThe role_name and username must exist. 
Otherwise, an error will be reported.", + "product_code":"dli", + "title":"Binding a Role", + "uri":"dli_08_0142.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"80" + }, + { + "desc":"This statement is used to unbind the user from the role.Nonerole_name and user_name must exist and user_name has been bound to role_name.To unbind the user_name1 from rol", + "product_code":"dli", + "title":"Unbinding a Role", + "uri":"dli_08_0147.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"81" + }, + { + "desc":"This statement is used to display all roles or roles bound to the user_name in the current database.ALL: Displays all roles.Keywords ALL and user_name cannot coexist.To d", + "product_code":"dli", + "title":"Displaying a Role", + "uri":"dli_08_0143.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"82" + }, + { + "desc":"This statement is used to grant permissions to a user or role.ROLE: The subsequent role_name must be a role.USER: The subsequent user_name must be a user.The privilege mu", + "product_code":"dli", + "title":"Granting a Permission", + "uri":"dli_08_0144.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"83" + }, + { + "desc":"This statement is used to revoke permissions granted to a user or role.ROLE: The subsequent role_name must be a role.USER: The subsequent user_name must be a user.The pri", + "product_code":"dli", + "title":"Revoking a Permission", + "uri":"dli_08_0146.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"84" + }, + { + "desc":"This statement is used to show the permissions granted to a user or role in the resource.ROLE: The subsequent role_name must be a role.USER: The subsequent user_name must", + "product_code":"dli", + "title":"Displaying the Granted Permissions", + "uri":"dli_08_0145.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"85" + }, + { + "desc":"This statement is used to display the binding relationship between roles and a user in the current database.NoneThe ROLE variable must exist.", + "product_code":"dli", + "title":"Displaying the Binding Relationship Between All Roles and Users", + "uri":"dli_08_0149.html", + "doc_type":"sqlreference", + "p_code":"76", + "code":"86" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Data Types", + "uri":"dli_08_0056.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"87" + }, + { + "desc":"Data type is a basic attribute of data. It is used to distinguish different types of data. Different data types occupy different storage space and support different opera", + "product_code":"dli", + "title":"Overview", + "uri":"dli_08_0057.html", + "doc_type":"sqlreference", + "p_code":"87", + "code":"88" + }, + { + "desc":"Table 1 lists the primitive data types supported by DLI.VARCHAR and CHAR data is stored in STRING type on DLI. 
Therefore, the string that exceeds the specified length wil", + "product_code":"dli", + "title":"Primitive Data Types", + "uri":"dli_08_0058.html", + "doc_type":"sqlreference", + "p_code":"87", + "code":"89" + }, + { + "desc":"Spark SQL supports complex data types, as shown in Table 1.When a table containing fields of the complex data type is created, the storage format of this table cannot be ", + "product_code":"dli", + "title":"Complex Data Types", + "uri":"dli_08_0059.html", + "doc_type":"sqlreference", + "p_code":"87", + "code":"90" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"User-Defined Functions", + "uri":"dli_08_0282.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"91" + }, + { + "desc":"DLI allows you to create and use user-defined functions (UDF) and user-defined table functions (UDTF) in Spark jobs.If a function with the same name exists in the databas", + "product_code":"dli", + "title":"Creating a Function", + "uri":"dli_08_0283.html", + "doc_type":"sqlreference", + "p_code":"91", + "code":"92" + }, + { + "desc":"This statement is used to delete functions.TEMPORARY: Indicates whether the function to be deleted is a temporary function.IF EXISTS: Used when the function to be deleted", + "product_code":"dli", + "title":"Deleting a Function", + "uri":"dli_08_0284.html", + "doc_type":"sqlreference", + "p_code":"91", + "code":"93" + }, + { + "desc":"Displays information about a specified function.EXTENDED: displays extended usage information.The metadata (implementation class and usage) of an existing function is ret", + "product_code":"dli", + "title":"Displaying Function Details", + "uri":"dli_08_0281.html", + "doc_type":"sqlreference", + "p_code":"91", + "code":"94" + }, + { + "desc":"View all functions in the current project.In the preceding statement, regex is a regular expression. For details about its parameters, see Table 1.For details about other", + "product_code":"dli", + "title":"Displaying All Functions", + "uri":"dli_08_0285.html", + "doc_type":"sqlreference", + "p_code":"91", + "code":"95" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Built-in Functions", + "uri":"dli_08_0064.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"96" + }, + { + "desc":"Table 1 lists the mathematical functions supported in DLI.", + "product_code":"dli", + "title":"Mathematical Functions", + "uri":"dli_08_0065.html", + "doc_type":"sqlreference", + "p_code":"96", + "code":"97" + }, + { + "desc":"Table 1 lists the date functions supported in DLI.", + "product_code":"dli", + "title":"Date Functions", + "uri":"dli_08_0066.html", + "doc_type":"sqlreference", + "p_code":"96", + "code":"98" + }, + { + "desc":"Table 1 lists the string functions supported by DLI.", + "product_code":"dli", + "title":"String Functions", + "uri":"dli_08_0067.html", + "doc_type":"sqlreference", + "p_code":"96", + "code":"99" + }, + { + "desc":"An aggregate function performs a calculation operation on a set of input values and returns a value. 
For example, the COUNT function counts the number of rows retrieved b", + "product_code":"dli", + "title":"Aggregate Functions", + "uri":"dli_08_0068.html", + "doc_type":"sqlreference", + "p_code":"96", + "code":"100" + }, + { + "desc":"A window function performs a calculation operation on a set of values related to the current value. A window function can be an aggregate function used in the GROUP BY cl", + "product_code":"dli", + "title":"Window Functions", + "uri":"dli_08_0069.html", + "doc_type":"sqlreference", + "p_code":"96", + "code":"101" + }, + { + "desc":"This statement is a basic query statement and is used to return the query results.The table to be queried must exist. Otherwise, an error is reported.To filter the record", + "product_code":"dli", + "title":"Basic SELECT Statements", + "uri":"dli_08_0150.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"102" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Filtering", + "uri":"dli_08_0151.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"103" + }, + { + "desc":"This statement is used to filter the query results using the WHERE clause.All is used to return repeated rows. By default, all repeated rows are returned. It is followed ", + "product_code":"dli", + "title":"WHERE Filtering Clause", + "uri":"dli_08_0152.html", + "doc_type":"sqlreference", + "p_code":"103", + "code":"104" + }, + { + "desc":"This statement is used to filter the query results using the HAVING clause.All is used to return repeated rows. By default, all repeated rows are returned. It is followed", + "product_code":"dli", + "title":"HAVING Filtering Clause", + "uri":"dli_08_0153.html", + "doc_type":"sqlreference", + "p_code":"103", + "code":"105" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Sorting", + "uri":"dli_08_0154.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"106" + }, + { + "desc":"This statement is used to order the result set of a query by the specified field.ASC/DESC: ASC sorts from the lowest value to the highest value. DESC sorts from the highe", + "product_code":"dli", + "title":"ORDER BY", + "uri":"dli_08_0155.html", + "doc_type":"sqlreference", + "p_code":"106", + "code":"107" + }, + { + "desc":"This statement is used to achieve the partial sorting of tables according to fields.ASC/DESC: ASC sorts from the lowest value to the highest value. DESC sorts from the hi", + "product_code":"dli", + "title":"SORT BY", + "uri":"dli_08_0156.html", + "doc_type":"sqlreference", + "p_code":"106", + "code":"108" + }, + { + "desc":"This statement is used to bucket a table and sort the table within buckets.CLUSTER BY: Buckets are created based on specified fields. Single fields and multiple fields ar", + "product_code":"dli", + "title":"CLUSTER BY", + "uri":"dli_08_0157.html", + "doc_type":"sqlreference", + "p_code":"106", + "code":"109" + }, + { + "desc":"This statement is used to bucket a table according to the field.DISTRIBUTE BY: Buckets are created based on specified fields. 
A single field or multiple fields are suppor", + "product_code":"dli", + "title":"DISTRIBUTE BY", + "uri":"dli_08_0158.html", + "doc_type":"sqlreference", + "p_code":"106", + "code":"110" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Grouping", + "uri":"dli_08_0159.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"111" + }, + { + "desc":"This statement is used to group a table based on columns.Column-based GROUP BY can be categorized into single-column GROUP BY and multi-column GROUP BY.Single-column GROU", + "product_code":"dli", + "title":"Column-Based GROUP BY", + "uri":"dli_08_0160.html", + "doc_type":"sqlreference", + "p_code":"111", + "code":"112" + }, + { + "desc":"This statement is used to group a table according to expressions.The groupby_expression can contain a single field or multiple fields, and also can call aggregate functio", + "product_code":"dli", + "title":"Expression-Based GROUP BY", + "uri":"dli_08_0161.html", + "doc_type":"sqlreference", + "p_code":"111", + "code":"113" + }, + { + "desc":"This statement filters a table after grouping it using the HAVING clause.The groupby_expression can contain a single field or multiple fields, and can also call aggregate", + "product_code":"dli", + "title":"GROUP BY Using HAVING", + "uri":"dli_08_0162.html", + "doc_type":"sqlreference", + "p_code":"111", + "code":"114" + }, + { + "desc":"This statement is used to generate the aggregate row, super-aggregate row, and the total row. The statement can achieve multi-layer statistics from right to left and disp", + "product_code":"dli", + "title":"ROLLUP", + "uri":"dli_08_0163.html", + "doc_type":"sqlreference", + "p_code":"111", + "code":"115" + }, + { + "desc":"This statement is used to generate the cross-table row and achieve the cross-statistics of the GROUP BY field.GROUPING SETS is the expansion of GROUP BY. For example:SELE", + "product_code":"dli", + "title":"GROUPING SETS", + "uri":"dli_08_0164.html", + "doc_type":"sqlreference", + "p_code":"111", + "code":"116" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"JOIN", + "uri":"dli_08_0165.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"117" + }, + { + "desc":"This statement is used to join and return the rows that meet the JOIN conditions from two tables as the result set.JOIN/INNER JOIN: Only the records that meet the JOIN co", + "product_code":"dli", + "title":"INNER JOIN", + "uri":"dli_08_0166.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"118" + }, + { + "desc":"Join the left table with the right table and return all joined records of the left table. If no joined record is found, NULL will be returned.LEFT OUTER JOIN: Returns all", + "product_code":"dli", + "title":"LEFT OUTER JOIN", + "uri":"dli_08_0167.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"119" + }, + { + "desc":"Match the right table with the left table and return all matched records of the right table. 
If no matched record is found, NULL will be returned.RIGHT OUTER JOIN: Return", + "product_code":"dli", + "title":"RIGHT OUTER JOIN", + "uri":"dli_08_0168.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"120" + }, + { + "desc":"Join all records from the right table and the left table and return all joined records. If no joined record is found, NULL will be returned.FULL OUTER JOIN: Matches all r", + "product_code":"dli", + "title":"FULL OUTER JOIN", + "uri":"dli_08_0169.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"121" + }, + { + "desc":"This statement has the same function as INNER JOIN, that is, the result set that meets the WHERE condition is returned. However, IMPLICIT JOIN does not use the condition s", + "product_code":"dli", + "title":"IMPLICIT JOIN", + "uri":"dli_08_0170.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"122" + }, + { + "desc":"Cartesian JOIN joins each record of table A with all records in table B. For example, if there are m records in table A and n records in table B, m x n records will be ge", + "product_code":"dli", + "title":"Cartesian JOIN", + "uri":"dli_08_0171.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"123" + }, + { + "desc":"This statement is used to query the records that meet the JOIN condition from the left table.LEFT SEMI JOIN: Indicates that only the records from the left table are returned. LEF", + "product_code":"dli", + "title":"LEFT SEMI JOIN", + "uri":"dli_08_0172.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"124" + }, + { + "desc":"This statement is used to join multiple tables using unequal values and return the result set that meets the condition.The non_equi_join_condition is similar to join_condi", + "product_code":"dli", + "title":"NON-EQUIJOIN", + "uri":"dli_08_0173.html", + "doc_type":"sqlreference", + "p_code":"117", + "code":"125" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Subquery", + "uri":"dli_08_0174.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"126" + }, + { + "desc":"Subqueries are nested in the WHERE clause, and the subquery result is used as the filtering condition.All is used to return repeated rows. By default, all repeated rows a", + "product_code":"dli", + "title":"Subquery Nested by WHERE", + "uri":"dli_08_0175.html", + "doc_type":"sqlreference", + "p_code":"126", + "code":"127" + }, + { + "desc":"This statement is used to nest subquery by FROM and use the subquery results as the data source of the external SELECT statement.All is used to return repeated rows. By d", + "product_code":"dli", + "title":"Subquery Nested by FROM", + "uri":"dli_08_0176.html", + "doc_type":"sqlreference", + "p_code":"126", + "code":"128" + }, + { + "desc":"This statement is used to embed a subquery in the HAVING clause. The subquery result is used as a part of the HAVING clause.All is used to return repeated rows. By defaul", + "product_code":"dli", + "title":"Subquery Nested by HAVING", + "uri":"dli_08_0177.html", + "doc_type":"sqlreference", + "p_code":"126", + "code":"129" + }, + { + "desc":"This statement is used to nest queries in the subquery.All is used to return repeated rows. By default, all repeated rows are returned. 
It is followed by asterisks (*) on", + "product_code":"dli", + "title":"Multi-Layer Nested Subquery", + "uri":"dli_08_0178.html", + "doc_type":"sqlreference", + "p_code":"126", + "code":"130" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Alias", + "uri":"dli_08_0179.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"131" + }, + { + "desc":"This statement is used to specify an alias for a table or the subquery result.table_reference: Can be a table, view, or subquery.As: Is used to connect to table_reference", + "product_code":"dli", + "title":"AS for Table", + "uri":"dli_08_0180.html", + "doc_type":"sqlreference", + "p_code":"131", + "code":"132" + }, + { + "desc":"This statement is used to specify an alias for a column.alias: gives an alias for the attr_expr field.AS: Whether to add AS does not affect the result.The to-be-queried t", + "product_code":"dli", + "title":"AS for Column", + "uri":"dli_08_0181.html", + "doc_type":"sqlreference", + "p_code":"131", + "code":"133" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Set Operations", + "uri":"dli_08_0182.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"134" + }, + { + "desc":"This statement is used to return the union set of multiple query results.UNION: The set operation is used to join the head and tail of a table based on certain conditions", + "product_code":"dli", + "title":"UNION", + "uri":"dli_08_0183.html", + "doc_type":"sqlreference", + "p_code":"134", + "code":"135" + }, + { + "desc":"This statement is used to return the intersection set of multiple query results.INTERSECT returns the intersection of multiple query results. The number of columns return", + "product_code":"dli", + "title":"INTERSECT", + "uri":"dli_08_0184.html", + "doc_type":"sqlreference", + "p_code":"134", + "code":"136" + }, + { + "desc":"This statement is used to return the difference set of two query results.EXCEPT subtracts sets. A EXCEPT B removes the records that exist in both A and B from", + "product_code":"dli", + "title":"EXCEPT", + "uri":"dli_08_0185.html", + "doc_type":"sqlreference", + "p_code":"134", + "code":"137" + }, + { + "desc":"This statement is used to define the common table expression (CTE) using WITH...AS to simplify the query and make the result easier to read and maintain.cte_name: Name of", + "product_code":"dli", + "title":"WITH...AS", + "uri":"dli_08_0186.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"138" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"CASE...WHEN", + "uri":"dli_08_0187.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"139" + }, + { + "desc":"This statement is used to display result_expression according to the joined results of input_expression and when_expression.CASE: Subquery is supported in basic CASE stat", + "product_code":"dli", + "title":"Basic CASE Statement", + "uri":"dli_08_0188.html", + "doc_type":"sqlreference", + "p_code":"139", + "code":"140" + }, + { + "desc":"This statement is used to obtain the value of boolean_expression for each WHEN statement in a specified order. Then return the first result_expression with the value TRUE", + "product_code":"dli", + "title":"CASE Query Statement", + "uri":"dli_08_0189.html", + "doc_type":"sqlreference", + "p_code":"139", + "code":"141" + }, + { + "desc":"This statement is used together with the window function. The OVER statement is used to group data and sort the data within the group. The window function is used to gene", + "product_code":"dli", + "title":"OVER Clause", + "uri":"dli_08_0190.html", + "doc_type":"sqlreference", + "p_code":"1", + "code":"142" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Flink Opensource SQL 1.12 Syntax Reference", + "uri":"dli_08_0370.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"143" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Constraints and Definitions", + "uri":"dli_08_0371.html", + "doc_type":"sqlreference", + "p_code":"143", + "code":"144" + }, + { + "desc":"STRING, BOOLEAN, BYTES, DECIMAL, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE, INTERVAL, ARRAY, MULTISET, MAP,", + "product_code":"dli", + "title":"Supported Data Types", + "uri":"dli_08_0372.html", + "doc_type":"sqlreference", + "p_code":"144", + "code":"145" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Syntax", + "uri":"dli_08_0373.html", + "doc_type":"sqlreference", + "p_code":"144", + "code":"146" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Data Definition Language (DDL)", + "uri":"dli_08_0374.html", + "doc_type":"sqlreference", + "p_code":"146", + "code":"147" + }, + { + "desc":"Create a table with a specified name.COMPUTED COLUMNA computed column is a virtual column generated using column_name AS computed_column_expression. 
A computed column eva", + "product_code":"dli", + "title":"CREATE TABLE", + "uri":"dli_08_0375.html", + "doc_type":"sqlreference", + "p_code":"147", + "code":"148" + }, + { + "desc":"Create a view with multiple layers nested in it to simplify the development process.IF NOT EXISTSIf the view already exists, nothing happens.Create a view named viewName.", + "product_code":"dli", + "title":"CREATE VIEW", + "uri":"dli_08_0376.html", + "doc_type":"sqlreference", + "p_code":"147", + "code":"149" + }, + { + "desc":"Create a user-defined function.For details about how to create a user-defined function, see User-Defined Functions (UDFs).IF NOT EXISTSIf the function already exists, not", + "product_code":"dli", + "title":"CREATE FUNCTION", + "uri":"dli_08_0377.html", + "doc_type":"sqlreference", + "p_code":"147", + "code":"150" + }, + { + "desc":"SyntaxPrecautionsFlink SQL uses a lexical policy for identifier (table, attribute, function names) similar to Java:The case of identifiers is preserved whether or not the", + "product_code":"dli", + "title":"Data Manipulation Language (DML)", + "uri":"dli_08_0378.html", + "doc_type":"sqlreference", + "p_code":"146", + "code":"151" + }, + { + "desc":"This section describes the Flink open source SQL 1.12 syntax supported by DLI. For details about the parameters and examples, see the syntax description.", + "product_code":"dli", + "title":"Overview", + "uri":"dli_08_0379.html", + "doc_type":"sqlreference", + "p_code":"143", + "code":"152" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"DDL Syntax", + "uri":"dli_08_0380.html", + "doc_type":"sqlreference", + "p_code":"143", + "code":"153" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating Source Tables", + "uri":"dli_08_0381.html", + "doc_type":"sqlreference", + "p_code":"153", + "code":"154" + }, + { + "desc":"DataGen is used to generate random data for debugging and testing.NoneWhen you create a DataGen table, the table field type cannot be Array, Map, or Row. You can use COMP", + "product_code":"dli", + "title":"DataGen Source Table", + "uri":"dli_08_0382.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"155" + }, + { + "desc":"DLI reads data of Flink jobs from GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex types an", + "product_code":"dli", + "title":"GaussDB(DWS) Source Table", + "uri":"dli_08_0383.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"156" + }, + { + "desc":"Create a source stream to obtain data from HBase as input for jobs. 
HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excell", + "product_code":"dli", + "title":"HBase Source Table", + "uri":"dli_08_0384.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"157" + }, + { + "desc":"The JDBC connector is Flink's built-in connector to read data from a database.An enhanced datasource connection with the instances has been established, so that you can", + "product_code":"dli", + "title":"JDBC Source Table", + "uri":"dli_08_0385.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"158" + }, + { + "desc":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "product_code":"dli", + "title":"Kafka Source Table", + "uri":"dli_08_0386.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"159" + }, + { + "desc":"The MySQL CDC source table, that is, the MySQL streaming source table, reads all historical data in the database first and then smoothly switches data read to the Binlog ", + "product_code":"dli", + "title":"MySQL CDC Source Table", + "uri":"dli_08_0387.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"160" + }, + { + "desc":"The Postgres CDC source table, that is, Postgres streaming source table, is used to read the full snapshot data and changed data of the PostgreSQL database in sequence. T", + "product_code":"dli", + "title":"Postgres CDC Source Table", + "uri":"dli_08_0388.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"161" + }, + { + "desc":"Create a source stream to obtain data from Redis as input for jobs.An enhanced datasource connection has been created for DLI to connect to the Redis database, so that yo", + "product_code":"dli", + "title":"Redis Source Table", + "uri":"dli_08_0389.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"162" + }, + { + "desc":"Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provi", + "product_code":"dli", + "title":"Upsert Kafka Source Table", + "uri":"dli_08_0390.html", + "doc_type":"sqlreference", + "p_code":"154", + "code":"163" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating Result Tables", + "uri":"dli_08_0391.html", + "doc_type":"sqlreference", + "p_code":"153", + "code":"164" + }, + { + "desc":"The BlackHole connector allows for swallowing all input records. It is designed for high-performance testing and UDF output. It is not a substantive sink. The BlackHole r", + "product_code":"dli", + "title":"BlackHole Result Table", + "uri":"dli_08_0392.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"165" + }, + { + "desc":"DLI can output Flink job data to the ClickHouse database. ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and prov", + "product_code":"dli", + "title":"ClickHouse Result Table", + "uri":"dli_08_0393.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"166" + }, + { + "desc":"DLI outputs the Flink job output data to GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. 
The PostgreSQL database can store data of more complex t", + "product_code":"dli", + "title":"GaussDB(DWS) Result Table", + "uri":"dli_08_0394.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"167" + }, + { + "desc":"DLI outputs Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides th", + "product_code":"dli", + "title":"Elasticsearch Result Table", + "uri":"dli_08_0395.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"168" + }, + { + "desc":"DLI outputs the job data to HBase. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scal", + "product_code":"dli", + "title":"HBase Result Table", + "uri":"dli_08_0396.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"169" + }, + { + "desc":"DLI outputs the Flink job output data to RDS through the JDBC result table.An enhanced datasource connection with the instances has been established, so that you can conf", + "product_code":"dli", + "title":"JDBC Result Table", + "uri":"dli_08_0397.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"170" + }, + { + "desc":"DLI outputs the Flink job output data to Kafka through the Kafka result table.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subs", + "product_code":"dli", + "title":"Kafka Result Table", + "uri":"dli_08_0398.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"171" + }, + { + "desc":"The Print connector is used to print output data to the error file or TaskManager file, making it easier for you to view the result in code debugging.NoneThe Print result", + "product_code":"dli", + "title":"Print Result Table", + "uri":"dli_08_0399.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"172" + }, + { + "desc":"DLI outputs the Flink job output data to Redis. Redis is a key-value storage system that supports multiple types of data structures. It can be used in scenarios such as c", + "product_code":"dli", + "title":"Redis Result Table", + "uri":"dli_08_0400.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"173" + }, + { + "desc":"Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provi", + "product_code":"dli", + "title":"Upsert Kafka Result Table", + "uri":"dli_08_0401.html", + "doc_type":"sqlreference", + "p_code":"164", + "code":"174" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating Dimension Tables", + "uri":"dli_08_0402.html", + "doc_type":"sqlreference", + "p_code":"153", + "code":"175" + }, + { + "desc":"Create a GaussDB(DWS) table to connect to source streams for wide table generation.Ensure that you have created a GaussDB(DWS) cluster using your account.A DWS database t", + "product_code":"dli", + "title":"GaussDB(DWS) Dimension Table", + "uri":"dli_08_0403.html", + "doc_type":"sqlreference", + "p_code":"175", + "code":"176" + }, + { + "desc":"Create an HBase dimension table to connect to the source streams for wide table generation.An enhanced datasource connection has been created for DLI to connect to HBase, ", + "product_code":"dli", + "title":"HBase Dimension Table", + "uri":"dli_08_0404.html", + "doc_type":"sqlreference", + "p_code":"175", + "code":"177" + }, + { + "desc":"Create a JDBC dimension table to connect to the source stream.You have created a JDBC instance for your account.When you create a Flink OpenSource SQL job, set Flink Vers", + "product_code":"dli", + "title":"JDBC Dimension Table", + "uri":"dli_08_0405.html", + "doc_type":"sqlreference", + "p_code":"175", + "code":"178" + }, + { + "desc":"Create a Redis table to connect to source streams for wide table generation.An enhanced datasource connection with Redis has been established, so that you can configure s", + "product_code":"dli", + "title":"Redis Dimension Table", + "uri":"dli_08_0406.html", + "doc_type":"sqlreference", + "p_code":"175", + "code":"179" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Format", + "uri":"dli_08_0407.html", + "doc_type":"sqlreference", + "p_code":"153", + "code":"180" + }, + { + "desc":"Apache Avro is supported for you to read and write Avro data based on an Avro schema with Flink. The Avro schema is derived from the table schema.KafkaUpsert KafkaCurrent", + "product_code":"dli", + "title":"Avro", + "uri":"dli_08_0408.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"181" + }, + { + "desc":"Canal is a Changelog Data Capture (CDC) tool that can stream changes in real-time from MySQL into other systems. Canal provides a unified format schema for changelog and ", + "product_code":"dli", + "title":"Canal", + "uri":"dli_08_0409.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"182" + }, + { + "desc":"The Avro Schema Registry (avro-confluent) format allows you to read records that were serialized by the io.confluent.kafka.serializers.KafkaAvroSerializer and to write re", + "product_code":"dli", + "title":"Confluent Avro", + "uri":"dli_08_0410.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"183" + }, + { + "desc":"The CSV format allows you to read and write CSV data based on a CSV schema. Currently, the CSV schema is derived from table schema.KafkaUpsert KafkaUse Kafka to send data", + "product_code":"dli", + "title":"CSV", + "uri":"dli_08_0411.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"184" + }, + { + "desc":"Debezium is a Changelog Data Capture (CDC) tool that can stream changes in real-time from other databases into Kafka. 
Debezium provides a unified format schema for change", + "product_code":"dli", + "title":"Debezium", + "uri":"dli_08_0412.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"185" + }, + { + "desc":"The JSON format allows you to read and write JSON data based on a JSON schema. Currently, the JSON schema is derived from table schema.KafkaUpsert KafkaElasticsearchIn th", + "product_code":"dli", + "title":"JSON", + "uri":"dli_08_0413.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"186" + }, + { + "desc":"Flink supports interpreting Maxwell JSON messages as INSERT/UPDATE/DELETE messages into the Flink SQL system. This is useful in many cases to leverage this feature, such as:Sy", + "product_code":"dli", + "title":"Maxwell", + "uri":"dli_08_0414.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"187" + }, + { + "desc":"The raw format allows you to read and write raw (byte based) values as a single column.Note: This format encodes null values as null of the byte[] type. This may have lim", + "product_code":"dli", + "title":"Raw", + "uri":"dli_08_0415.html", + "doc_type":"sqlreference", + "p_code":"180", + "code":"188" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"DML Syntax", + "uri":"dli_08_0416.html", + "doc_type":"sqlreference", + "p_code":"143", + "code":"189" + }, + { + "desc":"SyntaxDescriptionSELECT is used to select data from a table.ALL indicates that all results are returned.DISTINCT indicates that the duplicated results are removed.Precaut", + "product_code":"dli", + "title":"SELECT", + "uri":"dli_08_0417.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"190" + }, + { + "desc":"SyntaxDescriptionUNION is used to return the union set of multiple query results.INTERSECT is used to return the intersection of multiple query results.EXCEPT is used to ", + "product_code":"dli", + "title":"Set Operations", + "uri":"dli_08_0418.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"191" + }, + { + "desc":"DescriptionGroup Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:Array functionsArray functionsGroup", + "product_code":"dli", + "title":"Window", + "uri":"dli_08_0419.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"192" + }, + { + "desc":"SyntaxPrecautionsCurrently, only equi-joins are supported, for example, joins that have at least one conjunctive condition with an equality predicate. Arbitrary cross or ", + "product_code":"dli", + "title":"JOIN", + "uri":"dli_08_0420.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"193" + }, + { + "desc":"FunctionThis clause is used to sort data in ascending order on a time attribute.PrecautionsCurrently, only sorting by time attribute is supported.ExampleSort data in asce", + "product_code":"dli", + "title":"OrderBy & Limit", + "uri":"dli_08_0421.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"194" + }, + { + "desc":"Top-N queries ask for the N smallest or largest values ordered by columns. Both smallest and largest values sets are considered Top-N queries. 
Top-N queries are useful in", + "product_code":"dli", + "title":"Top-N", + "uri":"dli_08_0422.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"195" + }, + { + "desc":"Deduplication removes rows that duplicate over a set of columns, keeping only the first one or the last one.ROW_NUMBER(): Assigns a unique, sequential number to each row,", + "product_code":"dli", + "title":"Deduplication", + "uri":"dli_08_0423.html", + "doc_type":"sqlreference", + "p_code":"189", + "code":"196" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Functions", + "uri":"dli_08_0424.html", + "doc_type":"sqlreference", + "p_code":"143", + "code":"197" + }, + { + "desc":"DLI supports the following three types of user-defined functions (UDFs):Regular UDF: takes in one or more input parameters and returns a single result.User-defined table-", + "product_code":"dli", + "title":"User-Defined Functions (UDFs)", + "uri":"dli_08_0425.html", + "doc_type":"sqlreference", + "p_code":"197", + "code":"198" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Built-In Functions", + "uri":"dli_08_0426.html", + "doc_type":"sqlreference", + "p_code":"197", + "code":"199" + }, + { + "desc":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relationship operators are binary operators. Two compared data ", + "product_code":"dli", + "title":"Mathematical Operation Functions", + "uri":"dli_08_0427.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"200" + }, + { + "desc":"SyntaxExampleTest input data.Test the data source kafka. The message content is as follows:{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}}\n{name:", + "product_code":"dli", + "title":"String Functions", + "uri":"dli_08_0428.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"201" + }, + { + "desc":"Table 1 lists the time functions supported by Flink OpenSource SQL.FunctionReturns a SQL date parsed from string in form of yyyy-MM-dd.Returns a SQL date parsed from stri", + "product_code":"dli", + "title":"Temporal Functions", + "uri":"dli_08_0429.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"202" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Conditional Functions", + "uri":"dli_08_0430.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"203" + }, + { + "desc":"This function is used to forcibly convert types.If the input is NULL, NULL is returned.The following example converts the amount value to an integer.Flink jobs do not sup", + "product_code":"dli", + "title":"Type Conversion Functions", + "uri":"dli_08_0431.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"204" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Collection Functions", + "uri":"dli_08_0432.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"205" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Value Construction Functions", + "uri":"dli_08_0433.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"206" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Value Access Functions", + "uri":"dli_08_0434.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"207" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Hash Functions", + "uri":"dli_08_0435.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"208" + }, + { + "desc":"An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved b", + "product_code":"dli", + "title":"Aggregate Functions", + "uri":"dli_08_0436.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"209" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Table-Valued Functions", + "uri":"dli_08_0437.html", + "doc_type":"sqlreference", + "p_code":"199", + "code":"210" + }, + { + "desc":"The string_split function splits a target string into substrings based on the specified separator and returns a substring list.Create a Flink OpenSource SQL job by referr", + "product_code":"dli", + "title":"string_split", + "uri":"dli_08_0438.html", + "doc_type":"sqlreference", + "p_code":"210", + "code":"211" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Flink Opensource SQL 1.10 Syntax Reference", + "uri":"dli_08_0289.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"212" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Constraints and Definitions", + "uri":"dli_08_0290.html", + "doc_type":"sqlreference", + "p_code":"212", + "code":"213" + }, + { + "desc":"STRING, BOOLEAN, BYTES, DECIMAL, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE, INTERVAL, ARRAY, MULTISET, MAP,", + "product_code":"dli", + "title":"Supported Data Types", + "uri":"dli_08_0291.html", + "doc_type":"sqlreference", + "p_code":"213", + "code":"214" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Syntax Definition", + "uri":"dli_08_0292.html", + "doc_type":"sqlreference", + "p_code":"213", + "code":"215" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Data Definition Language (DDL)", + "uri":"dli_08_0293.html", + "doc_type":"sqlreference", + "p_code":"215", + "code":"216" + }, + { + "desc":"This clause is used to create a table with a specified name.COMPUTED COLUMNA computed column is a virtual column generated using column_name AS computed_column_expression", + "product_code":"dli", + "title":"CREATE TABLE", + "uri":"dli_08_0294.html", + "doc_type":"sqlreference", + "p_code":"216", + "code":"217" + }, + { + "desc":"Create a view with multiple layers nested in it to simplify the development process.IF NOT EXISTSIf the view already exists, nothing happens.Create a view named viewName.", + "product_code":"dli", + "title":"CREATE VIEW", + "uri":"dli_08_0295.html", + "doc_type":"sqlreference", + "p_code":"216", + "code":"218" + }, + { + "desc":"Create a user-defined function.IF NOT EXISTSIf the function already exists, nothing happens.LANGUAGE JAVA|SCALALanguage tag is used to instruct Flink runtime how to execu", + "product_code":"dli", + "title":"CREATE FUNCTION", + "uri":"dli_08_0296.html", + "doc_type":"sqlreference", + "p_code":"216", + "code":"219" + }, + { + "desc":"SyntaxPrecautionsFlink SQL uses a lexical policy for identifier (table, attribute, function names) similar to Java:The case of identifiers is preserved whether they are q", + "product_code":"dli", + "title":"Data Manipulation Language (DML)", + "uri":"dli_08_0297.html", + "doc_type":"sqlreference", + "p_code":"215", + "code":"220" + }, + { + "desc":"This section describes the Flink OpenSource SQL syntax supported by DLI. For details about the parameters and examples, see the syntax description.", + "product_code":"dli", + "title":"Flink OpenSource SQL 1.10 Syntax", + "uri":"dli_08_0298.html", + "doc_type":"sqlreference", + "p_code":"212", + "code":"221" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Data Definition Language (DDL)", + "uri":"dli_08_0299.html", + "doc_type":"sqlreference", + "p_code":"212", + "code":"222" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Source Table", + "uri":"dli_08_0300.html", + "doc_type":"sqlreference", + "p_code":"222", + "code":"223" + }, + { + "desc":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "product_code":"dli", + "title":"Kafka Source Table", + "uri":"dli_08_0301.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"224" + }, + { + "desc":"Create a source stream to read data from DIS. DIS accesses user data and Flink job reads data from the DIS stream as input data for jobs. 
Flink jobs can quickly remove da", + "product_code":"dli", + "title":"DIS Source Table", + "uri":"dli_08_0302.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"225" + }, + { + "desc":"The JDBC connector is Flink's built-in connector to read data from a database.An enhanced datasource connection with the database has been established, so that you can ", + "product_code":"dli", + "title":"JDBC Source Table", + "uri":"dli_08_0303.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"226" + }, + { + "desc":"DLI reads data of Flink jobs from GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex types an", + "product_code":"dli", + "title":"GaussDB(DWS) Source Table", + "uri":"dli_08_0304.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"227" + }, + { + "desc":"Create a source stream to obtain data from Redis as input for jobs.An enhanced datasource connection with Redis has been established, so that you can configure security g", + "product_code":"dli", + "title":"Redis Source Table", + "uri":"dli_08_0305.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"228" + }, + { + "desc":"Create a source stream to obtain data from HBase as input for jobs. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excell", + "product_code":"dli", + "title":"HBase Source Table", + "uri":"dli_08_0306.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"229" + }, + { + "desc":"You can call APIs to obtain data from the cloud ecosystem or an open source ecosystem and use the obtained data as input of Flink jobs.The customized source class needs t", + "product_code":"dli", + "title":"userDefined Source Table", + "uri":"dli_08_0358.html", + "doc_type":"sqlreference", + "p_code":"223", + "code":"230" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Result Table", + "uri":"dli_08_0307.html", + "doc_type":"sqlreference", + "p_code":"222", + "code":"231" + }, + { + "desc":"DLI exports Flink job data to ClickHouse result tables.ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and provide", + "product_code":"dli", + "title":"ClickHouse Result Table", + "uri":"dli_08_0344.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"232" + }, + { + "desc":"DLI exports the output data of the Flink job to Kafka.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It deli", + "product_code":"dli", + "title":"Kafka Result Table", + "uri":"dli_08_0308.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"233" + }, + { + "desc":"DLI exports the output data of the Flink job to Kafka in upsert mode.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription ", + "product_code":"dli", + "title":"Upsert Kafka Result Table", + "uri":"dli_08_0309.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"234" + }, + { + "desc":"DLI writes the Flink job output data into DIS. 
The data is filtered and imported to the DIS stream for future processing.DIS addresses the challenge of transmitting data ", + "product_code":"dli", + "title":"DIS Result Table", + "uri":"dli_08_0310.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"235" + }, + { + "desc":"DLI exports the output data of the Flink job to RDS.An enhanced datasource connection with the database has been established, so that you can configure security group rul", + "product_code":"dli", + "title":"JDBC Result Table", + "uri":"dli_08_0311.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"236" + }, + { + "desc":"DLI outputs the Flink job output data to GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex t", + "product_code":"dli", + "title":"GaussDB(DWS) Result Table", + "uri":"dli_08_0312.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"237" + }, + { + "desc":"DLI exports the output data of the Flink job to Redis. Redis is a storage system that supports multiple types of data structures such as key-value. It can be used in scen", + "product_code":"dli", + "title":"Redis Result Table", + "uri":"dli_08_0313.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"238" + }, + { + "desc":"DLI exports Flink job output data to SMN.SMN provides reliable and flexible large-scale message notification services to DLI. It significantly simplifies system coupling ", + "product_code":"dli", + "title":"SMN Result Table", + "uri":"dli_08_0314.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"239" + }, + { + "desc":"DLI outputs the job data to HBase. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scal", + "product_code":"dli", + "title":"HBase Result Table", + "uri":"dli_08_0315.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"240" + }, + { + "desc":"DLI exports Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides th", + "product_code":"dli", + "title":"Elasticsearch Result Table", + "uri":"dli_08_0316.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"241" + }, + { + "desc":"OpenTSDB is a distributed, scalable time series database based on HBase. OpenTSDB is designed to collect monitoring information of a large-scale cluster and query data in", + "product_code":"dli", + "title":"OpenTSDB Result Table", + "uri":"dli_08_0348.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"242" + }, + { + "desc":"Write your Java code to insert the processed data into a specified database supported by your cloud service.Implement the custom sink class: The custom sink class is inhe", + "product_code":"dli", + "title":"User-defined Result Table", + "uri":"dli_08_0347.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"243" + }, + { + "desc":"The print connector exports your data output to the error file or the out file of TaskManager. It is mainly used for code debugging and output viewing.Read data from Kafk", + "product_code":"dli", + "title":"Print Result Table", + "uri":"dli_08_0345.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"244" + }, + { + "desc":"You can create a file system result table to export data to a file system such as HDFS or OBS. 
After the data is generated, a non-DLI table can be created directly accord", + "product_code":"dli", + "title":"File System Result Table", + "uri":"dli_08_0346.html", + "doc_type":"sqlreference", + "p_code":"231", + "code":"245" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Dimension Table", + "uri":"dli_08_0317.html", + "doc_type":"sqlreference", + "p_code":"222", + "code":"246" + }, + { + "desc":"Create a JDBC dimension table to connect to the source stream.You have created a JDBC instance for your account.The RDS table is used to connect to the source stream.CREA", + "product_code":"dli", + "title":"JDBC Dimension Table", + "uri":"dli_08_0318.html", + "doc_type":"sqlreference", + "p_code":"246", + "code":"247" + }, + { + "desc":"Create a GaussDB(DWS) dimension table to connect to the input stream.You have created a GaussDB(DWS) instance for your account.Use an RDS table to connect to the source s", + "product_code":"dli", + "title":"GaussDB(DWS) Dimension Table", + "uri":"dli_08_0319.html", + "doc_type":"sqlreference", + "p_code":"246", + "code":"248" + }, + { + "desc":"Create a Hbase dimension table to connect to the source stream.An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the", + "product_code":"dli", + "title":"HBase Dimension Table", + "uri":"dli_08_0320.html", + "doc_type":"sqlreference", + "p_code":"246", + "code":"249" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Data Manipulation Language (DML)", + "uri":"dli_08_0321.html", + "doc_type":"sqlreference", + "p_code":"212", + "code":"250" + }, + { + "desc":"SyntaxDescriptionThis clause is used to select data from a table.ALL indicates that all results are returned.DISTINCT indicates that the duplicated results are removed.Pr", + "product_code":"dli", + "title":"SELECT", + "uri":"dli_08_0322.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"251" + }, + { + "desc":"SyntaxDescriptionUNION is used to return the union set of multiple query results.INTERSECT is used to return the intersection of multiple query results.EXCEPT is used to ", + "product_code":"dli", + "title":"Set Operations", + "uri":"dli_08_0323.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"252" + }, + { + "desc":"DescriptionGroup Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:Array functionsArray functionsGroup", + "product_code":"dli", + "title":"Window", + "uri":"dli_08_0324.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"253" + }, + { + "desc":"SyntaxPrecautionsCurrently, only equi-joins are supported, for example, joins that have at least one conjunctive condition with an equality predicate. 
Arbitrary cross or ", + "product_code":"dli", + "title":"JOIN", + "uri":"dli_08_0325.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"254" + }, + { + "desc":"FunctionThis clause is used to sort data in ascending order on a time attribute.PrecautionsCurrently, only sorting by time attribute is supported.ExampleSort data in asce", + "product_code":"dli", + "title":"OrderBy & Limit", + "uri":"dli_08_0326.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"255" + }, + { + "desc":"Top-N queries ask for the N smallest or largest values ordered by columns. Both smallest and largest values sets are considered Top-N queries. Top-N queries are useful in", + "product_code":"dli", + "title":"Top-N", + "uri":"dli_08_0327.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"256" + }, + { + "desc":"Deduplication removes rows that duplicate over a set of columns, keeping only the first one or the last one.ROW_NUMBER(): Assigns a unique, sequential number to each row,", + "product_code":"dli", + "title":"Deduplication", + "uri":"dli_08_0328.html", + "doc_type":"sqlreference", + "p_code":"250", + "code":"257" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Functions", + "uri":"dli_08_0329.html", + "doc_type":"sqlreference", + "p_code":"212", + "code":"258" + }, + { + "desc":"DLI supports the following three types of user-defined functions (UDFs):Regular UDF: takes in one or more input parameters and returns a single result.User-defined table-", + "product_code":"dli", + "title":"User-Defined Functions", + "uri":"dli_08_0330.html", + "doc_type":"sqlreference", + "p_code":"258", + "code":"259" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Built-In Functions", + "uri":"dli_08_0331.html", + "doc_type":"sqlreference", + "p_code":"258", + "code":"260" + }, + { + "desc":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relationship operators are binary operators. Two compared data ", + "product_code":"dli", + "title":"Mathematical Operation Functions", + "uri":"dli_08_0332.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"261" + }, + { + "desc":"SyntaxExampleTest input data.Test the data source kafka. The message content is as follows:\"{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}}\"\n\"{na", + "product_code":"dli", + "title":"String Functions", + "uri":"dli_08_0333.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"262" + }, + { + "desc":"Table 1 lists the temporal functions supported by Flink OpenSource SQL.FunctionReturns a date parsed from string in form of yyyy-MM-dd.Returns a date parsed from string i", + "product_code":"dli", + "title":"Temporal Functions", + "uri":"dli_08_0334.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"263" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Conditional Functions", + "uri":"dli_08_0335.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"264" + }, + { + "desc":"This function is used to forcibly convert types.If the input is NULL, NULL is returned.The following example converts the amount value to an integer.Flink jobs do not sup", + "product_code":"dli", + "title":"Type Conversion Function", + "uri":"dli_08_0336.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"265" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Collection Functions", + "uri":"dli_08_0337.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"266" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Value Construction Functions", + "uri":"dli_08_0338.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"267" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Value Access Functions", + "uri":"dli_08_0339.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"268" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Hash Functions", + "uri":"dli_08_0340.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"269" + }, + { + "desc":"An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved b", + "product_code":"dli", + "title":"Aggregate Function", + "uri":"dli_08_0341.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"270" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Table-Valued Functions", + "uri":"dli_08_0342.html", + "doc_type":"sqlreference", + "p_code":"260", + "code":"271" + }, + { + "desc":"The split_cursor function can convert one row of records into multiple rows or convert one column of records into multiple columns. 
Table-valued functions can only be use", + "product_code":"dli", + "title":"split_cursor", + "uri":"dli_08_0357.html", + "doc_type":"sqlreference", + "p_code":"271", + "code":"272" + }, + { + "desc":"The string_split function splits a target string into substrings based on the specified separator and returns a substring list.Prepare test input data.Source table disSou", + "product_code":"dli", + "title":"string_split", + "uri":"dli_08_0356.html", + "doc_type":"sqlreference", + "p_code":"271", + "code":"273" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Historical Versions", + "uri":"dli_08_0450.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"274" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Flink SQL Syntax", + "uri":"dli_08_0233.html", + "doc_type":"sqlreference", + "p_code":"274", + "code":"275" + }, + { + "desc":"Currently, Flink SQL only supports the following operations: SELECT, FROM, WHERE, UNION, aggregation, window, JOIN between stream and table data, and JOIN between streams", + "product_code":"dli", + "title":"SQL Syntax Constraints and Definitions", + "uri":"dli_08_0075.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"276" + }, + { + "desc":"This section describes the Flink SQL syntax list provided by DLI. For details about the parameters and examples, see the syntax description.", + "product_code":"dli", + "title":"SQL Syntax Overview of Stream Jobs", + "uri":"dli_08_0275.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"277" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Source Stream", + "uri":"dli_08_0234.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"278" + }, + { + "desc":"Create a source stream to obtain data from HBase of CloudTable as input data of the job. HBase is a column-oriented distributed cloud storage system that features enhance", + "product_code":"dli", + "title":"CloudTable HBase Source Stream", + "uri":"dli_08_0237.html", + "doc_type":"sqlreference", + "p_code":"278", + "code":"279" + }, + { + "desc":"Create a source stream to read data from DIS. DIS accesses user data and Flink job reads data from the DIS stream as input data for jobs. Flink jobs can quickly remove da", + "product_code":"dli", + "title":"DIS Source Stream", + "uri":"dli_08_0235.html", + "doc_type":"sqlreference", + "p_code":"278", + "code":"280" + }, + { + "desc":"DMS (Distributed Message Service) is a message middleware service based on distributed, high-availability clustering technology. 
It provides reliable, scalable, fully man", + "product_code":"dli", + "title":"DMS Source Stream", + "uri":"dli_08_0270.html", + "doc_type":"sqlreference", + "p_code":"278", + "code":"281" + }, + { + "desc":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "product_code":"dli", + "title":"MRS Kafka Source Stream", + "uri":"dli_08_0238.html", + "doc_type":"sqlreference", + "p_code":"278", + "code":"282" + }, + { + "desc":"Create a source stream to obtain data from Kafka as input data for jobs.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscripti", + "product_code":"dli", + "title":"Open-Source Kafka Source Stream", + "uri":"dli_08_0239.html", + "doc_type":"sqlreference", + "p_code":"278", + "code":"283" + }, + { + "desc":"Create a source stream to obtain data from OBS. DLI reads data stored by users in OBS as input data for jobs. OBS applies to various scenarios, such as big data analysis,", + "product_code":"dli", + "title":"OBS Source Stream", + "uri":"dli_08_0236.html", + "doc_type":"sqlreference", + "p_code":"278", + "code":"284" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Sink Stream", + "uri":"dli_08_0240.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"285" + }, + { + "desc":"DLI exports the job output data to HBase of CloudTable. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performa", + "product_code":"dli", + "title":"CloudTable HBase Sink Stream", + "uri":"dli_08_0243.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"286" + }, + { + "desc":"DLI exports the job output data to OpenTSDB of CloudTable. OpenTSDB is a distributed, scalable time series database based on HBase. It stores time series data. Time serie", + "product_code":"dli", + "title":"CloudTable OpenTSDB Sink Stream", + "uri":"dli_08_0244.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"287" + }, + { + "desc":"DLI exports the output data of the Flink job to OpenTSDB of MRS.OpenTSDB has been installed in the MRS cluster.In this scenario, jobs must run on the dedicated queue of D", + "product_code":"dli", + "title":"MRS OpenTSDB Sink Stream", + "uri":"dli_08_0286.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"288" + }, + { + "desc":"DLI exports Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides th", + "product_code":"dli", + "title":"CSS Elasticsearch Sink Stream", + "uri":"dli_08_0252.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"289" + }, + { + "desc":"DLI exports the Flink job output data to Redis of DCS. Redis is a storage system that supports multiple types of data structures such as key-value. 
It can be used in scen", + "product_code":"dli", + "title":"DCS Sink Stream", + "uri":"dli_08_0253.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"290" + }, + { + "desc":"DLI outputs the job output data to Document Database Service (DDS).DDS is compatible with the MongoDB protocol and is secure, highly available, reliable, scalable, and ea", + "product_code":"dli", + "title":"DDS Sink Stream", + "uri":"dli_08_0249.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"291" + }, + { + "desc":"DLI writes the Flink job output data into DIS. This cloud ecosystem is applicable to scenarios where data is filtered and imported to the DIS stream for future processing", + "product_code":"dli", + "title":"DIS Sink Stream", + "uri":"dli_08_0241.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"292" + }, + { + "desc":"DMS (Distributed Message Service) is a message middleware service based on distributed, high-availability clustering technology. It provides reliable, scalable, fully man", + "product_code":"dli", + "title":"DMS Sink Stream", + "uri":"dli_08_0271.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"293" + }, + { + "desc":"DLI outputs the Flink job output data to Data Warehouse Service (DWS). DWS database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more co", + "product_code":"dli", + "title":"DWS Sink Stream (JDBC Mode)", + "uri":"dli_08_0247.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"294" + }, + { + "desc":"Create a sink stream to export Flink job data to DWS through OBS-based dumping, specifically, output Flink job data to OBS and then import data from OBS to DWS. For detai", + "product_code":"dli", + "title":"DWS Sink Stream (OBS-based Dumping)", + "uri":"dli_08_0248.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"295" + }, + { + "desc":"DLI exports the output data of the Flink job to HBase of MRS.An MRS cluster has been created by using your account. DLI can interconnect with HBase clusters with Kerberos", + "product_code":"dli", + "title":"MRS HBase Sink Stream", + "uri":"dli_08_0255.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"296" + }, + { + "desc":"DLI exports the output data of the Flink job to Kafka.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It deli", + "product_code":"dli", + "title":"MRS Kafka Sink Stream", + "uri":"dli_08_0254.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"297" + }, + { + "desc":"DLI exports the output data of the Flink job to Kafka.Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It deli", + "product_code":"dli", + "title":"Open-Source Kafka Sink Stream", + "uri":"dli_08_0257.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"298" + }, + { + "desc":"You can create a sink stream to export data to a file system such as HDFS or OBS. After the data is generated, a non-DLI table can be created directly according to the ge", + "product_code":"dli", + "title":"File System Sink Stream (Recommended)", + "uri":"dli_08_0267.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"299" + }, + { + "desc":"Create a sink stream to export DLI data to OBS. DLI can export the job analysis results to OBS. 
OBS applies to various scenarios, such as big data analysis, cloud-native ", + "product_code":"dli", + "title":"OBS Sink Stream", + "uri":"dli_08_0242.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"300" + }, + { + "desc":"DLI outputs the Flink job output data to RDS. Currently, PostgreSQL and MySQL databases are supported. The PostgreSQL database can store data of more complex types and de", + "product_code":"dli", + "title":"RDS Sink Stream", + "uri":"dli_08_0245.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"301" + }, + { + "desc":"DLI exports Flink job output data to SMN.SMN provides reliable and flexible large-scale message notification services to DLI. It significantly simplifies system coupling ", + "product_code":"dli", + "title":"SMN Sink Stream", + "uri":"dli_08_0251.html", + "doc_type":"sqlreference", + "p_code":"285", + "code":"302" + }, + { + "desc":"The temporary stream is used to simplify SQL logic. If complex SQL logic is followed, write SQL statements concatenated with temporary streams. The temporary stream is ju", + "product_code":"dli", + "title":"Creating a Temporary Stream", + "uri":"dli_08_0258.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"303" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Creating a Dimension Table", + "uri":"dli_08_0259.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"304" + }, + { + "desc":"Create a Redis table to connect to the source stream.For details about the JOIN syntax, see JOIN Between Stream Data and Table Data.Redis clusters are not supported.Ensur", + "product_code":"dli", + "title":"Creating a Redis Table", + "uri":"dli_08_0260.html", + "doc_type":"sqlreference", + "p_code":"304", + "code":"305" + }, + { + "desc":"Create an RDS/DWS table to connect to the source stream.For details about the JOIN syntax, see JOIN.Ensure that you have created a PostgreSQL or MySQL RDS instance in RDS", + "product_code":"dli", + "title":"Creating an RDS Table", + "uri":"dli_08_0261.html", + "doc_type":"sqlreference", + "p_code":"304", + "code":"306" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Custom Stream Ecosystem", + "uri":"dli_08_0272.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"307" + }, + { + "desc":"Compile code to obtain data from the desired cloud ecosystem or open-source ecosystem as the input data of Flink jobs.The user-defined source class needs to inherit the R", + "product_code":"dli", + "title":"Custom Source Stream", + "uri":"dli_08_0273.html", + "doc_type":"sqlreference", + "p_code":"307", + "code":"308" + }, + { + "desc":"Compile code to write the data processed by DLI to a specified cloud ecosystem or open-source ecosystem.The user-defined sink class needs to inherit the RichSinkFunction ", + "product_code":"dli", + "title":"Custom Sink Stream", + "uri":"dli_08_0274.html", + "doc_type":"sqlreference", + "p_code":"307", + "code":"309" + }, + { + "desc":"Data type is a basic attribute of data and used to distinguish different types of data. Different data types occupy different storage space and support different operatio", + "product_code":"dli", + "title":"Data Type", + "uri":"dli_08_0207.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"310" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Built-In Functions", + "uri":"dli_08_0086.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"311" + }, + { + "desc":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relationship operators are binary operators. Two compared data ", + "product_code":"dli", + "title":"Mathematical Operation Functions", + "uri":"dli_08_0191.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"312" + }, + { + "desc":"The common character string functions of DLI are as follows:FunctionConcatenates two character strings.Concatenates two character strings.SyntaxVARCHAR VARCHAR a || VARCH", + "product_code":"dli", + "title":"String Functions", + "uri":"dli_08_0096.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"313" + }, + { + "desc":"Table 1 lists the time functions supported by Flink SQL.None", + "product_code":"dli", + "title":"Temporal Functions", + "uri":"dli_08_0097.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"314" + }, + { + "desc":"This function is used to forcibly convert types.If the input is NULL, NULL is returned.Flink jobs do not support the conversion of bigint to timestamp using CAST. You can", + "product_code":"dli", + "title":"Type Conversion Functions", + "uri":"dli_08_0112.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"315" + }, + { + "desc":"An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved b", + "product_code":"dli", + "title":"Aggregate Functions", + "uri":"dli_08_0104.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"316" + }, + { + "desc":"Table-valued functions can convert one row of records into multiple rows or convert one column of records into multiple columns. 
Table-valued functions can only be used i", + "product_code":"dli", + "title":"Table-Valued Functions", + "uri":"dli_08_0206.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"317" + }, + { + "desc":"Example:The returned number of elements in the array is 3.HELLO WORLD is returned.", + "product_code":"dli", + "title":"Other Functions", + "uri":"dli_08_0101.html", + "doc_type":"sqlreference", + "p_code":"311", + "code":"318" + }, + { + "desc":"DLI supports the following three types of user-defined functions (UDFs):Regular UDF: takes in one or more input parameters and returns a single result.User-defined table-", + "product_code":"dli", + "title":"User-Defined Functions", + "uri":"dli_08_0099.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"319" + }, + { + "desc":"Table 1 describes the basic geospatial geometric elements.You can build complex geospatial geometries based on basic geospatial geometric elements. Table 2 describes the ", + "product_code":"dli", + "title":"Geographical Functions", + "uri":"dli_08_0209.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"320" + }, + { + "desc":"SyntaxDescriptionThe SELECT statement is used to select data from a table or insert constant data into a table.PrecautionsThe table to be queried must exist. Otherwise, a", + "product_code":"dli", + "title":"SELECT", + "uri":"dli_08_0102.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"321" + }, + { + "desc":"SyntaxorDescriptionIf the value of value is value1, result1 is returned. If the value is not any of the values listed in the clause, resultZ is returned. If no else state", + "product_code":"dli", + "title":"Condition Expression", + "uri":"dli_08_0103.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"322" + }, + { + "desc":"DescriptionGroup Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:time_attr can be processing-time or", + "product_code":"dli", + "title":"Window", + "uri":"dli_08_0218.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"323" + }, + { + "desc":"The JOIN operation allows you to query data from a table and write the query result to the sink stream. Currently, only RDSs and DCS Redis tables are supported. The ON ke", + "product_code":"dli", + "title":"JOIN Between Stream Data and Table Data", + "uri":"dli_08_0106.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"324" + }, + { + "desc":"Flink provides two time models: processing time and event time.DLI allows you to specify the time model during creation of the source stream and temporary stream.Processi", + "product_code":"dli", + "title":"Configuring Time Models", + "uri":"dli_08_0107.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"325" + }, + { + "desc":"Complex event processing (CEP) is used to detect complex patterns in endless data streams so as to identify and search patterns in various data rows. Pattern matching is ", + "product_code":"dli", + "title":"Pattern Matching", + "uri":"dli_08_0108.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"326" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"StreamingML", + "uri":"dli_08_0109.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"327" + }, + { + "desc":"Anomaly detection applies to various scenarios, including intrusion detection, financial fraud detection, sensor data monitoring, medical diagnosis, natural data detectio", + "product_code":"dli", + "title":"Anomaly Detection", + "uri":"dli_08_0110.html", + "doc_type":"sqlreference", + "p_code":"327", + "code":"328" + }, + { + "desc":"Modeling and forecasting time series is a common task in many business verticals. Modeling is used to extract meaningful statistics and other characteristics of the data.", + "product_code":"dli", + "title":"Time Series Forecasting", + "uri":"dli_08_0111.html", + "doc_type":"sqlreference", + "p_code":"327", + "code":"329" + }, + { + "desc":"Clustering algorithms belong to unsupervised algorithms. K-Means, a clustering algorithm, partitions data points into related clusters by calculating the distance between", + "product_code":"dli", + "title":"Real-Time Clustering", + "uri":"dli_08_0216.html", + "doc_type":"sqlreference", + "p_code":"327", + "code":"330" + }, + { + "desc":"Deep learning has a wide range of applications in many industries, such as image classification, image recognition, and speech recognition. DLI provides several functions", + "product_code":"dli", + "title":"Deep Learning Model Prediction", + "uri":"dli_08_0088.html", + "doc_type":"sqlreference", + "p_code":"327", + "code":"331" + }, + { + "desc":"Flink SQL reserves some strings as keywords. If you want to use the following character strings as field names, ensure that they are enclosed by back quotes, for example,", + "product_code":"dli", + "title":"Reserved Keywords", + "uri":"dli_08_0125.html", + "doc_type":"sqlreference", + "p_code":"275", + "code":"332" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Identifiers", + "uri":"dli_08_0001.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"333" + }, + { + "desc":"None.Aggregate function.", + "product_code":"dli", + "title":"aggregate_func", + "uri":"dli_08_0002.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"334" + }, + { + "desc":"None.Alias, which must be STRING type. It can be assigned to a field, table, view, or subquery.", + "product_code":"dli", + "title":"alias", + "uri":"dli_08_0003.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"335" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"attr_expr", + "uri":"dli_08_0004.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"336" + }, + { + "desc":"None.List of attr_expr, which is separated by commas (,).", + "product_code":"dli", + "title":"attr_expr_list", + "uri":"dli_08_0005.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"337" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"attrs_value_set_expr", + "uri":"dli_08_0006.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"338" + }, + { + "desc":"None.Return a boolean expression.", + "product_code":"dli", + "title":"boolean_expression", + "uri":"dli_08_0007.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"339" + }, + { + "desc":"None.Formal parameter for function call. It is usually a field name, which is the same as col_name.", + "product_code":"dli", + "title":"col", + "uri":"dli_08_0009.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"340" + }, + { + "desc":"None.Column (field) description, which must be STRING type and cannot exceed 256 bytes.", + "product_code":"dli", + "title":"col_comment", + "uri":"dli_08_0010.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"341" + }, + { + "desc":"None.Column name, which must be STRING type and cannot exceed 128 bytes.", + "product_code":"dli", + "title":"col_name", + "uri":"dli_08_0011.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"342" + }, + { + "desc":"None.Field list, which consists of one col_name or more. If there is more than one col_name, separate them by using a comma (,).", + "product_code":"dli", + "title":"col_name_list", + "uri":"dli_08_0012.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"343" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"condition", + "uri":"dli_08_0013.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"344" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"condition_list", + "uri":"dli_08_0014.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"345" + }, + { + "desc":"None.Common expression name.", + "product_code":"dli", + "title":"cte_name", + "uri":"dli_08_0015.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"346" + }, + { + "desc":"None.Data type. 
Currently, only the primitive data types are supported.", + "product_code":"dli", + "title":"data_type", + "uri":"dli_08_0016.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"347" + }, + { + "desc":"None.Database description, which must be STRING type and cannot exceed 256 characters.", + "product_code":"dli", + "title":"db_comment", + "uri":"dli_08_0017.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"348" + }, + { + "desc":"None.Database name, which must be STRING type and cannot exceed 128 bytes.", + "product_code":"dli", + "title":"db_name", + "uri":"dli_08_0018.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"349" + }, + { + "desc":"None.Returned result for the ELSE clause of the CASE WHEN statement.", + "product_code":"dli", + "title":"else_result_expression", + "uri":"dli_08_0019.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"350" + }, + { + "desc":"| AVRO| CSV| JSON| ORC| PARQUETCurrently, the preceding formats are supported.Both USING and STORED AS can be used for specifying the data format. You can specify the pre", + "product_code":"dli", + "title":"file_format", + "uri":"dli_08_0020.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"351" + }, + { + "desc":"None.File path, which is the OBS path", + "product_code":"dli", + "title":"file_path", + "uri":"dli_08_0021.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"352" + }, + { + "desc":"None.Function name, which must be STRING type.", + "product_code":"dli", + "title":"function_name", + "uri":"dli_08_0022.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"353" + }, + { + "desc":"None.Expression that includes GROUP BY.", + "product_code":"dli", + "title":"groupby_expression", + "uri":"dli_08_0023.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"354" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"having_condition", + "uri":"dli_08_0024.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"355" + }, + { + "desc":"None.Input expression of the CASE WHEN statement.", + "product_code":"dli", + "title":"input_expression", + "uri":"dli_08_0026.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"356" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"join_condition", + "uri":"dli_08_0029.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"357" + }, + { + "desc":"None.The condition of an inequality join.", + "product_code":"dli", + "title":"non_equi_join_condition", + "uri":"dli_08_0030.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"358" + }, + { + "desc":"None.Maximum number of output lines specified by LIMIT. 
Which must be INT type.", + "product_code":"dli", + "title":"number", + "uri":"dli_08_0031.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"359" + }, + { + "desc":"None.Partition column name, that is, partition field name, which must be STRING type.", + "product_code":"dli", + "title":"partition_col_name", + "uri":"dli_08_0034.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"360" + }, + { + "desc":"None.Partition column value, that is, partition field value.", + "product_code":"dli", + "title":"partition_col_value", + "uri":"dli_08_0035.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"361" + }, + { + "desc":"partition_specs : (partition_col_name = partition_col_value, partition_col_name = partition_col_value, ...);Table partition list, which is expressed by using key=value pa", + "product_code":"dli", + "title":"partition_specs", + "uri":"dli_08_0036.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"362" + }, + { + "desc":"None.Property name, which must be STRING type.", + "product_code":"dli", + "title":"property_name", + "uri":"dli_08_0037.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"363" + }, + { + "desc":"None.Property value, which must be STRING type.", + "product_code":"dli", + "title":"property_value", + "uri":"dli_08_0038.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"364" + }, + { + "desc":"None.Pattern matching string, which supports wildcard matching.", + "product_code":"dli", + "title":"regex_expression", + "uri":"dli_08_0039.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"365" + }, + { + "desc":"None.Returned result for the THEN clause of the CASE WHEN statement.", + "product_code":"dli", + "title":"result_expression", + "uri":"dli_08_0040.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"366" + }, + { + "desc":"None.Query clause for the basic SELECT statement.", + "product_code":"dli", + "title":"select_statement", + "uri":"dli_08_0042.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"367" + }, + { + "desc":"None.Separator, which can be customized by users, for example, comma (,), semicolon (;), and colon (:). Which must be CHAR type.", + "product_code":"dli", + "title":"separator", + "uri":"dli_08_0043.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"368" + }, + { + "desc":"None.SQL statement containing the common expression defined by cte_name.", + "product_code":"dli", + "title":"sql_containing_cte_name", + "uri":"dli_08_0045.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"369" + }, + { + "desc":"None.Subquery.", + "product_code":"dli", + "title":"sub_query", + "uri":"dli_08_0046.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"370" + }, + { + "desc":"None.Table description, which must be STRING type and cannot exceed 256 bytes.", + "product_code":"dli", + "title":"table_comment", + "uri":"dli_08_0047.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"371" + }, + { + "desc":"NoneTable name, which cannot exceed 128 bytes. The string type and \"$\" symbol are supported.", + "product_code":"dli", + "title":"table_name", + "uri":"dli_08_0048.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"372" + }, + { + "desc":"None.Table property list, which is expressed by using key=value pairs. key represents property_name, and value represents property_value. 
If there is more than one key=va", + "product_code":"dli", + "title":"table_properties", + "uri":"dli_08_0049.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"373" + }, + { + "desc":"None.Table or view name, which must be STRING type. It can also be a subquery. If it is subquery, an alias must also be provided.", + "product_code":"dli", + "title":"table_reference", + "uri":"dli_08_0050.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"374" + }, + { + "desc":"None.When expression of the CASE WHEN statement. It is used for matching with the input expression.", + "product_code":"dli", + "title":"when_expression", + "uri":"dli_08_0053.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"375" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"where_condition", + "uri":"dli_08_0054.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"376" + }, + { + "desc":"None.Analysis window function. For details, see Window Functions.", + "product_code":"dli", + "title":"window_function", + "uri":"dli_08_0055.html", + "doc_type":"sqlreference", + "p_code":"333", + "code":"377" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Operators", + "uri":"dli_08_0060.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"378" + }, + { + "desc":"All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.Relationship operators are binary operators. Two compared data ", + "product_code":"dli", + "title":"Relational Operators", + "uri":"dli_08_0061.html", + "doc_type":"sqlreference", + "p_code":"378", + "code":"379" + }, + { + "desc":"Arithmetic operators include binary operators and unary operators. For both types of operators, the returned results are numbers. Table 1 lists the arithmetic operators s", + "product_code":"dli", + "title":"Arithmetic Operators", + "uri":"dli_08_0062.html", + "doc_type":"sqlreference", + "p_code":"378", + "code":"380" + }, + { + "desc":"Common logical operators include AND, OR, and NOT. The operation result can be TRUE, FALSE, or NULL (which means unknown). The priorities of the operators are as follows:", + "product_code":"dli", + "title":"Logical Operators", + "uri":"dli_08_0063.html", + "doc_type":"sqlreference", + "p_code":"378", + "code":"381" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"dli", + "title":"Change History", + "uri":"dli_08_00005.html", + "doc_type":"sqlreference", + "p_code":"", + "code":"382" + } +] \ No newline at end of file diff --git a/docs/dli/sqlreference/PARAMETERS.txt b/docs/dli/sqlreference/PARAMETERS.txt new file mode 100644 index 00000000..6da8d5f0 --- /dev/null +++ b/docs/dli/sqlreference/PARAMETERS.txt @@ -0,0 +1,3 @@ +version="" +language="en-us" +type="" \ No newline at end of file diff --git a/docs/dli/sqlreference/dli_08_00005.html b/docs/dli/sqlreference/dli_08_00005.html new file mode 100644 index 00000000..7d779899 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_00005.html @@ -0,0 +1,20 @@ + + +

Change History

+
+
+ + + + + + + +

Released On

+

Description

+

2023-05-11

+

This is the first official release.

+
+
+
+ diff --git a/docs/dli/sqlreference/dli_08_0001.html b/docs/dli/sqlreference/dli_08_0001.html new file mode 100644 index 00000000..fa3eac02 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0001.html @@ -0,0 +1,97 @@ + + +

Identifiers

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0002.html b/docs/dli/sqlreference/dli_08_0002.html new file mode 100644 index 00000000..b448b2ec --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0002.html @@ -0,0 +1,14 @@ + + +

aggregate_func

+

Syntax

None.

+
+

Description

Aggregate function.
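
Example

A minimal sketch (the student table and its columns are hypothetical): an aggregate function computes one value over a set of input rows.
-- count all rows and return the highest score
SELECT COUNT(*), MAX(score) FROM student;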

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0003.html b/docs/dli/sqlreference/dli_08_0003.html new file mode 100644 index 00000000..f12764b9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0003.html @@ -0,0 +1,14 @@ + + +

alias

+

Syntax

None.

+
+

Description

Alias, which must be STRING type. It can be assigned to a field, table, view, or subquery.
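
Example

A minimal sketch (table and column names are hypothetical): an alias renames a field and a subquery.
SELECT t.max_salary AS ms
FROM (SELECT MAX(salary) AS max_salary FROM employees) AS t;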

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0004.html b/docs/dli/sqlreference/dli_08_0004.html new file mode 100644 index 00000000..ca5108c3 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0004.html @@ -0,0 +1,93 @@ + + +

attr_expr

+

Syntax

+
+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Syntax

+

Description

+

attr_expr

+

Attribute expression.

+

attr

+

Table field, which is the same as col_name.

+

const_value

+

Constant value.

+

case_expr

+

Case expression.

+

math_func

+

Mathematical function.

+

date_func

+

Date function.

+

string_func

+

String function.

+

aggregate_func

+

Aggregate function.

+

window_func

+

Analysis window function.

+

user_define_func

+

User-defined function.

+

general_binary_operator

+

General binary operator.

+

general_unary_operator

+

General unary operator.

+

(

+

Start of the specified subattribute expression.

+

)

+

End of the specified subattribute expression.

+
+
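
Example

A hedged illustration (the student table and its columns are hypothetical): an attribute expression can combine fields, constants, operators, and function calls.
SELECT name, score * 0.8 + 10, UPPER(city) FROM student;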
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0005.html b/docs/dli/sqlreference/dli_08_0005.html new file mode 100644 index 00000000..16a7911b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0005.html @@ -0,0 +1,14 @@ + + +

attr_expr_list

+

Syntax

None.

+
+

Description

List of attr_expr entries, separated by commas (,).

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0006.html b/docs/dli/sqlreference/dli_08_0006.html new file mode 100644 index 00000000..455ca5fc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0006.html @@ -0,0 +1,43 @@ + + +

attrs_value_set_expr

+

Syntax

+
+

Description

+
+ + + + + + + + + + + + + + + + +

Syntax

+

Description

+

attrs_value_set_expr

+

Collection of attribute values.

+

sub_query

+

Subquery clause.

+

(

+

Start of the specified subquery expression.

+

)

+

End of the specified subquery expression.

+
+
+
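
Example

A minimal sketch (table and column names are hypothetical): the collection of attribute values can be a constant list or a subquery.
SELECT * FROM orders WHERE status IN ('NEW', 'PAID');
SELECT * FROM orders WHERE user_id IN (SELECT id FROM vip_users);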
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0007.html b/docs/dli/sqlreference/dli_08_0007.html new file mode 100644 index 00000000..ebbac892 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0007.html @@ -0,0 +1,14 @@ + + +

boolean_expression

+

Syntax

None.

+
+

Description

Returns a boolean expression.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0009.html b/docs/dli/sqlreference/dli_08_0009.html new file mode 100644 index 00000000..ef44a083 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0009.html @@ -0,0 +1,20 @@ + + + +

col

+ +
+

Syntax

None.

+
+ +

Description

Formal parameter for function call. It is usually a field name, which is the same as col_name.

+
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0010.html b/docs/dli/sqlreference/dli_08_0010.html new file mode 100644 index 00000000..251bb18f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0010.html @@ -0,0 +1,14 @@ + + +

col_comment

+

Syntax

None.

+
+

Description

Column (field) description, which must be STRING type and cannot exceed 256 bytes.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0011.html b/docs/dli/sqlreference/dli_08_0011.html new file mode 100644 index 00000000..46d9300a --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0011.html @@ -0,0 +1,14 @@ + + +

col_name

+

Syntax

None.

+
+

Description

Column name, which must be STRING type and cannot exceed 128 bytes.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0012.html b/docs/dli/sqlreference/dli_08_0012.html new file mode 100644 index 00000000..862e1fc4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0012.html @@ -0,0 +1,20 @@ + + + +

col_name_list

+ +
+

Syntax

None.

+
+ +

Description

Field list consisting of one or more col_name entries. If there is more than one col_name, separate them with commas (,).
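
Example

A hedged illustration (names are hypothetical): a field list used in GROUP BY.
SELECT city, sex, COUNT(*) FROM student GROUP BY city, sex;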

+
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0013.html b/docs/dli/sqlreference/dli_08_0013.html new file mode 100644 index 00000000..baa42cf7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0013.html @@ -0,0 +1,123 @@ + + +

condition

+

Syntax

+
+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Syntax

+

Description

+

condition

+

Judgment condition.

+

>

+

Relational operator: >

+

>=

+

Relational operator: ≥

+

<

+

Relational operator: <

+

<=

+

Relational operator: ≤

+

=

+

Relational operator: =

+

<>

+

Relational operator: <>

+

is

+

Relational operator: is

+

is not

+

Relational operator: is not

+

const_null

+

Constant value: null

+

like

+

Relational operator: used for wildcard matching.

+

pattern_string

+

Pattern matching string, which supports wildcard matching. In WHERE LIKE, SQL wildcard characters "%" and "_" are supported. "%" matches zero or more characters. "_" matches exactly one character.

+

attr_expr

+

Attribute expression.

+

attrs_value_set_expr

+

Collection of attribute values.

+

in

+

Keyword used to determine whether attributes are in the same collection.

+

const_string

+

String constant.

+

const_int

+

Integer constant.

+

(

+

Start of the specified constant collection.

+

)

+

End of the specified constant collection.

+

,

+

Separator: comma (,)

+
+
+
+
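
Example

A minimal sketch (the student table and its columns are hypothetical): conditions built from relational operators, IN, and LIKE.
SELECT * FROM student
WHERE score >= 60 AND city IN ('Beijing', 'Shanghai') AND name LIKE 'Li%';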
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0014.html b/docs/dli/sqlreference/dli_08_0014.html new file mode 100644 index 00000000..dc7830e0 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0014.html @@ -0,0 +1,58 @@ + + +

condition_list

+

Syntax

+
+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +

Syntax

+

Description

+

condition_list

+

List of judgment conditions.

+

and

+

Logical operator: AND

+

or

+

Logical operator: OR

+

not

+

Logical operator: NOT

+

(

+

Start of the subjudgment condition.

+

)

+

End of the subjudgment condition.

+

condition

+

Judgment condition.

+
+
+
+
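
Example

A hedged illustration (names are hypothetical): judgment conditions combined with AND, OR, and NOT.
SELECT * FROM student
WHERE (score > 90 OR grade = 'A') AND NOT (city = 'Paris');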
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0015.html b/docs/dli/sqlreference/dli_08_0015.html new file mode 100644 index 00000000..dcf7ff74 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0015.html @@ -0,0 +1,14 @@ + + +

cte_name

+

Syntax

None.

+
+

Description

Common expression name.
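
Example

A minimal sketch (names are hypothetical): top_students is the cte_name of the common expression defined in the WITH clause.
WITH top_students AS (SELECT * FROM student WHERE score > 90)
SELECT name FROM top_students;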

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0016.html b/docs/dli/sqlreference/dli_08_0016.html new file mode 100644 index 00000000..7c8bef0a --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0016.html @@ -0,0 +1,14 @@ + + +

data_type

+

Syntax

None.

+
+

Description

Data type. Currently, only the primitive data types are supported.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0017.html b/docs/dli/sqlreference/dli_08_0017.html new file mode 100644 index 00000000..0ec05b83 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0017.html @@ -0,0 +1,14 @@ + + +

db_comment

+

Syntax

None.

+
+

Description

Database description, which must be STRING type and cannot exceed 256 characters.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0018.html b/docs/dli/sqlreference/dli_08_0018.html new file mode 100644 index 00000000..9e8ece60 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0018.html @@ -0,0 +1,14 @@ + + +

db_name

+

Syntax

None.

+
+

Description

Database name, which must be STRING type and cannot exceed 128 bytes.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0019.html b/docs/dli/sqlreference/dli_08_0019.html new file mode 100644 index 00000000..4234f026 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0019.html @@ -0,0 +1,20 @@ + + + +

else_result_expression

+ +
+

Syntax

None.

+
+ +

Description

Returned result for the ELSE clause of the CASE WHEN statement.

+
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0020.html b/docs/dli/sqlreference/dli_08_0020.html new file mode 100644 index 00000000..ce79579d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0020.html @@ -0,0 +1,18 @@ + + +

file_format

+

Format

| AVRO

+

| CSV

+

| JSON

+

| ORC

+

| PARQUET

+
+

Description

  • Currently, the preceding formats are supported.
  • Both USING and STORED AS can be used for specifying the data format. You can specify the preceding data formats by USING, but only the ORC and PARQUET formats by STORED AS.
  • ORC optimizes RCFile to provide an efficient way to store Hive data.
  • PARQUET is a column-based storage format oriented to analytical services.
+
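
Example

A hedged sketch (table names and the OBS path are hypothetical, and the OPTIONS clause follows the Spark SQL data source convention): specify the data format with USING or STORED AS.
CREATE TABLE sales_csv (id INT, amount DOUBLE) USING CSV OPTIONS (path 'obs://bucket/sales/');
CREATE TABLE sales_orc (id INT, amount DOUBLE) STORED AS ORC;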
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0021.html b/docs/dli/sqlreference/dli_08_0021.html new file mode 100644 index 00000000..800f6d4d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0021.html @@ -0,0 +1,14 @@ + + +

file_path

+

Syntax

None.

+
+

Description

File path, which is the OBS path.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0022.html b/docs/dli/sqlreference/dli_08_0022.html new file mode 100644 index 00000000..2213fa5e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0022.html @@ -0,0 +1,14 @@ + + +

function_name

+

Syntax

None.

+
+

Description

Function name, which must be STRING type.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0023.html b/docs/dli/sqlreference/dli_08_0023.html new file mode 100644 index 00000000..5d847e41 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0023.html @@ -0,0 +1,14 @@ + + +

groupby_expression

+

Syntax

None.

+
+

Description

Expression that includes GROUP BY.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0024.html b/docs/dli/sqlreference/dli_08_0024.html new file mode 100644 index 00000000..4e59b126 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0024.html @@ -0,0 +1,93 @@ + + +

having_condition

+

Syntax

+
+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Syntax

+

Description

+

having_condition

+

Judgment condition of the HAVING clause.

+

and

+

Logical operator: AND

+

or

+

Logical operator: OR

+

not

+

Logical operator: NOT

+

(

+

Start of the subjudgment condition.

+

)

+

End of the subjudgment condition.

+

condition

+

Judgment condition.

+

const_set

+

Collection of constants, separated by commas (,).

+

in

+

Keyword used to determine whether attributes are in the same collection.

+

attrs_value_set_expr

+

Collection of attribute values.

+

attr_expr

+

Attribute expression.

+

Equality and inequality

+

Equality and inequality operators. For details, see Relational Operators.

+

pattern_string

+

Pattern matching string, which supports wildcard matching. In WHERE LIKE, the SQL wildcard characters "%" and "_" are supported: "%" matches zero or more characters, and "_" matches exactly one character.

+

like

+

Relational operator: used for wildcard matching.

+
+
+
+
+
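As an illustration only (the employee table and its columns are hypothetical), a HAVING condition typically combines the elements above:

-- Keep only groups whose aggregate satisfies the condition (hypothetical table and columns).
SELECT dept, avg(salary) AS avg_sal
FROM employee
GROUP BY dept
HAVING avg(salary) > 10000 AND dept IN ('dev', 'ops');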
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0026.html b/docs/dli/sqlreference/dli_08_0026.html new file mode 100644 index 00000000..de72e9f6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0026.html @@ -0,0 +1,20 @@ + + + +

input_expression

+ +
+

Syntax

None.

+
+ +

Description

Input expression of the CASE WHEN statement.

+
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0029.html b/docs/dli/sqlreference/dli_08_0029.html new file mode 100644 index 00000000..dfee6671 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0029.html @@ -0,0 +1,88 @@ + + +

join_condition

+

Syntax

+
+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Syntax

+

Description

+

join_condition

+

Judgment condition of the JOIN clause.

+

and

+

Logical operator: AND

+

or

+

Logical operator: OR

+

not

+

Logical operator: NOT

+

(

+

Start of the subjudgment condition.

+

)

+

End of the subjudgment condition.

+

condition

+

Judgment condition.

+

const_set

+

Collection of constants, separated by commas (,).

+

in

+

Keyword used to determine whether attributes are in the same collection.

+

attrs_value_set_expr

+

Collection of attribute values.

+

attr_expr

+

Attribute expression.

+

Equality and inequality

+

Equality and inequality operators. For details, see Relational Operators.

+

pattern_string

+

Pattern matching string, which supports wildcard matching. In WHERE LIKE, the SQL wildcard characters "%" and "_" are supported: "%" matches zero or more characters, and "_" matches exactly one character.

+
+
+
+
+
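A minimal sketch of a join condition in an ON clause (tables t1 and t2 and their columns are hypothetical):

-- Equality join combined with an additional condition (hypothetical tables and columns).
SELECT t1.id, t2.name
FROM t1 JOIN t2
ON t1.id = t2.id AND t2.status <> 'deleted';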
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0030.html b/docs/dli/sqlreference/dli_08_0030.html new file mode 100644 index 00000000..15f0e5c5 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0030.html @@ -0,0 +1,14 @@ + + +

non_equi_join_condition

+

Syntax

None.

+
+

Description

The condition of an inequality join.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0031.html b/docs/dli/sqlreference/dli_08_0031.html new file mode 100644 index 00000000..5a172e72 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0031.html @@ -0,0 +1,14 @@ + + +

number

+

Syntax

None.

+
+

Description

Maximum number of rows to output, as specified by LIMIT. It must be of the INT type.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0034.html b/docs/dli/sqlreference/dli_08_0034.html new file mode 100644 index 00000000..f540e708 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0034.html @@ -0,0 +1,14 @@ + + +

partition_col_name

+

Syntax

None.

+
+

Description

Partition column name, that is, the partition field name. It must be of the STRING type.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0035.html b/docs/dli/sqlreference/dli_08_0035.html new file mode 100644 index 00000000..31ce7aaf --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0035.html @@ -0,0 +1,14 @@ + + +

partition_col_value

+

Syntax

None.

+
+

Description

Partition column value, that is, partition field value.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0036.html b/docs/dli/sqlreference/dli_08_0036.html new file mode 100644 index 00000000..a7dccddc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0036.html @@ -0,0 +1,14 @@ + + +

partition_specs

+

Syntax

partition_specs : (partition_col_name = partition_col_value, partition_col_name = partition_col_value, ...);

+
+

Description

Table partition list, expressed as key=value pairs, where key is partition_col_name and value is partition_col_value. If there is more than one partition field, separate the key=value pairs with commas (,).

+
+
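For example (a hypothetical table student partitioned by dt and region), a two-field partition list is written as follows:

-- Each key=value pair names one partition field; pairs are separated by commas.
SHOW PARTITIONS student PARTITION (dt = '2023-01-01', region = 'cn');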
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0037.html b/docs/dli/sqlreference/dli_08_0037.html new file mode 100644 index 00000000..50547b70 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0037.html @@ -0,0 +1,14 @@ + + +

property_name

+

Syntax

None.

+
+

Description

Property name, which must be STRING type.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0038.html b/docs/dli/sqlreference/dli_08_0038.html new file mode 100644 index 00000000..47370ea4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0038.html @@ -0,0 +1,14 @@ + + +

property_value

+

Syntax

None.

+
+

Description

Property value, which must be STRING type.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0039.html b/docs/dli/sqlreference/dli_08_0039.html new file mode 100644 index 00000000..b30f251f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0039.html @@ -0,0 +1,14 @@ + + +

regex_expression

+

Syntax

None.

+
+

Description

Pattern matching string, which supports wildcard matching.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0040.html b/docs/dli/sqlreference/dli_08_0040.html new file mode 100644 index 00000000..496d0a74 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0040.html @@ -0,0 +1,20 @@ + + + +

result_expression

+ +
+

Syntax

None.

+
+ +

Description

Returned result for the THEN clause of the CASE WHEN statement.

+
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0042.html b/docs/dli/sqlreference/dli_08_0042.html new file mode 100644 index 00000000..0f3a60f7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0042.html @@ -0,0 +1,14 @@ + + +

select_statement

+

Syntax

None.

+
+

Description

Query clause for the basic SELECT statement.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0043.html b/docs/dli/sqlreference/dli_08_0043.html new file mode 100644 index 00000000..aefee37f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0043.html @@ -0,0 +1,14 @@ + + +

separator

+

Syntax

None.

+
+

Description

Separator, which can be customized by users, for example, a comma (,), semicolon (;), or colon (:). It must be of the CHAR type.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0045.html b/docs/dli/sqlreference/dli_08_0045.html new file mode 100644 index 00000000..b912ab92 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0045.html @@ -0,0 +1,14 @@ + + +

sql_containing_cte_name

+

Syntax

None.

+
+

Description

SQL statement containing the common table expression (CTE) defined by cte_name.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0046.html b/docs/dli/sqlreference/dli_08_0046.html new file mode 100644 index 00000000..b3662c0d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0046.html @@ -0,0 +1,14 @@ + + +

sub_query

+

Syntax

None.

+
+

Description

Subquery.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0047.html b/docs/dli/sqlreference/dli_08_0047.html new file mode 100644 index 00000000..ec4e2306 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0047.html @@ -0,0 +1,14 @@ + + +

table_comment

+

Syntax

None.

+
+

Description

Table description, which must be of the STRING type and cannot exceed 256 bytes.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0048.html b/docs/dli/sqlreference/dli_08_0048.html new file mode 100644 index 00000000..4e5740a1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0048.html @@ -0,0 +1,14 @@ + + +

table_name

+

Syntax

None.

+
+

Description

Table name, which cannot exceed 128 bytes. The STRING type and the "$" symbol are supported.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0049.html b/docs/dli/sqlreference/dli_08_0049.html new file mode 100644 index 00000000..3ef85188 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0049.html @@ -0,0 +1,14 @@ + + +

table_properties

+

Syntax

None.

+
+

Description

Table property list, expressed as key=value pairs, where key is property_name and value is property_value. If there is more than one key=value pair, separate the pairs with commas (,).

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0050.html b/docs/dli/sqlreference/dli_08_0050.html new file mode 100644 index 00000000..e1a88b24 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0050.html @@ -0,0 +1,14 @@ + + +

table_reference

+

Syntax

None.

+
+

Description

Table or view name, which must be of the STRING type. It can also be a subquery. If it is a subquery, an alias must be provided.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0053.html b/docs/dli/sqlreference/dli_08_0053.html new file mode 100644 index 00000000..c895e9df --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0053.html @@ -0,0 +1,20 @@ + + + +

when_expression

+ +
+

Syntax

None.

+
+ +

Description

WHEN expression of the CASE WHEN statement, which is matched against the input expression.

+
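A minimal sketch showing how when_expression relates to the other CASE WHEN parameters (the student table and its values are hypothetical):

-- 'male' and 'female' are when_expressions matched against the input expression gender.
SELECT name,
       CASE gender WHEN 'male' THEN 1 WHEN 'female' THEN 2 ELSE 0 END
FROM student;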
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0054.html b/docs/dli/sqlreference/dli_08_0054.html new file mode 100644 index 00000000..63cefcf4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0054.html @@ -0,0 +1,137 @@ + + + +

where_condition

+ +
+

Syntax

+
+ +

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Syntax

+

Description

+

where_condition

+

Judgment condition of the WHERE clause.

+

and

+

Logical operator: AND

+

or

+

Logical operator: OR

+

not

+

Logical operator: NOT

+

(

+

Start of the subjudgment condition.

+

)

+

End of the subjudgment condition.

+

condition

+

Judgment condition.

+

exists

+

Keyword used to determine whether a non-empty collection exists. If EXISTS is followed by a subquery, the subquery must contain a judgment condition.

+

in

+

Keyword used to determine whether attributes are in the same collection.

+

attrs_value_set_expr

+

Collection of attribute values.

+

attr_expr

+

Attribute expression.

+
+
+
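A minimal sketch combining several of the elements above in a WHERE clause (the student and score tables are hypothetical):

-- Rows whose name matches a LIKE pattern and for which a correlated subquery returns at least one row.
SELECT * FROM student
WHERE name LIKE 'Zhang%'
  AND EXISTS (SELECT 1 FROM score WHERE score.id = student.id AND score.math > 60);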
+ +
+ +
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0055.html b/docs/dli/sqlreference/dli_08_0055.html new file mode 100644 index 00000000..e5e8d7d6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0055.html @@ -0,0 +1,14 @@ + + +

window_function

+

Syntax

None.

+
+

Description

Analysis window function. For details, see Window Functions.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0056.html b/docs/dli/sqlreference/dli_08_0056.html new file mode 100644 index 00000000..932e3b03 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0056.html @@ -0,0 +1,19 @@ + + +

Data Types

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0057.html b/docs/dli/sqlreference/dli_08_0057.html new file mode 100644 index 00000000..cbfa2709 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0057.html @@ -0,0 +1,12 @@ + + +

Overview

+

A data type is a basic attribute of data used to distinguish different kinds of data. Different data types occupy different storage space and support different operations. Data is stored in tables in the database, and a data type is specified for each column of a table. Therefore, the data to be stored in a table must comply with the data type of its column; otherwise, errors may occur.

+

DLI only supports primitive data types.

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0058.html b/docs/dli/sqlreference/dli_08_0058.html new file mode 100644 index 00000000..276beb5b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0058.html @@ -0,0 +1,266 @@ + + +

Primitive Data Types

+

Table 1 lists the primitive data types supported by DLI.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Primitive data types

Data Type

+

Description

+

Storage Space

+

Value Range

+

Support by OBS Table

+

Support by DLI Table

+

INT

+

Signed integer

+

4 bytes

+

–2147483648 to 2147483647

+

Yes

+

Yes

+

STRING

+

Character string

+

-

+

-

+

Yes

+

Yes

+

FLOAT

+

Single-precision floating point

+

4 bytes

+

-

+

Yes

+

Yes

+

DOUBLE

+

Double-precision floating-point

+

8 bytes

+

-

+

Yes

+

Yes

+

DECIMAL(precision,scale)

+

Decimal number with a fixed number of significant digits and decimal places, for example, 3.5.

+
  • precision: indicates the maximum number of digits that can be displayed.
  • scale: indicates the number of decimal places.
+

-

+

1<=precision<=38

+

0<=scale<=38

+

If precision and scale are not specified, DECIMAL (38,38) is used by default.

+

Yes

+

Yes

+

BOOLEAN

+

Boolean

+

1 byte

+

TRUE/FALSE

+

Yes

+

Yes

+

SMALLINT/SHORT

+

Signed integer

+

2 bytes

+

–32768 to 32767

+

Yes

+

Yes

+

TINYINT

+

Signed integer

+

1 byte

+

–128 to 127

+

Yes

+

No

+

BIGINT/LONG

+

Signed integer

+

8 bytes

+

–9223372036854775808 to 9223372036854775807

+

Yes

+

Yes

+

TIMESTAMP

+

Timestamp in raw data format, indicating the date and time. Example: 1621434131222.

+

-

+

-

+

Yes

+

Yes

+

CHAR

+

Fixed-length character string

+

-

+

-

+

Yes

+

Yes

+

VARCHAR

+

Variable-length character string

+

-

+

-

+

Yes

+

Yes

+

DATE

+

Date type in the format of yyyy-mm-dd, for example, 2014-05-29

+

-

+

DATE does not contain time information. Its value ranges from 0000-01-01 to 9999-12-31.

+

Yes

+

Yes

+
+
+
  • VARCHAR and CHAR data is stored as the STRING type on DLI. Therefore, a string that exceeds the specified length is not truncated.
  • FLOAT data is stored as DOUBLE data on DLI.
+
+

INT

Signed integer with a storage space of 4 bytes. Its value ranges from –2147483648 to 2147483647. If this field is NULL, value 0 is used by default.

+
+

STRING

Character string.

+
+

FLOAT

Single-precision floating point with a storage space of 4 bytes. If this field is NULL, value 0 is used by default.

+

Due to the limitations of floating-point storage, do not use the expression a == b to check whether two floating-point values are the same. You are advised to use the expression abs(a - b) <= EPSILON, where EPSILON indicates the allowed error range, usually 1.19209290E-07F. If the expression holds, the two floating-point values are considered the same.

+
+

DOUBLE

Double-precision floating point with a storage space of 8 bytes. If this field is NULL, value 0 is used by default.

+

Due to the limitations of floating-point storage, do not use the expression a == b to check whether two floating-point values are the same. You are advised to use the expression abs(a - b) <= EPSILON, where EPSILON indicates the allowed error range, usually 2.2204460492503131E-16. If the expression holds, the two floating-point values are considered the same.

+
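A sketch of the recommended comparison for FLOAT and DOUBLE values (the measure_tbl table and its columns are hypothetical):

-- Treat a and b as equal when their difference is within the allowed error range.
SELECT * FROM measure_tbl WHERE abs(a - b) <= 2.2204460492503131E-16;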
+

DECIMAL

Decimal(p,s) indicates a total of p digits, comprising p - s integer digits and s fractional digits. p indicates the maximum number of decimal digits that can be stored, counting the digits on both sides of the decimal point; its value ranges from 1 to 38. s indicates the maximum number of decimal digits that can be stored to the right of the decimal point; its value ranges from 0 to p. The scale can be specified only after the precision is specified, so the following inequality holds: 0 ≤ s ≤ p. For example, decimal(10,6) indicates a value with 10 digits in total, of which four are integer digits and six are fractional digits.

+
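For example (the table name is hypothetical), the declaration below stores up to four integer digits and six fractional digits:

-- decimal(10,6): 10 digits in total, 6 of them to the right of the decimal point.
CREATE TABLE decimal_tbl (amount DECIMAL(10,6)) USING PARQUET;
INSERT INTO decimal_tbl VALUES (1234.567890);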
+

BOOLEAN

Boolean, which can be TRUE or FALSE.

+
+

SMALLINT/SHORT

Signed integer with a storage space of 2 bytes. Its value ranges from –32768 to 32767. If this field is NULL, value 0 is used by default.

+
+

TINYINT

Signed integer with a storage space of 1 byte. Its value ranges from –128 to 127. If this field is NULL, value 0 is used by default.

+
+

BIGINT/LONG

Signed integer with a storage space of 8 bytes. Its value ranges from –9223372036854775808 to 9223372036854775807. It does not support scientific notation. If this field is NULL, value 0 is used by default.

+
+

TIMESTAMP

Legacy UNIX TIMESTAMP is supported, providing precision up to the microsecond level. TIMESTAMP is defined as the difference, in seconds, between the specified time and the UNIX epoch (1970-01-01 00:00:00). Data of the STRING type supports implicit conversion to TIMESTAMP. (The STRING must be in the yyyy-MM-dd HH:mm:ss[.ffffff] format; the precision after the decimal point is optional.)

+
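A sketch of the implicit STRING-to-TIMESTAMP conversion described above (the event_tbl table and the literal are hypothetical):

-- The STRING literal is implicitly converted for the comparison because it follows
-- the yyyy-MM-dd HH:mm:ss[.ffffff] format.
SELECT * FROM event_tbl WHERE event_time = '2021-05-19 14:22:11.222000';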
+

CHAR

Character string with a fixed length. In DLI, the STRING type is used.

+
+

VARCHAR

VARCHAR is declared with a length that indicates the maximum number of characters in a string. During conversion from STRING to VARCHAR, if the number of characters in STRING exceeds the specified length, the excess characters of STRING are automatically trimmed. Similar to STRING, the spaces at the end of VARCHAR are meaningful and affect the comparison result. In DLI, the STRING type is used.

+
+

DATE

DATE supports only explicit conversion (cast) with DATE, TIMESTAMP, and STRING. For details, see Table 2.

+ +
+ + + + + + + + + + + + + + + + + + + +
Table 2 cast function conversion

Explicit Conversion

+

Conversion Result

+

cast(date as date)

+

Same as value of DATE.

+

cast(timestamp as date)

+

The date (yyyy-mm-dd) is obtained from TIMESTAMP based on the local time zone and returned as the value of DATE.

+

cast(string as date)

+

If the STRING is in the yyyy-MM-dd format, the corresponding date (yyyy-mm-dd) is returned as the value of DATE. If the STRING is not in the yyyy-MM-dd format, NULL is returned.

+

cast(date as timestamp)

+

A timestamp that maps to the zero hour of the date (yyyy-mm-dd) specified by DATE is generated based on the local time zone and returned as the TIMESTAMP value.

+

cast(date as string)

+

A STRING in the yyyy-MM-dd format is generated based on the date (yyyy-mm-dd) specified by DATE and returned as the STRING value.

+
+
+
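For example, the conversions in Table 2 can be exercised as follows (the literals are arbitrary):

SELECT cast('2014-05-29' as date);                     -- returns 2014-05-29
SELECT cast('2014/05/29' as date);                     -- not in yyyy-MM-dd format, returns NULL
SELECT cast(cast('2014-05-29' as date) as timestamp);  -- zero hour of that date in the local time zone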
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0059.html b/docs/dli/sqlreference/dli_08_0059.html new file mode 100644 index 00000000..c7656b9e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0059.html @@ -0,0 +1,100 @@ + + +

Complex Data Types

+

Spark SQL supports complex data types, as shown in Table 1.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 Complex data types

Data Type

+

Description

+

Syntax

+

ARRAY

+

A set of ordered fields used to construct an ARRAY with the specified values. The values can be of any type, but all fields must have the same data type.

+

array(<value>,<value>[, ...])

+

For details, see Example of ARRAY.

+

MAP

+

A group of unordered key/value pairs used to generate a MAP. The keys must be of a native data type, while the values can be of either a native or a complex data type. All keys of a MAP must have the same type, and so must all values.

+

map(K <key1>, V <value1>, K <key2>, V <value2>[, ...])

+

For details, see Example of Map.

+

STRUCT

+

Indicates a group of named fields. The data types of the fields can be different.

+

struct(<value1>,<value2>[, ...])

+

For details, see Example of STRUCT.

+
+
+

Restrictions

  • When a table containing fields of the complex data type is created, the storage format of this table cannot be CSV (txt).
  • If a table contains fields of the complex data type, data in CSV (txt) files cannot be imported to the table.
  • When creating a table of the MAP data type, you must specify the schema; the DATE, SHORT, and TIMESTAMP data types are not supported.
  • For the OBS table in JSON format, the key type of the MAP supports only the STRING type.
  • The key of the MAP type cannot be NULL. Therefore, the MAP key does not support implicit conversion between inserted data formats where NULL values are allowed. For example, the STRING type cannot be converted to other native types, the FLOAT type cannot be converted to the TIMESTAMP type, and other native types cannot be converted to the DECIMAL type.
  • Values of the DOUBLE or BOOLEAN data type cannot be included in the STRUCT data type.
+
+

Example of ARRAY

Create an array_test table, set id to ARRAY<INT>, and name to STRING. After the table is created, insert test data into array_test. The procedure is as follows:

+
  1. Create a table.

    CREATE TABLE array_test(name STRING, id ARRAY < INT >) USING PARQUET;

    +
  2. Run the following statements to insert test data:

    INSERT INTO array_test VALUES ('test',array(1,2,3,4));

    +

    INSERT INTO array_test VALUES ('test2',array(4,5,6,7));

    +

    INSERT INTO array_test VALUES ('test3',array(7,8,9,0));

    +
  3. Query the result.

    To query all data in the array_test table, run the following statement:

    +

    SELECT * FROM array_test;

    +
    test3	[7,8,9,0]
    +test2	[4,5,6,7]
    +test	[1,2,3,4]
    +

    To query the data of element 0 in the id array in the array_test table, run the following statement:

    +

    SELECT id[0] FROM array_test;

    +
    7
    +4
    +1
    +
+
+

Example of Map

Create the map_test table and set score to map<STRING,INT>. The key is of the STRING type and the value is of the INT type. After the table is created, insert test data to map_test. The procedure is as follows:

+
  1. Create a table.

    CREATE TABLE map_test(id STRING, score map<STRING,INT>) USING PARQUET;

    +
  2. Run the following statements to insert test data:

    INSERT INTO map_test VALUES ('test4',map('math',70,'chemistry',84));

    +

    INSERT INTO map_test VALUES ('test5',map('math',85,'chemistry',97));

    +

    INSERT INTO map_test VALUES ('test6',map('math',88,'chemistry',80));

    +
  3. Query the result.

    To query all data in the map_test table, run the following statement:

    +

    SELECT * FROM map_test;

    +
    test6	{"chemistry":80,"math":88}
    +test5	{"chemistry":97,"math":85}
    +test4	{"chemistry":84,"math":70}
    +

    To query the math score in the map_test table, run the following statement:

    +

    SELECT id, score['math'] FROM map_test;

    +
    test6	88
    +test5	85
    +test4	70
    +
+
+

Example of STRUCT

Create a struct_test table and set info to the STRUCT<name:STRING, age:INT> data type (the field consists of name and age, where the type of name is STRING and age is INT). After the table is created, insert test data into the struct_test table. The procedure is as follows:

+
  1. Create a table.

    CREATE TABLE struct_test(id INT, info STRUCT<name:STRING,age:INT>) USING PARQUET;

    +
  2. Run the following statements to insert test data:

    INSERT INTO struct_test VALUES (8, struct('zhang',23));

    +

    INSERT INTO struct_test VALUES (9, struct('li',25));

    +

    INSERT INTO struct_test VALUES (10, struct('wang',26));

    +
  3. Query the result.

    To query all data in the struct_test table, run the following statement:

    +

    SELECT * FROM struct_test;

    +
    8	{"name":"zhang","age":23}
    +10	{"name":"wang","age":26}
    +9	{"name":"li","age":25}
    +

    Query name and age in the struct_test table.

    +

    SELECT id,info.name,info.age FROM struct_test;

    +
    8	zhang	23
    +10	wang	26
    +9	li	25
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0060.html b/docs/dli/sqlreference/dli_08_0060.html new file mode 100644 index 00000000..6d0c39d5 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0060.html @@ -0,0 +1,15 @@ + + +

Operators

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0061.html b/docs/dli/sqlreference/dli_08_0061.html new file mode 100644 index 00000000..7ded999b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0061.html @@ -0,0 +1,144 @@ + + +

Relational Operators

+

All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.

+

Relational operators are binary operators. The two data types being compared must be the same, or they must support implicit conversion.

+

Table 1 lists the relational operators provided by DLI.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Relational operators

Operator

+

Result Type

+

Description

+

A = B

+

BOOLEAN

+

If A is equal to B, then TRUE is returned. Otherwise, FALSE is returned. This operator is used for value assignment.

+

A == B

+

BOOLEAN

+

If A is equal to B, then TRUE is returned. Otherwise, FALSE is returned. This operator cannot be used for value assignment.

+

A <=> B

+

BOOLEAN

+

If A is equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A and B are NULL, then TRUE is returned. If A or B is NULL, then FALSE is returned.

+

A <> B

+

BOOLEAN

+

If A is not equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned. This operator follows the standard SQL syntax.

+

A != B

+

BOOLEAN

+

This operator is the same as the <> logical operator. It follows the SQL Server syntax.

+

A < B

+

BOOLEAN

+

If A is less than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A <= B

+

BOOLEAN

+

If A is less than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A > B

+

BOOLEAN

+

If A is greater than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A >= B

+

BOOLEAN

+

If A is greater than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A BETWEEN B AND C

+

BOOLEAN

+

If A is greater than or equal to B and less than or equal to C, then TRUE is returned. Otherwise, FALSE is returned. If A, B, or C is NULL, then NULL is returned.

+

A NOT BETWEEN B AND C

+

BOOLEAN

+

If A is less than B or greater than C, TRUE is returned; otherwise, FALSE is returned. If A, B, or C is NULL, then NULL is returned.

+

A IS NULL

+

BOOLEAN

+

If A is NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS NOT NULL

+

BOOLEAN

+

If A is not NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A LIKE B

+

BOOLEAN

+

If A matches B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A NOT LIKE B

+

BOOLEAN

+

If A does not match B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A RLIKE B

+

BOOLEAN

+

This operator is used for the LIKE operation of JAVA. If A or its substring matches B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A REGEXP B

+

BOOLEAN

+

The result is the same as A RLIKE B.

+
+
+
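A few literal-value sketches of the NULL handling described above:

SELECT 1 = 1;              -- TRUE
SELECT NULL <=> NULL;      -- TRUE: <=> treats two NULL operands as equal
SELECT NULL = NULL;        -- NULL: = returns NULL when an operand is NULL
SELECT 5 BETWEEN 1 AND 10; -- TRUE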
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0062.html b/docs/dli/sqlreference/dli_08_0062.html new file mode 100644 index 00000000..6b7d9b88 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0062.html @@ -0,0 +1,86 @@ + + +

Arithmetic Operators

+

Arithmetic operators include binary operators and unary operators. For both types of operators, the returned results are numbers. Table 1 lists the arithmetic operators supported by DLI.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Arithmetic operators

Operator

+

Result Type

+

Description

+

A + B

+

All numeric types

+

A plus B. The result type is associated with the operation data type. For example, if floating-point number is added to an integer, the result will be a floating-point number.

+

A - B

+

All numeric types

+

A minus B. The result type is associated with the operation data type.

+

A * B

+

All numeric types

+

Multiply A and B. The result type is associated with the operation data type.

+

A / B

+

All numeric types

+

Divide A by B. The result is a number of the double type (double-precision number).

+

A % B

+

All numeric types

+

Remainder of dividing A by B (A modulo B). The result type is associated with the operation data type.

+

A & B

+

All numeric types

+

Check each bit of the two operands and perform a bitwise AND: a result bit is 1 only if the corresponding bits of both operands are 1; otherwise, it is 0.

+

A | B

+

All numeric types

+

Check each bit of the two operands and perform a bitwise OR: a result bit is 1 if the corresponding bit of either operand is 1; otherwise, it is 0.

+

A ^ B

+

All numeric types

+

Check each bit of the two operands and perform a bitwise XOR: a result bit is 1 if exactly one of the corresponding bits of the two operands is 1; otherwise, it is 0.

+

~A

+

All numeric types

+

Perform a bitwise NOT on A, inverting each bit.

+
+
+
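A few literal-value sketches of the operators above:

SELECT 7 / 2;  -- 3.5: division returns a double-precision number
SELECT 7 % 2;  -- 1: remainder of the division
SELECT 5 & 3;  -- 1: bitwise AND (101 AND 011 = 001)
SELECT 5 ^ 3;  -- 6: bitwise XOR (101 XOR 011 = 110)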
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0063.html b/docs/dli/sqlreference/dli_08_0063.html new file mode 100644 index 00000000..d26e3b3d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0063.html @@ -0,0 +1,80 @@ + + +

Logical Operators

+

Common logical operators include AND, OR, and NOT. The operation result can be TRUE, FALSE, or NULL (which means unknown). The priorities of the operators are as follows: NOT > AND > OR.

+

Table 1 lists the calculation rules, where A and B represent logical expressions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Logical operators

Operator

+

Result Type

+

Description

+

A AND B

+

BOOLEAN

+

If A and B are TRUE, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A OR B

+

BOOLEAN

+

If A or B is TRUE, TRUE is returned, even if the other operand is NULL. If both A and B are FALSE, FALSE is returned. If one operand is FALSE and the other is NULL, or both are NULL, NULL is returned.

+

NOT A

+

BOOLEAN

+

If A is FALSE, then TRUE is returned. If A is NULL, then NULL is returned. Otherwise, FALSE is returned.

+

! A

+

BOOLEAN

+

Same as NOT A.

+

A IN (val1, val2, ...)

+

BOOLEAN

+

If A is equal to any value in (val1, val2, ...), then TRUE is returned. Otherwise, FALSE is returned.

+

A NOT IN (val1, val2, ...)

+

BOOLEAN

+

If A is not equal to any value in (val1, val2, ...), then TRUE is returned. Otherwise, FALSE is returned.

+

EXISTS (subquery)

+

BOOLEAN

+

If the result of the subquery contains at least one row, then TRUE is returned. Otherwise, FALSE is returned.

+

NOT EXISTS (subquery)

+

BOOLEAN

+

If the subquery output does not contain any row, TRUE is returned; otherwise, FALSE is returned.

+
+
+
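A few literal-value sketches of the three-valued logic described above:

SELECT true OR NULL;    -- TRUE: one operand is TRUE
SELECT false OR NULL;   -- NULL
SELECT true AND NULL;   -- NULL
SELECT 2 IN (1, 2, 3);  -- TRUE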
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0064.html b/docs/dli/sqlreference/dli_08_0064.html new file mode 100644 index 00000000..c1426d18 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0064.html @@ -0,0 +1,23 @@ + + +

Built-in Functions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0065.html b/docs/dli/sqlreference/dli_08_0065.html new file mode 100644 index 00000000..66f7001f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0065.html @@ -0,0 +1,309 @@ + + +

Mathematical Functions

+

Table 1 lists the mathematical functions supported in DLI.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Mathematical functions

Function

+

Return Type

+

Description

+

round(DOUBLE a)

+

DOUBLE

+

Round a.

+

round(DOUBLE a, INT d)

+

DOUBLE

+

Round a to d decimal places. Example: round(21.263,2) = 21.26.

+

bround(DOUBLE a)

+

DOUBLE

+

Round off a figure using the HALF_EVEN rounding mode.

+

If the figure to be rounded off ends in 5, the HALF_EVEN rounding mode is as follows:

+
  • Round up if the digit in the place preceding 5 is odd.
  • Round down if the digit in the place preceding 5 is even.
+

Example: bround(7.5) = 8.0, bround(6.5) = 6.0.

+

bround(DOUBLE a, INT d)

+

DOUBLE

+

Retain d decimal places and round the d+1 decimal place using the HALF_EVEN rounding mode.

+

If the figure to be rounded off ends in 5, it will be rounded off as follows:

+
  • Round up if the d decimal digit is odd.
  • Round down if the d decimal digit is even.
+

Example: bround(8.25, 1) = 8.2, bround(8.35, 1) = 8.4.

+

floor(DOUBLE a)

+

BIGINT

+

Return the largest integer that is less than or equal to a. Example: floor(21.2) = 21.

+

ceil(DOUBLE a), ceiling(DOUBLE a)

+

BIGINT

+

Return the smallest integer that is greater than or equal to a. Example: ceil(21.2) = 22.

+

rand(), rand(INT seed)

+

DOUBLE

+

Return a random number that is distributed uniformly from 0 through 1 (1 is exclusive). If a seed is specified, a stable random number sequence is produced.

+

exp(DOUBLE a), exp(DECIMAL a)

+

DOUBLE

+

Return the value of e raised to the power of a.

+

ln(DOUBLE a), ln(DECIMAL a)

+

DOUBLE

+

Return the natural logarithm of the argument a.

+

log10(DOUBLE a), log10(DECIMAL a)

+

DOUBLE

+

Return the base 10 logarithm of the argument a.

+

log2(DOUBLE a), log2(DECIMAL a)

+

DOUBLE

+

Return the base 2 logarithm of the argument a.

+

log(DOUBLE base, DOUBLE a)

+

log(DECIMAL base, DECIMAL a)

+

DOUBLE

+

Return the base base logarithm of the argument a.

+

pow(DOUBLE a, DOUBLE p), power(DOUBLE a, DOUBLE p)

+

DOUBLE

+

Return the value of a raised to the power of p.

+

sqrt(DOUBLE a), sqrt(DECIMAL a)

+

DOUBLE

+

Return the square root of a.

+

bin(BIGINT a)

+

STRING

+

Return a number in binary format.

+

hex(BIGINT a) hex(STRING a)

+

STRING

+

Convert an integer or character to its hexadecimal representation.

+

conv(BIGINT num, INT from_base, INT to_base), conv(STRING num, INT from_base, INT to_base)

+

STRING

+

Convert a number from from_base to to_base. Example: Convert 5 from decimal to quaternary using conv(5,10,4) = 11.

+

abs(DOUBLE a)

+

DOUBLE

+

Return the absolute value.

+

pmod(INT a, INT b), pmod(DOUBLE a, DOUBLE b)

+

INT or DOUBLE

+

Return the positive value of the remainder after division of a by b.

+

sin(DOUBLE a), sin(DECIMAL a)

+

DOUBLE

+

Return the sine value of a.

+

asin(DOUBLE a), asin(DECIMAL a)

+

DOUBLE

+

Return the arc sine value of a.

+

cos(DOUBLE a), cos(DECIMAL a)

+

DOUBLE

+

Return the cosine value of a.

+

acos(DOUBLE a), acos(DECIMAL a)

+

DOUBLE

+

Return the arc cosine value of a.

+

tan(DOUBLE a), tan(DECIMAL a)

+

DOUBLE

+

Return the tangent value of a.

+

atan(DOUBLE a), atan(DECIMAL a)

+

DOUBLE

+

Return the arc tangent value of a.

+

degrees(DOUBLE a), degrees(DECIMAL a)

+

DOUBLE

+

Convert the value of a from radians to degrees.

+

radians(DOUBLE a), radians(DECIMAL a)

+

DOUBLE

+

Convert the value of a from degrees to radians.

+

positive(INT a), positive(DOUBLE a)

+

INT or DOUBLE

+

Return a. Example: positive(2) = 2.

+

negative(INT a), negative(DOUBLE a)

+

INT or DOUBLE

+

Return –a. Example: negative(2) = –2.

+

sign(DOUBLE a), sign(DECIMAL a)

+

DOUBLE or INT

+

Return the sign of a. 1.0 is returned if a is positive. –1.0 is returned if a is negative. Otherwise, 0.0 is returned.

+

e()

+

DOUBLE

+

Return the value of e.

+

pi()

+

DOUBLE

+

Return the value of pi.

+

factorial(INT a)

+

BIGINT

+

Return the factorial of a.

+

cbrt(DOUBLE a)

+

DOUBLE

+

Return the cube root of a.

+

shiftleft(TINYINT|SMALLINT|INT a, INT b)

+

shiftleft(BIGINT a, INT b)

+

INT

+

BIGINT

+

Bitwise signed left shift. Interpret a as a binary number and shift the binary number b positions to the left.

+

shiftright(TINYINT|SMALLINT|INT a, INT b)

+

shiftright(BIGINT a, INT b)

+

INT

+

BIGINT

+

Bitwise signed right shift. Interpret a as a binary number and shift the binary number b positions to the right.

+

shiftrightunsigned(TINYINT|SMALLINT|INT a, INT b),

+

shiftrightunsigned(BIGINT a, INT b)

+

INT

+

BIGINT

+

Bitwise unsigned right shift. Interpret a as a binary number and shift the binary number b positions to the right.

+

greatest(T v1, T v2, ...)

+

T

+

Return the maximum value of a list of values.

+

least(T v1, T v2, ...)

+

T

+

Return the minimum value of a list of values.

+
+
+
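A few literal-value sketches of the rounding and conversion functions above:

SELECT round(21.263, 2);   -- 21.26
SELECT bround(8.25, 1);    -- 8.2: the digit before the trailing 5 is even, so it rounds down
SELECT conv('5', 10, 4);   -- 11: decimal 5 expressed in base 4
SELECT greatest(1, 5, 3);  -- 5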
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0066.html b/docs/dli/sqlreference/dli_08_0066.html new file mode 100644 index 00000000..9dccc808 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0066.html @@ -0,0 +1,207 @@ + + +

Date Functions

+

Table 1 lists the date functions supported in DLI.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Date/time functions

Function

+

Return Type

+

Description

+

from_unixtime(bigint unixtime[, string format])

+

STRING

+

Convert a timestamp to the time format "yyyy-MM-dd HH:mm:ss" or "yyyyMMddHHmmss.uuuuuu".

+

For example, select FROM_UNIXTIME(1608135036,'yyyy-MM-dd HH:mm:ss').

+

unix_timestamp()

+

BIGINT

+

Return a Unix timestamp (the number of seconds that have elapsed since 1970-01-01 00:00:00) represented by an unsigned integer when the function is called without arguments.

+

unix_timestamp(string date)

+

BIGINT

+

Return the number of seconds between a specified date and 1970-01-01 00:00:00.

+

unix_timestamp(string date, string pattern)

+

BIGINT

+

Convert a time string with a given pattern to a Unix timestamp. Example: unix_timestamp("2009-03-20", "yyyy-MM-dd") = 1237532400.

+

to_date(string timestamp)

+

STRING

+

Return the date part of a time string. Example: to_date("1970-01-01 00:00:00") = "1970-01-01".

+

year(string date)

+

INT

+

Return the year part of a date.

+

quarter(string date/timestamp/string)

+

INT

+

Return the quarter of the year for a date, timestamp, or string. Example: quarter('2015-04-01')=2.

+

month(string date)

+

INT

+

Return the month (from 1 to 12) part of a date.

+

day(string date) dayofmonth(string date)

+

INT

+

Return the day part of a date.

+

hour(string date)

+

INT

+

Return the hour (from 0 to 23) part of a date.

+

minute(string date)

+

INT

+

Return the minute (from 0 to 59) part of a date.

+

second(string date)

+

INT

+

Return the second (from 0 to 59) part of a date.

+

weekofyear(string date)

+

INT

+

Return the week number (from 0 to 53) of a date.

+

datediff(string enddate, string startdate)

+

INT

+

Return the number of days from startdate to enddate.

+

date_add(string startdate, int days)

+

STRING

+

Add a number of days to a date.

+

date_sub(string startdate, int days)

+

STRING

+

Subtract a number of days from a date.

+

from_utc_timestamp(string timestamp, string timezone)

+

TIMESTAMP

+

Convert a UTC timestamp to a timestamp in a given time zone. For example, from_utc_timestamp('1970-01-01 08:00:00','PST') returns 1970-01-01 00:00:00.

+

to_utc_timestamp(string timestamp, string timezone)

+

TIMESTAMP

+

Convert a timestamp in a given time zone to a UTC timestamp. For example, to_utc_timestamp('1970-01-01 00:00:00','PST') returns 1970-01-01 08:00:00.

+

current_date()

+

DATE

+

Return the current date, for example, 2016-07-04.

+

current_timestamp()

+

TIMESTAMP

+

Return the current time, for example, 2016-07-04 11:18:11.685.

+

add_months(string start_date, int num_months)

+

STRING

+

Return the date that is num_months after start_date.

+

last_day(string date)

+

STRING

+

Return the last day of the month to which a date belongs. The returned date is in the format of yyyy-MM-dd, for example, 2015-08-31.

+

next_day(string start_date, string day_of_week)

+

STRING

+

Return the first date that is later than start_date and falls on day_of_week, in the format of yyyy-MM-dd. day_of_week specifies a day of the week; for example, its value can be Monday or FRIDAY.

+

trunc(string date, string format)

+

STRING

+

Reset the date in a specified format. Supported formats are MONTH/MON/MM and YEAR/YYYY/YY. Example: trunc('2015-03-17', 'MM') = 2015-03-01.

+

months_between(string date1, string date2)

+

DOUBLE

+

Return number of months between dates date1 and date2.

+

date_format(date/timestamp/string ts, string fmt)

+

STRING

+

Return the formatted value of date/timestamp/string. The Java SimpleDateFormat format is supported. Example: date_format('2015-04-08', 'y') = '2015'.

+

In the format, y indicates the year. Y indicates the year when the current week is located. A week starts from Sunday and ends on Saturday. If a week crosses years, this week is counted as the next year.

+
+
+
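A few sketches of common date-function calls (the literals are arbitrary; unix_timestamp results depend on the session time zone):

SELECT from_unixtime(1608135036, 'yyyy-MM-dd HH:mm:ss');
SELECT datediff('2021-01-10', '2021-01-01');  -- 9
SELECT date_add('2021-01-01', 30);            -- 2021-01-31
SELECT trunc('2015-03-17', 'MM');             -- 2015-03-01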
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0067.html b/docs/dli/sqlreference/dli_08_0067.html new file mode 100644 index 00000000..8199d1bb --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0067.html @@ -0,0 +1,235 @@ + + +

String Functions

+

Table 1 lists the string functions supported by DLI.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 String functions

Function

+

Return Type

+

Description

+

ascii(string str)

+

INT

+

Return the numeric value of the first character in a string.

+

concat(string A, string B...)

+

STRING

+

Return a string resulting from concatenating the input strings. This function can take any number of input strings.

+

concat_ws(string SEP, string A, string B...)

+

STRING

+

Return a string resulting from concatenating the input strings, which are separated by specified separators.

+

encode(string src, string charset)

+

BINARY

+

Encode src in the encoding mode specified by charset.

+

find_in_set(string str, string strList)

+

INT

+

Return the position of the first occurrence of str in strList. If the value of any parameter is NULL, NULL is returned. If the first parameter contains a comma (,), 0 is returned.

+

get_json_object(string json_string, string path)

+

STRING

+

Parse the JSON object in a specified JSON path. The function will return NULL if the JSON object is invalid.

+

instr(string str, string substr)

+

INT

+

Return the position of the first occurrence of substr in str. Return NULL if NULL is contained in the parameters and return 0 if substr does not exist in str. Note that the subscripts start from 1.

+

length(string A)

+

INT

+

Return the length of a string.

+

locate(string substr, string str[, int pos])

+

INT

+

Return the position of the first occurrence of substr in str after position pos (starting from 1).

+

lower(string A) lcase(string A)

+

STRING

+

Convert all characters of a string to lower case.

+

lpad(string str, int len, string pad)

+

STRING

+

Return a string of a specified length. If the length of the given string (str) is shorter than the specified length (len), the given string is left-padded with pad to the specified length.

+

ltrim(string A)

+

STRING

+

Trim spaces from the left hand side of a string.

+

parse_url(string urlString, string partToExtract [, string keyToExtract])

+

STRING

+

Return the specified part of the specified URL. Valid values of partToExtract include HOST, PATH, QUERY, REF, PROTOCOL, AUTHORITY, FILE, and USERINFO.

+

For example, parse_url('http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1', 'HOST') returns 'facebook.com'.

+

When the second parameter is QUERY, the third parameter can be used to extract the value of a specific parameter. For example, parse_url('http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1', 'QUERY', 'k1') returns 'v1'.

+

printf(String format, Obj... args)

+

STRING

+

Print the input according to a specified format.

+

regexp_extract(string subject, string pattern, int index)

+

STRING

+

Extract the string specified by the regular expression. Example: regexp_extract('foothebar', 'foo(.*?)(bar)', 2) returns 'bar'.

+

regexp_replace(string A, string B, string C)

+

STRING

+

Replace the part of string A that matches the regular expression B with C.

+

repeat(string str, int n)

+

STRING

+

Repeat a string N times.

+

reverse(string A)

+

STRING

+

Return the reversed string.

+

rpad(string str, int len, string pad)

+

STRING

+

Return a string of a specified length. If the length of the given string (str) is shorter than the specified length (len), the given string is right-padded with pad to the specified length.

+

rtrim(string A)

+

STRING

+

Trim spaces from the right hand side of a string.

+

space(int n)

+

STRING

+

Return a string consisting of n spaces.

+

substr(string A, int start) substring(string A, int start)

+

STRING

+

Return the substring starting from the specified start position in string A till the end of the string.

+

substr(string A, int start, int len) substring(string A, int start, int len)

+

STRING

+

Return the substring of a specified length starting from the specified start position in string A.

+

substring_index(string A, string delim, int count)

+

STRING

+

Return the substring from string A before count occurrences of the delimiter delim.

+

translate(string|char|varchar input, string|char|varchar from, string|char|varchar to)

+

STRING

+

Translate the input string by replacing the characters or string specified by from with the characters or string specified by to. For example, replace bcd in abcde with BCD using translate("abcde", "bcd", "BCD").

+

trim(string A)

+

STRING

+

Trim spaces from both ends of a string.

+

upper(string A) ucase(string A)

+

STRING

+

Convert all characters of a string to upper case.

+

initcap(string A)

+

STRING

+

Convert the first letter of each word of a string to upper case and all other letters to lower case.

+

levenshtein(string A, string B)

+

INT

+

Return the Levenshtein distance between two strings. Example: levenshtein('kitten', 'sitting') = 3.

+

soundex(string A)

+

STRING

+

Return the soundex string from str. Example: soundex('Miller') = M460.

+
+
+
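A few literal-value sketches of the string functions above:

SELECT concat_ws('-', 'a', 'b', 'c');               -- a-b-c
SELECT lpad('7', 3, '0');                           -- 007
SELECT substring_index('www.example.com', '.', 2);  -- www.example
SELECT initcap('spark sql');                        -- Spark Sql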
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0068.html b/docs/dli/sqlreference/dli_08_0068.html new file mode 100644 index 00000000..55c28034 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0068.html @@ -0,0 +1,123 @@ + + +

Aggregate Functions

+

An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved by an SQL statement. Table 1 lists aggregate functions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Aggregate functions

Function

+

Return Type

+

Description

+

count(*), count(expr), count(DISTINCT expr[, expr...])

+

BIGINT

+

Return the total number of retrieved records.

+

sum(col), sum(DISTINCT col)

+

DOUBLE

+

Return the sum of the values in a column.

+

avg(col), avg(DISTINCT col)

+

DOUBLE

+

Return the average of the values in a column.

+

min(col)

+

DOUBLE

+

Return the minimum value of a column.

+

max(col)

+

DOUBLE

+

Return the maximum value of a column.

+

variance(col), var_pop(col)

+

DOUBLE

+

Return the variance of a numeric column.

+

var_samp(col)

+

DOUBLE

+

Return the sample variance of a numeric column.

+

stddev_pop(col)

+

DOUBLE

+

Return the population standard deviation of a numeric column.

+

stddev_samp(col)

+

DOUBLE

+

Return the sample standard deviation of a numeric column.

+

covar_pop(col1, col2)

+

DOUBLE

+

Return the covariance of a pair of numeric columns.

+

covar_samp(col1, col2)

+

DOUBLE

+

Return the sample covariance of a pair of numeric columns.

+

corr(col1, col2)

+

DOUBLE

+

Return the coefficient of correlation of a pair of numeric columns.

+

percentile(BIGINT col, p)

+

DOUBLE

+

Return the exact pth percentile of a column. p must be between 0 and 1. Otherwise, this function returns null. This function does not work with floating point types.

+

percentile_approx(DOUBLE col, p [, B])

+

DOUBLE

+

Return an approximate pth percentile of a numeric column (including floating point types) in a group. p must be between 0 and 1. B controls approximation accuracy. Higher values of B mean better approximations, and the default value is 10,000. When the number of distinct values in the numeric column is smaller than B, an exact percentile value is returned.

+
+
+

Functions such as var_pop, stddev_pop, var_samp, stddev_samp, covar_pop, covar_samp, corr, and percentile_approx do not support non-numeric data types, such as TIMESTAMP.

+
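A minimal sketch of several aggregates over a hypothetical employee table:

-- Row count, average, and extreme salaries per department (hypothetical table and columns).
SELECT dept, count(*), avg(salary), min(salary), max(salary)
FROM employee
GROUP BY dept;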
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0069.html b/docs/dli/sqlreference/dli_08_0069.html new file mode 100644 index 00000000..f90a555e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0069.html @@ -0,0 +1,79 @@ + + +

Window Functions

+

A window function performs a calculation operation on a set of values related to the current value. A window function can be an aggregate function used in the GROUP BY clause, such as sum, max, min, count, and avg functions. The window functions also include the functions listed in Table 1. A window contains multiple rows defined by an OVER clause. A window function works on one window.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Functions

Function

+

Return Type

+

Description

+

first_value(col)

+

Data type of the argument.

+

Return the value of the first data record from a column.

+

last_value(col)

+

Data type of the argument.

+

Return the value of the last data record from a column.

+

lag (col,n,DEFAULT)

+

Data type of the argument.

+

Return the value from the nth row preceding the current row. The first argument specifies the column name. The second argument specifies the nth row preceding the current row. The configuration of the second argument is optional, and the default argument value is 1 if the argument is not specified. The third argument is set to a default value. If the nth row preceding the current row is null, the default value is used. The default value of the third argument is NULL if the argument is not specified.

+

lead (col,n,DEFAULT)

+

Data type of the argument.

+

Return the value from the nth row following the current row. The first argument specifies the column name. The second argument specifies the nth row following the current row. The configuration of the second argument is optional, and the default argument value is 1 if the argument is not specified. The third argument is set to a default value. If the nth row following the current row is null, the default value is used. The default value of the third argument is NULL if the argument is not specified.

+

row_number() over (order by col_1[,col_2 ...])

+

INT

+

Assign a unique number to each row.

+

rank()

+

INT

+

Return the rank of a value in a set of values. When multiple values share the same rank, the next rank in the sequence is not consecutive.

+

cume_dist()

+

DOUBLE

+

Calculate the relative position of a value in a row.

+

percent_rank()

+

DOUBLE

+

Return the rank of a value from the column specified by the ORDER BY clause of the window. The return value is a decimal between 0 and 1, calculated as (rank - 1)/(number of rows in the window - 1).

+
+
+
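A minimal sketch of a window function with an OVER clause (the employee table and its columns are hypothetical):

-- Rank employees by salary within each department; equal salaries share a rank.
SELECT name, dept, salary,
       rank() OVER (PARTITION BY dept ORDER BY salary DESC) AS salary_rank
FROM employee;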
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0070.html b/docs/dli/sqlreference/dli_08_0070.html new file mode 100644 index 00000000..a5e5518e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0070.html @@ -0,0 +1,21 @@ + + +

Databases

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0071.html b/docs/dli/sqlreference/dli_08_0071.html new file mode 100644 index 00000000..827c5786 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0071.html @@ -0,0 +1,64 @@ + + +

Creating a Database

+

Function

This statement is used to create a database.

+
+

Syntax

CREATE [DATABASE | SCHEMA] [IF NOT EXISTS] db_name
+  [COMMENT db_comment]
+  [WITH DBPROPERTIES (property_name=property_value, ...)];
+
+ +
+
+

Keyword

  • IF NOT EXISTS: Prevents system errors if the database to be created exists.
  • COMMENT: Describes a database.
+
  • DBPROPERTIES: Specifies database attributes. The attribute name and attribute value appear in pairs.
+
+

Parameters

+
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

db_comment

+

Database description

+

property_name

+

Database property name

+

property_value

+

Database property value

+
+
+
+

Precautions

  • DATABASE and SCHEMA can be used interchangeably. You are advised to use DATABASE.
  • The default database is a built-in database. You cannot create a database named default.
+
+

Example

  1. Create a queue. A queue is the basis for using DLI. Before executing SQL statements, you need to create a queue.
  2. On the DLI management console, click SQL Editor in the navigation pane on the left. The SQL Editor page is displayed.
  3. In the editing window on the right of the SQL Editor page, enter the following SQL statement for creating a database and click Execute. Read and agree to the privacy agreement, and click OK.

    If database testdb does not exist, run the following statement to create database testdb:

    +
    CREATE DATABASE IF NOT EXISTS testdb;
    +
    + +
    +
+
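As a variant (testdb2 and its property are hypothetical), COMMENT and DBPROPERTIES can be supplied at creation time:

CREATE DATABASE IF NOT EXISTS testdb2
  COMMENT 'demo database'
  WITH DBPROPERTIES ('owner' = 'dli_user');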
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0072.html b/docs/dli/sqlreference/dli_08_0072.html new file mode 100644 index 00000000..8d7c3d1d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0072.html @@ -0,0 +1,43 @@ + + +

Deleting a Database

+

Function

This statement is used to delete a database.

+
+

Syntax

DROP [DATABASE | SCHEMA] [IF EXISTS] db_name [RESTRICT|CASCADE];
+
+ +
+
+

Keyword

IF EXISTS: Prevents system errors if the database to be deleted does not exist.

+
+

Precautions

  • DATABASE and SCHEMA can be used interchangeably. You are advised to use DATABASE.
  • RESTRICT: If the database is not empty (tables exist), an error is reported and the DROP operation fails. RESTRICT is the default logic.
  • CASCADE: Even if the database is not empty (tables exist), the DROP will delete all the tables in the database. Therefore, exercise caution when using this function.
+
+

Parameters

+
+ + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+
+
+
+

Example

  1. Create a database, for example, testdb, by referring to Example.
  2. Run the following statement to delete database testdb if it exists:
    DROP DATABASE IF EXISTS testdb;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0073.html b/docs/dli/sqlreference/dli_08_0073.html new file mode 100644 index 00000000..0f6c10d6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0073.html @@ -0,0 +1,43 @@ + + +

Viewing a Specified Database

+

Function

This syntax is used to view the information about a specified database, including the database name and database description.

+
+

Syntax

DESCRIBE DATABASE [EXTENDED] db_name;
+
+ +
+
+

Keyword

EXTENDED: Displays the database properties.

+
+

Parameters

+
+ + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+
+
+
+

Precautions

If the database to be viewed does not exist, the system reports an error.

+
+

Example

  1. Create a database, for example, testdb, by referring to Example.
  2. Run the following statement to query information about the testdb database:
    DESCRIBE DATABASE testdb;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0074.html b/docs/dli/sqlreference/dli_08_0074.html new file mode 100644 index 00000000..23567df1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0074.html @@ -0,0 +1,48 @@ + + +

Viewing All Databases

+

Function

This syntax is used to query all current databases.

+
+

Syntax

SHOW [DATABASES | SCHEMAS] [LIKE regex_expression];
+
+ +
+
+

Keyword

None

+
+

Parameters

+
+ + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

regex_expression

+

Regular expression used to match database names.

+
+
+
+

Precautions

Keyword DATABASES is equivalent to SCHEMAS. You can use either of them in this statement.

+
+

Example

View all the current databases.

+
1
SHOW DATABASES;
+
+ +
+

View all databases whose names start with test.

+
1
SHOW DATABASES LIKE "test.*";
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0075.html b/docs/dli/sqlreference/dli_08_0075.html new file mode 100644 index 00000000..3ce7aef8 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0075.html @@ -0,0 +1,121 @@ + + +

SQL Syntax Constraints and Definitions

+

Syntax Constraints

  • Currently, Flink SQL only supports the following operations: SELECT, FROM, WHERE, UNION, aggregation, window, JOIN between stream and table data, and JOIN between streams.
  • Data cannot be inserted into the source stream.
  • The sink stream cannot be used to perform query operations.
+
+

Data Types Supported by Syntax

  • Basic data types: VARCHAR, STRING, BOOLEAN, TINYINT, SMALLINT, INTEGER/INT, BIGINT, REAL/FLOAT, DOUBLE, DECIMAL, DATE, TIME, and TIMESTAMP
  • Array: Square brackets ([]) are used to quote fields. The following is an example:
    1
    insert into temp select CARDINALITY(ARRAY[1,2,3]) FROM OrderA;
    +
    + +
    +
+
+

Syntax Definition

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
INSERT INTO stream_name query;
+query:
+  values
+  | {
+      select
+      | selectWithoutFrom
+      | query UNION [ ALL ] query
+    }
+
+orderItem:
+  expression [ ASC | DESC ]
+
+select:
+  SELECT 
+  { * | projectItem [, projectItem ]* }
+  FROM tableExpression [ JOIN tableExpression ]
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+selectWithoutFrom:
+  SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+
+projectItem:
+  expression [ [ AS ] columnAlias ]
+  | tableAlias . *
+
+tableExpression:
+  tableReference 
+
+tableReference:
+  tablePrimary
+  [ [ AS ] alias [ '(' columnAlias [, columnAlias ]* ')' ] ]
+
+tablePrimary:
+  [ TABLE ] [ [ catalogName . ] schemaName . ] tableName
+  | LATERAL TABLE '(' functionName '(' expression [, expression ]* ')' ')'
+  | UNNEST '(' expression ')'
+
+values:
+  VALUES expression [, expression ]*
+
+groupItem:
+  expression
+  | '(' ')'
+  | '(' expression [, expression ]* ')'
+  | CUBE '(' expression [, expression ]* ')'
+  | ROLLUP '(' expression [, expression ]* ')'
+  | GROUPING SETS '(' groupItem [, groupItem ]* ')'
+
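As a minimal illustration of this grammar (assuming a sink stream temp and a source stream OrderA with a numeric amount field):

INSERT INTO temp
  SELECT amount * 2 AS doubled_amount
  FROM OrderA
  WHERE amount > 2;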
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0076.html b/docs/dli/sqlreference/dli_08_0076.html new file mode 100644 index 00000000..bfb5a9f8 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0076.html @@ -0,0 +1,258 @@ + + +

Creating an OBS Table Using the DataSource Syntax

+

Function

Create an OBS table using the DataSource syntax.

+

The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of supported partitions. For details, see syntax and precautions.

+
+

Usage

  • The size of the table will not be calculated during table creation.
  • When data is added, the table size will be changed to 0.
  • You can view the table size on OBS.
+
+

Precautions

  • The table and column names are case-insensitive.
  • Descriptions of table names and column names support only string constants.
  • During table creation, you need to specify the column name and corresponding data type. The data type is primitive type.
  • If a folder and a file have the same name in the OBS directory, the file is preferred as the path when creating an OBS table.
  • During table creation, if the specified path is an OBS directory and it contains subdirectories (or nested subdirectories), all file types and content in the subdirectories are considered table content.

    Ensure that all file types in the specified directory and its subdirectories are consistent with the storage format specified in the table creation statement. All file content must be consistent with the fields in the table. Otherwise, errors will be reported in the query.

    +

You can set multiLevelDirEnable to true in the OPTIONS clause to query the content in subdirectories; a sketch follows this list. The default value is false. (Note that this configuration item is a table attribute; exercise caution when setting it.) Hive tables do not support this configuration item.

    +
  • The OBS storage path must be a directory on the OBS. The directory must be created in advance and be empty.
  • When a partitioned table is created, the column specified in PARTITIONED BY must be a column in the table, and the partition type must be specified. The partition column supports only the string, boolean, tinyint, smallint, short, int, bigint, long, decimal, float, double, date, and timestamp types.
  • When a partitioned table is created, the partition fields must be the last field or fields in the table definition, and their order must match the order specified in PARTITIONED BY. Otherwise, an error occurs.
  • A maximum of 7,000 partitions can be created in a single table.
  • The CREATE TABLE AS statement cannot specify table attributes or create partitioned tables.
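A sketch of the multiLevelDirEnable table attribute mentioned above (bucket path hypothetical):

CREATE TABLE nestedDirTable (name string, id int)
  USING parquet
  OPTIONS (path 'obs://bucketName/parentPath', multiLevelDirEnable='true');

Queries on nestedDirTable then also read Parquet files located in subdirectories of obs://bucketName/parentPath.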
+
+

Syntax

1
+2
+3
+4
+5
+6
+7
CREATE TABLE [IF NOT EXISTS] [db_name.]table_name 
+  [(col_name1 col_type1 [COMMENT col_comment1], ...)]
+  USING file_format 
+  [OPTIONS (path 'obs_path', key1=val1, key2=val2, ...)] 
+  [PARTITIONED BY (col_name1, col_name2, ...)]
+  [COMMENT table_comment]
+  [AS select_statement];
+
+ +
+
+

Keyword

  • IF NOT EXISTS: Prevents system errors when the created table exists.
  • USING: Specifies the storage format.
  • OPTIONS: Specifies the attribute name and attribute value when a table is created.
  • COMMENT: Field or table description.
  • PARTITIONED BY: Partition field.
  • AS: Run the CREATE TABLE AS statement to create a table.
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name

+

The value can contain letters, numbers, and underscores (_), but cannot contain only numbers or start with a number or underscore (_).

+

table_name

+

Name of the table to be created in the database

+

The value can contain letters, numbers, and underscores (_), but cannot contain only numbers or start with a number or underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$.

+

Special characters must be enclosed in single quotation marks ('').

+

col_name

+

Column names with data types separated by commas (,)

+

The column name contains letters, digits, and underscores (_). It cannot contain only digits and must contain at least one letter.

+

col_type

+

Data type of a column field

+

col_comment

+

Column field description

+

file_format

+

Input format of the table. The value can be orc, parquet, json, csv, or avro.

+

path

+

OBS storage path where data files are stored

+

Format: obs://bucketName/tblPath

+

bucketName: bucket name

+

tblPath: directory name. You do not need to specify the file name following the directory.

+

For details about attribute names and values during table creation, see Table 2.

+

For details about the table attribute names and values when file_format is set to csv, see Table 2 and Table 3.

+

table_comment

+

Description of the table

+

select_statement

+

The CREATE TABLE AS statement is used to insert the SELECT query result of the source table or a data record to a new table in OBS bucket.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 OPTIONS parameter description

Parameter

+

Description

+

Default Value

+

path

+

Specified table storage location. Currently, only OBS is supported.

+

-

+

multiLevelDirEnable

+

Whether to iteratively query data in subdirectories when subdirectories are nested. When this parameter is set to true, all files in the table path, including files in subdirectories, are iteratively read when a table is queried.

+

false

+

dataDelegated

+

Whether to clear data in the path when deleting a table or partition

+

false

+

compression

+

Specified compression format. Generally, you need to set this parameter to zstd for parquet files.

+

-

+
+
+
When the file format is set to CSV, you can set the following OPTIONS parameters:
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 OPTIONS parameter description of the CSV data format

Parameter

+

Description

+

Default Value

+

delimiter

+

Data separator

+

Comma (,)

+

quote

+

Quotation character

+

Double quotation marks (" ")

+

escape

+

Escape character

+

Backslash (\)

+

multiLine

+

Whether the column data contains carriage return or newline characters, that is, whether a record can span multiple lines. The value true indicates yes and the value false indicates no.

+

false

+

dateFormat

+

Date format of the date field in a CSV file

+

yyyy-MM-dd

+

timestampFormat

+

Date format of the timestamp field in a CSV file

+

yyyy-MM-dd HH:mm:ss

+

mode

+

Mode for parsing CSV files. The options are as follows:

+
  • PERMISSIVE: Permissive mode. If an incorrect field is encountered, set the line to Null.
  • DROPMALFORMED: When an incorrect field is encountered, the entire line is discarded.
  • FAILFAST: Error mode. If an error occurs, it is automatically reported.
+

PERMISSIVE

+

header

+

Whether CSV contains header information. The value true indicates that the table header information is contained, and the value false indicates that the information is not included.

+

false

+

nullValue

+

Character that represents the null value. For example, nullValue= "\\N" indicates that \N represents the null value.

+

-

+

comment

+

Character that indicates the beginning of the comment. For example, comment= '#' indicates that the line starting with # is a comment.

+

-

+

compression

+

Data compression format. Currently, gzip, bzip2, and deflate are supported. If you do not want to compress data, enter none.

+

none

+

encoding

+

Data encoding format. Available values are utf-8, gb2312, and gbk. Value utf-8 will be used if this parameter is left empty.

+

utf-8

+
+
+
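As an illustration of combining several of the CSV options from Table 3 (bucket path and option values hypothetical):

CREATE TABLE csvTable (name string, id int, birthday date)
  USING csv
  OPTIONS (path 'obs://bucketName/csvPath', delimiter=';', header='true', dateFormat='yyyy/MM/dd');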
+

Example

  • Create a parquetTable OBS table.
    1
    CREATE TABLE parquetTable (name string, id int) USING parquet OPTIONS (path "obs://bucketName/filePath");
    +
    + +
    +
  • Create a parquetZstdTable OBS table and set the compression format to zstd.
    CREATE TABLE parquetZstdTable (name string, id string) USING parquet OPTIONS (path "obs://bucketName/filePath",compression='zstd');
    +
  • Create a student table that has two fields, name and score, and partition the table by classNo.
    1
    CREATE TABLE IF NOT EXISTS student(name STRING, score DOUBLE, classNo INT) USING csv OPTIONS (PATH 'obs://bucketName/filePath') PARTITIONED BY (classNo);
    +
    + +
    +

    The classNo field is a partition field and must be placed at the end of the table field, that is, student(name STRING, score DOUBLE, classNo INT).

    +
    +
  • To create table t1 and insert data of table t2 into table t1, run the following statement:
    1
    CREATE TABLE t1 USING parquet OPTIONS(path 'obs://bucketName/tblPath') AS select * from t2;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0077.html b/docs/dli/sqlreference/dli_08_0077.html new file mode 100644 index 00000000..a9cf61da --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0077.html @@ -0,0 +1,175 @@ + + +

Creating an OBS Table Using the Hive Syntax

+

Function

This statement is used to create an OBS table using the Hive syntax. The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of supported partitions. For details, see syntax and precautions.

+
+

Usage

  • The size of the table will be calculated during creation.
  • When data is added, the table size will not be changed.
  • You can view the table size on OBS.
+
+

Syntax

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name 
+  [(col_name1 col_type1 [COMMENT col_comment1], ...)]
+  [COMMENT table_comment] 
+  [PARTITIONED BY (col_name2 col_type2, [COMMENT col_comment2], ...)] 
+  [ROW FORMAT row_format]
+  [STORED AS file_format] 
+  LOCATION 'obs_path'
+  [TBLPROPERTIES (key = value)]
+  [AS select_statement];
+
+row_format:
+  : SERDE serde_cls [WITH SERDEPROPERTIES (key1=val1, key2=val2, ...)]
+  | DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
+      [COLLECTION ITEMS TERMINATED BY char]
+      [MAP KEYS TERMINATED BY char]
+      [LINES TERMINATED BY char]
+      [NULL DEFINED AS char]
+
+ +
+
+

Keyword

  • EXTERNAL: Creates an OBS table.
  • IF NOT EXISTS: Prevents system errors when the created table exists.
  • COMMENT: Field or table description.
  • PARTITIONED BY: Partition field.
  • ROW FORMAT: Row data format.
  • STORED AS: Specifies the format of the file to be stored. Currently, only the TEXTFILE, AVRO, ORC, SEQUENCEFILE, RCFILE, and PARQUET formats are supported.
  • LOCATION: Specifies the path of OBS. This keyword is mandatory when you create OBS tables.
  • TBLPROPERTIES: Allows you to add the key/value properties to a table.

    For example, you can use this statement to enable the multiversion function to back up and restore table data. After the multiversion function is enabled, the system automatically backs up table data when you delete or modify the data using insert overwrite or truncate, and retains the data for a certain period. You can quickly restore data within the retention period. For details about SQL syntax related to the multiversion function, see Enabling or Disabling Multiversion Backup and Backing Up and Restoring Data of Multiple Versions.

    +

    When creating an OBS table, you can use TBLPROPERTIES ("dli.multi.version.enable"="true") to enable multiversion. For details, see the following example.

    + +
    + + + + + + + + + + + + + + + + +
    Table 1 TBLPROPERTIES parameters

    Key

    +

    Value

    +

    dli.multi.version.enable

    +
    • true: Enable the multiversion backup function.
    • false: Disable the multiversion backup function.
    +

    comment

    +

    Description of the table

    +

    orc.compress

    +

    An attribute of the ORC table, which specifies the compression mode of the ORC storage. Available values are as follows:

    +
    • ZLIB
    • SNAPPY
    • NONE
    +

    auto.purge

    +

    If this parameter is set to true, the deleted or overwritten data is removed and will not be dumped to the recycle bin.

    +
    +
    +
  • AS: You can run the CREATE TABLE AS statement to create a table.
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). The value cannot contain only digits and cannot start with a digit or underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). The value cannot contain only digits and cannot start with a digit or underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+

col_name

+

Field name

+

col_type

+

Field type

+

col_comment

+

Field description

+

row_format

+

Line data format

+

file_format

+

OBS table storage format. TEXTFILE, AVRO, ORC, SEQUENCEFILE, RCFILE, and PARQUET are supported.

+

table_comment

+

Table description

+

obs_path

+

OBS path

+

key = value

+

Set table properties and values.

+

For example, if you want to enable multiversion, you can set "dli.multi.version.enable"="true".

+

select_statement

+

The CREATE TABLE AS statement is used to insert the SELECT query result of the source table or a data record to a new table in OBS bucket.

+
+
+
+

Precautions

  • The table and column names are case-insensitive.
  • Descriptions of table names and column names support only string constants.
  • During table creation, you need to specify the column name and corresponding data type. The data type is primitive type.
  • If a folder and a file have the same name in the OBS directory, the file is preferred as the path when creating an OBS table.
  • When you create a partitioned table, ensure that the specified column in PARTITIONED BY is not a column in the table and the data type is specified. The partition column supports only the open-source Hive table types including string, boolean, tinyint, smallint, short, int, bigint, long, decimal, float, double, date, and timestamp.
  • Multiple partition fields can be specified. The partition fields must be specified after the PARTITIONED BY keyword rather than in the table's column list. Otherwise, an error occurs.
  • A maximum of 100,000 partitions can be created in a single table.
  • The CREATE TABLE AS statement cannot specify table attributes or create partitioned tables.
+
+

Example

  • To create a Parquet table named student, in which the id, name, and score fields are contained and the data types of the respective fields are INT, STRING, and FLOAT, run the following statement:
    1
    CREATE TABLE student (id INT, name STRING, score FLOAT) STORED AS PARQUET LOCATION 'obs://bucketName/filePath';
    +
    + +
    +
  • To create a table named student, for which classNo is the partition field and two fields name and score are specified, run the following statement:
    1
    CREATE TABLE IF NOT EXISTS student(name STRING, score DOUBLE) PARTITIONED BY (classNo INT) STORED AS PARQUET LOCATION 'obs://bucketName/filePath';
    +
    + +
    +

    classNo is a partition field and must be specified after the PARTITIONED BY keyword, that is, PARTITIONED BY (classNo INT). It cannot be specified after the table name as a table field.

    +
    +
  • To create table t1 and insert data of table t2 into table t1 by using the Hive syntax, run the following statement:
    1
    CREATE TABLE t1 STORED AS parquet LOCATION 'obs://bucketName/filePath' as select * from t2;
    +
    + +
    +
  • Create the student table and enable multiversion by using the Hive syntax.
    1
    CREATE TABLE student (id INT, name STRING, score FLOAT) STORED AS PARQUET LOCATION 'obs://bucketName/filePath' TBLPROPERTIES ("dli.multi.version.enable"="true");
    +
    + +
    +
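The ROW FORMAT clause from the syntax above can be combined with a TEXTFILE table; a sketch (path hypothetical):

CREATE TABLE student_txt (id INT, name STRING)
  ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
  STORED AS TEXTFILE
  LOCATION 'obs://bucketName/filePath';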
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0079.html b/docs/dli/sqlreference/dli_08_0079.html new file mode 100644 index 00000000..7909e5d0 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0079.html @@ -0,0 +1,57 @@ + + +

Updating Partitioned Table Data (Only OBS Tables Supported)

+

Function

This statement is used to update the partition information about a table in the Metastore.

+
+

Syntax

1
MSCK REPAIR TABLE table_name;
+
+ +
+

Or

+
ALTER TABLE table_name RECOVER PARTITIONS;
+
+

Keyword

  • PARTITIONS: partition information
  • SERDEPROPERTIES: Serde attribute
+
+

Parameters

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+

partition_specs

+

Partition fields

+

obs_path

+

OBS path

+
+
+
+

Precautions

  • This statement is applied only to partitioned tables. After you manually add partition directories to OBS, run this statement to update the newly added partition information in the metastore. The SHOW PARTITIONS table_name statement can be used to query the newly-added partitions.
  • The partition directory name must be in the specified format, that is, tablepath/partition_column_name=partition_column_value.
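For instance, after manually creating a partition directory such as obs://bucketName/ptablePath/dt=2023-10-01 (path hypothetical) for table ptable, the flow described above would be:

MSCK REPAIR TABLE ptable;
SHOW PARTITIONS ptable;  -- the newly added dt=2023-10-01 partition is now listed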
+
+

Example

Run the following statements to update the partition information about table ptable in the Metastore:

+
1
MSCK REPAIR TABLE ptable;
+
+ +
+

Or

+
ALTER TABLE ptable RECOVER PARTITIONS;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0080.html b/docs/dli/sqlreference/dli_08_0080.html new file mode 100644 index 00000000..86459dd1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0080.html @@ -0,0 +1,27 @@ + + +

Syntax for Partitioning a Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0081.html b/docs/dli/sqlreference/dli_08_0081.html new file mode 100644 index 00000000..a01e080b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0081.html @@ -0,0 +1,95 @@ + + +

Adding Partition Data (Only OBS Tables Supported)

+

Function

After an OBS partitioned table is created, no partition information is generated for the table. Partition information is generated only after you:

+
  • Insert data to the OBS partitioned table. After the data is inserted successfully, the partition metadata can be queried, for example, by partition columns.
  • Copy the partition directory and data into the OBS path of the partitioned table, and run the partition adding statements described in this section to generate partition metadata. Then you can perform operations such as table query by partition columns.
+

The following describes how to use the ALTER TABLE statement to add a partition.

+
+

Syntax

1
+2
+3
+4
+5
ALTER TABLE table_name ADD [IF NOT EXISTS]
+  PARTITION partition_specs1
+  [LOCATION 'obs_path1']
+  PARTITION partition_specs2
+  [LOCATION 'obs_path2'];
+
+ +
+
+

Keyword

  • IF NOT EXISTS: prevents errors when partitions are repeatedly added.
  • PARTITION: specifies a partition.
  • LOCATION: specifies the partition path.
+
+

Parameters

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+

partition_specs

+

Partition fields

+

obs_path

+

OBS path

+
+
+
+

Precautions

  • When you add a partition to a table, the table and the partition column (specified by PARTITIONED BY during table creation) must exist, and the partition to be added cannot be added repeatedly. Otherwise, an error is reported. You can use IF NOT EXISTS to avoid errors if the partition does not exist.
  • If tables are partitioned by multiple fields, you need to specify all partitioning fields in any sequence when adding partitions.
  • By default, parameters in partition_specs contain parentheses (). For example: PARTITION (dt='2009-09-09',city='xxx').
  • If you need to specify an OBS path when adding a partition, the OBS path must exist. Otherwise, an error occurs.
  • To add multiple partitions, you need to use spaces to separate each set of LOCATION 'obs_path' in the PARTITION partition_specs. The pattern is as follows (a concrete sketch follows this list):

    PARTITION partition_specs LOCATION 'obs_path' PARTITION partition_specs LOCATION 'obs_path'

    +
  • If the path specified in the new partition contains subdirectories (or nested subdirectories), all file types and content in the subdirectories are considered partition records. Ensure that all file types and file content in the partition directory are the same as those in the table. Otherwise, an error is reported.
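A concrete sketch of adding two partitions in one statement, reusing the testobstable table from the examples below (paths and values hypothetical):

ALTER TABLE testobstable ADD IF NOT EXISTS
  PARTITION (external_data='22') LOCATION 'obs://bucketName/datapath/external_data=22'
  PARTITION (external_data='23') LOCATION 'obs://bucketName/datapath/external_data=23';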
+
+

Example

  • The following example shows you how to add partition data when the OBS table is partitioned by a single column.
    1. Use the DataSource syntax to create an OBS table, and partition the table by column external_data. The partition data is stored in obs://bucketName/datapath.
      create table testobstable(id varchar(128), external_data varchar(16)) using JSON OPTIONS (path 'obs://bucketName/datapath') PARTITIONED by (external_data);
      +
    2. Copy the partition directory to obs://bucketName/datapath. In this example, copy all files in the partition column external_data=22 to obs://bucketName/datapath.
    3. Run the following command to add partition data:
      ALTER TABLE testobstable ADD
      +  PARTITION (external_data='22')
      +  LOCATION 'obs://bucketName/datapath/external_data=22';
      +
    4. After the partition data is added successfully, you can perform operations such as data query based on the partition column.
      select * from testobstable where external_data='22';
      +
    +
  • The following example shows you how to add partition data when the OBS table is partitioned by multiple columns.
    1. Use the DataSource syntax to create an OBS table, and partition the table by columns external_data and dt. The partition data is stored in obs://bucketName/datapath.
      1
      +2
      +3
      +4
      +5
      create table testobstable(
      +  id varchar(128),
      +  external_data varchar(16),
      +  dt varchar(16)
      +) using JSON OPTIONS (path 'obs://bucketName/datapath') PARTITIONED by (external_data, dt);
      +
      + +
      +
    2. Copy the partition directories to obs://bucketName/datapath. In this example, copy files in external_data=22 and its subdirectory dt=2021-07-27 to obs://bucketName/datapath.
    3. Run the following command to add partition data:
      1
      +2
      +3
      +4
      ALTER TABLE
      +  testobstable
      +ADD
      +  PARTITION (external_data = '22', dt = '2021-07-27') LOCATION 'obs://bucketName/datapath/external_data=22/dt=2021-07-27';
      +
      + +
      +
    4. After the partition data is added successfully, you can perform operations such as data query based on the partition columns.
      1
      +2
      select * from testobstable where external_data = '22';
      +select * from testobstable where external_data = '22' and dt='2021-07-27';
      +
      + +
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0082.html b/docs/dli/sqlreference/dli_08_0082.html new file mode 100644 index 00000000..e6070366 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0082.html @@ -0,0 +1,56 @@ + + +

Renaming a Partition (Only OBS Tables Supported)

+

Function

This statement is used to rename partitions.

+
+

Syntax

1
+2
+3
ALTER TABLE table_name
+  PARTITION partition_specs
+  RENAME TO PARTITION partition_specs;
+
+ +
+
+

Keyword

  • PARTITION: a specified partition
  • RENAME TO: specifies the new name of the partition
+
+

Parameters

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+

partition_specs

+

Partition fields

+
+
+
+

Precautions

  • This statement is used for OBS table operations.
  • The table and partition to be renamed must exist. Otherwise, an error occurs. The name of the new partition must be unique. Otherwise, an error occurs.
  • If a table is partitioned using multiple fields, you are required to specify all the fields of a partition (in any order) when renaming the partition.
  • By default, the partition_specs parameter contains (). For example: PARTITION (dt='2009-09-09',city='xxx')
+
+

Example

To modify the name of the city='xxx',dt='2008-08-08' partition in the student table to city='xxx',dt='2009-09-09', run the following statement:

+
1
+2
+3
ALTER TABLE student
+  PARTITION (city='xxx',dt='2008-08-08')
+  RENAME TO PARTITION (city='xxx',dt='2009-09-09');
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0083.html b/docs/dli/sqlreference/dli_08_0083.html new file mode 100644 index 00000000..291c27c4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0083.html @@ -0,0 +1,61 @@ + + +

Deleting a Partition

+

Function

Deletes one or more partitions from a partitioned table.

+
+

Precautions

  • The table in which partitions are to be deleted must exist. Otherwise, an error is reported.
  • The to-be-deleted partition must exist. Otherwise, an error is reported. To avoid this error, add IF EXISTS in this statement.
+
+

Syntax

1
+2
+3
ALTER TABLE [db_name.]table_name
+  DROP [IF EXISTS]
+  PARTITION partition_spec1[,PARTITION partition_spec2,...];
+
+ +
+
+

Keyword

  • DROP: deletes a partition.
  • IF EXISTS: prevents an error from being reported if the partition to be deleted does not exist.
  • PARTITION: specifies the partition to be deleted
+
+

Parameters

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). It cannot contain only digits and cannot start with an underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). It cannot contain only digits and cannot start with an underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+

partition_specs

+

Partition information, in the format of "key=value", where key indicates the partition field and value indicates the partition value. In a table partitioned using multiple fields, if you specify all the fields of a partition name, only the partition is deleted; if you specify only some fields of a partition name, all matching partitions will be deleted. By default, the partition_specs parameter contains (). For example: PARTITION (dt='2009-09-09',city='xxx')

+
+
+
+

Example

To delete the dt = '2008-08-08', city = 'xxx' partition in the student table, run the following statement:

+
1
+2
+3
ALTER TABLE student
+  DROP
+  PARTITION (dt = '2008-08-08', city = 'xxx');
+
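The partial-specification behavior described in Table 1 can be sketched as follows: specifying only the dt field removes every partition matching that dt, whatever its city value.

ALTER TABLE student
  DROP IF EXISTS
  PARTITION (dt = '2008-08-08');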
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0084.html b/docs/dli/sqlreference/dli_08_0084.html new file mode 100644 index 00000000..1193cbfc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0084.html @@ -0,0 +1,61 @@ + + +

Altering the Partition Location of a Table (Only OBS Tables Supported)

+

Function

This statement is used to modify the positions of table partitions.

+
+

Syntax

1
+2
+3
ALTER TABLE table_name
+  PARTITION partition_specs
+  SET LOCATION obs_path;
+
+ +
+
+

Keyword

  • PARTITION: a specified partition
  • LOCATION: path of the partition
+
+

Parameters

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+

partition_specs

+

Partition fields

+

obs_path

+

OBS path

+
+
+
+

Precautions

  • For a table partition whose position is to be modified, the table and partition must exist. Otherwise, an error is reported.
  • By default, the partition_specs parameter contains (). For example: PARTITION (dt='2009-09-09',city='xxx')
  • The specified OBS path must be an absolute path. Otherwise, an error is reported.
  • If the path specified in the new partition contains subdirectories (or nested subdirectories), all file types and content in the subdirectories are considered partition records. Ensure that all file types and file content in the partition directory are the same as those in the table. Otherwise, an error is reported.
+
+

Example

To set the OBS path of partition dt='2008-08-08',city='xxx' in table student to obs://bucketName/fileName/student/dt=2008-08-08/city=xxx, run the following statement:

+
1
+2
+3
ALTER TABLE student
+  PARTITION(dt='2008-08-08',city='xxx')
+  SET LOCATION 'obs://bucketName/fileName/student/dt=2008-08-08/city=xxx';
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0086.html b/docs/dli/sqlreference/dli_08_0086.html new file mode 100644 index 00000000..2d82dd0f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0086.html @@ -0,0 +1,27 @@ + + +

Built-In Functions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0087.html b/docs/dli/sqlreference/dli_08_0087.html new file mode 100644 index 00000000..63347607 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0087.html @@ -0,0 +1,48 @@ + + +

Deleting a Table

+

Function

This statement is used to delete tables.

+
+

Syntax

1
DROP TABLE [IF EXISTS] [db_name.]table_name;
+
+ +
+
+

Keyword

  • If the table is stored in OBS, only the metadata is deleted. The data stored on OBS is not deleted.
  • If the table is stored in DLI, the data and the corresponding metadata are all deleted.
+
+

Parameters

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

table_name

+

Table name

+
+
+
+

Precautions

The to-be-deleted table must exist in the current database. Otherwise, an error is reported. To avoid this error, add IF EXISTS in this statement.

+
+

Example

  1. Create a table. For details, see Creating an OBS Table or Creating a DLI Table.
  2. Run the following statement to delete table test from the current database:
    1
    DROP TABLE IF EXISTS test;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0088.html b/docs/dli/sqlreference/dli_08_0088.html new file mode 100644 index 00000000..fd80981e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0088.html @@ -0,0 +1,122 @@ + + +

Deep Learning Model Prediction

+

Deep learning has a wide range of applications in many industries, such as image classification, image recognition, and speech recognition. DLI provides several functions to load deep learning models for prediction.

+

Currently, DeepLearning4j and Keras models are supported. In Keras, TensorFlow, CNTK, or Theano can serve as the backend engine. By importing neural network models through Keras, models from mainstream learning frameworks such as Theano, TensorFlow, Caffe, and CNTK can be loaded.

+

Syntax

1
+2
+3
+4
+5
+6
+7
-- Image classification: returns the predicted category IDs used for image classification.
+DL_IMAGE_MAX_PREDICTION_INDEX(field_name, model_path, is_dl4j_model)
+DL_IMAGE_MAX_PREDICTION_INDEX(field_name, keras_model_config_path, keras_weights_path) -- Suitable for the Keras model
+
+--Text classification: returns the predicted category IDs used for text classification.
+DL_TEXT_MAX_PREDICTION_INDEX(field_name, model_path, is_dl4j_model) -- Use the default word2vec model.
+DL_TEXT_MAX_PREDICTION_INDEX(field_name, word2vec_path, model_path, is_dl4j_model)
+
+ +
+

Models and configuration files must be stored on OBS. The path format is obs://your_ak:your_sk@obs.your_obs_region.xxx.com:443/your_model_path.

+
+
+

Parameter Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

field_name

+

Yes

+

Name of the field in the data stream whose data is used for prediction.

+

In image classification, this parameter must be declared as ARRAY[TINYINT].

+

In text classification, this parameter must be declared as String.

+

model_path

+

Yes

+

Complete save path of the model on OBS, including the model structure and model weight.

+

is_dl4j_model

+

Yes

+

Whether the model is a Deeplearning4j model

+

Value true indicates that the model is a Deeplearning4j model, while value false indicates that the model is a Keras model.

+

keras_model_config_path

+

Yes

+

Complete save path of the model structure on OBS. In Keras, you can obtain the model structure by using model.to_json().

+

keras_weights_path

+

Yes

+

Complete save path of the model weight on OBS. In Keras, you can obtain the model weight by using model.save_weights(filepath).

+

word2vec_path

+

Yes

+

Complete save path of the word2vec model on OBS.

+
+
+
+

Example

For prediction in image classification, use the Mnist dataset as the input and load the pre-trained Deeplearning4j model or Keras model to predict the digit representing each image in real time.

+
1
+2
+3
+4
+5
+6
CREATE SOURCE STREAM Mnist(
+    image Array[TINYINT]
+)
+SELECT DL_IMAGE_MAX_PREDICTION_INDEX(image, 'your_dl4j_model_path', true) FROM Mnist
+SELECT DL_IMAGE_MAX_PREDICTION_INDEX(image, 'your_keras_model_path', false) FROM Mnist
+SELECT DL_IMAGE_MAX_PREDICTION_INDEX(image, 'your_keras_model_config_path', 'keras_weights_path') FROM Mnist
+
+ +
+

For prediction in text classification, use data of a group of news titles as the input and load the pre-trained Deeplearning4j model or Keras model to predict the category of each news title in real time, such as economy, sports, and entertainment.

+
1
+2
+3
+4
+5
+6
+7
CREATE SOURCE STREAM News(
+    title String
+)
+SELECT DL_TEXT_MAX_PREDICTION_INDEX(title, 'your_dl4j_word2vec_model_path','your_dl4j_model_path', true) FROM News
+SELECT DL_TEXT_MAX_PREDICTION_INDEX(title, 'your_keras_word2vec_model_path','your_keras_model_path', false) FROM News
+SELECT DL_TEXT_MAX_PREDICTION_INDEX(title, 'your_dl4j_model_path', true) FROM News
+SELECT DL_TEXT_MAX_PREDICTION_INDEX(title, 'your_keras_model_path', false) FROM News
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0089.html b/docs/dli/sqlreference/dli_08_0089.html new file mode 100644 index 00000000..cb6e1ad3 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0089.html @@ -0,0 +1,25 @@ + + +

Viewing Tables

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0090.html b/docs/dli/sqlreference/dli_08_0090.html new file mode 100644 index 00000000..0d200bfa --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0090.html @@ -0,0 +1,52 @@ + + +

Viewing All Tables

+

Function

This statement is used to view all tables and views in the current database.

+
+

Syntax

1
SHOW TABLES [IN | FROM db_name] [LIKE regex_expression];
+
+ +
+
+

Keyword

FROM/IN: followed by the name of a database whose tables and views will be displayed.

+
+

Parameters

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

regex_expression

+

Regular expression used to match table names.

+
+
+
+

Precautions

None

+
+

Example

  1. Create a table. For details, see Creating an OBS Table or Creating a DLI Table.
  2. To show all tables and views in the current database, run the following statement:
    1
    SHOW TABLES;
    +
    + +
    +
  3. To show all tables starting with test in the testdb database, run the following statement:
    1
    SHOW TABLES IN testdb LIKE "test*";
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0091.html b/docs/dli/sqlreference/dli_08_0091.html new file mode 100644 index 00000000..85a40bc7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0091.html @@ -0,0 +1,44 @@ + + +

Viewing Table Creation Statements

+

Function

This statement is used to show the statements for creating a table.

+
+

Syntax

1
SHOW CREATE TABLE table_name;
+
+ +
+
+

Keyword

CREATE TABLE: statement for creating a table

+
+

Parameters

+
+ + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+
+
+
+

Precautions

The table specified in this statement must exist. Otherwise, an error will occur.

+
+

Example

  1. Create a table. For details, see Creating an OBS Table or Creating a DLI Table.
  2. Run the following statement to view the statement that is used to create table test:
    1
    SHOW CREATE TABLE test;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0092.html b/docs/dli/sqlreference/dli_08_0092.html new file mode 100644 index 00000000..5917b5d1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0092.html @@ -0,0 +1,48 @@ + + +

Viewing Table Properties

+

Function

Check the properties of a table.

+
+

Syntax

1
SHOW TBLPROPERTIES table_name [('property_name')];
+
+ +
+
+

Keyword

TBLPROPERTIES: This statement allows you to add a key/value property to a table.

+
+

Parameters

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+

property_name

+
  • If this parameter is not specified, all properties and their values are returned.
  • If a property name is specified, only the specified property and its value are returned.
+
+
+
+

Precautions

property_name is case sensitive. You cannot specify multiple property_name attributes at the same time. Otherwise, an error occurs.

+
+

Example

To return the value of property_key1 in the test table, run the following statement:

+
1
SHOW TBLPROPERTIES test ('property_key1');
+
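Per the parameter description above, omitting the property name returns all properties of the table and their values:

SHOW TBLPROPERTIES test;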
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0093.html b/docs/dli/sqlreference/dli_08_0093.html new file mode 100644 index 00000000..36a911f7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0093.html @@ -0,0 +1,48 @@ + + +

Viewing All Columns in a Specified Table

+

Function

This statement is used to query all columns in a specified table.

+
+

Syntax

1
SHOW COLUMNS {FROM | IN} table_name [{FROM | IN} db_name];
+
+ +
+
+

Keyword

  • COLUMNS: columns in the current table
  • FROM/IN: followed by the name of a database whose tables and views will be displayed. Keyword FROM is equivalent to IN. You can use either of them in a statement.
+
+

Parameters

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

table_name

+

Table name

+

db_name

+

Database name

+
+
+
+

Precautions

The specified table must exist in the database. If the table does not exist, an error is reported.

+
+

Example

Run the following statement to view all columns in the student table.

+
1
SHOW COLUMNS IN student;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0094.html b/docs/dli/sqlreference/dli_08_0094.html new file mode 100644 index 00000000..05467e6c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0094.html @@ -0,0 +1,59 @@ + + +

Viewing All Partitions in a Specified Table

+

Function

This statement is used to view all partitions in a specified table.

+
+

Syntax

1
+2
SHOW PARTITIONS [db_name.]table_name
+  [PARTITION partition_specs];
+
+ +
+
+

Keyword

  • PARTITIONS: partitions in a specified table
  • PARTITION: a specified partition
+
+

Parameters

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). It cannot contain only digits and cannot start with an underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). It cannot contain only digits and cannot start with an underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+

partition_specs

+

Partition information, in the format of "key=value", where key indicates the partition field and value indicates the partition value. If the table is partitioned by multiple fields and only some of them are specified, all partitions matching the specified fields are displayed.

+
+
+
+

Precautions

The table specified in this statement must exist and must be a partitioned table. Otherwise, an error is reported.

+
+

Example

  • To show all partitions in the student table, run the following statement:
    1
    SHOW PARTITIONS student;
    +
    + +
    +
  • To check the dt='2010-10-10' partition in the student table, run the following statement:
    1
    SHOW PARTITIONS student PARTITION(dt='2010-10-10')
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0095.html b/docs/dli/sqlreference/dli_08_0095.html new file mode 100644 index 00000000..703ab465 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0095.html @@ -0,0 +1,105 @@ + + +

Inserting Data

+

Function

This statement is used to insert the SELECT query result or a certain data record into a table.

+
+

Syntax

  • Insert the SELECT query result into a table.
    1
    +2
    INSERT INTO [TABLE] [db_name.]table_name
    +  [PARTITION part_spec] select_statement;
    +
    + +
    +
    1
    +2
    INSERT OVERWRITE TABLE [db_name.]table_name
    +  [PARTITION part_spec] select_statement;
    +
    + +
    +
    part_spec:  
    +  : (part_col_name1=val1 [, part_col_name2=val2, ...])
    +
  • Insert a data record into a table.
    1
    +2
    INSERT INTO [TABLE] [db_name.]table_name
    +  [PARTITION part_spec] VALUES values_row [, values_row ...];
    +
    + +
    +
    1
    +2
    INSERT OVERWRITE TABLE [db_name.]table_name
    +  [PARTITION part_spec] VALUES values_row [, values_row ...];
    +
    + +
    +
    values_row:
    +  : (val1 [, val2, ...])
    +
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + +
Table 1 INSERT parameter description

Parameter

+

Description

+

db_name

+

Name of the database where the target table resides.

+

table_name

+

Name of the target table.

+

part_spec

+

Detailed partition information. If there are multiple partition fields, all fields must be contained, but the corresponding values are optional. The system matches the corresponding partition. A maximum of 100,000 partitions can be created in a single table.

+

select_statement

+

SELECT query on the source table (DLI and OBS tables).

+

values_row

+

Value to be inserted to a table. Use commas (,) to separate columns.

+
+
+
+

Precautions

  • The target DLI table must exist.
  • If no partition needs to be specified for dynamic partitioning, place part_spec in the SELECT statement as a common field (see the sketch after this list).
  • During creation of the target OBS table, only the folder path can be specified.
  • The source table and the target table must have the same data types and column field quantity. Otherwise, data insertion fails.
  • You are advised not to concurrently insert data into a table. Concurrent inserts may conflict, leading to failed data insertion.
  • The INSERT INTO statement is used to add the query result to the target table.
  • The INSERT OVERWRITE statement is used to overwrite existing data in the target table.
  • The INSERT INTO statement can be batch executed, but the INSERT OVERWRITE statement can be batch executed only when data of different partitioned tables is inserted to different static partitions.
  • The INSERT INTO and INSERT OVERWRITE statements can be executed at the same time. However, the result is unknown.
  • When you insert data of the source table to the target table, you cannot import or update data of the source table.
  • The dynamic INSERT OVERWRITE statement of Hive partitioned tables can overwrite the involved partition data but cannot overwrite the entire table data.
  • To overwrite data in a specified partition of the datasource table, set dli.sql.dynamicPartitionOverwrite.enabled to true and run the insert overwrite statement. The default value of dli.sql.dynamicPartitionOverwrite.enabled is false, indicating that data in the entire table is overwritten. The following is an example:
    1
    insert overwrite table tb1 partition(part1='v1', part2='v2') select * from ...
    +
    + +
    +

    On the DLI management console, click SQL Editor. In the upper right corner of the editing window, click Settings to configure parameters.

    +
    +
  • You can configure the spark.sql.shuffle.partitions parameter to set the number of files to be inserted into the OBS bucket in the non-DLI table. In addition, to avoid data skew, you can add distribute by rand() to the end of the INSERT statement to increase the number of concurrent jobs. The following is an example:
    insert into table table_target select * from table_source distribute by cast(rand() * N as int);
    +
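A sketch of such a dynamic-partition insert, reusing the data_source_tab1 table from the example below; p1 stays static while each row's p2 value is supplied by the query:

INSERT INTO data_source_tab1 PARTITION (p1 = 3, p2)
  SELECT id, id AS p2 FROM RANGE(1, 3);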
+
+

Example

Before importing data, you must create a table. For details, see Creating an OBS Table or Creating a DLI Table.

+
+
  • Insert the SELECT query result into a table.
    • Use the DataSource syntax to create a parquet partitioned table.
      CREATE TABLE data_source_tab1 (col1 INT, p1 INT, p2 INT)
      +  USING PARQUET PARTITIONED BY (p1, p2);
      +
    • Insert the query result to the partition (p1 = 3, p2 = 4).
      INSERT INTO data_source_tab1 PARTITION (p1 = 3, p2 = 4)
      +  SELECT id FROM RANGE(1, 3);
      +
    • Insert the new query result to the partition (p1 = 3, p2 = 4).
      INSERT OVERWRITE TABLE data_source_tab1 PARTITION (p1 = 3, p2 = 4)
      +  SELECT id FROM RANGE(3, 5);
      +
    +
  • Insert a data record into a table.
    • Create a Parquet partitioned table with Hive format
      CREATE TABLE hive_serde_tab1 (col1 INT, p1 INT, p2 INT)
      +  USING HIVE OPTIONS(fileFormat 'PARQUET') PARTITIONED BY (p1, p2);
      +
    • Insert two data records into the partition (p1 = 3, p2 = 4).
      INSERT INTO hive_serde_tab1 PARTITION (p1 = 3, p2 = 4)
      +  VALUES (1), (2);
      +
    • Insert new data to the partition (p1 = 3, p2 = 4).
      INSERT OVERWRITE TABLE hive_serde_tab1 PARTITION (p1 = 3, p2 = 4)
      +  VALUES (3), (4);
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0096.html b/docs/dli/sqlreference/dli_08_0096.html new file mode 100644 index 00000000..fd91fe5f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0096.html @@ -0,0 +1,735 @@ + + +

String Functions

+

The common character string functions of DLI are as follows:

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 String Operators

Operator

+

Returned Data Type

+

Description

+

||

+

VARCHAR

+

Concatenates two strings.

+

CHAR_LENGTH

+

INT

+

Returns the number of characters in a string.

+

CHARACTER_LENGTH

+

INT

+

Returns the number of characters in a string.

+

CONCAT

+

VARCHAR

+

Concatenates two or more string values to form a new string. If any parameter is NULL, that parameter is skipped.

+

CONCAT_WS

+

VARCHAR

+

Concatenates each parameter value and the separator specified by the first parameter separator to form a new string. The length and type of the new string depend on the input value.

+

HASH_CODE

+

INT

+

Returns the absolute value of HASH_CODE() of a string. In addition to string, int, bigint, float, and double are also supported.

+

INITCAP

+

VARCHAR

+

Returns a string whose first letter is in uppercase and the other letters in lowercase. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.

+

IS_ALPHA

+

BOOLEAN

+

Checks whether a string contains only letters.

+

IS_DIGITS

+

BOOLEAN

+

Checks whether a string contains only digits.

+

IS_NUMBER

+

BOOLEAN

+

Checks whether a string is numeric.

+

IS_URL

+

BOOLEAN

+

Checks whether a string is a valid URL.

+

JSON_VALUE

+

VARCHAR

+

Obtains the value of a specified path in a JSON string.

+

KEY_VALUE

+

VARCHAR

+

Obtains the value of a key in a key-value pair string.

+

LOWER

+

VARCHAR

+

Returns a string of lowercase characters.

+

LPAD

+

VARCHAR

+

Concatenates the pad string to the left of the str string until the length of the new string reaches the specified length len.

+

MD5

+

VARCHAR

+

Returns the MD5 value of a string. If the parameter is an empty string (that is, the parameter is ''), an empty string is returned.

+

OVERLAY

+

VARCHAR

+

Replaces part of string x with string y: length+1 characters starting from start_position are replaced.

+

POSITION

+

INT

+

Returns the position of the first occurrence of the target string x in the queried string y. If the target string x does not exist in the queried string y, 0 is returned.

+

REPLACE

+

VARCHAR

+

Replaces all str2 in the str1 string with str3.

+
  • str1: original character.
  • str2: target character.
  • str3: replacement character.
+

RPAD

+

VARCHAR

+

Concatenates the pad string to the right of the str string until the length of the new string reaches the specified length len.

+

SHA1

+

STRING

+

Returns the SHA1 value of the expr string.

+

SHA256

+

STRING

+

Returns the SHA256 value of the expr string.

+

STRING_TO_ARRAY

+

ARRAY[STRING]

+

Splits the value string into a string array by using the delimiter.

+

SUBSTRING

+

VARCHAR

+

Returns the substring starting from a fixed position of A. The start position starts from 1.

+

TRIM

+

STRING

+

Removes string A from the start position, the end position, or both the start and end positions of B. By default, A is removed from both the start and end positions.

+

UPPER

+

VARCHAR

+

Returns a string converted to uppercase characters.

+
+
+

||

  • Function

    Concatenates two character strings.

    +
  • Syntax
    VARCHAR VARCHAR a || VARCHAR b
    +
  • Parameter description
    • a: character string.
    • b: character string.
    +
  • Example
    • Test statement
      SELECT "hello" || "world";
      +
    • Test result
      "helloworld"
      +
    +
+
+

CHAR_LENGTH

  • Function

    Returns the number of characters in a string.

    +
  • Syntax
    INT CHAR_LENGTH(a)
    +
  • Parameter description
    • a: character string.
    +
  • Example
    • Test statement
      SELECT  CHAR_LENGTH(var1) as aa FROM T1;
      +
    • Test data and result +
      + + + + + + + +
      Table 2 Test data and result

      Test Data (var1)

      +

      Test Result (aa)

      +

      abcde123

      +

      8

      +
      +
      +
    +
+
+

CHARACTER_LENGTH

  • Function

    Returns the number of characters in a string.

    +
  • Syntax
    INT CHARACTER_LENGTH(a)
    +
  • Parameter description
    • a: character string.
    +
  • Example
    • Test statement
      SELECT  CHARACTER_LENGTH(var1) as aa FROM T1;
      +
    • Test data and result +
      + + + + + + + +
      Table 3 Test data and result

      Test Data (var1)

      +

      Test Result (aa)

      +

      abcde123

      +

      8

      +
      +
      +
    +
+
+

CONCAT

  • Function

    Concatenates two or more string values to form a new string. If any parameter is NULL, that parameter is skipped.

    +
  • Syntax
    VARCHAR CONCAT(VARCHAR var1, VARCHAR var2, ...)  
    +
  • Parameter description
    • var1: character string
    • var2: character string
    +
  • Example
    • Test statement
      SELECT CONCAT("abc", "def", "ghi", "jkl");
      +
    • Test result
      "abcdefghijkl"
      +
    +
+
+

CONCAT_WS

  • Function

    Concatenates each parameter value and the separator specified by the first parameter separator to form a new string. The length and type of the new string depend on the input value.

    +

    If separator is null, it is treated as an empty string during concatenation. If any other parameter is null, that parameter is skipped during concatenation.

    +
    +
  • Syntax
    VARCHAR CONCAT_WS(VARCHAR separator, VARCHAR var1, VARCHAR var2, ...)
    +
  • Parameter description
    • separator: separator.
    • var1: character string
    • var2: character string
    +
  • Example
    • Test statement
      SELECT CONCAT_WS("-", "abc", "def", "ghi", "jkl");
      +
    • Test result
      "abc-def-ghi-jkl"
      +
    +
+
+

HASH_CODE

  • Function

    Returns the absolute value of HASH_CODE() of a string. In addition to string, int, bigint, float, and double are also supported.

    +
  • Syntax
    INT HASH_CODE(VARCHAR str)
    +
  • Parameter description
    • str: character string.
    +
  • Example
    • Test statement
      SELECT HASH_CODE("abc");
      +
    • Test result
      96354
      +
    +
+
+

INITCAP

  • Function

    Returns the string whose first letter is in uppercase and the other letters in lowercase. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.

    +
  • Syntax
    VARCHAR INITCAP(a)
    +
  • Parameter description
    • a: character string.
    +
  • Example
    • Test statement
      SELECT INITCAP(var1)as aa FROM T1;
      +
    +
    • Test data and result +
      + + + + + + + +
      Table 4 Test data and result

      Test Data (var1)

      +

      Test Result (aa)

      +

      aBCde

      +

      Abcde

      +
      +
      +
    +
+
+

IS_ALPHA

  • Function

    Checks whether a character string contains only letters.

    +
  • Syntax
    BOOLEAN IS_ALPHA(VARCHAR content)
    +
  • Parameter description
    • content: Enter a character string.
    +
  • Example
    • Test statement
      SELECT IS_ALPHA(content)  AS case_result FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + + +
      Table 5 Test data and results

      Test Data (content)

      +

      Test Result (case_result)

      +

      Abc

      +

      true

      +

      abc1#$

      +

      false

      +

      null

      +

      false

      +

      Empty string

      +

      false

      +
      +
      +
    +
+
+

IS_DIGITS

  • Function

    Checks whether a character string contains only digits.

    +
  • Syntax
    BOOLEAN IS_DIGITS(VARCHAR content)
    +
  • Parameter description
    • content: Enter a character string.
    +
  • Example
    • Test statement
      SELECT IS_DIGITS(content) AS case_result FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + + + + + +
      Table 6 Test data and results

      Test Data (content)

      +

      Test Result (case_result)

      +

      78

      +

      true

      +

      78.0

      +

      false

      +

      78a

      +

      false

      +

      null

      +

      false

      +

      Empty string

      +

      false

      +
      +
      +
    +
+
+

IS_NUMBER

  • Function

    This function is used to check whether a character string is a numeric string.

    +
  • Syntax
    BOOLEAN IS_NUMBER(VARCHAR content)
    +
  • Parameter description
    • content: Enter a character string.
    +
  • Example
    • Test statement
      SELECT IS_NUMBER(content) AS case_result FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + + + + + +
      Table 7 Test data and results

      Test Data (content)

      +

      Test Result (case_result)

      +

      78

      +

      true

      +

      78.0

      +

      true

      +

      78a

      +

      false

      +

      null

      +

      false

      +

      Empty string

      +

      false

      +
      +
      +
    +
+
+

IS_URL

  • Function

    This function is used to check whether a character string is a valid URL.

    +
  • Syntax
    BOOLEAN IS_URL(VARCHAR content)
    +
  • Parameter description
    • content: Enter a character string.
    +
  • Example
    • Test statement
      SELECT IS_URL(content) AS case_result FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + + + + + +
      Table 8 Test data and results

      Test Data (content)

      +

      Test Result (case_result)

      +

      https://www.testweb.com

      +

      true

      +

      https://www.testweb.com:443

      +

      true

      +

      www.testweb.com:443

      +

      false

      +

      null

      +

      false

      +

      Empty string

      +

      false

      +
      +
      +
    +
+
+

JSON_VALUE

  • Function

    Obtains the value of a specified path in a JSON character string.

    +
  • Syntax
    VARCHAR JSON_VALUE(VARCHAR content, VARCHAR path)
    +
  • Parameter description
    • content: Enter a character string.
    • path: path to be obtained.
    +
  • Example
    • Test statement
      SELECT JSON_VALUE(content, path) AS case_result FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + + + + + + + + + + +
      Table 9 Test data and results

      Test Data (content and path)

      +

      Test Result (case_result)

      +

      { "a1":"v1","a2":7,"a3":8.0,"a4": {"a41":"v41","a42": ["v1","v2"]}}

      +

      $

      +

      { "a1":"v1","a2":7,"a3":8.0,"a4": {"a41":"v41","a42": ["v1","v2"]}}

      +

      { "a1":"v1","a2":7,"a3":8.0,"a4": {"a41":"v41","a42": ["v1","v2"]}}

      +

      $.a1

      +

      v1

      +

      { "a1":"v1","a2":7,"a3":8.0,"a4": {"a41":"v41","a42": ["v1","v2"]}}

      +

      $.a4

      +

      {"a41":"v41","a42": ["v1","v2"]}

      +

      { "a1":"v1","a2":7,"a3":8.0,"a4": {"a41":"v41","a42": ["v1","v2"]}}

      +

      $.a4.a42

      +

      ["v1","v2"]

      +

      { "a1":"v1","a2":7,"a3":8.0,"a4": {"a41":"v41","a42": ["v1","v2"]}}

      +

      $.a4.a42[0]

      +

      v1

      +
      +
      +
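    • Additional example (a minimal inline sketch of the JSON path syntax shown in Table 9)
      SELECT JSON_VALUE('{"a1":"v1","a2":7}', '$.a2');
      Expected result: 7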
    +
+
+

KEY_VALUE

  • Function

    This function is used to obtain the value of a key in a key-value pair string.

    +
  • Syntax
    VARCHAR KEY_VALUE(VARCHAR content, VARCHAR split1, VARCHAR split2, VARCHAR key_name)
    +
  • Parameter description
    • content: Enter a character string.
    • split1: separator of multiple key-value pairs.
    • split2: separator between the key and value.
    • key_name: name of the key to be obtained.
    +
  • Example
    • Test statement
      SELECT KEY_VALUE(content, split1, split2, key_name)  AS case_result FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + + + + + + + + +
      Table 10 Test data and results

      Test Data (content, split1, split2, and key_name)

      +

      Test Result (case_result)

      +

      k1=v1;k2=v2

      +

      ;

      +

      =

      +

      k1

      +

      v1

      +

      null

      +

      ;

      +

      =

      +

      k1

      +

      null

      +

      k1=v1;k2=v2

      +

      null

      +

      =

      +

      k1

      +

      null

      +
      +
      +
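    • Additional example (a minimal inline sketch of the separators described above; the expected value follows Table 10)
      SELECT KEY_VALUE('k1=v1;k2=v2', ';', '=', 'k2');
      Expected result: v2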
    +
+
+

LOWER

  • Function

    Returns the string converted to lowercase.

    +
  • Syntax
    VARCHAR LOWER(A)
    +
  • Parameter description
    • A: character string.
    +
  • Example
    • Test statement
      SELECT LOWER(var1) AS aa FROM T1;
      +
    • Test data and result +
      + + + + + + + +
      Table 11 Test data and result

      Test Data (var1)

      +

      Test Result (aa)

      +

      ABc

      +

      abc

      +
      +
      +
    +
+
+

LPAD

  • Function

    Concatenates the pad string to the left of the str string until the length of the new string reaches the specified length len.

    +
  • Syntax
    VARCHAR LPAD(VARCHAR str, INT len, VARCHAR pad)
    +
  • Parameter description
    • str: character string before concatenation.
    • len: length of the concatenated character string.
    • pad: character string to be concatenated.
    +
    • If any parameter is null, null is returned.
    • If the value of len is a negative number, null is returned.
    • If the value of len is less than the length of str, the first len characters of str are returned.
    +
    +
  • Example
    • Test statement
      SELECT
      +  LPAD("adc", 2, "hello"),
      +  LPAD("adc", -1, "hello"),
      +  LPAD("adc", 10, "hello");
      +
    • Test result
      "ad",,"helloheadc"
      +
    +
+
+

MD5

  • Function

    Returns the MD5 value of a string. If the parameter is an empty string (that is, the parameter is ''), an empty string is returned.

    +
  • Syntax
    VARCHAR MD5(VARCHAR str)
    +
  • Parameter description
    • str: character string
    +
  • Example
    • Test statement
      SELECT MD5("abc");
      +
    • Test result
      "900150983cd24fb0d6963f7d28e17f72"
      +
    +
+
+

OVERLAY

  • Function

    Replaces a substring of x with y. length characters starting from start_position are replaced.

    +
  • Syntax
    VARCHAR OVERLAY(VARCHAR x PLACING VARCHAR y FROM INT start_position [ FOR INT length ])
    +
  • Parameter description
    • x: character string
    • y: character string.
    • start_position: start position.
    • length (optional): number of characters to replace.
    +
  • Example
    • Test statement
      SELECT OVERLAY('abcdefg' PLACING 'xyz' FROM 2 FOR 2) AS result FROM T1;
      +
    • Test result +
      + + + + + +
      Table 12 Test result

      result

      +

      axyzdefg

      +
      +
      +
    +
+
+

POSITION

  • Function

    Returns the position of the first occurrence of the target string x in the queried string y. If x does not exist in y, 0 is returned.

    +
  • Syntax
    INTEGER POSITION(x IN y)
    +
  • Parameter description
    • x: character string
    • y: character string.
    +
  • Example
    • Test statement
      SELECT POSITION('in' IN 'chin') AS result FROM T1;
      +
    • Test result +
      + + + + + +
      Table 13 Test result

      result

      +

      3

      +
      +
      +
    +
+
+

REPLACE

  • Function

    Replaces all occurrences of the str2 substring in the str1 string with str3.

    +
  • Syntax
    VARCHAR REPLACE(VARCHAR str1, VARCHAR str2, VARCHAR str3)
    +
  • Parameter description
    • str1: original character string.
    • str2: substring to be replaced.
    • str3: replacement character string.
    +
  • Example
    • Test statement
      SELECT
      +  replace(
      +    "hello world hello world hello world",
      +    "world",
      +    "hello"
      +  );
      +
    • Test result
      "hello hello hello hello hello hello"
      +
    +
+
+

RPAD

  • Function

    Concatenates the pad string to the right of the str string until the length of the new string reaches the specified length len.

    +
    • If any parameter is null, null is returned.
    • If the value of len is a negative number, null is returned.
    • If the value of len is less than the length of str, the first len characters of str are returned.
    • If pad is an empty string and len is greater than the length of str, str is returned unchanged.
    +
  • Syntax
    VARCHAR RPAD(VARCHAR str, INT len, VARCHAR pad)
    +
  • Parameter description
    • str: start character string.
    • len: indicates the length of the new character string.
    • pad: character string that needs to be added repeatedly.
    +
  • Example
    • Test statement
      SELECT
      +  RPAD("adc", 2, "hello"),
      +  RPAD("adc", -1, "hello"),
      +  RPAD("adc", 10, "hello");
      +
    • Test result
      "ad",,"adchellohe"
      +
    +
+
+

SHA1

  • Function

    Returns the SHA1 value of the expr string.

    +
  • Syntax
    STRING SHA1(STRING expr)
    +
  • Parameter description
    • expr: character string.
    +
  • Example
    • Test statement
      SELECT SHA1("abc");
      +
    • Test result
      "a9993e364706816aba3e25717850c26c9cd0d89d"
      +
    +
+
+

SHA256

  • Function

    Returns the SHA256 value of the expr string.

    +
  • Syntax
    STRING SHA256(STRING expr)
    +
  • Parameter description
    • expr: character string.
    +
  • Example
    • Test statement
      SELECT SHA256("abc");
      +
    • Test result
      "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
      +
    +
+
+

STRING_TO_ARRAY

  • Function

    Splits the value string into an array of strings using the specified delimiter.

    +

    The delimiter is interpreted as a Java regular expression, so special characters must be escaped.

    +
    +
  • Syntax
    ARRAY[String] STRING_TO_ARRAY(STRING value, VARCHAR delimiter)
    +
  • Parameter description
    • value: character string.
    • delimiter: specifies the delimiter.
    +
  • Example
    • Test statement
      SELECT
      +  string_to_array("127.0.0.1", "\\."),
      +  string_to_array("red-black-white-blue", "-");
      +
    • Test result
      [127,0,0,1],[red,black,white,blue]
      +
    +
+
+

SUBSTRING

  • Function

    Returns the substring of A that starts from the specified position. Positions are counted from 1.

    +
    • If len is not specified, the substring from the start position to the end of the string is truncated.
    • If len is specified, the substring starting from the position specified by start is truncated. The length is specified by len.
    +

    The value of start starts from 1. If the value is 0, it is regarded as 1. If the value of start is a negative number, the position is calculated from the end of the character string in reverse order.

    +
    +
  • Syntax
    VARCHAR SUBSTRING(STRING A FROM INT start) 
    +

    Or

    +
    VARCHAR SUBSTRING(STRING A FROM INT start FOR INT len)
    +
  • Parameter description
    • A: specified character string.
    • start: start position for truncating the character string A.
    • len: length of the substring to extract.
    +
  • Example
    • Test statement 1
      SELECT SUBSTRING("123456" FROM 2);
      +
    • Test result 1
      "23456"
      +
    • Test statement 2
      SELECT SUBSTRING("123456" FROM 2 FOR 4);
      +
    • Test result 2
      "2345"
      +
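    • Additional example (a sketch of the zero and negative start rules described above; the expected results assume those rules)
      SELECT SUBSTRING("123456" FROM 0);
      Expected result: "123456" (a start of 0 is treated as 1)
      SELECT SUBSTRING("123456" FROM -3);
      Expected result: "456" (a negative start counts from the end of the string)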
    +
+
+

TRIM

  • Function

    Removes string A from the start, the end, or both ends of string B. By default, A is removed from both ends.

    +
  • Syntax
    STRING TRIM( { BOTH | LEADING | TRAILING } STRING a FROM STRING b)
    +
  • Parameter description
    • a: character string.
    • b: character string.
    +
  • Example
    • Test statement
      SELECT TRIM(BOTH " " FROM "  hello world  ");
      +
    • Test result
      "hello world"
      +
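    • Additional example (a sketch of the LEADING and TRAILING variants named in the syntax above)
      SELECT TRIM(LEADING "x" FROM "xxhello worldxx");
      Expected result: "hello worldxx"
      SELECT TRIM(TRAILING "x" FROM "xxhello worldxx");
      Expected result: "xxhello world"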
    +
+
+

UPPER

  • Function

    Returns the string converted to uppercase.

    +
  • Syntax
    VARCHAR UPPER(A)
    +
  • Parameter description
    • A: character string.
    +
  • Example
    • Test statement
      SELECT UPPER("hello world");
      +
    • Test result
      "HELLO WORLD"
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0097.html b/docs/dli/sqlreference/dli_08_0097.html new file mode 100644 index 00000000..88ed01b0 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0097.html @@ -0,0 +1,166 @@ + + +

Temporal Functions

+

Table 1 lists the time functions supported by Flink SQL.

+

Function Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Time Function

Function

+

Return Type

+

Description

+

DATE string

+

DATE

+

Parse the date string (yyyy-MM-dd) to a SQL date.

+

TIME string

+

TIME

+

Parse the time string (HH:mm:ss) to the SQL time.

+

TIMESTAMP string

+

TIMESTAMP

+

Convert the time string into timestamp. The time string format is yyyy-MM-dd HH:mm:ss.fff.

+

INTERVAL string range

+

INTERVAL

+

There are two types of intervals: yyyy-MM and dd HH:mm:ss.fff. The range of yyyy-MM can be YEAR or YEAR TO MONTH, with a precision of month. The range of dd HH:mm:ss.fff can be DAY TO HOUR, DAY TO MINUTE, DAY TO SECOND, or DAY TO MILLISECONDS, with a precision down to the millisecond. For example, if the range is DAY TO SECOND, the day, hour, minute, and second are all valid and the precision is second. DAY TO MINUTE indicates that the precision is minute.

+

The following is an example:

+

INTERVAL '10 00:00:00.004' DAY TO MILLISECONDS indicates that the interval is 10 days and 4 milliseconds.

+

INTERVAL '10' DAY indicates that the interval is 10 days and INTERVAL '2-10' YEAR TO MONTH indicates that the interval is 2 years and 10 months.

+

CURRENT_DATE

+

DATE

+

Return the SQL date of UTC time zone.

+

CURRENT_TIME

+

TIME

+

Return the SQL time of UTC time zone.

+

CURRENT_TIMESTAMP

+

TIMESTAMP

+

Return the SQL timestamp of UTC time zone.

+

LOCALTIME

+

TIME

+

Return the SQL time of the current time zone.

+

LOCALTIMESTAMP

+

TIMESTAMP

+

Return the SQL timestamp of the current time zone.

+

EXTRACT(timeintervalunit FROM temporal)

+

INT

+

Extract part of the time point or interval. Return the part in the int type.

+

For example, 5 is returned from EXTRACT(DAY FROM DATE "2006-06-05").

+

FLOOR(timepoint TO timeintervalunit)

+

TIME

+

Round a time point down to the given unit.

+

For example, 12:44:00 is returned from FLOOR(TIME '12:44:31' TO MINUTE).

+

CEIL(timepoint TO timeintervalunit)

+

TIME

+

Round a time point up to the given unit.

+

For example, 12:45:00 is returned from CEIL(TIME '12:44:31' TO MINUTE).

+

QUARTER(date)

+

INT

+

Return the quarter from the SQL date.

+

(timepoint, temporal) OVERLAPS (timepoint, temporal)

+

BOOLEAN

+

Check whether two time intervals overlap. Each (time point, temporal) pair is converted into a time range with a start point and an end point. true is returned if leftEnd >= rightStart and rightEnd >= leftStart; otherwise, false is returned.

+

The following is an example:

+
  • If leftEnd is 3:55:00 (2:55:00+1:00:00), rightStart is 3:30:00, rightEnd is 5:30:00 (3:30:00+2:00:00), and leftStart is 2:55:00, true will be returned.

    Specifically, true is returned from (TIME '2:55:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR).

    +
+
  • If leftEnd is 10:00:00, rightStart is 10:15:00, rightEnd is 13:15:00 (10:15:00+3:00:00), and leftStart is 9:00:00, false will be returned.

    Specifically, false is returned from (TIME '9:00:00', TIME '10:00:00') OVERLAPS (TIME '10:15:00', INTERVAL '3' HOUR).

    +
+

TO_TIMESTAMP(long expr)

+

TIMESTAMP

+

Converts a BIGINT timestamp in milliseconds to a TIMESTAMP value.

+

The input parameter of this function must be of the BIGINT type. Other data types, such as VARCHAR and STRING, are not supported.

+

For example, TO_TIMESTAMP (1628765159000) is converted to 2021-08-12 18:45:59.

+

UNIX_TIMESTAMP

+

BIGINT

+

Returns the timestamp of a specified parameter. The timestamp type is BIGINT and the unit is second.

+

The following methods are supported:

+
  • UNIX_TIMESTAMP(): returns the timestamp of the current time if no parameter is specified.
  • UNIX_TIMESTAMP(STRING datestr): returns the timestamp indicated by the parameter if only one parameter is contained. The format of datestr must be yyyy-MM-dd HH:mm:ss.
  • UNIX_TIMESTAMP(STRING datestr, STRING format): returns the timestamp indicated by the first parameter if two parameters are contained. The second parameter can specify the format of datestr.
+

UNIX_TIMESTAMP_MS

+

BIGINT

+

Returns the timestamp of a specified parameter. The timestamp type is BIGINT and the unit is millisecond.

+

The following methods are supported:

+
  • UNIX_TIMESTAMP_MS(): returns the timestamp of the current time if no parameter is specified.
  • UNIX_TIMESTAMP_MS(STRING datestr): returns the timestamp indicated by the parameter if only one parameter is contained. The format of datestr must be yyyy-MM-dd HH:mm:ss.SSS.
  • UNIX_TIMESTAMP_MS(STRING datestr, STRING format): returns the timestamp indicated by the first parameter if two parameters are contained. The second parameter can specify the format of datestr.
+
+
+
+

Precautions

None

+
+

Example

insert into temp SELECT Date '2015-10-11'  FROM  OrderA;//Date is returned
+insert into temp1 SELECT Time '12:14:50'  FROM  OrderA;//Time is returned
+insert into temp2 SELECT Timestamp '2015-10-11 12:14:50'  FROM  OrderA;//Timestamp is returned
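The following statements sketch a few more of the functions in Table 1. They reuse the OrderA source from the example above, and the expected values follow the descriptions in the table:

insert into temp3 SELECT EXTRACT(DAY FROM DATE '2006-06-05')  FROM  OrderA;//5 is returned
insert into temp4 SELECT FLOOR(TIME '12:44:31' TO MINUTE)  FROM  OrderA;//12:44:00 is returned
insert into temp5 SELECT UNIX_TIMESTAMP('2021-08-12 18:45:59')  FROM  OrderA;//A BIGINT timestamp in seconds is returned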
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0098.html b/docs/dli/sqlreference/dli_08_0098.html new file mode 100644 index 00000000..0ac5766d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0098.html @@ -0,0 +1,127 @@ + + +

Creating a DLI Table Using the DataSource Syntax

+

Function

The DataSource syntax can be used to create a DLI table. The main differences between the DataSource and Hive syntaxes lie in the supported data formats and the numbers of supported partitions. For details, see the Syntax and Precautions sections.

+
+

Syntax

1
+2
+3
+4
+5
+6
+7
CREATE TABLE [IF NOT EXISTS] [db_name.]table_name 
+  [(col_name1 col_type1 [COMMENT col_comment1], ...)]
+  USING file_format 
+  [OPTIONS (key1=val1, key2=val2, ...)] 
+  [PARTITIONED BY (col_name1, col_name2, ...)]
+  [COMMENT table_comment]
+  [AS select_statement];
+
+ +
+
+

Keyword

  • IF NOT EXISTS: Prevents system errors when the created table exists.
  • USING: Specifies the storage format.
  • OPTIONS: Specifies the attribute name and attribute value when a table is created.
  • COMMENT: Field or table description.
  • PARTITIONED BY: Partition field.
  • AS: Run the CREATE TABLE AS statement to create a table.
+
+

Parameter Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). The value cannot contain only digits and cannot start with a digit or underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). The value cannot contain only digits and cannot start with a digit or underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+

col_name

+

Column names with data types separated by commas (,). The column name contains letters, digits, and underscores (_). It cannot contain only digits and must contain at least one letter.

+

col_type

+

Field type

+

col_comment

+

Field description

+

file_format

+

Data storage format of DLI tables. The value can be parquet only.

+

table_comment

+

Table description

+

select_statement

+

The CREATE TABLE AS statement is used to insert the SELECT query result of the source table or a data record to a newly created DLI table.

+
+
+ +
+ + + + + + + + + + + + + +
Table 2 OPTIONS parameter description

Parameter

+

Description

+

Default Value

+

multiLevelDirEnable

+

Whether to iteratively query data in subdirectories. When this parameter is set to true, all files in the table path, including files in subdirectories, are iteratively read when a table is queried.

+

false

+

compression

+

Specified compression format. Generally, you need to set this parameter to zstd for parquet files.

+

-

+
+
+
+

Precautions

  • If no delimiter is specified, the comma (,) is used by default.
+
  • When a partitioned table is created, the column specified in PARTITIONED BY must be a column in the table, and the partition type must be specified. The partition column supports only the string, boolean, tinyint, smallint, short, int, bigint, long, decimal, float, double, date, and timestamp type.
  • When a partitioned table is created, the partition fields must be the last field or fields in the table definition, and their order must match the order in the table definition. Otherwise, an error occurs.
  • A maximum of 7,000 partitions can be created in a single table.
  • The CREATE TABLE AS statement cannot specify table attributes or create partitioned tables.
+
+

Example

  • Create a src table that has two columns key and value in INT and STRING types respectively, and set the compression format to zstd.
    1
    CREATE TABLE src(key INT, value STRING) USING PARQUET OPTIONS(compression = 'zstd');
    +
    + +
    +
  • Create a student table that has name, score, and classNo columns and stores data in Parquet format. Partition the table by classNo.
    1
    CREATE TABLE student(name STRING, score INT, classNo INT) USING PARQUET OPTIONS('key1' = 'value1') PARTITIONED BY(classNo) ;
    +
    + +
    +

    classNo is the partition field, which must be placed at the end of the table field, that is, student(name STRING, score INT, classNo INT).

    +
    +
  • Create table t1 and insert t2 data into table t1.
    1
    CREATE TABLE t1 USING parquet AS select * from t2;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0099.html b/docs/dli/sqlreference/dli_08_0099.html new file mode 100644 index 00000000..70372b89 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0099.html @@ -0,0 +1,196 @@ + + +

User-Defined Functions

+

Overview

DLI supports the following three types of user-defined functions (UDFs):

+
+
  • Regular UDF: takes in one or more input parameters and returns a single result.
  • User-defined table-generating function (UDTF): takes in one or more input parameters and returns multiple rows or columns.
  • User-defined aggregate function (UDAF): aggregates multiple records into one value.
+

UDFs can only be used in dedicated queues.

+
+

POM Dependency

<dependency>
+   <groupId>org.apache.flink</groupId>
+   <artifactId>flink-table_2.11</artifactId>
+   <version>1.7.2</version>
+   <scope>provided</scope>
+</dependency>
+<dependency>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-streaming-java_2.11</artifactId>
+        <version>1.7.2</version>
+        <scope>provided</scope>
+</dependency>
+
+

Precautions

  • Currently, Python is not supported for programming UDFs, UDTFs, and UDAFs.
  • If you use IntelliJ IDEA to debug the created UDF, select include dependencies with "Provided" scope. Otherwise, the dependency packages in the POM file cannot be loaded for local debugging.

    The following uses IntelliJ IDEA 2020.2 as an example:

    +
    1. On the IntelliJ IDEA page, select the configuration file you need to debug and click Edit Configurations.

      +
    2. On the Run/Debug Configurations page, select include dependencies with "Provided" scope.

      +
    3. Click OK.
    +
+
+

Using UDFs

  1. Write the code of custom functions. For details about the code examples, see UDF, UDTF, or UDAF.
  2. Compile the UDF code, pack it into a JAR package, and upload the package to OBS.
  3. In the left navigation pane of the DLI management console, click Job Management > Flink Jobs. Locate the row where the target job resides and click Edit in the Operation column to switch to the page where you can edit the job.
  4. On the Running Parameters tab page, select an exclusive queue for Queue. The UDF Jar parameter is displayed. Select the JAR file stored on OBS and click Save.

    Before selecting a user-defined function JAR package, upload the JAR package to the created OBS bucket.

    +
    +

    After the JAR package is selected, add the UDF statement to the SQL statement.

    +
+
+

UDF

A regular UDF must inherit the ScalarFunction class and implement the eval method. The open and close methods are optional.

+
+

Example code

+
import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.ScalarFunction;
+public class UdfScalarFunction extends ScalarFunction {
+  private int factor = 12;
+  public UdfScalarFunction() {
+    this.factor = 12;
+  }
+  /**
+   * (optional) Initialization
+   * @param context
+   */
+  @Override
+  public void open(FunctionContext context) {}
+  /**
+   * Custom logic
+   * @param s
+   * @return
+   */
+   public int eval(String s) {
+     return s.hashCode() * factor;
+   }
+   /**
+    * Optional
+    */
+   @Override
+   public void close() {}
+}
+

Example

+
1
+2
CREATE FUNCTION udf_test AS 'com.xxx.udf.UdfScalarFunction';
+INSERT INTO sink_stream select udf_test(attr) FROM source_stream;
+
+ +
+

UDTF

A UDTF must inherit the TableFunction class and implement the eval method. The open and close methods are optional. If the UDTF needs to return multiple columns, declare the return type as Tuple or Row. If Row is used, you need to override the getResultType method to declare the returned field types.

+
+

Example code

+
import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.TableFunction;
+import org.apache.flink.types.Row;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+public class UdfTableFunction extends TableFunction<Row> {
+  private Logger log = LoggerFactory.getLogger(TableFunction.class);
+  /**
+   * (optional) Initialization
+   * @param context
+   */
+  @Override
+  public void open(FunctionContext context) {}
+  public void eval(String str, String split) {
+    for (String s : str.split(split)) {
+      Row row = new Row(2);
+      row.setField(0, s);
+      row.setField(1, s.length());
+      collect(row);
+    }
+  }
+  /**
+   * Declare the type returned by the function
+   * @return
+   */
+  @Override
+  public TypeInformation<Row> getResultType() {
+  return Types.ROW(Types.STRING, Types.INT);
+  }
+  /**
+    * Optional
+   */
+  @Override
+  public void close() {}
+ }
+

Example

+

The UDTF supports CROSS JOIN and LEFT JOIN. When the UDTF is used, the LATERAL and TABLE keywords must be included.

+
  • CROSS JOIN: does not output the data of a row in the left table if the UDTF does not output the result for the data of the row.
  • LEFT JOIN: outputs the data of a row in the left table even if the UDTF does not output the result for the data of the row, but pads null with UDTF-related fields.
+
1
+2
+3
+4
+5
+6
+7
CREATE FUNCTION udtf_test AS 'com.xxx.udf.TableFunction';
+// CROSS JOIN
+INSERT INTO sink_stream select subValue, length FROM source_stream, LATERAL
+TABLE(udtf_test(attr, ',')) as T(subValue, length);
+// LEFT JOIN
+INSERT INTO sink_stream select subValue, length FROM source_stream LEFT JOIN LATERAL
+TABLE(udtf_test(attr, ',')) as T(subValue, length) ON TRUE;
+
+ +
+

UDAF

A UDAF must inherit the AggregateFunction class. You need to create an accumulator for storing the intermediate computing result, for example, WeightedAvgAccum in the following example code.

+
+

Example code

+
public class WeightedAvgAccum {
+public long sum = 0;
+public int count = 0;
+}
+

+
import org.apache.flink.table.functions.AggregateFunction;
+import java.util.Iterator;
+/**
+* The first type variable is the type returned by the aggregation function, and the second type variable is of the Accumulator type.
+* Weighted Average user-defined aggregate function.
+*/
+public class UdfAggFunction extends AggregateFunction<Long, WeightedAvgAccum> {
+// Initialize the accumulator.
+  @Override
+  public WeightedAvgAccum createAccumulator() {
+    return new WeightedAvgAccum();
+  }
+// Return the intermediate computing value stored in the accumulator.
+  @Override
+  public Long getValue(WeightedAvgAccum acc) {
+    if (acc.count == 0) {
+       return null;
+    } else {
+      return acc.sum / acc.count;
+ }
+}
+// Update the intermediate computing value according to the input.
+public void accumulate(WeightedAvgAccum acc, long iValue) {
+acc.sum += iValue;
+acc.count += 1;
+}
+// Perform the retraction operation, which is opposite to the accumulate operation.
+public void retract(WeightedAvgAccum acc, long iValue) {
+acc.sum -= iValue;
+acc.count -= 1;
+}
+// Combine multiple accumulator values.
+public void merge(WeightedAvgAccum acc, Iterable<WeightedAvgAccum> it) {
+Iterator<WeightedAvgAccum> iter = it.iterator();
+while (iter.hasNext()) {
+WeightedAvgAccum a = iter.next();
+acc.count += a.count;
+acc.sum += a.sum;
+}
+}
+// Reset the intermediate computing value.
+public void resetAccumulator(WeightedAvgAccum acc) {
+acc.count = 0;
+acc.sum = 0L;
+}
+}
+

Example

+
1
+2
CREATE FUNCTION udaf_test AS 'com.xxx.udf.UdfAggFunction';
+INSERT INTO sink_stream SELECT udaf_test(attr2) FROM source_stream GROUP BY attr1;
+
+ +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0100.html b/docs/dli/sqlreference/dli_08_0100.html new file mode 100644 index 00000000..7d930dac --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0100.html @@ -0,0 +1,233 @@ + + +

Importing Data to the Table

+

Function

The LOAD DATA function can be used to import data in CSV, Parquet, ORC, JSON, and Avro formats. The data is converted into the Parquet data format for storage.

+
+

Syntax

1
+2
LOAD DATA INPATH 'folder_path' INTO TABLE [db_name.]table_name
+  OPTIONS(property_name=property_value, ...);
+
+ +
+
+

Keyword

  • INPATH: path of data to be imported
  • OPTIONS: list of properties
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

folder_path

+

OBS path of the file or folder used for storing the raw data.

+

db_name

+

Enter the database name. If this parameter is not specified, the current database is used.

+

table_name

+

Name of the DLI table to which data is to be imported.

+
+
+

The following configuration options can be used during data import:

+
  • DATA_TYPE: specifies the type of data to be imported. Currently, CSV, Parquet, ORC, JSON, and Avro are supported. The default value is CSV.

    The configuration item is OPTIONS ('DATA_TYPE' = 'CSV').

    +

    When importing a CSV file or a JSON file, you can select one of the following modes:

    +
    • PERMISSIVE: When the PERMISSIVE mode is selected, the data of a column is set to null if its data type does not match that of the target table column.
    • DROPMALFORMED: When the DROPMALFORMED mode is selected, the data of a column is not imported if its data type does not match that of the target table column.
    • FAILFAST: When the FAILFAST mode is selected, exceptions might occur and the import may fail if a column type does not match.
    +

    You can set the mode by adding OPTIONS ('MODE' = 'PERMISSIVE') to the OPTIONS parameter.

    +
  • DELIMITER: You can specify a separator in the import statement. The default value is ,.

    The configuration item is OPTIONS('DELIMITER'=',').

    +

    For CSV data, the following delimiters are supported:

    +
    • Tab character, for example, 'DELIMITER'='\t'.
    • Any binary character, for example, 'DELIMITER'='\u0001(^A)'.
    • Single quotation mark ('). A single quotation mark must be enclosed in double quotation marks (" "). For example, 'DELIMITER'= "'".
    • \001(^A) and \017(^Q) are also supported, for example, 'DELIMITER'='\001(^A)' and 'DELIMITER'='\017(^Q)'.
    +
  • QUOTECHAR: You can specify quotation marks in the import statement. The default value is double quotation marks (").

    The configuration item is OPTIONS('QUOTECHAR'='"').

    +
  • COMMENTCHAR: You can specify the comment character in the import statement. During the import operation, if a comment character is at the beginning of a row, the row is considered as a comment and will not be imported. The default value is a pound key (#).

    The configuration item is OPTIONS('COMMENTCHAR'='#').

    +
  • HEADER: Indicates whether the source file contains a header. Possible values can be true and false. true indicates that the source file contains a header, and false indicates that the source file does not contain a header. The default value is false. If no header exists, specify the FILEHEADER parameter in the LOAD DATA statement to add a header.

    The configuration item is OPTIONS('HEADER'='true').

    +
  • FILEHEADER: If the source file does not contain any header, add a header to the LOAD DATA statement.

    OPTIONS('FILEHEADER'='column1,column2')

    +
  • ESCAPECHAR: Used to perform strict verification of the escape character in CSV files. The default value is a backslash (\\).
    The configuration item is OPTIONS('ESCAPECHAR'='\\').

    When entering ESCAPECHAR in the CSV data, enclose it in double quotation marks (" "), for example, "a\b".

    +
    +
    +
  • MAXCOLUMNS: This parameter is optional and specifies the maximum number of columns parsed by a CSV parser in a line.

    The configuration item is OPTIONS('MAXCOLUMNS'='400').

    + +
    + + + + + + + + + +
    Table 2 MAXCOLUMNS

    Name of the Optional Parameter

    +

    Default Value

    +

    Maximum Value

    +

    MAXCOLUMNS

    +

    2000

    +

    20000

    +
    +
    +

    Increasing the value of the MAXCOLUMNS option increases the executor memory required for data import. As a result, data may fail to be imported due to insufficient executor memory.

    +
    +
  • DATEFORMAT: Specifies the date format of a column.

    OPTIONS('DATEFORMAT'='dateFormat')

    +
    • The default value is yyyy-MM-dd.
    • The date format is specified by the date pattern string of Java. In Java date and time pattern strings, unquoted characters from A to Z and a to z are interpreted as pattern characters, which represent date or time string elements. If a pattern character is enclosed in single quotation marks ('), it is matched as literal text rather than parsed. For the definition of pattern characters in Java, see Table 3.
    +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Table 3 Definition of characters involved in the date and time patterns

    Character

    +

    Date or Time Element

    +

    Example

    +

    G

    +

    Epoch ID

    +

    AD

    +

    y

    +

    Year

    +

    1996; 96

    +

    M

    +

    Month

    +

    July; Jul; 07

    +

    w

    +

    Number of the week in a year

    +

    27 (the twenty-seventh week of the year)

    +

    W

    +

    Number of the week in a month

    +

    2 (the second week of the month)

    +

    D

    +

    Number of the day in a year

    +

    189 (the 189th day of the year)

    +

    d

    +

    Number of the day in a month

    +

    10 (the tenth day of the month)

    +

    u

    +

    Number of the day in a week

    +

    1 (Monday), ..., 7 (Sunday)

    +

    a

    +

    am/pm flag

    +

    pm (12:00-24:00)

    +

    H

    +

    Hour time (0-23)

    +

    2

    +

    h

    +

    Hour time (1-12)

    +

    12

    +

    m

    +

    Number of minutes

    +

    30

    +

    s

    +

    Number of seconds

    +

    55

    +

    S

    +

    Number of milliseconds

    +

    978

    +

    z

    +

    Time zone

    +

    Pacific Standard Time; PST; GMT-08:00

    +
    +
    +
  • TIMESTAMPFORMAT: Specifies the timestamp format of a column.

    OPTIONS('TIMESTAMPFORMAT'='timestampFormat')

    +
    +
    +
+
+
  • Mode: Specifies the processing mode of error records while importing. The options are as follows: PERMISSIVE, DROPMALFORMED, and FAILFAST.

    OPTIONS('MODE'='permissive')

    +
    • PERMISSIVE (default): Parse bad records as much as possible. If a field cannot be converted, the entire row is null.
    • DROPMALFORMED: Ignore the bad records that cannot be parsed.
    • FAILFAST: If a record cannot be parsed, an exception is thrown and the job fails.
    +
    +
  • BADRECORDSPATH: Specifies the directory for storing error records during the import.

    OPTIONS('BADRECORDSPATH'='obs://bucket/path')

    +

    It is recommended that this option be used together with the DROPMALFORMED mode to import the records that can be successfully converted into the target table and store the records that fail to be converted in the specified error record directory.

    +
    +
+

Precautions

  • When importing or creating an OBS table, you must specify a folder as the directory. If a file is specified, the data import may fail.
  • Only the raw data stored in the OBS path can be imported.
  • You are advised not to concurrently import data into a table. Concurrent imports may conflict with each other, causing the import to fail.
  • Only one path can be specified during data import. The path cannot contain commas (,).
  • If a folder and a file with the same name exist in the OBS bucket directory, the data is preferentially imported from the file rather than the folder.
  • When importing data in the PARQUET, ORC, or JSON format, you must specify DATA_TYPE. Otherwise, the data is parsed as the default format CSV and the imported data will be incorrect.
  • If the data to be imported is in the CSV or JSON format and contains date or timestamp columns, you need to specify DATEFORMAT and TIMESTAMPFORMAT. Otherwise, the data will be parsed using the default date and timestamp formats.
+
+

Example

Before importing data, you must create a table. For details, see Creating an OBS Table or Creating a DLI Table.

+
+
  • To import a CSV file to a DLI table named t, run the following statement:
    1
    +2
    LOAD DATA INPATH 'obs://dli/data.csv' INTO TABLE t
    +  OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','COMMENTCHAR'='#','HEADER'='false');
    +
    + +
    +
  • To import a JSON file to a DLI table named jsontb, run the following statement:
    1
    +2
    LOAD DATA INPATH 'obs://dli/alltype.json' into table jsontb
    +  OPTIONS('DATA_TYPE'='json','DATEFORMAT'='yyyy/MM/dd','TIMESTAMPFORMAT'='yyyy/MM/dd HH:mm:ss');
    +
    + +
    +
+
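  • To import a headerless CSV file and supply column names through FILEHEADER (a sketch based on the HEADER and FILEHEADER options described above; the OBS path and column names are illustrative), run the following statement:
    LOAD DATA INPATH 'obs://dli/no_header.csv' INTO TABLE t
      OPTIONS('DELIMITER'=',', 'HEADER'='false', 'FILEHEADER'='key,value');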
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0101.html b/docs/dli/sqlreference/dli_08_0101.html new file mode 100644 index 00000000..8fabf3ce --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0101.html @@ -0,0 +1,69 @@ + + +

Other Functions

+

Array Functions

+
+ + + + + + + + + + + + + +
Table 1 Array functions

Function

+

Return Data Type

+

Description

+

CARDINALITY(ARRAY)

+

INT

+

Return the element count of an array.

+

ELEMENT(ARRAY)

+

-

+

Return the sole element of an array with a single element. If the array contains no elements, null is returned. If the array contains multiple elements, an exception is reported.

+
+
+

Example:

+

The returned number of elements in the array is 3.

+
insert into temp select CARDINALITY(ARRAY[TRUE, TRUE, FALSE]) from source_stream;
+

HELLO WORLD is returned.

+
insert into temp select ELEMENT(ARRAY['HELLO WORLD']) from source_stream;
+
+

Attribute Access Functions

+
+ + + + + + + + + + + + + +
Table 2 Attribute access functions

Function

+

Return Data Type

+

Description

+

tableName.compositeType.field

+

-

+

Selects a single field. Use the field name to access a field of an Apache Flink composite type, such as Tuple or POJO, and return its value.

+

tableName.compositeType.*

+

-

+

Selects all fields. Converts an Apache Flink composite type, such as Tuple or POJO, and all of its direct subtypes into a flat table, where each subtype becomes a separate field.

+
+
+
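Example (a hypothetical sketch of the access patterns in Table 2; source_stream and its POJO-typed column user are illustrative names, not part of the reference above):

insert into temp select source_stream.user.name from source_stream;
insert into temp select source_stream.user.* from source_stream;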
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0102.html b/docs/dli/sqlreference/dli_08_0102.html new file mode 100644 index 00000000..d3cd84bb --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0102.html @@ -0,0 +1,194 @@ + + +

SELECT

+

SELECT

Syntax

+
1
+2
+3
+4
+5
SELECT [ ALL | DISTINCT ]  { * | projectItem [, projectItem ]* }  
+  FROM tableExpression  
+  [ WHERE booleanExpression ]  
+  [ GROUP BY { groupItem [, groupItem ]* } ]  
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

The SELECT statement is used to select data from a table or insert constant data into a table.

+

Precautions

+
  • The table to be queried must exist. Otherwise, an error is reported.
  • WHERE is used to specify the filtering condition, which can be the arithmetic operator, relational operator, or logical operator.
  • GROUP BY is used to specify the grouping field, which can be one or more multiple fields.
+

Example

+

Select the orders that contain more than 3 units.

+
1
insert into temp SELECT  * FROM Orders WHERE units > 3; 
+
+ +
+

Insert a group of constant data.

+
1
insert into temp select 'Lily', 'male', 'student', 17;
+
+ +
+
+

WHERE Filtering Clause

Syntax

+
1
+2
+3
SELECT   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+
+ +
+

Description

+

This statement is used to filter the query results using the WHERE clause.

+

Precautions

+
  • The to-be-queried table must exist.
  • WHERE filters the records that do not meet the requirements.
+

Example

+

Filter the orders that contain more than 3 and fewer than 10 units.

+
1
+2
insert into temp SELECT  * FROM Orders
+  WHERE units > 3 and units < 10; 
+
+ +
+
+

HAVING Filtering Clause

Function

+

This statement is used to filter the query results using the HAVING clause.

+

Syntax

+
1
+2
+3
+4
+5
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

Generally, HAVING and GROUP BY are used together: GROUP BY applies first for grouping, and HAVING then applies for filtering. Arithmetic operations and aggregate functions are supported in the HAVING clause.

+

Precautions

+

If the filtering condition is subject to the query results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for filtering.

+

Example

+

Group the student table according to the name field and filter the records in which the maximum score is higher than 95 based on groups.

+
1
+2
+3
insert into temp SELECT name, max(score) FROM student
+  GROUP BY name
+  HAVING max(score) >95
+
+ +
+
+

Column-Based GROUP BY

Function

+

This statement is used to group a table based on columns.

+

Syntax

+
1
+2
+3
+4
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+
+ +
+

Description

+

Column-based GROUP BY can be categorized into single-column GROUP BY and multi-column GROUP BY.

+
  • Single-column GROUP BY indicates that the GROUP BY clause contains only one column.
  • Multi-column GROUP BY indicates that the GROUP BY clause contains multiple columns. The table will be grouped according to all fields in the GROUP BY clause. The records whose fields are the same are grouped into one group.
+

Precautions

+

None

+

Example

+

Group the student table according to the score and name fields and return the grouping results.

+
1
+2
insert into temp SELECT name,score, max(score) FROM student 
+  GROUP BY name,score;
+
+ +
+
+

Expression-Based GROUP BY

Function

+

This statement is used to group a table according to expressions.

+

Syntax

+
1
+2
+3
+4
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+
+ +
+

Description

+

groupItem can have one or more fields. The fields can be called by string functions, but cannot be called by aggregate functions.

+

Precautions

+

None

+

Example

+

Use the substring function to obtain the character string from the name field, group the student table according to the obtained character string, and return each sub character string and the number of records.

+
1
+2
insert into temp SELECT substring(name,6),count(name) FROM student
+  GROUP BY substring(name,6);
+
+ +
+
+

GROUP BY Using HAVING

Function

+

This statement filters a table after grouping it using the HAVING clause.

+

Syntax

+
1
+2
+3
+4
+5
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

Generally, HAVING and GROUP BY are used together. GROUP BY applies first for grouping and HAVING then applies for filtering.

+

Precautions

+
  • If the filtering condition is subject to the query results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for filtering.
  • Fields used in HAVING, except for those used for aggregate functions, must exist in GROUP BY.
  • The arithmetic operation and aggregate function are supported by the HAVING clause.
+

Example

+

Group the transactions according to num, use the HAVING clause to filter the records in which the maximum value derived from multiplying price with amount is higher than 5000, and return the filtered results.

+
1
+2
+3
+4
insert into temp SELECT num, max(price*amount) FROM transactions
+  WHERE time > '2016-06-01'
+  GROUP BY num
+  HAVING max(price*amount)>5000;
+
+ +
+
+

UNION

Syntax

+
1
query UNION [ ALL ] query
+
+ +
+

Description

+

This statement is used to return the union set of multiple query results.

+

Precautions

+
  • A set operation joins the results of multiple queries from head to tail under certain conditions. Each SELECT statement must return the same number of columns, and the column types must match; the column names can differ.
  • By default, the repeated records returned by UNION are removed. The repeated records returned by UNION ALL are not removed.
+

Example

+

Output the union set of Orders1 and Orders2 without duplicate records.

+
1
+2
insert into temp SELECT  * FROM Orders1
+  UNION SELECT  * FROM Orders2;
+
+ +
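To keep duplicate records instead, use UNION ALL on the same tables:

insert into temp SELECT  * FROM Orders1
  UNION ALL SELECT  * FROM Orders2;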
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0103.html b/docs/dli/sqlreference/dli_08_0103.html new file mode 100644 index 00000000..fcc0340a --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0103.html @@ -0,0 +1,77 @@ + + +

Condition Expression

+

CASE Expression

Syntax

+
1
+2
+3
CASE value WHEN value1 [, value11 ]* THEN result1
+  [ WHEN valueN [, valueN1 ]* THEN resultN ]* [ ELSE resultZ ]
+  END
+
+ +
+

or

+
1
+2
+3
CASE WHEN condition1 THEN result1
+  [ WHEN conditionN THEN resultN ]* [ ELSE resultZ ]
+  END
+
+ +
+

Description

+
  • If the value of value is value1, result1 is returned. If the value is not any of the values listed in the clause, resultZ is returned. If no else statement is specified, null is returned.
  • If the value of condition1 is true, result1 is returned. If the value does not match any condition listed in the clause, resultZ is returned. If no else statement is specified, null is returned.
+

Precautions

+
  • All results must be of the same type.
  • All conditions must be of the Boolean type.
  • If the value does not match any condition, the value of ELSE is returned when the else statement is specified, and null is returned when no else statement is specified.
+

Example

+

If the value of units equals 5, 1 is returned. Otherwise, 0 is returned.

+

Example 1:

+
1
insert into temp SELECT  CASE units WHEN 5 THEN 1 ELSE 0 END FROM Orders;
+
+ +
+

Example 2:

+
1
insert into temp SELECT CASE WHEN units = 5 THEN 1 ELSE 0 END FROM Orders;
+
+ +
+
+

NULLIF Expression

Syntax

+
1
NULLIF(value, value)
+
+ +
+

Description

+

If the values are the same, NULL is returned. For example, NULL is returned from NULLIF (5,5) and 5 is returned from NULLIF (5,0).

+

Precautions

+

None

+

Example

+

If the value of units equals 3, null is returned. Otherwise, the value of units is returned.

+
1
insert into temp SELECT  NULLIF(units, 3) FROM Orders;
+
+ +
+
+

COALESCE Expression

Syntax

+
1
COALESCE(value, value [, value ]* )
+
+ +
+

Description

+

Return the first value that is not NULL, counting from left to right.

+

Precautions

+

All values must be of the same type.

+

Example

+

5 is returned from the following example:

+
1
insert into temp SELECT  COALESCE(NULL, 5) FROM Orders;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0104.html b/docs/dli/sqlreference/dli_08_0104.html new file mode 100644 index 00000000..c32b9003 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0104.html @@ -0,0 +1,433 @@ + + +

Aggregate Functions

+

An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved by an SQL statement. Table 1 lists aggregate functions.

+
Sample data: Table T1
|score|
+|81   |
+|100  |
+|60   |
+|95   |
+|86   |
+
+

Common Aggregate Functions

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Common aggregation functions

Function

+

Return Data Type

+

Description

+

COUNT(*)

+

BIGINT

+

Return count of tuples.

+

COUNT([ ALL ] expression | DISTINCT expression1 [, expression2]*)

+

BIGINT

+

Returns the number of input rows for which the expression is not NULL. Use DISTINCT for a unique instance of each value.

+

AVG(numeric)

+

DOUBLE

+

Return average (arithmetic mean) of all input values.

+

SUM(numeric)

+

DOUBLE

+

Return the sum of all input numerical values.

+

MAX(value)

+

DOUBLE

+

Return the maximum value of all input values.

+

MIN(value)

+

DOUBLE

+

Return the minimum value of all input values.

+

STDDEV_POP(value)

+

DOUBLE

+

Return the population standard deviation of all numeric fields of all input values.

+

STDDEV_SAMP(value)

+

DOUBLE

+

Return the sample standard deviation of all numeric fields of all input values.

+

VAR_POP(value)

+

DOUBLE

+

Return the population variance (square of population standard deviation) of numeral fields of all input values.

+

VAR_SAMP(value)

+

DOUBLE

+

Return the sample variance (square of the sample standard deviation) of numeric fields of all input values.

+
+
+
+

Example

  • COUNT(*)
    • Test statement
      SELECT COUNT(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 2 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      5

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • COUNT([ ALL ] expression | DISTINCT expression1 [, expression2]*)
    • Test statement
      SELECT COUNT(DISTINCT content ) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 3 T1

      content (STRING)

      +

      Test Result

      +

      "hello1 "

      +

      2

      +

      +

      +

      +

      +

      "hello2 "

      +

      "hello2"

      +

      null

      +

      86

      +
      +
      +
    +
  • AVG(numeric)
    • Test statement
      SELECT AVG(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 4 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      84.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • SUM(numeric)
    • Test statement
      SELECT SUM(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 5 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      422.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • MAX(value)
    • Test statement
      SELECT MAX(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 6 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      100.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • MIN(value)
    • Test statement
      SELECT MIN(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 7 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      60.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • STDDEV_POP(value)
    • Test statement
      SELECT STDDEV_POP(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 8 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      13.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • STDDEV_SAMP(value)
    • Test statement
      SELECT STDDEV_SAMP(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 9 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      15.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • VAR_POP(value)
    • Test statement
      SELECT VAR_POP(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 10 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      193.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
  • VAR_SAMP(value)
    • Test statement
      SELECT VAR_SAMP(score) FROM T1;
      +
    • Test data and results +
      + + + + + + + + + + + + + + + +
      Table 11 T1

      Test Data (score)

      +

      Test Result

      +

      81

      +

      241.0

      +

      +

      +

      +

      +

      100

      +

      60

      +

      95

      +

      86

      +
      +
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0105.html b/docs/dli/sqlreference/dli_08_0105.html new file mode 100644 index 00000000..f377aa39 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0105.html @@ -0,0 +1,48 @@ + + +

Viewing Table Statistics

+

Function

This statement is used to view the table statistics. The names and data types of all columns in a specified table will be returned.

+
+

Syntax

1
DESCRIBE [EXTENDED|FORMATTED] [db_name.]table_name;
+
+ +
+
+

Keyword

  • EXTENDED: displays all metadata of the specified table. It is used during debugging in general.
  • FORMATTED: displays all metadata of the specified table in a form.
+
+

Parameters

+
Table 1 Parameter description
  db_name: Database name that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_).
  table_name: Table name of a database that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.
+
+
+
+

Precautions

The to-be-queried table must exist. If this statement is used to query the information about a table that does not exist, an error is reported.

+
+

Example

To query the names and data types of all columns in the student table, run the following statement:

+
DESCRIBE student;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0106.html b/docs/dli/sqlreference/dli_08_0106.html new file mode 100644 index 00000000..6adce888 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0106.html @@ -0,0 +1,146 @@ + + +

JOIN Between Stream Data and Table Data

+

The JOIN operation allows you to query data from a table and write the query result to the sink stream. Currently, only RDS and DCS Redis tables are supported. The ON keyword describes the key used for the data query, and the value field is then written to the sink stream.

+

For details about the data definition statements of RDS tables, see Creating an RDS Table.

+

For details about the data definition statements of Redis tables, see Creating a Redis Table.

+

Syntax

FROM tableExpression JOIN tableExpression
+  ON value11 = value21 [ AND value12 = value22]
+
+ +
+
+

Syntax Description

The ON keyword supports only equivalence queries on table attributes. If a level-2 key exists (specifically, when the Redis value type is HASH), the AND keyword is required to express the equivalence query between the Key and the Hash Key.

+
+

Precautions

None

+
+

Example

Perform an equivalence JOIN between the vehicle information source stream and the vehicle price table, obtain the vehicle price data, and write it to the vehicle information sink stream.

+
+
CREATE SOURCE STREAM car_infos (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_detail_type STRING
+)
+WITH (
+  type = "dis",
+  region = "",
+  channel = "dliinput",
+  partition_count = "1",
+  encode = "csv",
+  field_delimiter = ","
+);
+
+/** Create a data dimension table to connect to the source stream to fulfill field backfill.
+  *
+  * Reconfigure the following options according to actual conditions:
+  * value_type: indicates the value type of the Redis key value. The value can be STRING, HASH, SET, ZSET, or LIST. For the HASH type, you need to specify hash_key_column as the layer-2 primary key. For the SET type, you need to concatenate all queried values using commas (,).
+  * key_column: indicates the column name corresponding to the primary key of the dimension table.
+  * hash_key_column: indicates the column name corresponding to the KEY of the HASHMAP when value_type is HASH. If value_type is not HASH, you do not need to set this option.
+  * cluster_address: indicates the DCS Redis cluster address.
+  * password: indicates the DCS Redis cluster password.
+  **/
+CREATE TABLE car_price_table (
+  car_brand STRING,
+  car_detail_type STRING,
+  car_price STRING
+)
+WITH (
+  type = "dcs_redis",
+  value_type = "hash",
+  key_column = "car_brand",
+  hash_key_column = "car_detail_type",
+  cluster_address = "192.168.1.238:6379",
+  password = "xxxxxxxx"
+);
+
+CREATE SINK STREAM audi_car_owner_info (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_detail_type STRING,
+  car_price STRING
+)
+WITH (
+  type = "dis",
+  region = "",
+  channel = "dlioutput",
+  partition_key = "car_owner",
+  encode = "csv",
+  field_delimiter = ","
+);
+
+INSERT INTO audi_car_owner_info
+SELECT t1.car_id, t1.car_owner, t2.car_brand, t1.car_detail_type, t2.car_price
+FROM car_infos as t1 join car_price_table as t2
+ON t2.car_brand = t1.car_brand and t2.car_detail_type = t1.car_detail_type
+WHERE t1.car_brand = "audi";
+
+ +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0107.html b/docs/dli/sqlreference/dli_08_0107.html new file mode 100644 index 00000000..b1a75c83 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0107.html @@ -0,0 +1,186 @@ + + +

Configuring Time Models

+

Flink provides two time models: processing time and event time.

+

DLI allows you to specify the time model during creation of the source stream and temporary stream.

+

Configuring Processing Time

Processing time refers to the system time, which is irrelevant to the data timestamp.

+

Syntax

+
CREATE SOURCE STREAM stream_name(...) WITH (...)
+TIMESTAMP BY proctime.proctime;
+CREATE TEMP STREAM stream_name(...)
+TIMESTAMP BY proctime.proctime;
+
+ +
+

Description

+

To set the processing time, you only need to add proctime.proctime following TIMESTAMP BY. You can directly use the proctime field later.

+

Precautions

+

None

+

Example

+
CREATE SOURCE STREAM student_scores (
+  student_number STRING, /* Student ID */
+  student_name STRING, /* Name */
+  subject STRING, /* Subject */
+  score INT /* Score */
+)
+WITH (
+  type = "dis",
+  region = "",
+  channel = "dliinput",
+  partition_count = "1",
+  encode = "csv",
+  field_delimiter=","
+)TIMESTAMP BY proctime.proctime;
+
+INSERT INTO score_greate_90
+SELECT student_name, sum(score) over (order by proctime RANGE UNBOUNDED PRECEDING) 
+FROM student_scores;
+
+ +
+
+

Configuring Event Time

Event time refers to the time when an event is generated, that is, the timestamp the data carries when it is produced.

+

Syntax

+
CREATE SOURCE STREAM stream_name(...) WITH (...)
+TIMESTAMP BY {attr_name}.rowtime
+SET WATERMARK (RANGE {time_interval} | ROWS {literal}, {time_interval});
+
+ +
+

Description

+

To set the event time, you need to select a certain attribute in the stream as the timestamp and set the watermark policy.

+

Out-of-order or late events may occur due to network faults. A watermark must be configured so that the window waits a certain period of time before being triggered for calculation. Watermarks are mainly used to handle data that arrives out of order between the time events are generated and the time they are sent to DLI during stream processing.

+

The following two watermark policies are available:

+
  • By time interval
    SET WATERMARK(range interval {time_unit}, interval {time_unit})
    +
    + +
    +
  • By event quantity
    SET WATERMARK(rows literal, interval {time_unit})
    +
    + +
    +
+

Parameters are separated by commas (,). The first parameter indicates the watermark sending interval and the second indicates the maximum event delay.

+
+

Precautions

+

None

+

Example

+
  • Send a watermark every 10 seconds after the time2 event is generated, with a maximum event latency of 20 seconds.
    CREATE SOURCE STREAM student_scores (
    +  student_number STRING, /* Student ID */
    +  student_name STRING, /* Name */
    +  subject STRING, /* Subject */
    +  score INT, /* Score */
    +  time2 TIMESTAMP
    +)
    +WITH (
    +  type = "dis",
    +  region = "",
    +  channel = "dliinput",
    +  partition_count = "1",
    +  encode = "csv",
    +  field_delimiter=","
    +)
    +TIMESTAMP BY time2.rowtime
    +SET WATERMARK (RANGE interval 10 second, interval 20 second);
    +
    +INSERT INTO score_greate_90
    +SELECT student_name, sum(score) over (order by time2 RANGE UNBOUNDED PRECEDING) 
    +FROM student_scores;
    +
    + +
    +
  • Send a watermark every time 10 data records are received, with a maximum event latency of 20 seconds.
    CREATE SOURCE STREAM student_scores (
    +  student_number STRING, /* Student ID */
    +  student_name STRING, /* Name */
    +  subject STRING, /* Subject */
    +  score INT, /* Score */
    +  time2 TIMESTAMP
    +)
    +WITH (
    +  type = "dis",
    +  region = "",
    +  channel = "dliinput",
    +  partition_count = "1",
    +  encode = "csv",
    +  field_delimiter=","
    +)
    +TIMESTAMP BY time2.rowtime
    +SET WATERMARK (ROWS 10, interval 20 second);
    +
    +INSERT INTO score_greate_90
    +SELECT student_name, sum(score) over (order by time2 RANGE UNBOUNDED PRECEDING) 
    +FROM student_scores;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0108.html b/docs/dli/sqlreference/dli_08_0108.html new file mode 100644 index 00000000..2e621783 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0108.html @@ -0,0 +1,223 @@ + + +

Pattern Matching

+

Complex event processing (CEP) detects complex patterns in unbounded data streams, making it possible to identify and search for patterns across rows of data. Pattern matching is a powerful aid to complex event handling.

+

CEP is used in a collection of event-driven business processes, such as abnormal behavior detection in secure applications and the pattern of searching for prices, transaction volume, and other behavior in financial applications. It also applies to fraud detection and sensor data analysis.

+

Syntax

MATCH_RECOGNIZE (
+      [ PARTITION BY expression [, expression ]* ]
+      [ ORDER BY orderItem [, orderItem ]* ]
+      [ MEASURES measureColumn [, measureColumn ]* ]
+      [ ONE ROW PER MATCH | ALL ROWS PER MATCH ]
+      [ AFTER MATCH
+            ( SKIP TO NEXT ROW
+            | SKIP PAST LAST ROW
+            | SKIP TO FIRST variable
+            | SKIP TO LAST variable
+            | SKIP TO variable )
+      ]
+      PATTERN ( pattern )
+      [ WITHIN intervalLiteral ]
+      DEFINE variable AS condition [, variable AS condition ]*
+) MR
+
+ +
+

Pattern matching in SQL is performed using the MATCH_RECOGNIZE clause. MATCH_RECOGNIZE enables you to do the following tasks:

+
  • Logically partition and order the data that is used in the MATCH_RECOGNIZE clause with its PARTITION BY and ORDER BY clauses.
  • Define patterns of rows to seek using the PATTERN clause of the MATCH_RECOGNIZE clause. These patterns use regular expression syntax.
  • Specify the logical conditions required to map a row to a row pattern variable in the DEFINE clause.
  • Define measures, which are expressions usable in other parts of the SQL query, in the MEASURES clause.
+
+
+

Syntax description

+
Table 1 Syntax description
  PARTITION BY (optional): Logically divides the rows into groups.
  ORDER BY (optional): Logically orders the rows in a partition.
  [ONE ROW | ALL ROWS] PER MATCH (optional): Chooses summaries or details for each match.
    • ONE ROW PER MATCH: Each match produces one summary row.
    • ALL ROWS PER MATCH: A match spanning multiple rows produces one output row for each row in the match.
    The following provides an example:
      SELECT * FROM MyTable MATCH_RECOGNIZE
      (
        MEASURES AVG(B.id) as Bid
        ALL ROWS PER MATCH
        PATTERN (A B C)
        DEFINE
          A AS A.name = 'a',
          B AS B.name = 'b',
          C as C.name = 'c'
      ) MR
    Example description: Assume that the format of MyTable is (id, name) and there are three data records: (1, a), (2, b), and (3, c). ONE ROW PER MATCH outputs the average value 2 of B. ALL ROWS PER MATCH outputs each record together with the average value of B, specifically, (1, a, null), (2, b, 2), (3, c, 2).
  MEASURES (optional): Defines calculations for export from the pattern matching.
  PATTERN (mandatory): Defines the row pattern that will be matched.
    • PATTERN (A B C) indicates to detect concatenated events A, B, and C.
    • PATTERN (A | B) indicates to detect A or B.
    • Modifiers:
      • *: 0 or more iterations. For example, A* matches A zero or more times.
      • +: 1 or more iterations. For example, A+ matches A one or more times.
      • ?: 0 or 1 iteration. For example, A? matches A zero times or once.
      • {n}: n iterations (n > 0). For example, A{5} matches A five times.
      • {n,}: n or more iterations (n ≥ 0). For example, A{5,} matches A five or more times.
      • {n, m}: between n and m (inclusive) iterations (0 ≤ n ≤ m, 0 < m). For example, A{3,6} matches A 3 to 6 times.
      • {, m}: between 0 and m (inclusive) iterations (m > 0). For example, A{,4} matches A 0 to 4 times.
  DEFINE (mandatory): Defines primary pattern variables.
  AFTER MATCH SKIP (optional): Defines where to restart the matching process after a match is found.
    • SKIP TO NEXT ROW: Resumes pattern matching at the row after the first row of the current match.
    • SKIP PAST LAST ROW: Resumes pattern matching at the next row after the last row of the current match.
    • SKIP TO FIRST variable: Resumes pattern matching at the first row that is mapped to the pattern variable.
    • SKIP TO LAST variable: Resumes pattern matching at the last row that is mapped to the pattern variable.
    • SKIP TO variable: Same as SKIP TO LAST variable.
+
+

Functions Supported by CEP

+
Table 2 Function description
  MATCH_NUMBER(): Finds which rows are in which match. It can be used in the MEASURES and DEFINE clauses.
  CLASSIFIER(): Finds which pattern variable applies to which rows. It can be used in the MEASURES and DEFINE clauses.
  FIRST()/LAST(): FIRST returns the value of an expression evaluated in the first row of the group of rows mapped to a pattern variable. LAST returns the value of an expression evaluated in the last row of that group. In PATTERN (A B+ C), FIRST(B.id) indicates the ID of the first B in the match, and LAST(B.id) indicates the ID of the last B in the match.
  NEXT()/PREV(): Relative offset, which can be used in DEFINE. For example, PATTERN (A B+) DEFINE B AS B.price > PREV(B.price).
  RUNNING/FINAL: RUNNING matches the intermediate value, while FINAL matches the final result value. Generally, RUNNING/FINAL is valid only in ALL ROWS PER MATCH. For example, if there are three records (a, 2), (b, 6), and (c, 12), the values of RUNNING AVG(A.price) and FINAL AVG(A.price) are (2, 6), (4, 6), and (6, 6).
  Aggregate functions (COUNT, SUM, AVG, MAX, MIN): Aggregation operations. These functions can be used in the MEASURES and DEFINE clauses. For details, see Aggregate Functions.
+
+
+
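As a hedged sketch of how several of these functions combine (assuming, as in the earlier example, a stream MyTable with columns id and name plus a proctime attribute), the following query labels every output row with its match number and the pattern variable it was mapped to:

SELECT * FROM MyTable MATCH_RECOGNIZE
(
  ORDER BY proctime
  MEASURES
    MATCH_NUMBER() AS match_no,      -- sequential number of the containing match
    CLASSIFIER() AS variable_name,   -- pattern variable mapped to this row
    FINAL AVG(B.id) AS avg_b_id      -- average over the whole match, on every row
  ALL ROWS PER MATCH
  PATTERN (A B+)
  DEFINE
    A AS A.name = 'a',
    B AS B.id > PREV(B.id)
) MR;

Here PREV(B.id) compares each candidate B row with the previous row, and FINAL AVG(B.id) reports the match-wide average on every output row.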

Example

  • Fake plate vehicle detection
+

CEP performs pattern matching, based on license plate switchover features, on vehicle data collected within 5 minutes by cameras installed on urban roads or highways in different areas.

+
INSERT INTO fake_licensed_car
+SELECT * FROM camera_license_data MATCH_RECOGNIZE
+(
+  PARTITION BY car_license_number
+  ORDER BY proctime
+  MEASURES A.car_license_number as car_license_number, A.camera_zone_number as first_zone, B.camera_zone_number as second_zone
+  ONE ROW PER MATCH
+  AFTER MATCH SKIP TO LAST C
+  PATTERN (A B+ C)
+  WITHIN interval '5' minute
+  DEFINE
+    B AS B.camera_zone_number <> A.camera_zone_number,
+	C AS C.camera_zone_number = A.camera_zone_number
+) MR;
+
+ +
+

+

According to this rule, if a vehicle of a license plate number drives from area A to area B but another vehicle of the same license plate number is detected in area A within 5 minutes, then the vehicle in area A is considered to carry a fake license plate.

+

Input data:

+
Zhejiang B88888, zone_A
+Zhejiang AZ626M, zone_A
+Zhejiang B88888, zone_A
+Zhejiang AZ626M, zone_A
+Zhejiang AZ626M, zone_A
+Zhejiang B88888, zone_B
+Zhejiang B88888, zone_B
+Zhejiang AZ626M, zone_B
+Zhejiang AZ626M, zone_B
+Zhejiang AZ626M, zone_C
+Zhejiang B88888, zone_A
+Zhejiang B88888, zone_A
+

The output is as follows:

+
Zhejiang B88888, zone_A, zone_B
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0109.html b/docs/dli/sqlreference/dli_08_0109.html new file mode 100644 index 00000000..3e52dc99 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0109.html @@ -0,0 +1,21 @@ + + +

StreamingML

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0110.html b/docs/dli/sqlreference/dli_08_0110.html new file mode 100644 index 00000000..285dc8dd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0110.html @@ -0,0 +1,115 @@ + + +

Anomaly Detection

+

Anomaly detection applies to various scenarios, including intrusion detection, financial fraud detection, sensor data monitoring, medical diagnosis, natural data detection, and more. The typical algorithms for anomaly detection include the statistical modeling method, distance-based calculation method, linear model, and nonlinear model.

+

DLI uses an anomaly detection method based on the random forest, which has the following characteristics:

+
  • The one-pass algorithm is used with O(1) amortized time complexity and O(1) space complexity.
  • The random forest structure is constructed only once. The model update operation only updates the node data distribution values.
  • The node stores data distribution information of multiple windows, and the algorithm can detect data distribution changes.
  • Anomaly detection and model updates are completed in the same code framework.
+

Syntax

SRF_UNSUP(ARRAY[Field 1, Field 2, ...], 'Optional parameter list')
+
+ +
+
  • The anomaly score returned by the function is a DOUBLE value in the range [0, 1].
  • All fields must be of the same type. If the field types differ, use the CAST function to convert them, for example, ARRAY[a, CAST(b as DOUBLE)]; see the sketch after this list.
  • The syntax of the optional parameter list is as follows: "key1=value1,key2=value2,..."
+
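For instance, a sketch of two-field detection with a type cast, assuming a hypothetical stream MyTable with a DOUBLE field a, an INT field b, and a proctime attribute:

SELECT SRF_UNSUP(ARRAY[a, CAST(b as DOUBLE)], "numTrees=15,maxLeafCount=15")
  OVER (ORDER BY proctime RANGE BETWEEN INTERVAL '99' SECOND PRECEDING AND CURRENT ROW)
FROM MyTable;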
+
+

Parameter Description

+
Table 1 Parameter description
  transientThreshold (optional, default: 5): Threshold for the histogram change that indicates a change in the data.
  numTrees (optional, default: 15): Number of trees composing the random forest.
  maxLeafCount (optional, default: 15): Maximum number of leaf nodes one tree can have.
  maxTreeHeight (optional, default: 12): Maximum height of the tree.
  seed (optional, default: 4010): Random seed value used by the algorithm.
  numClusters (optional, default: 2): Number of types of data to be detected. By default, two data types are available: anomalous and normal data.
  dataViewMode (optional, default: history): Algorithm learning mode. The value history indicates that all historical data is considered; the value horizon indicates that only historical data of a recent time period (typically a size of 4 windows) is considered.
+
+

Example

Anomaly detection is conducted on field c in the data stream MyTable. If the anomaly score is greater than 0.8, the record is considered anomalous.

+
SELECT c,
+	CASE WHEN SRF_UNSUP(ARRAY[c], "numTrees=15,seed=4010") OVER (ORDER BY proctime RANGE BETWEEN INTERVAL '99' SECOND PRECEDING AND CURRENT ROW) > 0.8
+		 THEN 'anomaly' 
+		 ELSE 'not anomaly' 
+	END
+FROM MyTable  
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0111.html b/docs/dli/sqlreference/dli_08_0111.html new file mode 100644 index 00000000..10ab8ed1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0111.html @@ -0,0 +1,138 @@ + + +

Time Series Forecasting

+

Modeling and forecasting time series is a common task in many business verticals. Modeling is used to extract meaningful statistics and other characteristics of the data. Forecasting is the use of a model to predict future data. DLI provides a series of stochastic linear models to help users conduct online modeling and forecasting in real time.

+

ARIMA (Non-Seasonal)

Auto-Regressive Integrated Moving Average (ARIMA) is a classical model used for time series forecasting and is closely correlated with the AR, MA, and ARMA models.

+
+
  • The AR, MA, and ARMA models are applicable to stationary sequences.
    • AR(p) is an autoregressive model. It expresses the next value as a linear combination of the p most recent values and predicts the next value using the weights of that linear combination.
    • MA(q) is a moving average model. It expresses the next value as a linear combination of the q most recent white noise values plus the average value, and likewise predicts the next value using the weights of that combination.
    • ARMA(p, q) is an autoregressive moving average model, which integrates the advantages of both the AR and MA models. In the ARMA model, the autoregressive part quantifies the relationship between the current data and the previous data, and the moving average part handles the random disturbances. Therefore, the ARMA model is usually more effective than AR or MA alone.
    +
  • ARIMA is suitable for non-stationary series. In ARIMA(p, d, q), p indicates the autoregressive order, d indicates the difference order, and q indicates the moving average order.
+

Syntax

+
AR_PRED(field, degree): Use the AR model to forecast new data.
+AR_COEF(field, degree): Return the weight of the AR model.
+ARMA_PRED(field, degree): Use the ARMA model to forecast new data.
+ARMA_COEF(field, degree): Return the weight of the ARMA model.
+ARIMA_PRED(field, degree, derivativeOrder): Use ARIMA to forecast new data.
+
+ +
+ +
Table 1 Parameter description
  field (mandatory): Name of the field in the data stream whose data is used for prediction.
  degree (optional, default: 5): Defines how many steps in the past are considered for the next prediction. Currently, only p = q = degree is allowed.
  derivativeOrder (optional, default: 1): Derivative order. Generally, this parameter is set to 1 or 2.
+
+
+

Example

+

Separately use AR, ARMA, and ARIMA to forecast the time series ordered by rowtime.

+
SELECT b, 
+	AR_PRED(b) OVER (ORDER BY rowtime ROWS  BETWEEN 5 PRECEDING AND CURRENT ROW) AS ar, 
+	ARMA_PRED(b) OVER (ORDER BY rowtime ROWS  BETWEEN 5 PRECEDING AND CURRENT ROW) AS arma,  
+	ARIMA_PRED(b) OVER (ORDER BY rowtime ROWS  BETWEEN 5 PRECEDING AND CURRENT ROW) AS arima 
+FROM MyTable
+
+ +
+

Holt Winters

The Holt-Winters algorithm is one of the Exponential smoothing methods used to forecast seasonal data in time series.

+
+

Syntax

+
HOLT_WINTERS(field, seasonality, forecastOrder)
+
+ +
+ +
Table 2 Parameter description
  field (mandatory): Name of the field in the data stream whose data is used for prediction.
  seasonality (mandatory): Seasonality space used to perform the prediction. For example, if data samples are collected daily and the season space to consider is a week, then seasonality is 7.
  forecastOrder (optional, default: 1): Number of steps ahead to forecast. If forecastOrder is set to 1, the algorithm forecasts the next value. If it is set to 2, the algorithm forecasts the value 2 steps ahead in the future. When using this parameter, ensure that the OVER window size is greater than the value of this parameter.
+
+
+

Example

+

Use Holt-Winters to forecast time series ordered by rowtime.

+
SELECT b, 
+	HOLT_WINTERS(b, 5) OVER (ORDER BY rowtime ROWS  BETWEEN 5 PRECEDING AND CURRENT ROW) AS a1, 
+	HOLT_WINTERS(b, 5, 2) OVER (ORDER BY rowtime ROWS  BETWEEN 5 PRECEDING AND CURRENT ROW) AS a2
+FROM MyTable  
+
+ +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0112.html b/docs/dli/sqlreference/dli_08_0112.html new file mode 100644 index 00000000..72217190 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0112.html @@ -0,0 +1,202 @@ + + +

Type Conversion Functions

+

Syntax

CAST(value AS type)
+
+

Syntax Description

This function is used to forcibly convert types.

+
+

Precautions

  • If the input is NULL, NULL is returned.
  • Flink jobs do not support converting bigint to timestamp using CAST. Use to_timestamp or to_localtimestamp instead; see the sketch after this list.
+
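For instance, a minimal sketch of such a conversion, assuming the source stream has a hypothetical BIGINT field ts:

insert into temp select to_timestamp(ts) from source_stream;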
+

Example

Convert amount into a string. The length specified for the target string type does not take effect after the conversion.

+
insert into temp select cast(amount as VARCHAR(10)) from source_stream;
+
+

Common Type Conversion Functions

+
Table 1 Common type conversion functions
  cast(v1 as varchar): Converts v1 to a string. The value of v1 can be of the numeric type or of the timestamp, date, or time type.
  cast(v1 as int): Converts v1 to the int type. The value of v1 can be a number or a character.
  cast(v1 as timestamp): Converts v1 to the timestamp type. The value of v1 can be of the string, date, or time type.
  cast(v1 as date): Converts v1 to the date type. The value of v1 can be of the string or timestamp type.
+
+
  • cast(v1 as varchar)
    • Test statement
      SELECT cast(content as varchar) FROM T1;
    • Test data and result

      Table 2 T1
        content (INT): 5
        varchar: "5"

  • cast (v1 as int)
    • Test statement
      SELECT cast(content as int) FROM T1;
    • Test data and result

      Table 3 T1
        content (STRING): "5"
        int: 5

  • cast(v1 as timestamp)
    • Test statement
      SELECT cast(content as timestamp) FROM T1;
    • Test data and result

      Table 4 T1
        content (STRING): "2018-01-01 00:00:01"
        timestamp: 1514736001000

  • cast(v1 as date)
    • Test statement
      SELECT cast(content as date) FROM T1;
    • Test data and result

      Table 5 T1
        content (TIMESTAMP): 1514736001000
        date: "2018-01-01"
+

Detailed Sample Code

/** source **/
+CREATE
+SOURCE STREAM car_infos (cast_int_to_varchar int, cast_String_to_int string,
+case_string_to_timestamp string, case_timestamp_to_date timestamp) WITH (
+  type = "dis",
+  region = "xxxxx",
+  channel = "dis-input",
+  partition_count = "1",
+  encode = "json",
+  offset = "13",
+  json_config =
+"cast_int_to_varchar=cast_int_to_varchar;cast_String_to_int=cast_String_to_int;case_string_to_timestamp=case_string_to_timestamp;case_timestamp_to_date=case_timestamp_to_date"
+ 
+);
+/** sink **/
+CREATE
+SINK STREAM cars_infos_out (cast_int_to_varchar varchar, cast_String_to_int
+int, case_string_to_timestamp timestamp, case_timestamp_to_date date) WITH (
+  type = "dis",
+  region = "xxxxx",
+  channel = "dis-output",
+  partition_count = "1",
+  encode = "json",
+  offset = "4",
+  json_config =
+"cast_int_to_varchar=cast_int_to_varchar;cast_String_to_int=cast_String_to_int;case_string_to_timestamp=case_string_to_timestamp;case_timestamp_to_date=case_timestamp_to_date",
+  enable_output_null="true"
+);
+/** Statistics on static car information**/
+INSERT
+INTO
+  cars_infos_out
+SELECT
+  cast(cast_int_to_varchar as varchar),
+  cast(cast_String_to_int as int),
+  cast(case_string_to_timestamp as timestamp),
+  cast(case_timestamp_to_date as date)
+FROM
+  car_infos;
+
+ +
+

Returned data

+
{"case_string_to_timestamp":1514736001000,"cast_int_to_varchar":"5","case_timestamp_to_date":"2018-01-01","cast_String_to_int":100}
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0118.html b/docs/dli/sqlreference/dli_08_0118.html new file mode 100644 index 00000000..d741ee1c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0118.html @@ -0,0 +1,19 @@ + + +

Creating a Datasource Connection with an HBase Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0119.html b/docs/dli/sqlreference/dli_08_0119.html new file mode 100644 index 00000000..4b9903a1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0119.html @@ -0,0 +1,120 @@ + + +

Creating a DLI Table and Associating It with HBase

+

Function

This statement is used to create a DLI table and associate it with an existing HBase table.

+
+

Prerequisites

  • Before creating a DLI table and associating it with HBase, you need to create a datasource connection. For details about operations on the management console, see
  • Ensure that the /etc/hosts information of the master node in the MRS cluster is added to the host file of the DLI queue.

    For details about how to add an IP-domain mapping, see Enhanced Datasource Connection in the Data Lake Insight User Guide.

    +
  • The syntax is not supported for security clusters.
+
+

Syntax

  • Single row key
    CREATE TABLE [IF NOT EXISTS] TABLE_NAME (
    +  ATTR1 TYPE,
    +  ATTR2 TYPE,
    +  ATTR3 TYPE)
    +  USING [CLOUDTABLE | HBASE] OPTIONS (
    +  'ZKHost'='xx',
    +  'TableName'='TABLE_IN_HBASE',
    +  'RowKey'='ATTR1',
    +  'Cols'='ATTR2:CF1.C1, ATTR3:CF1.C2');
    +
    + +
    +
  • Combined row key
    CREATE TABLE [IF NOT EXISTS] TABLE_NAME (
    +  ATTR1 String,
    +  ATTR2 String,
    +  ATTR3 TYPE)
    +  USING [CLOUDTABLE | HBASE] OPTIONS (
    +  'ZKHost'='xx',
    +  'TableName'='TABLE_IN_HBASE',
    +  'RowKey'='ATTR1:2, ATTR2:10',
+  'Cols'='ATTR2:CF1.C1, ATTR3:CF1.C2');
    +
    + +
    +
+
+

Keyword

+
Table 1 CREATE TABLE parameter description
  USING [CLOUDTABLE | HBASE]: Specifies whether the HBase datasource is CLOUDTABLE or HBASE. The value is case-insensitive.
  ZKHost: ZooKeeper IP address of the HBase cluster. Before obtaining the ZooKeeper IP address, you need to create a datasource connection first.
    • To access a CloudTable cluster, enter the ZooKeeper IP address (internal network).
    • To access an MRS cluster, enter the IP address of the node where ZooKeeper is located and the external port number of ZooKeeper, in the format ZK_IP1:ZK_PORT1,ZK_IP2:ZK_PORT2.
  TableName: Name of a table that has been created in the HBase cluster.
  RowKey: Row key field of the table connected to DLI. Single and composite row keys are supported. A single row key can be of the numeric or string type; its length does not need to be specified. A composite row key supports only fixed-length data of the string type, in the format attribute_name1:length, attribute_name2:length.
  Cols: Mappings between fields in the DLI table and columns in the HBase table, separated by commas (,). In each mapping, the field in the DLI table is located before the colon (:) and the HBase table information follows the colon (:). In the HBase table information, the column family and column name are separated using a dot (.).
+
+

Precautions

  • If the to-be-created table exists, an error is reported. To avoid such an error, add IF NOT EXISTS to this statement.
  • All parameters in OPTIONS are mandatory. Parameter names are case-insensitive, while parameter values are case-sensitive.
  • In OPTIONS, spaces are not allowed before or after a quoted value, because spaces are considered part of the value.
  • Descriptions of table names and column names support only string constants.
  • When creating a table, specify each column name and its data type. Currently, the supported data types are Boolean, short, int, long, float, double, and string.
  • The value of the row key (for example, ATTR1) cannot be null, and its length must be greater than 0 and less than or equal to 32767.
  • The total number of fields in Cols plus the row key must equal the number of fields in the DLI table. Specifically, every field in the table is mapped to either Cols or the row key, with no ordering requirement.
  • The combined row key supports only data of the string type. If a combined row key is used, a length must follow each attribute name. If only one field is specified as the row key, the field can be of any supported data type and you do not need to specify the length.
  • If the combined row key is used (see the combined row key sketch after the example below):
    • When the row key is inserted, if the actual attribute value is shorter than the specified length, \0 is appended to it. If it is longer, the value is truncated when it is inserted into HBase.
    • When the row key field is read from HBase, if the actual data length of an attribute is shorter than the specified length, an error message (OutofBoundException) is reported. If it is longer, the value is truncated during data reading.
    +
+
+

Example

CREATE TABLE test_hbase(
+ATTR1 int,
+ATTR2 int,
+ATTR3 string)
+using hbase OPTIONS (
+'ZKHost'='to-hbase-1174405101-CE1bDm5B.datasource.com:2181',
+'TableName'='HBASE_TABLE',
+'RowKey'='ATTR1',
+'Cols'='ATTR2:CF1.C1, ATTR3:CF1.C2');
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0120.html b/docs/dli/sqlreference/dli_08_0120.html new file mode 100644 index 00000000..9a57ab5d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0120.html @@ -0,0 +1,99 @@ + + +

Inserting Data to an HBase Table

+

Function

This statement is used to insert data in a DLI table to the associated HBase table.

+
+

Syntax

  • Insert the SELECT query result into a table.
    INSERT INTO DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
+  [GROUP BY field]
+  [ORDER BY field]
+  [LIMIT num];
    +
    + +
    +
  • Insert a data record into a table.
    INSERT INTO DLI_TABLE
    +  VALUES values_row [, values_row ...];
    +
    + +
    +
+
+

Keywords

For details about the SELECT keywords, see Basic SELECT Statements.

+
+

Parameter description

+
Table 1 Parameter description
  DLI_TABLE: Name of the DLI table for which a datasource connection has been created.
  DLI_TEST: Table that contains the data to be queried.
  field1,field2..., field: Column values in the DLI_TEST table must match the column values and types in the DLI_TABLE table.
  where_condition: Query condition.
  num: Limits the query result. The num parameter supports only the INT type.
  values_row: Value to be inserted into the table. Use commas (,) to separate columns.
+
+

Precautions

  • The target DLI table must exist.
  • In the column family created in Creating a Table and Associating It with HBase, if the column family specified by Cols in OPTIONS does not exist, an error is reported when INSERT INTO is executed.
  • If the row key, column family, or column you insert into the HBase table already exists, the existing data in the HBase table is overwritten.
  • You are advised not to concurrently insert data into a table. If you concurrently insert data into a table, there is a possibility that conflicts occur, leading to failed data insertion.
  • INSERT OVERWRITE is not supported.
+
+

Example

  • Query data in the user table and insert the data into the test table.
    INSERT INTO test
    +  SELECT ATTR_EXPR
    +  FROM user
    +  WHERE user_name='cyz'
+  GROUP BY user_age
+  LIMIT 3
    +
    + +
    +
  • Insert data 1 into the test table.
    INSERT INTO test 
    +  VALUES (1);
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0121.html b/docs/dli/sqlreference/dli_08_0121.html new file mode 100644 index 00000000..4019514a --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0121.html @@ -0,0 +1,72 @@ + + +

Querying an HBase Table

+

This statement is used to query data in an HBase table.

+

Syntax

SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Precautions

The table to be queried must exist. Otherwise, an error is reported.

+
+

Example

Query data in the test_ct table.

+
SELECT * FROM test_hbase limit 100;
+
+ +
+
+

Query Pushdown

Query pushdown implements data filtering using HBase. Specifically, the HBase Client sends filtering conditions to the HBase server, and the HBase server returns only the required data, speeding up your Spark SQL queries. For the filter criteria that HBase does not support, for example, query with the composite row key, Spark SQL performs data filtering.

+
  • Scenarios where query pushdown is supported
    • Query pushdown can be performed on data of the following types:
      • Int
      • boolean
      • short
      • long
      • double
      • string
      +

      Data of the float type does not support query pushdown.

      +
      +
    • Query pushdown is supported for the following filter criteria:
      • >, <, >=, <=, =, !=, and, or

        The following is an example:

        +
        select * from tableName where (column1 >= value1 and column2<= value2) or column3 != value3
        +
        + +
        +
      • like and not like. Prefix, suffix, and inclusion matches are supported.

        The following is an example:

        +
        select * from tableName where column1 like "%value" or column2 like "value%" or column3 like "%value%"
        +
        + +
        +
      • IsNotNull()

        The following is an example:

        +
        select * from tableName where IsNotNull(column)
        +
        + +
        +
      • in and not in

        The following is an example:

        +
        select * from tableName where column1 in (value1,value2,value3)  and column2 not in (value4,value5,value6)
        +
        + +
        +
      • between _ and _

        The following is an example:

        +
        select * from tableName where column1 between value1 and value2
        +
        + +
        +
      • Filtering of the row sub-keys in the composite row key

        For example, to perform row sub-key query on the composite row key column1+column2+column3, run the following statement:

        +
        select * from tableName where column1= value1
        +
        + +
        +
      +
    +
  • Scenarios where query pushdown is not supported
    • Query pushdown is not supported for the following data types:

      Except for the preceding data types for which query pushdown is supported, data of other types does not support query pushdown.

      +
    • Query pushdown is not supported for the following filter criteria:
      • Length, count, max, min, join, groupby, orderby, limit, and avg
      • Column comparison

        The following is an example:

        +
        select * from tableName where column1 > (column2+column3)
        +
        + +
        +
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0122.html b/docs/dli/sqlreference/dli_08_0122.html new file mode 100644 index 00000000..fefe1a2f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0122.html @@ -0,0 +1,71 @@ + + +

Creating a DLI Table and Associating It with OpenTSDB

+

Function

Run the CREATE TABLE statement to create a DLI table and associate it with an existing metric in OpenTSDB. This syntax supports OpenTSDB on both CloudTable and MRS.

+
+

Prerequisites

Before creating a DLI table and associating it with OpenTSDB, you need to create a datasource connection. For details about operations on the management console, see

+
+

Syntax

CREATE TABLE [IF NOT EXISTS] UQUERY_OPENTSDB_TABLE_NAME
+  USING OPENTSDB OPTIONS (
+  'host' = 'xx;xx',
+  'metric' = 'METRIC_NAME',
+  'tags' = 'TAG1,TAG2');
+
+ +
+
+

Keyword

+
Table 1 CREATE TABLE parameter description
  host: OpenTSDB IP address. Before obtaining the OpenTSDB IP address, you need to create a datasource connection first.
    • After the connection is created successfully, you can access CloudTable OpenTSDB by entering the IP address of the OpenTSDB.
    • You can also access MRS OpenTSDB. If you have created an enhanced datasource connection, enter the IP address and port number of the node where OpenTSDB is located, in the format IP:PORT. If OpenTSDB has multiple nodes, enter one of the node IP addresses.
  metric: Name of the metric in OpenTSDB corresponding to the DLI table to be created.
  tags: Tags corresponding to the metric, used for classification, filtering, and quick retrieval. You can set 1 to 8 tags, separated by commas (,). The parameter value includes the values of all tagKs in the corresponding metric.
+
+
+
+

Precautions

When creating a DLI table, you do not need to specify the timestamp and value fields. The system automatically builds the following fields based on the specified tags, where TAG1 and TAG2 are the tags you specified; see the query sketch after this list.

+
  • TAG1 String
  • TAG2 String
  • timestamp Timestamp
  • value double
+
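For instance, a query sketch against the opentsdb_table created in the Example below (whose tags are city and location, so the auto-built schema is city, location, timestamp, and value):

SELECT city, location, `timestamp`, `value` FROM opentsdb_table;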
+

Example

CREATE table opentsdb_table
+  USING OPENTSDB OPTIONS (
+  'host' = 'opentsdb-3xcl8dir15m58z3.cloudtable.com:4242',
+  'metric' = 'city.temp',
+  'tags' = 'city,location');
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0123.html b/docs/dli/sqlreference/dli_08_0123.html new file mode 100644 index 00000000..1649663c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0123.html @@ -0,0 +1,51 @@ + + +

Inserting Data to the OpenTSDB Table

+

Function

Run the INSERT INTO statement to insert the data in the DLI table to the associated OpenTSDB metric.

+

If no metric exists on the OpenTSDB, a new metric is automatically created on the OpenTSDB when data is inserted.

+
+
+

Syntax

INSERT INTO TABLE TABLE_NAME SELECT * FROM DLI_TABLE;
+
+ +
+
INSERT INTO TABLE TABLE_NAME VALUES(XXX);
+
+ +
+
+

Keyword

+
Table 1 INSERT INTO parameter description
  TABLE_NAME: Name of the associated OpenTSDB table.
  DLI_TABLE: Name of the DLI table created.
+
+
+
+

Precautions

  • The inserted data cannot be null. If the inserted data is the same as the original data or only the value is different, the inserted data overwrites the original data.
  • INSERT OVERWRITE is not supported.
  • You are advised not to concurrently insert data into a table. If you concurrently insert data into a table, there is a possibility that conflicts occur, leading to failed data insertion.
  • The TIMESTAMP format supports only yyyy-MM-dd hh:mm:ss.
+
+

Example

INSERT INTO TABLE opentsdb_table VALUES('xxx','xxx','2018-05-03 00:00:00',21);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0124.html b/docs/dli/sqlreference/dli_08_0124.html new file mode 100644 index 00000000..fdd325d0 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0124.html @@ -0,0 +1,28 @@ + + +

Querying an OpenTSDB Table

+

This SELECT command is used to query data in an OpenTSDB table.

+
  • If no metric exists in OpenTSDB, an error will be reported when the corresponding DLI table is queried.
  • If the security mode is enabled, you need to set conf:dli.sql.mrs.opentsdb.ssl.enabled to true when connecting to OpenTSDB.
+
+

Syntax

SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Precautions

The table to be queried must exist. Otherwise, an error is reported.

+
+

Example

Query data in the opentsdb_table table.

+
SELECT * FROM opentsdb_table limit 100;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0125.html b/docs/dli/sqlreference/dli_08_0125.html new file mode 100644 index 00000000..c7223e8e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0125.html @@ -0,0 +1,63 @@ + + +

Reserved Keywords

+

Flink SQL reserves some strings as keywords. If you want to use the following character strings as field names, ensure that they are enclosed by back quotes, for example, `value` and `count`.

+

A

  • A
  • ABS
  • ABSOLUTE
  • ACTION
  • ADA
  • ADD
  • ADMIN
  • AFTER
  • AK
  • ALL
  • ALLOCATE
  • ALLOW
  • ALTER
  • ALWAYS
  • AND
  • ANY
  • APPEND
  • APP_ID
  • ARE
  • ARRAY
  • ARRAY_BRACKET
  • AS
  • ASC
  • ASENSITIVE
  • ASSERTION
  • ASSIGNMENT
  • ASYMMETRIC
  • AT
  • AT_LEAST_ONCE
  • ATOMIC
  • ATTRIBUTE
  • ATTRIBUTES
  • AUTHORIZATION
  • AVG
  • AVRO_CONFIG
  • AVRO_DATA
  • AVRO_SCHEMA
+
+

B

  • BATCH_INSERT_DATA_NUM
  • BEFORE
  • BEGIN
  • BERNOULLI
  • BETWEEN
  • BIGINT
  • BINARY
  • BIT
  • BLOB
  • BOOL
  • BOOLEAN
  • BOTH
  • BREADTH
  • BUCKET
  • BY
+
+

C

  • C
  • CACHE_MAX_NUM
  • CACHE_TIME
  • CALL
  • CALLED
  • CARDINALITY
  • CASCADE
  • CASCADED
  • CASE
  • CAST
  • CATALOG
  • CATALOG_NAME
  • CEIL
  • CEILING
  • CENTURY
  • CHAIN
  • CHANNEL
  • CHAR
  • CHARACTER
  • CHARACTERISTICTS
  • CHARACTERS
  • CHARACTER_LENGTH
  • CHARACTER_SET_CATALOG
  • CHARACTER_SET_NAME
  • CHARACTER_SET_SCHEMA
  • CHAR_LENGTH
  • CHECK
  • CHECKPOINT_APP_NAME
  • CHECKPOINT_INTERVAL
  • CHECKPOINTINTERVAL
  • CLASS_ORIGIN
  • CLOB
  • CLOSE
  • CLUSTER_ADDRESS
  • CLUSTER_ID
  • CLUSTER_NAME
  • COALESCE
  • COBOL
  • COLLATE
  • COLLATION
  • COLLATION_CATALOG
  • COLLATION_NAME
  • COLLATION_SCHEMA
  • COLLECT
  • COLUMN
  • COLUMN_NAME
  • COLUMN_NAME_MAP
  • COMMAND_FUNCTION
  • COMMAND_FUNCTION_CODE
  • COMMIT
  • COMMITTED
  • CONDITION
  • CONDITION_NUMBER
  • CONFIGURATION
  • CONFLUENT_CERTIFICATE_NAME
  • CONFLUENT_PROPERTIES
  • CONFLUENT_SCHEMA_FIELD
  • CONFLUENT_URL
  • CONNECT
  • CONNECTION_NAME
  • CONSTRAINT
  • CONSTRAINTS
  • CONSTRAINT_CATALOG
  • CONSTRAINT_NAME
  • CONSTRAINT_SCHEMA
  • CONSTRUCTOR
  • CONTAINS
  • CONTINUE
  • CONVERT
  • CORR
  • CORRESPONDING
  • COUNT
  • COVAR_POP
  • COVAR_SAMP
  • CREATE
  • CREATE_IF_NOT_EXIST
  • CROSS
  • CUBE
  • CUME_DIST
  • CURRENT
  • CURRENT_CATALOG
  • CURRENT_DATE
  • CURRENT_DEFAULT_TRANSFORM_GROUP
  • CURRENT_PATH
  • CURRENT_ROLE
  • CURRENT_SCHEMA
  • CURRENT_TIMESTAMP
  • CURRENT_TRANSFORM_GROUP_FOR_TYPE
  • CURRENT_USER
  • CURSOR
  • CURSOR_NAME
  • CYCLE
+
+

D

  • DATABASE
  • DATE
  • DATETIME_INTERVAL_CODE
  • DATETIME_INTERVAL_PRECISION
  • DAY
  • DB_COLUMNS
  • DB_URL
  • DB_OBS_SERVER
  • DB_TYPE
  • DEALLOCATE
  • DEC
  • DECADE
  • DECIMAL
  • DECLARE
  • DEFAULTS
  • DEFERRABLE
  • DEFERRED
  • DEFINER
  • DEGREE
  • DELETE
  • DELETE_OBS_TEMP_FILE
  • DENSE_RANK
  • DEPTH
  • DEREF
  • DERIVED
  • DESC
  • DESCRIBE
  • DESCRIPTION
  • DESCRIPTOR
  • DETERMINISTIC
  • DIAGNOSTICS
  • DISALLOW
  • DISCONNECT
  • DIS_NOTICE_CHANNEL
  • DISPATCH
  • DISTINCT
  • DOMAIN
  • DOUBLE
  • DOW
  • DOY
  • DRIVER
  • DROP
  • DUMP_INTERVAL
  • DYNAMIC
  • DYNAMIC_FUNCTION
  • DYNAMIC_FUNCTION_CODE
+
+

E

  • EACH
  • ELEMENT
  • ELSE
  • EMAIL_KEY
  • ENABLECHECKPOINT
  • ENABLE_CHECKPOINT
  • ENABLE_OUTPUT_NULL
  • ENCODE
  • ENCODE_CLASS_NAME
  • ENCODE_CLASS_PARAMETER
  • ENCODED_DATA
  • END
  • ENDPOINT
  • END_EXEC
  • EPOCH
  • EQUALS
  • ESCAPE
  • ES_FIELDS
  • ES_INDEX
  • ES_TYPE
  • ESTIMATEMEM
  • ESTIMATEPARALLELISM
  • EXACTLY_ONCE
  • EXCEPT
  • EXCEPTION
  • EXCLUDE
  • EXCLUDING
  • EXEC
  • EXECUTE
  • EXISTS
  • EXP
  • EXPLAIN
  • EXTEND
  • EXTERNAL
  • EXTRACT
  • EVERY
+
+

F

  • FALSE
  • FETCH
  • FIELD_DELIMITER
  • FIELD_NAMES
  • FILE_PREFIX
  • FILTER
  • FINAL
  • FIRST
  • FIRST_VALUE
  • FLOAT
  • FLOOR
  • FOLLOWING
  • FOR
  • FUNCTION
  • FOREIGN
  • FORTRAN
  • FOUND
  • FRAC_SECOND
  • FREE
  • FROM
  • FULL
  • FUSION
+
+

G

  • G
  • GENERAL
  • GENERATED
  • GET
  • GLOBAL
  • GO
  • GOTO
  • GRANT
  • GRANTED
  • GROUP
  • GROUPING
  • GW_URL
+
+

H

  • HASH_KEY_COLUMN
  • HAVING
  • HIERARCHY
  • HOLD
  • HOUR
  • HTTPS_PORT
+
+

I

  • IDENTITY
  • ILLEGAL_DATA_TABLE
  • IMMEDIATE
  • IMPLEMENTATION
  • IMPORT
  • IN
  • INCLUDING
  • INCREMENT
  • INDICATOR
  • INITIALLY
  • INNER
  • INOUT
  • INPUT
  • INSENSITIVE
  • INSERT
  • INSTANCE
  • INSTANTIABLE
  • INT
  • INTEGER
  • INTERSECT
  • INTERSECTION
  • INTERVAL
  • INTO
  • INVOKER
  • IN_WITH_SCHEMA
  • IS
  • ISOLATION
+
+

J

  • JAVA
  • JOIN
  • JSON_CONFIG
  • JSON_SCHEMA
+
+

K

  • K
  • KAFKA_BOOTSTRAP_SERVERS
  • KAFKA_CERTIFICATE_NAME
  • KAFKA_GROUP_ID
  • KAFKA_PROPERTIES
  • KAFKA_PROPERTIES_DELIMITER
  • KAFKA_TOPIC
  • KEY
  • KEY_COLUMN
  • KEY_MEMBER
  • KEY_TYPE
  • KEY_VALUE
  • KRB_AUTH
+
+

L

  • LABEL
  • LANGUAGE
  • LARGE
  • LAST
  • LAST_VALUE
  • LATERAL
  • LEADING
  • LEFT
  • LENGTH
  • LEVEL
  • LIBRARY
  • LIKE
  • LIMIT
  • LONG
+
+

M

  • M
  • MAP
  • MATCH
  • MATCHED
  • MATCHING_COLUMNS
  • MATCHING_REGEX
  • MAX
  • MAXALLOWEDCPU
  • MAXALLOWEDMEM
  • MAXALLOWEDPARALLELISM
  • MAX_DUMP_FILE_NUM
  • MAX_RECORD_NUM_CACHE
  • MAX_RECORD_NUM_PER_FILE
  • MAXVALUE
  • MEMBER
  • MERGE
  • MESSAGE_COLUMN
  • MESSAGE_LENGTH
  • MESSAGE_OCTET_LENGTH
  • MESSAGE_SUBJECT
  • MESSAGE_TEXT
  • METHOD
  • MICROSECOND
  • MILLENNIUM
  • MIN
  • MINUTE
  • MINVALUE
  • MOD
  • MODIFIES
  • MODULE
  • MONTH
  • MORE
  • MS
  • MULTISET
  • MUMPS
+
+

N

  • NAME
  • NAMES
  • NATIONAL
  • NATURAL
  • NCHAR
  • NCLOB
  • NESTING
  • NEW
  • NEXT
  • NO
  • NONE
  • NORMALIZE
  • NORMALIZED
  • NOT
  • NULL
  • NULLABLE
  • NULLIF
  • NULLS
  • NUMBER
  • NUMERIC
+
+

O

  • OBJECT
  • OBJECT_NAME
  • OBS_DIR
  • OCTETS
  • OCTET_LENGTH
  • OF
  • OFFSET
  • OLD
  • ON
  • ONLY
  • OPEN
  • OPERATION_FIELD
  • OPTION
  • OPTIONS
  • OR
  • ORDER
  • ORDERING
  • ORDINALITY
  • OTHERS
  • OUT
  • OUTER
  • OUTPUT
  • OVER
  • OVERLAPS
  • OVERLAY
  • OVERRIDING
+
+

P

  • PAD
  • PARALLELISM
  • PARAMETER
  • PARAMETER_MODE
  • PARAMETER_NAME
  • PARAMETER_ORDINAL_POSITION
  • PARAMETER_SPECIFIC_CATALOG
  • PARAMETER_SPECIFIC_NAME
  • PARAMETER_SPECIFIC_SCHEMA
  • PARTIAL
  • PARTITION
  • PARTITION_COUNT
  • PARTITION_KEY
  • PARTITION_RANGE
  • PASCAL
  • PASSTHROUGH
  • PASSWORD
  • PATH
  • PERCENTILE_CONT
  • PERCENTILE_DISC
  • PERCENT_RANK
  • PERSIST_SCHEMA
  • PIPELINE_ID
  • PLACING
  • PLAN
  • PLI
  • POSITION
  • POWER
  • PRECEDING
  • PRECISION
  • PREPARE
  • PRESERVE
  • PRIMARY
  • PRIMARY_KEY
  • PRIOR
  • PRIVILEGES
  • PROCEDURE
  • PROCTIME
  • PROJECT_ID
  • PUBLIC
+
+

Q

  • QUARTER
  • QUOTE
+
+

R

  • RANGE
  • RANK
  • RAW
  • READ
  • READS
  • READ_ONCE
  • REAL
  • RECURSIVE
  • REF
  • REFERENCES
  • REFERENCING
  • REGION
  • REGR_AVGX
  • REGR_AVGY
  • REGR_COUNT
  • REGR_INTERCEPT
  • REGR_R2
  • REGR_SLOPE
  • REGR_SXX
  • REGR_SXY
  • REGR_SYY
  • RELATIVE
  • RELEASE
  • REPEATABLE
  • RESET
  • RESTART
  • RESTRICT
  • RESULT
  • RETURN
  • RETURNED_CARDINALITY
  • RETURNED_LENGTH
  • RETURNED_OCTET_LENGTH
  • RETURNED_SQLSTATE
  • RETURNS
  • REVOKE
  • RIGHT
  • ROLE
  • ROLLBACK
  • ROLLING_INTERVAL
  • ROLLING_SIZE
  • ROLLUP
  • ROUTINE
  • ROUTINE_CATALOG
  • ROUTINE_NAME
  • ROUTINE_SCHEMA
  • ROW
  • ROW_COUNT
  • ROW_DELIMITER
  • ROW_NUMBER
  • ROWS
  • ROWTIME
+
+

S

  • SAVEPOINT
  • SCALE
  • SCHEMA
  • SCHEMA_CASE_SENSITIVE
  • SCHEMA_NAME
  • SCOPE
  • SCOPE_CATALOGS
  • SCOPE_NAME
  • SCOPE_SCHEMA
  • SCROLL
  • SEARCH
  • SECOND
  • SECTION
  • SECURITY
  • SELECT
  • SELF
  • SENSITIVE
  • SEQUENCE
  • SERIALIZABLE
  • SERVER
  • SERVER_NAME
  • SESSION
  • SESSION_USER
  • SET
  • SETS
  • SIMILAR
  • SIMPLE
  • SINK
  • SIZE
  • SK
  • SMALLINT
  • SOME
  • SOURCE
  • SPACE
  • SPECIFIC
  • SPECIFICTYPE
  • SPECIFIC_NAME
  • SQL
  • SQLEXCEPTION
  • SQLSTATE
  • SQLWARNING
  • SQL_TSI_DAY
  • SQL_TSI_FRAC_SECOND
  • SQL_TSI_HOUR
  • SQL_TSI_MICROSECOND
  • SQL_TSI_MINUTE
  • SQL_TSI_MONTH
  • SQL_TSI_QUARTER
  • SQL_TSI_SECOND
  • SQL_TSI_WEEK
  • SQL_TSI_YEAR
  • SQRT
  • START
  • START_TIME
  • STATE
  • STATEMENT
  • STATIC
  • STDDEV_POP
  • STDDEV_SAMP
  • STREAM
  • STRING
  • STRUCTURE
  • STYLE
  • SUBCLASS_ORIGIN
  • SUBMULTISET
  • SUBSTITUTE
  • SUBSTRING
  • SUM
  • SYMMETRIC
  • SYSTEM
  • SYSTEM_USER
+
+

T

  • TABLE
  • TABLESAMPLE
  • TABLE_COLUMNS
  • TABLE_NAME
  • TABLE_NAME_MAP
  • TEMP
  • TEMPORARY
  • THEN
  • TIES
  • TIME
  • TIMESTAMP
  • TIMESTAMPADD
  • TIMESTAMPDIFF
  • TIMEZONE_HOUR
  • TIMEZONE_MINUTE
  • TINYINT
  • TO
  • TOP_LEVEL_COUNT
  • TOPIC
  • TOPIC_URN
  • TRAILING
  • TRANSACTION
  • TRANSACTIONAL_TABLE
  • TRANSACTIONS_ACTIVE
  • TRANSACTIONS_COMMITTED
  • TRANSACTIONS_ROLLED_BACK
  • TRANSFORM
  • TRANSFORMS
  • TRANSLATE
  • TRANSLATION
  • TRANX_ID
  • TREAT
  • TRIGGER
  • TRIGGER_CATALOG
  • TRIGGER_NAME
  • TRIGGER_SCHEMA
  • TRIM
  • TRUE
  • TSDB_LINK_ADDRESS
  • TSDB_METRICS
  • TSDB_TIMESTAMPS
  • TSDB_TAGS
  • TSDB_VALUES
  • TYPE
  • TYPE_CLASS_NAME
  • TYPE_CLASS_PARAMETER
+
+

U

  • UESCAPE
  • UNBOUNDED
  • UNCOMMITTED
  • UNDER
  • UNION
  • UNIQUE
  • UNKNOWN
  • UNNAMED
  • UNNEST
  • UPDATE
  • UPPER
  • UPSERT
  • URN_COLUMN
  • USAGE
  • USER
  • USER_DEFINED_TYPE_CATALOG
  • USER_DEFINED_TYPE_CODE
  • USER_DEFINED_TYPE_NAME
  • USER_DEFINED_TYPE_SCHEMA
  • USERNAME
  • USING
+
+

V

  • VALUE
  • VALUES
  • VALUE_TYPE
  • VARBINARY
  • VARCHAR
  • VARYING
  • VAR_POP
  • VAR_SAMP
  • VERSION
  • VERSION_ID
  • VIEW
+
+

W

  • WATERMARK
  • WEEK
  • WHEN
  • WHENEVER
  • WHERE
  • WIDTH_BUCKET
  • WINDOW
  • WITH
  • WITHIN
  • WITHOUT
  • WORK
  • WRAPPER
  • WRITE
+
+

X

  • XML
  • XML_CONFIG
+
+

Y

  • YEAR
+
+

Z

  • ZONE
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0129.html b/docs/dli/sqlreference/dli_08_0129.html new file mode 100644 index 00000000..b6a2948b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0129.html @@ -0,0 +1,17 @@ + + +

Views

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0130.html b/docs/dli/sqlreference/dli_08_0130.html new file mode 100644 index 00000000..b60fbfad --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0130.html @@ -0,0 +1,27 @@ + + +

Creating a View

+

Function

This statement is used to create views.

+
+

Syntax

CREATE [OR REPLACE] VIEW view_name AS select_statement;
+
+ +
+
+

Keyword

  • CREATE VIEW: creates a view based on the given select statement. The result of the select statement is not written to disk.
  • OR REPLACE: if the view already exists, no error is reported and the view definition is updated using the SELECT statement; see the sketch after the example below.
+
+

Precautions

  • The view to be created must not exist in the current database. Otherwise, an error will be reported. When the view exists, you can add keyword OR REPLACE to avoid the error message.
  • The table or view information contained in the view cannot be modified. If the table or view information is modified, the query may fail.
+
+

Example

To create a view named student_view for the queried ID and name of the student table, run the following statement:

+
CREATE VIEW student_view AS SELECT id, name FROM student;
+
+ +
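A sketch of updating that view definition without raising an error, assuming a hypothetical age column also exists in the student table:

CREATE OR REPLACE VIEW student_view AS SELECT id, name, age FROM student;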
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0131.html b/docs/dli/sqlreference/dli_08_0131.html new file mode 100644 index 00000000..0f5ce938 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0131.html @@ -0,0 +1,27 @@ + + +

Deleting a View

+

Function

This statement is used to delete views.

+
+

Syntax

DROP VIEW [IF EXISTS] [db_name.]view_name;
+
+ +
+
+

Keyword

DROP: Deletes the metadata of a specified view. Although views and tables have many common points, the DROP TABLE statement cannot be used to delete views.

+
+

Precautions

The to-be-deleted view must exist. If you run this statement to delete a view that does not exist, an error is reported. To avoid such an error, add IF EXISTS to this statement, as in the second example below.

+
+

Example

To delete a view named student_view, run the following statement:

+
DROP VIEW student_view;
+
+ +
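To make the deletion tolerant of a missing view, a sketch using IF EXISTS:

DROP VIEW IF EXISTS student_view;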
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0138.html b/docs/dli/sqlreference/dli_08_0138.html new file mode 100644 index 00000000..4c3ca3e7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0138.html @@ -0,0 +1,28 @@ + + +

Viewing the Execution Plan

+

Function

This statement returns the logical plan and physical execution plan for the SQL statement.

+
+

Syntax

1
EXPLAIN [EXTENDED | CODEGEN] statement;
+
+ +
+
+

Keyword

EXTENDED: After this keyword is specified, both the logical and physical plans are output.

+

CODEGEN: After this keyword is specified, the code generated by Codegen is also output.

+
+

Precautions

None

+
+

Example

To return the logical and physical plans of SELECT * FROM test, run the following statement:

+
1
EXPLAIN EXTENDED SELECT * FROM test;
+
+ +
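Similarly, to also output the code generated by Codegen for the same query, use the CODEGEN keyword. A sketch:

1
EXPLAIN CODEGEN SELECT * FROM test;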
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0139.html b/docs/dli/sqlreference/dli_08_0139.html new file mode 100644 index 00000000..c8c69a26 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0139.html @@ -0,0 +1,33 @@ + + +

Data Permissions Management

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0140.html b/docs/dli/sqlreference/dli_08_0140.html new file mode 100644 index 00000000..604c6be5 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0140.html @@ -0,0 +1,203 @@ + + +

Data Permissions List

+

Table 1 describes the SQL statement permission matrix in DLI in terms of permissions on databases, tables, and roles.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Permission matrix

Category

+

SQL statement

+

Permission

+

Description

+

Database

+

DROP DATABASE db1

+

The DROP_DATABASE permission of database.db1

+

-

+

CREATE TABLE tb1(...)

+

The CREATE_TABLE permission of database.db1

+

-

+

CREATE VIEW v1

+

The CREATE_VIEW permission of database.db1

+

-

+

EXPLAIN query

+

The EXPLAIN permission of database.db1

+

Depends on the permissions required by the query statement.

+

Table

+

SHOW CREATE TABLE tb1

+

The SHOW_CREATE_TABLE permission of database.db1.tables.tb1

+

-

+

DESCRIBE [EXTENDED|FORMATTED] tb1

+

The DESCRIBE_TABLE permission of databases.db1.tables.tb1

+

-

+

DROP TABLE [IF EXISTS] tb1

+

The DROP_TABLE permission of database.db1.tables.tb1

+

-

+

SELECT * FROM tb1

+

The SELECT permission of database.db1.tables.tb1

+

-

+

SELECT count(*) FROM tb1

+

The SELECT permission of database.db1.tables.tb1

+

-

+

SELECT * FROM view1

+

The SELECT permission of database.db1.tables.view1

+

-

+

SELECT count(*) FROM view1

+

The SELECT permission of database.db1.tables.view1

+

-

+

LOAD DLI TABLE

+

The INSERT_INTO_TABLE permission of database.db1.tables.tb1

+

-

+

INSERT INTO TABLE

+

The INSERT_INTO_TABLE permission of database.db1.tables.tb1

+

-

+

INSERT OVERWRITE TABLE

+

The INSERT_OVERWRITE_TABLE permission of database.db1.tables.tb1

+

-

+

ALTER TABLE ADD COLUMNS

+

The ALTER_TABLE_ADD_COLUMNS permission of database.db1.tables.tb1

+

-

+

ALTER TABLE RENAME

+

The ALTER_TABLE_RENAME permission of database.db1.tables.tb1

+

-

+

ROLE&PRIVILEGE

+

CREATE ROLE

+

The CREATE_ROLE permission of db

+

-

+

DROP ROLE

+

The DROP_ROLE permission of db

+

-

+

SHOW ROLES

+

The SHOW_ROLES permission of db

+

-

+

GRANT ROLES

+

The GRANT_ROLE permission of db

+

-

+

REVOKE ROLES

+

The REVOKE_ROLE permission of db

+

-

+

GRANT PRIVILEGE

+

The GRANT_PRIVILEGE permission of db or table

+

-

+

REVOKE PRIVILEGE

+

The REVOKE_PRIVILEGE permission of db or table

+

-

+

SHOW GRANT

+

The SHOW_GRANT permission of db or table

+

-

+
+
+

For privilege granting or revocation on databases and tables, DLI supports the following permissions:

+
  • Permissions that can be assigned or revoked on databases are as follows:
    • DROP_DATABASE (Deleting a database)
    • CREATE_TABLE (Creating a table)
    • CREATE_VIEW (Creating a view)
    • EXPLAIN (Explaining a SQL statement as an execution plan)
    • CREATE_ROLE (Creating a role)
    • DROP_ROLE (Deleting a role)
    • SHOW_ROLES (Displaying a role)
    • GRANT_ROLE (Binding a role)
    • REVOKE_ROLE (Unbinding a role)
    • DESCRIBE_TABLE (Describing a table)
    • DROP_TABLE (Deleting a table)
    • Select (Querying a table)
    • INSERT_INTO_TABLE (Inserting)
    • INSERT_OVERWRITE_TABLE (Overwriting)
    • GRANT_PRIVILEGE (Granting permissions to a database)
    • REVOKE_PRIVILEGE (Revoking permissions from a database)
    • SHOW_PRIVILEGES (Viewing the database permissions of other users)
    • ALTER_TABLE_ADD_PARTITION (Adding partitions to a partitioned table)
    • ALTER_TABLE_DROP_PARTITION (Deleting partitions from a partitioned table)
    • ALTER_TABLE_RENAME_PARTITION (Renaming table partitions)
    • ALTER_TABLE_RECOVER_PARTITION (Restoring table partitions)
    • ALTER_TABLE_SET_LOCATION (Setting the path of a partition)
    • SHOW_PARTITIONS (Displaying all partitions)
    • SHOW_CREATE_TABLE (Viewing table creation statements)
    +
  • Permissions that can be assigned or revoked on tables are as follows:
    • DESCRIBE_TABLE (Describing a table)
    • DROP_TABLE (Deleting a table)
    • Select (Querying a table)
    • INSERT_INTO_TABLE (Inserting)
    • INSERT_OVERWRITE_TABLE (Overwriting)
    • GRANT_PRIVILEGE (Granting permissions to a table)
    • REVOKE_PRIVILEGE (Revoking permissions from a table)
    • SHOW_PRIVILEGES (Viewing the table permissions of other users)
    • ALTER_TABLE_ADD_COLUMNS (Adding a column)
    • ALTER_TABLE_RENAME (Renaming a table)
    • ALTER_TABLE_ADD_PARTITION (Adding partitions to a partitioned table)
    • ALTER_TABLE_DROP_PARTITION (Deleting partitions from a partitioned table)
    • ALTER_TABLE_RENAME_PARTITION (Renaming table partitions)
    • ALTER_TABLE_RECOVER_PARTITION (Restoring table partitions)
    • ALTER_TABLE_SET_LOCATION (Setting the path of a partition)
    • SHOW_PARTITIONS (Displaying all partitions)
    • SHOW_CREATE_TABLE (Viewing table creation statements)
    +
+
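As a quick illustration, a database-level permission from the list above can be granted with the GRANT statement described in Granting a Permission. A sketch, assuming a database named db1 and a user named user_name1:

1
GRANT CREATE_TABLE ON databases.db1 TO USER user_name1;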
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0141.html b/docs/dli/sqlreference/dli_08_0141.html new file mode 100644 index 00000000..b3eb8af8 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0141.html @@ -0,0 +1,26 @@ + + +

Creating a Role

+

Function

  • This statement is used to create a role in the current database or a specified database.
  • Only users with the CREATE_ROLE permission on the database can create roles, for example, the administrator, the database owner, and other users with the CREATE_ROLE permission.
  • Each role must belong to only one database.
+
+

Syntax

1
CREATE ROLE [db_name].role_name;
+
+ +
+
+

Keyword

None

+
+

Precautions

  • The role_name to be created must not exist in the current database or the specified database. Otherwise, an error will be reported.
  • If db_name is not specified, the role is created in the current database.
+
+

Example

1
CREATE ROLE role1;
+
+ +
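To create the role in a specified database rather than the current one, prefix the role name with the database name. A sketch, assuming a database named db1:

1
CREATE ROLE db1.role1;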
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0142.html b/docs/dli/sqlreference/dli_08_0142.html new file mode 100644 index 00000000..e0c731de --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0142.html @@ -0,0 +1,26 @@ + + +

Binding a Role

+

Function

This statement is used to bind a user with a role.

+
+

Syntax

1
GRANT ([db_name].role_name,...) TO (user_name,...);
+
+ +
+
+

Keyword

None

+
+

Precautions

The role_name and user_name must exist. Otherwise, an error will be reported.

+
+

Example

1
GRANT role1 TO user_name1;
+
+ +
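As the syntax above shows, several roles can be bound to several users in one statement. A sketch, assuming that roles role1 and role2 and users user_name1 and user_name2 (hypothetical names) all exist:

1
GRANT (role1, role2) TO (user_name1, user_name2);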
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0143.html b/docs/dli/sqlreference/dli_08_0143.html new file mode 100644 index 00000000..5e4c7965 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0143.html @@ -0,0 +1,37 @@ + + +

Displaying a Role

+

Function

This statement is used to display all roles or roles bound to the user_name in the current database.

+
+

Syntax

1
SHOW [ALL] ROLES [user_name];
+
+ +
+
+

Keyword

ALL: Displays all roles.

+
+

Precautions

The ALL keyword and user_name cannot be used together.

+
+

Example

  • To display all roles bound to the user, run the following statement:
    1
    SHOW ROLES;
    +
    + +
    +
  • To display all roles in the project, run the following statement:
    1
    SHOW ALL ROLES;
    +
    + +
    +

    Only the administrator has the permission to run the show all roles statement.

    +
    +
  • To display all roles bound to the user named user_name1, run the following statement:
    1
    SHOW ROLES user_name1;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0144.html b/docs/dli/sqlreference/dli_08_0144.html new file mode 100644 index 00000000..7344fc4b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0144.html @@ -0,0 +1,94 @@ + + +

Granting a Permission

+

Function

This statement is used to grant permissions to a user or role.

+
+

Syntax

1
GRANT (privilege,...) ON (resource,...) TO (((ROLE [db_name].role_name) | (USER user_name)),...);
+
+ +
+
+

Keyword

ROLE: The subsequent role_name must be a role.

+

USER: The subsequent user_name must be a user.

+
+

Precautions

  • The privilege must be one of the authorizable permissions. If the object has the corresponding permission on the resource or the upper-level resource, the permission fails to be granted. For details about the permission types supported by the privilege, see Data Permissions List.
  • The resource can be a queue, database, table, view, or column. The formats are as follows:
    • Queue format: queues.queue_name

      The following table lists the permission types supported by a queue.

      + +
      + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

      Operation

      +

      Description

      +

      DROP_QUEUE

      +

      Deleting a queue

      +

      SUBMIT_JOB

      +

      Submitting a job

      +

      CANCEL_JOB

      +

Canceling a job

      +

      RESTART

      +

      Restarting a queue

      +

      SCALE_QUEUE

      +

      Scaling out/in a queue

      +

      GRANT_PRIVILEGE

      +

      Granting queue permissions

      +

      REVOKE_PRIVILEGE

      +

      Revoking queue permissions

      +

      SHOW_PRIVILEGES

      +

      Viewing queue permissions of other users

      +
      +
      +
    • Database format: databases.db_name

      For details about the permission types supported by a database, see Data Permissions List.

      +
    • Table format: databases.db_name.tables.table_name

      For details about the permission types supported by a table, see Data Permissions List.

      +
    • View format: databases.db_name.tables.view_name

      Permission types supported by a view are the same as those supported by a table. For details, see table permissions in Data Permissions List.

      +
    • Column format: databases.db_name.tables.table_name.columns.column_name

      Columns support only the SELECT permission.

      +
    +
+
+

Example

Run the following statement to grant user_name1 the permission to delete the db1 database:

+
1
GRANT DROP_DATABASE ON databases.db1 TO USER user_name1;
+
+ +
+

Run the following statement to grant user_name1 the SELECT permission of data table tb1 in the db1 database:

+
1
GRANT SELECT ON databases.db1.tables.tb1 TO USER user_name1;
+
+ +
+

Run the following statement to grant role_name the SELECT permission of data table tb1 in the db1 database:

+
1
GRANT SELECT ON databases.db1.tables.tb1 TO ROLE role_name;
+
+ +
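Queue permissions from the table above are granted in the same way. A sketch, assuming a queue named queue1 (a hypothetical name):

1
GRANT SUBMIT_JOB ON queues.queue1 TO USER user_name1;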
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0145.html b/docs/dli/sqlreference/dli_08_0145.html new file mode 100644 index 00000000..ede38800 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0145.html @@ -0,0 +1,34 @@ + + +

Displaying the Granted Permissions

+

Function

This statement is used to show the permissions granted to a user or role in the resource.

+
+

Syntax

1
SHOW GRANT ((ROLE [db_name].role_name) | (USER user_name)) ON resource;
+
+ +
+
+

Keyword

ROLE: The subsequent role_name must be a role.

+

USER: The subsequent user_name must be a user.

+
+

Precautions

The resource can be a queue, database, table, view, or column. The formats are as follows:

+
  • Queue format: queues.queue_name
  • Database format: databases.db_name
  • Table format: databases.db_name.tables.table_name
  • Column format: databases.db_name.tables.table_name.columns.column_name
  • View format: databases.db_name.tables.view_name
+
+

Example

Run the following statement to show permissions of user_name1 in the db1 database:

+
1
SHOW GRANT USER user_name1 ON databases.db1;
+
+ +
+

Run the following statement to show permissions of role_name on table tb1 in the db1 database:

+
1
SHOW GRANT ROLE role_name ON databases.db1.tables.tb1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0146.html b/docs/dli/sqlreference/dli_08_0146.html new file mode 100644 index 00000000..28d3434e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0146.html @@ -0,0 +1,39 @@ + + +

Revoking a Permission

+

Function

This statement is used to revoke permissions granted to a user or role.

+
+

Syntax

1
REVOKE (privilege,...) ON (resource,...) FROM (((ROLE [db_name].role_name) | (USER user_name)),...);
+
+ +
+
+

Keyword

ROLE: The subsequent role_name must be a role.

+

USER: The subsequent user_name must be a user.

+
+

Precautions

  • The privilege must be the granted permissions of the authorized object in the resource. Otherwise, the permission fails to be revoked. For details about the permission types supported by the privilege, see Data Permissions List.
  • The resource can be a queue, database, table, view, or column. The formats are as follows:
    • Queue format: queues.queue_name
    • Database format: databases.db_name
    • Table format: databases.db_name.tables.table_name
    • View format: databases.db_name.tables.view_name
    • Column format: databases.db_name.tables.table_name.columns.column_name
    +
+
+

Example

To revoke the permission of user user_name1 to delete database db1, run the following statement:

+
1
REVOKE DROP_DATABASE ON databases.db1 FROM USER user_name1;
+
+ +
+

To revoke the SELECT permission of user user_name1 on table tb1 in database db1, run the following statement:

+
1
REVOKE SELECT ON databases.db1.tables.tb1 FROM USER user_name1;
+
+ +
+

To revoke the SELECT permission of role role_name on table tb1 in database db1, run the following statement:

+
1
REVOKE SELECT ON databases.db1.tables.tb1 FROM ROLE role_name;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0147.html b/docs/dli/sqlreference/dli_08_0147.html new file mode 100644 index 00000000..d2d840ea --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0147.html @@ -0,0 +1,27 @@ + + +

Unbinding a Role

+

Function

This statement is used to unbind a user from a role.

+
+

Syntax

1
REVOKE ([db_name].role_name,...) FROM (user_name,...);
+
+ +
+
+

Keyword

None

+
+

Precautions

role_name and user_name must exist, and user_name must have been bound to role_name.

+
+

Example

To unbind user_name1 from role1, run the following statement:

+
1
REVOKE role1 FROM user_name1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0148.html b/docs/dli/sqlreference/dli_08_0148.html new file mode 100644 index 00000000..19a64a27 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0148.html @@ -0,0 +1,26 @@ + + +

Deleting a Role

+

Function

This statement is used to delete a role in the current database or a specified database.

+
+

Syntax

1
DROP ROLE [db_name].role_name;
+
+ +
+
+

Keyword

None

+
+

Precautions

  • The role_name to be deleted must exist in the current database or the specified database. Otherwise, an error will be reported.
  • If db_name is not specified, the role is deleted in the current database.
+
+

Example

1
DROP ROLE role1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0149.html b/docs/dli/sqlreference/dli_08_0149.html new file mode 100644 index 00000000..2d5ef5f4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0149.html @@ -0,0 +1,26 @@ + + +

Displaying the Binding Relationship Between All Roles and Users

+

Function

This statement is used to display the binding relationship between roles and a user in the current database.

+
+

Syntax

1
SHOW PRINCIPALS ROLE;
+
+ +
+
+

Keyword

None

+
+

Precautions

The ROLE variable must exist.

+
+

Example

1
SHOW PRINCIPALS role1;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0150.html b/docs/dli/sqlreference/dli_08_0150.html new file mode 100644 index 00000000..1fe83967 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0150.html @@ -0,0 +1,112 @@ + + +

Basic SELECT Statements

+

Function

This statement is a basic query statement and is used to return the query results.

+
+

Syntax

1
+2
+3
+4
+5
+6
+7
SELECT [ALL | DISTINCT] attr_expr_list FROM table_reference
+  [WHERE where_condition]
+  [GROUP BY col_name_list]
+  [ORDER BY col_name_list][ASC | DESC]
+  [CLUSTER BY col_name_list | DISTRIBUTE BY col_name_list]
+  [SORT BY col_name_list]
+  [LIMIT number];
+
+ +
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 SELECT parameter description

Parameter

+

Description

+

ALL

+

Returns all rows, including duplicates; this is the default behavior. ALL can only be followed by an asterisk (*). Otherwise, an error will occur.

+

DISTINCT

+

Removes duplicate rows from the result set.

+

WHERE

+

Specifies the filter criteria for a query. Arithmetic operators, relational operators, and logical operators are supported.

+

where_condition

+

Filter criteria.

+

GROUP BY

+

Specifies the grouping field. Single-field grouping and multi-field grouping are supported.

+

col_name_list

+

Field list

+

ORDER BY

+

Sorts the query results.

+

ASC/DESC

+

ASC sorts from the lowest value to the highest value. DESC sorts from the highest value to the lowest value. ASC is the default sort order.

+

CLUSTER BY

+

CLUSTER BY buckets the table by the specified fields and then sorts the data within each bucket. If the DISTRIBUTE BY fields are the same as the SORT BY fields and the sort order is ascending, the combination of DISTRIBUTE BY and SORT BY achieves the same function as CLUSTER BY.

+

DISTRIBUTE BY

+

Specifies the bucketing fields without sorting the table.

+

SORT BY

+

Sorts the data within each bucket.

+

LIMIT

+

LIMIT is used to limit the number of query results returned. Only the INT type is supported by the number parameter.

+
+
+
+

Precautions

The table to be queried must exist. Otherwise, an error is reported.

+
+

Example

To filter the records in which the name is Mike from the student table and sort the results in ascending order of score, run the following statement:

+
1
+2
+3
SELECT * FROM student
+  WHERE name = 'Mike'
+  ORDER BY score;
+
+ +
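The optional clauses can be combined in one query. A sketch on the same student table that removes duplicate names, filters by score, and caps the output (the threshold and limit values are arbitrary):

1
+2
+3
SELECT DISTINCT name FROM student
+  WHERE score > 60
+  LIMIT 10;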
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0151.html b/docs/dli/sqlreference/dli_08_0151.html new file mode 100644 index 00000000..2783ff66 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0151.html @@ -0,0 +1,17 @@ + + +

Filtering

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0152.html b/docs/dli/sqlreference/dli_08_0152.html new file mode 100644 index 00000000..f6a762c4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0152.html @@ -0,0 +1,31 @@ + + +

WHERE Filtering Clause

+

Function

This statement is used to filter the query results using the WHERE clause.

+
+

Syntax

1
+2
SELECT [ALL | DISTINCT] attr_expr_list FROM table_reference
+  WHERE where_condition;
+
+ +
+
+

Keyword

  • ALL is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) only. Otherwise, an error will occur.
  • DISTINCT is used to remove duplicate rows from the result.
  • WHERE is used to filter out records that do not meet the condition and return records that meet the condition.
+
+

Precautions

The to-be-queried table must exist.

+
+

Example

To filter the records in which the scores are higher than 90 and lower than 95 in the student table, run the following statement:

+
1
+2
SELECT * FROM student
+  WHERE score > 90 AND score < 95;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0153.html b/docs/dli/sqlreference/dli_08_0153.html new file mode 100644 index 00000000..75369367 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0153.html @@ -0,0 +1,37 @@ + + +

HAVING Filtering Clause

+

Function

This statement is used to filter the query results using the HAVING clause.

+
+

Syntax

1
+2
+3
+4
SELECT [ALL | DISTINCT] attr_expr_list FROM table_reference
+  [WHERE where_condition]
+  [GROUP BY col_name_list]
+  HAVING having_condition;
+
+ +
+
+

Keyword

  • ALL is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) only. Otherwise, an error will occur.
  • DISTINCT is used to remove duplicate rows from the result.
  • Generally, HAVING and GROUP BY are used together. GROUP BY applies first for grouping and HAVING then applies for filtering. The arithmetic operation and aggregate function are supported by the HAVING clause.
+
+

Precautions

  • The to-be-queried table must exist.
  • If the filtering condition is subject to the query results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for filtering.
+
+

Example

Group the student table according to the name field and filter the records in which the maximum score is higher than 95 based on groups.

+
1
+2
+3
SELECT name, max(score) FROM student
+  GROUP BY name
+  HAVING max(score) >95;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0154.html b/docs/dli/sqlreference/dli_08_0154.html new file mode 100644 index 00000000..69b10cf9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0154.html @@ -0,0 +1,21 @@ + + +

Sorting

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0155.html b/docs/dli/sqlreference/dli_08_0155.html new file mode 100644 index 00000000..9987946f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0155.html @@ -0,0 +1,33 @@ + + +

ORDER BY

+

Function

This statement is used to order the result set of a query by the specified field.

+
+

Syntax

1
+2
+3
SELECT attr_expr_list FROM table_reference
+  ORDER BY col_name
+  [ASC | DESC] [,col_name [ASC | DESC],...];
+
+ +
+
+

Keyword

  • ASC/DESC: ASC sorts from the lowest value to the highest value. DESC sorts from the highest value to the lowest value. ASC is the default sort order.
  • ORDER BY: specifies that the values in one or more columns should be sorted globally. When ORDER BY is used with GROUP BY, ORDER BY can be followed by the aggregate function.
+
+

Precautions

The to-be-sorted table must exist. If this statement is used to sort a table that does not exist, an error is reported.

+
+

Example

To sort table student in ascending order according to field score and return the sorting result, run the following statement:

+
1
+2
SELECT * FROM student
+  ORDER BY score;
+
+ +
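As the syntax shows, multiple sort columns with mixed sort orders are allowed. A sketch reusing the student table:

1
+2
SELECT * FROM student
+  ORDER BY score DESC, name ASC;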
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0156.html b/docs/dli/sqlreference/dli_08_0156.html new file mode 100644 index 00000000..70cda4fd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0156.html @@ -0,0 +1,33 @@ + + +

SORT BY

+

Function

This statement is used to locally sort a table according to the specified fields.

+
+

Syntax

1
+2
+3
SELECT attr_expr_list FROM table_reference
+  SORT BY col_name
+  [ASC | DESC] [,col_name [ASC | DESC],...];
+
+ +
+
+

Keyword

  • ASC/DESC: ASC sorts from the lowest value to the highest value. DESC sorts from the highest value to the lowest value. ASC is the default sort order.
  • SORT BY: Used together with DISTRIBUTE BY to locally sort a single column or multiple columns within each partition.
+
+

Precautions

The to-be-sorted table must exist. If this statement is used to sort a table that does not exist, an error is reported.

+
+

Example

To sort the student table in ascending order of the score field within each reducer, run the following statement:

+
1
+2
SELECT * FROM student
+  SORT BY score;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0157.html b/docs/dli/sqlreference/dli_08_0157.html new file mode 100644 index 00000000..577fa685 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0157.html @@ -0,0 +1,31 @@ + + +

CLUSTER BY

+

Function

This statement is used to bucket a table and sort the table within buckets.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  CLUSTER BY col_name [,col_name ,...];
+
+ +
+
+

Keyword

CLUSTER BY: Buckets are created based on specified fields. Single fields and multiple fields are supported, and data is sorted in buckets.

+
+

Precautions

The to-be-sorted table must exist. If this statement is used to sort a table that does not exist, an error is reported.

+
+

Example

To bucket the student table according to the score field and sort the data within each bucket in ascending order, run the following statement:

+
1
+2
SELECT * FROM student
+  CLUSTER BY score;
+
+ +
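Given the equivalence noted in Basic SELECT Statements, the same bucketing and in-bucket sorting can also be written with DISTRIBUTE BY plus SORT BY. A sketch:

1
+2
SELECT * FROM student
+  DISTRIBUTE BY score SORT BY score;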
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0158.html b/docs/dli/sqlreference/dli_08_0158.html new file mode 100644 index 00000000..c84ba13e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0158.html @@ -0,0 +1,31 @@ + + +

DISTRIBUTE BY

+

Function

This statement is used to bucket a table according to the field.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  DISTRIBUTE BY col_name [,col_name ,...];
+
+ +
+
+

Keyword

DISTRIBUTE BY: Buckets are created based on specified fields. A single field or multiple fields are supported, and the fields are not sorted in the bucket. This parameter is used together with SORT BY to sort data after bucket division.

+
+

Precautions

The to-be-sorted table must exist. If this statement is used to sort a table that does not exist, an error is reported.

+
+

Example

To bucket the student table according to the score field, run the following statement:

+
1
+2
SELECT * FROM student
+  DISTRIBUTE BY score;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0159.html b/docs/dli/sqlreference/dli_08_0159.html new file mode 100644 index 00000000..a98601ef --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0159.html @@ -0,0 +1,23 @@ + + +

Grouping

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0160.html b/docs/dli/sqlreference/dli_08_0160.html new file mode 100644 index 00000000..714e5ef6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0160.html @@ -0,0 +1,32 @@ + + +

Column-Based GROUP BY

+

Function

This statement is used to group a table based on columns.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  GROUP BY col_name_list;
+
+ +
+
+

Keyword

Column-based GROUP BY can be categorized into single-column GROUP BY and multi-column GROUP BY.

+
  • Single-column GROUP BY indicates that the GROUP BY clause contains only one column. The fields in col_name_list must exist in attr_expr_list. Aggregate functions, such as count() and sum(), are supported in attr_expr_list and may take other fields as arguments.
  • Multi-column GROUP BY indicates that the GROUP BY clause contains more than one column. The query is grouped according to all the fields in the GROUP BY clause, and records with the same values in these fields are put in the same group. Similarly, the fields in the GROUP BY clause must appear in attr_expr_list, and attr_expr_list can also use aggregate functions.
+
+

Precautions

The to-be-grouped table must exist. Otherwise, an error is reported.

+
+

Example

Group the student table according to the score and name fields and return the grouping results.

+
1
+2
SELECT score, count(name) FROM student
+  GROUP BY score,name;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0161.html b/docs/dli/sqlreference/dli_08_0161.html new file mode 100644 index 00000000..30b0e79c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0161.html @@ -0,0 +1,31 @@ + + +

Expression-Based GROUP BY

+

Function

This statement is used to group a table according to expressions.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  GROUP BY groupby_expression [, groupby_expression, ...];
+
+ +
+
+

Keyword

The groupby_expression can contain a single field or multiple fields, and also can call aggregate functions or string functions.

+
+

Precautions

  • The to-be-grouped table must exist. Otherwise, an error is reported.
  • The GROUP BY expressions can call built-in functions and user-defined functions, and the fields they reference must exist in attr_expr_list.
+
+

Example

To use the substr function to obtain the character string from the name field, group the student table according to the obtained character string, and return each sub character string and the number of records, run the following statement:

+
1
+2
SELECT substr(name,6),count(name) FROM student
+  GROUP BY substr(name,6);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0162.html b/docs/dli/sqlreference/dli_08_0162.html new file mode 100644 index 00000000..c712014d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0162.html @@ -0,0 +1,37 @@ + + +

GROUP BY Using HAVING

+

Function

This statement filters a table after grouping it using the HAVING clause.

+
+

Syntax

1
+2
+3
SELECT attr_expr_list FROM table_reference
+  GROUP BY groupby_expression [, groupby_expression...]
+  HAVING having_expression;
+
+ +
+
+

Keyword

The groupby_expression can contain a single field or multiple fields, and can also call aggregate functions or string functions.

+
+

Precautions

  • The to-be-grouped table must exist. Otherwise, an error is reported.
  • If the filtering condition is subject to the query results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for filtering. If HAVING and GROUP BY are used together, GROUP BY applies first for grouping and HAVING then applies for filtering. The arithmetic operation and aggregate function are supported by the HAVING clause.
+
+

Example

Group the transactions according to num, use the HAVING clause to filter the records in which the maximum value derived from multiplying price with amount is higher than 5000, and return the filtered results.

+
1
+2
+3
+4
SELECT num, max(price*amount) FROM transactions
+  WHERE time > '2016-06-01'
+  GROUP BY num
+  HAVING max(price*amount)>5000;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0163.html b/docs/dli/sqlreference/dli_08_0163.html new file mode 100644 index 00000000..377cf2eb --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0163.html @@ -0,0 +1,58 @@ + + +

ROLLUP

+

Function

This statement is used to generate aggregate rows, super-aggregate rows, and a grand-total row. It performs multi-level aggregation from right to left and displays the aggregation at each level.

+
+

Syntax

1
+2
+3
SELECT attr_expr_list FROM table_reference
+  GROUP BY col_name_list
+  WITH ROLLUP;
+
+ +
+
+

Keyword

ROLLUP is the expansion of GROUP BY. For example, SELECT a, b, c, SUM(expression) FROM table GROUP BY a, b, c WITH ROLLUP; can be transformed into the following query statements:
  • Counting the (a, b, c) combinations
    1
    +2
    SELECT a, b, c, sum(expression) FROM table
    +  GROUP BY a, b, c;
    +
    + +
    +
  • Counting the (a, b) combinations
    1
    +2
    SELECT a, b, NULL, sum(expression) FROM table
    +  GROUP BY a, b;
    +
    + +
    +
  • Counting the (a) combinations
    1
    +2
    SELECT a, NULL, NULL, sum(expression) FROM table
    +  GROUP BY a;
    +
    + +
    +
  • Total
    1
    SELECT NULL, NULL, NULL, sum(expression) FROM table;
    +
    + +
    +
+
+
+

Precautions

The to-be-grouped table must exist. If this statement is used to group a table that does not exist, an error is reported.

+
+

Example

To generate the aggregate rows, super-aggregate rows, and grand-total row according to the group_id and job fields and return the total salary for each aggregation condition, run the following statement:

+
1
+2
+3
SELECT group_id, job, SUM(salary) FROM group_test
+  GROUP BY group_id, job
+  WITH ROLLUP;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0164.html b/docs/dli/sqlreference/dli_08_0164.html new file mode 100644 index 00000000..14c8eb2e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0164.html @@ -0,0 +1,78 @@ + + +

GROUPING SETS

+

Function

This statement is used to generate cross-tabulation rows and achieve cross-statistics on the GROUP BY fields.

+
+

Syntax

1
+2
+3
SELECT attr_expr_list FROM table_reference
+  GROUP BY col_name_list
+  GROUPING SETS(col_name_list);
+
+ +
+
+

Keyword

GROUPING SETS is the expansion of GROUP BY. For example:

+
  • SELECT a, b, sum(expression) FROM table GROUP BY a, b GROUPING SETS((a,b));
    It can be converted to the following query:
    1
    +2
    SELECT a, b, sum(expression) FROM table
    +  GROUP BY a, b;
    +
    + +
    +
    +
  • SELECT a, b, sum(expression) FROM table GROUP BY a, b GROUPING SETS(a,b);

    It can be converted to the following two queries:

    +
    1
    +2
    +3
    SELECT a, NULL, sum(expression) FROM table GROUP BY a;
    +UNION
    +SELECT NULL, b, sum(expression) FROM table GROUP BY b;
    +
    + +
    +
  • SELECT a, b, sum(expression) FROM table GROUP BY a, b GROUPING SETS((a,b), a);
    It can be converted to the following two queries:
    1
    +2
    +3
    SELECT a, b, sum(expression) FROM table GROUP BY a, b;
    +UNION
    +SELECT a, NULL, sum(expression) FROM table GROUP BY a;
    +
    + +
    +
    +
  • SELECT a, b, sum(expression) FROM table GROUP BY a, b GROUPING SETS((a,b), a, b, ());
    It can be converted to the following four queries:
    1
    +2
    +3
    +4
    +5
    +6
    +7
    SELECT a, b, sum(expression) FROM table GROUP BY a, b;
    +UNION
    +SELECT a, NULL, sum(expression) FROM table GROUP BY a, NULL;
    +UNION
    +SELECT NULL, b, sum(expression) FROM table GROUP BY NULL, b;
    +UNION
    +SELECT NULL, NULL, sum(expression) FROM table;
    +
    + +
    +
    +
+
+

Precautions

  • The to-be-grouped table must exist. Otherwise, an error is reported.
  • Different from ROLLUP, there is only one syntax for GROUPING SETS.
+
+

Example

To generate cross-tabulation rows according to the group_id and job fields and return the total salary for each aggregation condition, run the following statement:

+
1
+2
+3
SELECT group_id, job, SUM(salary) FROM group_test
+  GROUP BY group_id, job
+  GROUPING SETS (group_id, job);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0165.html b/docs/dli/sqlreference/dli_08_0165.html new file mode 100644 index 00000000..d362f0c2 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0165.html @@ -0,0 +1,29 @@ + + +

JOIN

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0166.html b/docs/dli/sqlreference/dli_08_0166.html new file mode 100644 index 00000000..0eded336 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0166.html @@ -0,0 +1,31 @@ + + +

INNER JOIN

+

Function

This statement is used to join and return the rows that meet the JOIN conditions from two tables as the result set.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  {JOIN | INNER JOIN} table_reference ON join_condition;
+
+ +
+
+

Keyword

JOIN/INNER JOIN: Only the records that meet the JOIN conditions in joined tables will be displayed.

+
+

Precautions

  • The to-be-joined table must exist. Otherwise, an error is reported.
  • INNER JOIN can join more than two tables in one query.
+
+

Example

To join the course IDs from the student_info and course_info tables and check the mapping between student names and courses, run the following statement:

+
1
+2
SELECT student_info.name, course_info.courseName FROM student_info
+  JOIN course_info ON (student_info.courseId = course_info.courseId);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0167.html b/docs/dli/sqlreference/dli_08_0167.html new file mode 100644 index 00000000..1961db9b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0167.html @@ -0,0 +1,31 @@ + + +

LEFT OUTER JOIN

+

Function

Joins the left table with the right table and returns all records of the left table. For records without a match in the right table, NULL is returned for the right-table columns.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  LEFT OUTER JOIN table_reference ON join_condition;
+
+ +
+
+

Keyword

LEFT OUTER JOIN: Returns all joined records of the left table. If no record is matched, NULL is returned.

+
+

Precautions

The to-be-joined table must exist. Otherwise, an error is reported.

+
+

Example

To left outer join the student_info table with the course_info table on courseId and return the names of all students together with their course names, run the following statement. If a student's courseId has no match, NULL will be returned for the course name.

+
1
+2
SELECT student_info.name, course_info.courseName FROM student_info
+  LEFT OUTER JOIN course_info ON (student_info.courseId = course_info.courseId);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0168.html b/docs/dli/sqlreference/dli_08_0168.html new file mode 100644 index 00000000..143801a4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0168.html @@ -0,0 +1,31 @@ + + +

RIGHT OUTER JOIN

+

Function

Match the right table with the left table and return all matched records of the right table. If no matched record is found, NULL will be returned.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  RIGHT OUTER JOIN table_reference ON join_condition;
+
+ +
+
+

Keyword

RIGHT OUTER JOIN: Return all matched records of the right table. If no record is matched, NULL is returned.

+
+

Precautions

The to-be-joined table must exist. Otherwise, an error is reported.

+
+

Example

To right outer join the student_info table with the course_info table on courseId and return all records in the course_info table, run the following statement. If no matched student is found, NULL will be returned for the student name.

+
1
+2
SELECT student_info.name, course_info.courseName FROM student_info
+  RIGHT OUTER JOIN course_info ON (student_info.courseId = course_info.courseId);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0169.html b/docs/dli/sqlreference/dli_08_0169.html new file mode 100644 index 00000000..43a87218 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0169.html @@ -0,0 +1,31 @@ + + +

FULL OUTER JOIN

+

Function

Joins the left table and the right table and returns all records from both tables. If no matched record is found, NULL will be returned for the unmatched side.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  FULL OUTER JOIN table_reference ON join_condition;
+
+ +
+
+

Keyword

FULL OUTER JOIN: Matches all records in the left and right tables. If no record is matched, NULL is returned.

+
+

Precautions

The to-be-joined table must exist. Otherwise, an error is reported.

+
+

Example

To full outer join the student_info and course_info tables and return all records from both tables, run the following statement. If no joined record is found, NULL will be returned.

+
1
+2
SELECT student_info.name, course_info.courseName FROM student_info
+  FULL OUTER JOIN course_info ON (student_info.courseId = course_info.courseId);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0170.html b/docs/dli/sqlreference/dli_08_0170.html new file mode 100644 index 00000000..6117aa61 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0170.html @@ -0,0 +1,31 @@ + + +

IMPLICIT JOIN

+

Function

This statement has the same function as INNER JOIN, that is, it returns the result set that meets the WHERE condition. However, IMPLICIT JOIN does not use the JOIN...ON keywords to specify the condition.

+
+

Syntax

1
+2
SELECT table_reference.col_name, table_reference.col_name, ... FROM table_reference, table_reference
+  WHERE table_reference.col_name = table_reference.col_name;
+
+ +
+
+

Keyword

The keyword WHERE achieves the same function as JOIN...ON... and the mapped records will be returned. Syntax shows the WHERE filtering according to an equation. The WHERE filtering according to an inequation is also supported.

+
+

Precautions

  • The to-be-joined table must exist. Otherwise, an error is reported.
  • The statement of IMPLICIT JOIN does not contain keywords JOIN...ON.... Instead, the WHERE clause is used as the condition to join two tables.
+
+

Example

To return the student names and course names that match courseId, run the following statement:

+
1
+2
SELECT student_info.name, course_info.courseName FROM student_info,course_info
+  WHERE student_info.courseId = course_info.courseId;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0171.html b/docs/dli/sqlreference/dli_08_0171.html new file mode 100644 index 00000000..3d55fcea --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0171.html @@ -0,0 +1,31 @@ + + +

Cartesian JOIN

+

Function

Cartesian JOIN joins each record of table A with all records in table B. For example, if there are m records in table A and n records in table B, m x n records will be generated by Cartesian JOIN.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  CROSS JOIN table_reference ON join_condition;
+
+ +
+
+

Keyword

The join_condition is the join condition. If join_condition is always true, for example 1=1, the join is a Cartesian join, and the number of output records equals the product of the numbers of records in the joined tables. If a Cartesian join is required, use the special keyword CROSS JOIN. CROSS JOIN is the standard way to calculate the Cartesian product.

+
+

Precautions

The to-be-joined table must exist. Otherwise, an error is reported.

+
+

Example

To return all the JOIN results of the student name and course name from the student_info and course_info tables, run the following statement:

+
1
+2
SELECT student_info.name, course_info.courseName FROM student_info
+  CROSS JOIN course_info ON (1 = 1);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0172.html b/docs/dli/sqlreference/dli_08_0172.html new file mode 100644 index 00000000..8a20c9db --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0172.html @@ -0,0 +1,31 @@ + + +

LEFT SEMI JOIN

+

Function

This statement is used to query the records that meet the JOIN condition from the left table.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  LEFT SEMI JOIN table_reference ON join_condition;
+
+ +
+
+

Keyword

LEFT SEMI JOIN: Returns only the records from the left table. LEFT SEMI JOIN can also be expressed by nesting subqueries with WHERE...IN or WHERE EXISTS. LEFT SEMI JOIN returns the records from the left table that meet the JOIN condition, whereas LEFT OUTER JOIN returns all records from the left table, with NULL returned where no record meets the JOIN condition.

+
+

Precautions

  • The to-be-joined table must exist. Otherwise, an error is reported.
  • The fields in attr_expr_list must be fields in the left table. Otherwise, an error is reported.
+
+

Example

To return the names of students who select the courses and the course IDs, run the following statement:

+
1
+2
SELECT student_info.name, student_info.courseId FROM student_info
+  LEFT SEMI JOIN course_info ON (student_info.courseId = course_info.courseId);
+
+ +
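As noted in the Keyword section, the same result can be expressed by nesting a subquery with WHERE...IN. A sketch:

1
+2
SELECT student_info.name, student_info.courseId FROM student_info
+  WHERE student_info.courseId IN (SELECT courseId FROM course_info);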
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0173.html b/docs/dli/sqlreference/dli_08_0173.html new file mode 100644 index 00000000..d1edb237 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0173.html @@ -0,0 +1,31 @@ + + +

NON-EQUIJOIN

+

Function

This statement is used to join multiple tables using unequal values and return the result set that meets the condition.

+
+

Syntax

1
+2
SELECT attr_expr_list FROM table_reference
+  JOIN table_reference ON non_equi_join_condition;
+
+ +
+
+

Keyword

The non_equi_join_condition is similar to join_condition. The only difference is that the JOIN condition is inequation.

+
+

Precautions

The to-be-joined table must exist. Otherwise, an error is reported.

+
+

Example

To return all the pairs of different student names from the student_info_1 and student_info_2 tables, run the following statement:

+
1
+2
SELECT student_info_1.name, student_info_2.name FROM student_info_1
+  JOIN student_info_2 ON (student_info_1.name <> student_info_2.name);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0174.html b/docs/dli/sqlreference/dli_08_0174.html new file mode 100644 index 00000000..e1a11203 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0174.html @@ -0,0 +1,21 @@ + + +

Subquery

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0175.html b/docs/dli/sqlreference/dli_08_0175.html new file mode 100644 index 00000000..c546922c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0175.html @@ -0,0 +1,32 @@ + + +

Subquery Nested by WHERE

+

Function

Subqueries are nested in the WHERE clause, and the subquery result is used as the filtering condition.

+
+

Syntax

1
+2
SELECT [ALL | DISTINCT] attr_expr_list FROM table_reference
+  WHERE {col_name operator (sub_query) | [NOT] EXISTS sub_query};
+
+ +
+
+

Keyword

  • ALL is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) only. Otherwise, an error will occur.
  • DISTINCT is used to remove duplicate rows from the result.
  • The subquery results are used as the filter condition in the subquery nested by WHERE.
  • The operator includes the equation and inequation operators, and IN, NOT IN, EXISTS, and NOT EXISTS operators.
    • If the operator is IN or NOT IN, the returned records are in a single column.
    • If the operator is EXISTS or NOT EXISTS, the subquery must contain WHERE. If a field in the subquery has the same name as a field in the external query, add the table name before the field in the subquery.
    +
+
+

Precautions

The to-be-queried table must exist. If this statement is used to query a table that does not exist, an error is reported.

+
+

Example

To query the courseId of Biology from the course_info table and then query the names of students matching that courseId from the student_info table, run the following statement:

+
1
+2
SELECT name FROM student_info
+  WHERE courseId = (SELECT courseId FROM course_info WHERE courseName = 'Biology');
+
+ +
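With the EXISTS operator, the subquery must contain a WHERE clause that references the outer query, as described above. A sketch using the same tables:

1
+2
SELECT name FROM student_info
+  WHERE EXISTS (SELECT 1 FROM course_info WHERE course_info.courseId = student_info.courseId);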
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0176.html b/docs/dli/sqlreference/dli_08_0176.html new file mode 100644 index 00000000..6cf6da82 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0176.html @@ -0,0 +1,29 @@ + + +

Subquery Nested by FROM

+

Function

This statement is used to nest a subquery in the FROM clause and use the subquery result as the data source of the external SELECT statement.

+
+

Syntax

1
SELECT [ALL | DISTINCT] attr_expr_list FROM (sub_query) [alias];
+
+ +
+
+

Keyword

  • ALL is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) only. Otherwise, an error will occur.
  • DISTINCT is used to remove duplicate rows from the result.
+
+

Precautions

  • The to-be-queried table must exist. If this statement is used to query a table that does not exist, an error is reported.
  • The subquery nested in FROM must have an alias, and the alias must be specified before the statement runs. Otherwise, an error is reported. It is advised to specify a unique alias.
  • The subquery following FROM must be immediately followed by its specified alias. Otherwise, an error is reported.
+
+

Example

To return the names of students who select the courses in the course_info table and remove the repeated records using DISTINCT, run the following statement:

+
1
+2
SELECT DISTINCT name FROM (SELECT name FROM student_info
+  JOIN course_info ON student_info.courseId = course_info.courseId) temp;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0177.html b/docs/dli/sqlreference/dli_08_0177.html new file mode 100644 index 00000000..68a36a0b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0177.html @@ -0,0 +1,36 @@ + + +

Subquery Nested by HAVING

+

Function

This statement is used to embed a subquery in the HAVING clause. The subquery result is used as a part of the HAVING clause.

+
+

Syntax

1
+2
+3
SELECT [ALL | DISTINCT] attr_expr_list FROM table_reference
+  GROUP BY groupby_expression
+  HAVING aggregate_func(col_name) operator (sub_query);
+
+ +
+
+

Keyword

  • ALL is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) only. Otherwise, an error will occur.
  • DISTINCT is used to remove duplicate rows from the result.
+
  • The groupby_expression can contain a single field or multiple fields, and also can call aggregate functions or string functions.
  • The operator includes the equation and inequation operators, and IN and NOT IN operators.
+
+

Precautions

  • The to-be-queried table must exist. If this statement is used to query a table that does not exist, an error is reported.
  • The sequence of sub_query and the aggregate function cannot be changed.
+
+

Example

To group the student_info table according to the name field, count the records in each group, and return the names whose record count equals the total number of records in the course_info table, run the following statement:

+
1
+2
+3
SELECT name FROM student_info
+  GROUP BY name
+  HAVING count(name) = (SELECT count(*) FROM course_info);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0178.html b/docs/dli/sqlreference/dli_08_0178.html new file mode 100644 index 00000000..b19e1757 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0178.html @@ -0,0 +1,27 @@ + + +

Multi-Layer Nested Subquery

+

Function

This statement is used to nest queries within subqueries across multiple layers.

+
+

Syntax

1
SELECT attr_expr FROM ( SELECT attr_expr FROM ( SELECT attr_expr FROM... ... ) [alias] ) [alias];
+
+ +
+
+

Keyword

  • ALL is used to return repeated rows. By default, all repeated rows are returned. It is followed by asterisks (*) only. Otherwise, an error will occur.
  • DISTINCT is used to remove duplicate rows from the result.
+
+

Precautions

  • The to-be-queried table must exist. If this statement is used to query a table that does not exist, an error is reported.
  • The alias of the subquery must be specified in the nested query. Otherwise, an error is reported.
  • The alias must be specified before the running of the statement. Otherwise, an error is reported. It is advised to specify a unique alias.
+
+

Example

To return the name field from the user_info table after three queries, run the following statement:

+
1
SELECT name FROM ( SELECT name, acc_num FROM ( SELECT name, acc_num, password FROM ( SELECT name, acc_num, password, bank_acc FROM user_info) a ) b ) c;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0179.html b/docs/dli/sqlreference/dli_08_0179.html new file mode 100644 index 00000000..99a4a7b7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0179.html @@ -0,0 +1,17 @@ + + +

Alias

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0180.html b/docs/dli/sqlreference/dli_08_0180.html new file mode 100644 index 00000000..7b79352c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0180.html @@ -0,0 +1,31 @@ + + +

AS for Table

+

Function

This statement is used to specify an alias for a table or the subquery result.

+
+

Syntax

1
SELECT attr_expr_list FROM table_reference [AS] alias;
+
+ +
+
+

Keyword

  • table_reference: Can be a table, view, or subquery.
  • AS: Used to connect table_reference and alias. Whether this keyword is added or not does not affect the command execution result.
+
+

Precautions

  • The to-be-queried table must exist. Otherwise, an error is reported.
  • The alias must be specified before execution of the statement. Otherwise, an error is reported. You are advised to specify a unique alias.
+
+

Example

  • To specify the alias n for table simple_table and access the name field in simple_table using n.name, run the following statement:
    1
    SELECT n.score FROM simple_table n WHERE n.name = "leilei";
    +
    + +
    +
  • To specify alias m for the subquery result and return all the query results using SELECT * FROM m, run the following statement:
    1
    SELECT * FROM (SELECT * FROM simple_table WHERE score > 90) AS m;
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0181.html b/docs/dli/sqlreference/dli_08_0181.html new file mode 100644 index 00000000..19a689ed --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0181.html @@ -0,0 +1,27 @@ + + +

AS for Column

+

Function

This statement is used to specify an alias for a column.

+
+

Syntax

1
SELECT attr_expr [AS] alias, attr_expr [AS] alias, ... FROM table_reference;
+
+ +
+
+

Keyword

  • alias: gives an alias for the attr_expr field.
  • AS: Whether to add AS does not affect the result.
+
+

Precautions

  • The to-be-queried table must exist. Otherwise, an error is reported.
  • The alias must be specified before execution of the statement. Otherwise, an error is reported. You are advised to specify a unique alias.
+
+

Example

Run SELECT name AS n FROM simple_table WHERE score > 90 to obtain the subquery result. The alias n for name can then be used by the external SELECT statement.

+
1
SELECT n FROM (SELECT name AS n FROM simple_table WHERE score > 90) m WHERE n = "xiaoming";
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0182.html b/docs/dli/sqlreference/dli_08_0182.html new file mode 100644 index 00000000..d180fc6c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0182.html @@ -0,0 +1,19 @@ + + +

Set Operations

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0183.html b/docs/dli/sqlreference/dli_08_0183.html new file mode 100644 index 00000000..4609a28c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0183.html @@ -0,0 +1,27 @@ + + +

UNION

+

Function

This statement is used to return the union set of multiple query results.

+
+

Syntax

1
select_statement UNION [ALL] select_statement;
+
+ +
+
+

Keyword

UNION: This set operation vertically concatenates the result sets of multiple SELECT statements. The number of columns returned by each SELECT statement must be the same. The column types and column names do not have to be the same.

+
+

Precautions

  • By default, the repeated records returned by UNION are removed. The repeated records returned by UNION ALL are not removed.
  • Do not add brackets between multiple set operations, such as UNION, INTERSECT, and EXCEPT. Otherwise, an error is reported.
+
+

Example

To return the union set of the query results of the SELECT * FROM student_1 and SELECT * FROM student_2 commands with the repeated records removed, run the following statement:

+
1
SELECT * FROM student_1 UNION SELECT * FROM student_2;
+
+ +
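To keep the duplicates instead, add the ALL keyword. A sketch on the same tables:

1
SELECT * FROM student_1 UNION ALL SELECT * FROM student_2;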
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0184.html b/docs/dli/sqlreference/dli_08_0184.html new file mode 100644 index 00000000..668a45ca --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0184.html @@ -0,0 +1,27 @@ + + +

INTERSECT

+

Function

This statement is used to return the intersection set of multiple query results.

+
+

Syntax

1
select_statement INTERSECT select_statement;
+
+ +
+
+

Keyword

INTERSECT returns the intersection of multiple query results. The number of columns returned by each SELECT statement must be the same. The column types and column names do not have to be the same. By default, INTERSECT removes duplicate records.

+
+

Precautions

Do not add brackets between multiple set operations, such as UNION, INTERSECT, and EXCEPT. Otherwise, an error is reported.

+
+

Example

To return the intersection set of the query results of the SELECT * FROM student_1 and SELECT * FROM student_2 commands with the repeated records removed, run the following statement:

+
1
SELECT * FROM student_1 INTERSECT SELECT * FROM student_2;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0185.html b/docs/dli/sqlreference/dli_08_0185.html new file mode 100644 index 00000000..278f6fb7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0185.html @@ -0,0 +1,27 @@ + + +

EXCEPT

+

Function

This statement is used to return the difference set of two query results.

+
+

Syntax

1
select_statement EXCEPT select_statement;
+
+ +
+
+

Keyword

EXCEPT returns the difference of two sets. A EXCEPT B removes from A the records that exist in both A and B and returns the result. By default, the repeated records returned by EXCEPT are not removed. The number of columns returned by each SELECT statement must be the same. The types and names of columns do not have to be the same.

+
+

Precautions

Do not add brackets between multiple set operations, such as UNION, INTERSECT, and EXCEPT. Otherwise, an error is reported.

+
+

Example

To return the records that exist in SELECT * FROM student_1 but not in SELECT * FROM student_2, run the following statement:

+
1
SELECT * FROM student_1 EXCEPT SELECT * FROM student_2;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0186.html b/docs/dli/sqlreference/dli_08_0186.html new file mode 100644 index 00000000..87cdef54 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0186.html @@ -0,0 +1,27 @@ + + +

WITH...AS

+

Function

This statement uses WITH...AS to define a common table expression (CTE), which simplifies a query and makes the result easier to read and maintain.

+
+

Syntax

1
WITH cte_name AS (select_statement) sql_containing_cte_name;
+
+ +
+
+

Keyword

  • cte_name: Name of the common table expression. The name must be unique.
  • select_statement: complete SELECT clause.
  • sql_containing_cte_name: SQL statement that references the defined common table expression.
+
+

Precautions

  • A CTE must be used immediately after it is defined. Otherwise, the definition becomes invalid.
  • Multiple CTEs can be defined in one WITH clause, separated by commas; a CTE defined later can reference a CTE defined earlier (see the multi-CTE sketch after the example below).
+
+

Example

Define SELECT courseId FROM course_info WHERE courseName = 'Biology' as the CTE nv, and then use nv in the subsequent SELECT statement.

+
1
WITH nv AS (SELECT courseId FROM course_info WHERE courseName = 'Biology') SELECT DISTINCT courseId FROM nv;
+
+ +
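As noted in Precautions, one WITH clause can define several CTEs, and a later CTE can reference an earlier one. A minimal sketch based on the same course_info table (the CTE names nv and nv_distinct are illustrative):

WITH nv AS (SELECT courseId FROM course_info WHERE courseName = 'Biology'),
     nv_distinct AS (SELECT DISTINCT courseId FROM nv)
SELECT * FROM nv_distinct;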
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0187.html b/docs/dli/sqlreference/dli_08_0187.html new file mode 100644 index 00000000..7bed677b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0187.html @@ -0,0 +1,17 @@ + + +

CASE...WHEN

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0188.html b/docs/dli/sqlreference/dli_08_0188.html new file mode 100644 index 00000000..55d029a9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0188.html @@ -0,0 +1,28 @@ + + +

Basic CASE Statement

+

Function

This statement compares input_expression with each when_expression and returns the result_expression of the first match.

+
+

Syntax

1
CASE input_expression WHEN when_expression THEN result_expression [...n] [ELSE else_result_expression] END;
+
+ +
+
+

Keyword

CASE: Subqueries are supported in the basic CASE statement. However, input_expression and when_expression must be comparable.

+
+

Precautions

If no input_expression = when_expression comparison evaluates to TRUE, else_result_expression is returned when the ELSE clause is specified. If the ELSE clause is not specified, NULL is returned.

+
+

Example

To return the name field and the character that is matched to id from the student table with the following matching rules, run the following statement:

+
  • If id is 1, 'a' is returned.
  • If id is 2, 'b' is returned.
  • If id is 3, 'c' is returned.
  • Otherwise, NULL is returned.
+
1
SELECT name, CASE id WHEN 1 THEN 'a' WHEN 2 THEN 'b' WHEN 3 THEN 'c' ELSE NULL END FROM student;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0189.html b/docs/dli/sqlreference/dli_08_0189.html new file mode 100644 index 00000000..4f159174 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0189.html @@ -0,0 +1,27 @@ + + +

CASE Query Statement

+

Function

This statement evaluates boolean_expression for each WHEN clause in the specified order, and returns the result_expression of the first boolean_expression that evaluates to TRUE.

+
+

Syntax

1
CASE WHEN boolean_expression THEN result_expression [...n] [ELSE else_result_expression] END;
+
+ +
+
+

Keyword

boolean_expression: can include a subquery, but its return value must be of the Boolean type.

+
+

Precautions

If no boolean_expression evaluates to TRUE, else_result_expression is returned when the ELSE clause is specified. If the ELSE clause is not specified, NULL is returned.

+
+

Example

To query the student table and return the name and score level for each record: EXCELLENT if the score is 90 or higher, GOOD if the score is higher than 80 but lower than 90, and BAD if the score is 80 or lower, run the following statement:

+
1
SELECT name, CASE WHEN score >= 90 THEN 'EXCELLENT' WHEN 80 < score AND score < 90 THEN 'GOOD' ELSE 'BAD' END AS level FROM student;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0190.html b/docs/dli/sqlreference/dli_08_0190.html new file mode 100644 index 00000000..09177dd4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0190.html @@ -0,0 +1,57 @@ + + +

OVER Clause

+

Function

This clause is used together with a window function. OVER groups the data and sorts it within each group; the window function then generates values (for example, serial numbers) for the rows within the group.

+
+

Syntax

1
+2
+3
+4
+5
SELECT window_func(args) OVER
+  ([PARTITION BY col_name, col_name, ...]
+   [ORDER BY col_name, col_name, ...]
+   [ROWS | RANGE BETWEEN (CURRENT ROW | (UNBOUNDED |[num]) PRECEDING)
+  AND (CURRENT ROW | ( UNBOUNDED | [num]) FOLLOWING)]);
+
+ +
+
+

Keyword

  • PARTITION BY: partitions the table by one or more fields. Similar to GROUP BY, PARTITION BY divides the table into partitions, and each partition is a window. The window function can apply to the entire table or to specific partitions. A maximum of 7,000 partitions can be created in a single table.
  • ORDER BY: specifies the order in which the window function obtains values. ORDER BY can sort the table by one or more fields, in ascending (ASC) or descending (DESC) order. The window is specified by WINDOW. If no window is specified, the default window is ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW; that is, the window extends from the start of the table or partition (if PARTITION BY is used in the OVER clause) to the current row.
  • WINDOW: defines the window by specifying a range of rows.
  • CURRENT ROW: indicates the current row.
  • num PRECEDING: specifies the start of the window, num rows before the current row.
  • UNBOUNDED PRECEDING: indicates that the window has no start boundary.
  • num FOLLOWING: specifies the end of the window, num rows after the current row.
  • UNBOUNDED FOLLOWING: indicates that the window has no end boundary.
  • The differences between ROWS BETWEEN... and RANGE BETWEEN... are as follows:
    • ROWS defines a physical window. After the data is sorted, the physical window starts at the nth row before the current row and ends at the mth row after it.
    • RANGE defines a logical window. Its boundaries are determined by row values rather than row positions.
    +
  • The scenarios of the window are as follows:
    • The window only contains the current row.
      1
      ROWS BETWEEN CURRENT ROW AND CURRENT ROW
      +
      + +
      +
    • The window starts three rows before the current row and ends at the fifth row after it.
      1
      ROWS BETWEEN 3 PRECEDING AND 5 FOLLOWING
      +
      + +
      +
    • The window starts from the beginning of the table or partition and ends at the current row.
      1
      ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
      +
      + +
      +
    • The window starts at the current row and ends at the end of the table or partition.
      1
      ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
      +
      + +
      +
    • The window starts from the beginning of the table or partition and ends at the end of the table or partition.
      1
      ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
      +
      + +
      +
    +
+
+

Precautions

The three options of the OVER clause are PARTITION BY, ORDER BY, and WINDOW. They are optional and can be used together. If the OVER clause is empty, the window is the entire table.

+
+

Example

To start the window from the beginning of the table or partition and end the window at the current row, sort the over_test table according to the id field, and return the sorted id fields and corresponding serial numbers, run the following statement:

+
+
1
SELECT id, count(id) OVER (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM over_test;
+
+ +
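PARTITION BY and ORDER BY can be combined so that numbering restarts within each partition. A sketch assuming over_test also has a hypothetical classNo column:

SELECT id, classNo, count(id) OVER (PARTITION BY classNo ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM over_test;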
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0191.html b/docs/dli/sqlreference/dli_08_0191.html new file mode 100644 index 00000000..bb95f1ec --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0191.html @@ -0,0 +1,445 @@ + + +

Mathematical Operation Functions

+

Relational Operators

All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.

+

Relationship operators are binary operators. Two compared data types must be of the same type or they must support implicit conversion.

+

Table 1 lists all relational operators supported by Flink SQL.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Relational Operators

Operator

+

Returned Data Type

+

Description

+

A = B

+

BOOLEAN

+

If A is equal to B, then TRUE is returned. Otherwise, FALSE is returned. This operator is used for value assignment.

+

A <> B

+

BOOLEAN

+

If A is not equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned. This operator follows the standard SQL syntax.

+

A < B

+

BOOLEAN

+

If A is less than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A <= B

+

BOOLEAN

+

If A is less than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A > B

+

BOOLEAN

+

If A is greater than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A >= B

+

BOOLEAN

+

If A is greater than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A IS NULL

+

BOOLEAN

+

If A is NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS NOT NULL

+

BOOLEAN

+

If A is not NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS DISTINCT FROM B

+

BOOLEAN

+

If A is not equal to B, TRUE is returned. NULL values are treated as identical; for example, NULL IS DISTINCT FROM NULL returns FALSE.

+

A IS NOT DISTINCT FROM B

+

BOOLEAN

+

If A is equal to B, TRUE is returned. NULL values are treated as identical; for example, NULL IS NOT DISTINCT FROM NULL returns TRUE.

+

A BETWEEN [ASYMMETRIC | SYMMETRIC] B AND C

+

BOOLEAN

+

If A is greater than or equal to B but less than or equal to C, TRUE is returned.

+
  • ASYMMETRIC: indicates that the order of B and C matters.

    For example, "A BETWEEN ASYMMETRIC B AND C" is equivalent to "A BETWEEN B AND C".

    +
  • SYMMETRIC: indicates that the order of B and C does not matter.

    For example, "A BETWEEN SYMMETRIC B AND C" is equivalent to "(A BETWEEN B AND C) OR (A BETWEEN C AND B)".

    +
+

A NOT BETWEEN B AND C

+

BOOLEAN

+

If A is less than B or greater than C, TRUE is returned.

+

A LIKE B [ ESCAPE C ]

+

BOOLEAN

+

If A matches pattern B, TRUE is returned. The escape character C can be defined as required.

+

A NOT LIKE B [ ESCAPE C ]

+

BOOLEAN

+

If A does not match pattern B, TRUE is returned. The escape character C can be defined as required.

+

A SIMILAR TO B [ ESCAPE C ]

+

BOOLEAN

+

If A matches regular expression B, TRUE is returned. The escape character C can be defined as required.

+

A NOT SIMILAR TO B [ ESCAPE C ]

+

BOOLEAN

+

If A does not match regular expression B, TRUE is returned. The escape character C can be defined as required.

+

value IN (value [, value]* )

+

BOOLEAN

+

If the value is equal to any value in the list, TRUE is returned.

+

value NOT IN (value [, value]* )

+

BOOLEAN

+

If the value is not equal to any value in the list, TRUE is returned.

+
+
+
  • Values of the double, real, and float types may differ in precision. The equal sign (=) is not recommended for comparing two values of the double type. Instead, subtract the two values, take the absolute value of the difference, and regard the values as equal if the absolute value is small enough (see the sketch after this list). For example:
    abs(0.9999999999 - 1.0000000000) < 0.000000001 // 0.9999999999 and 1.0000000000 have 10 decimal places of precision, while 0.000000001 has 9. Therefore, 0.9999999999 can be regarded as equal to 1.0000000000.
    +
+
  • Data of the numeric type can be compared with character strings. During comparison using relational operators (>, <, <=, >=), data of the string type is converted to the numeric type by default; the string must contain only numeric characters.
  • Character strings can be compared using relational operators.
+
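The following sketch applies the absolute-value comparison from the first note above, assuming a hypothetical table t with double columns c1 and c2:

SELECT * FROM t WHERE ABS(c1 - c2) < 0.000000001;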
+
+

Logical Operators

Common logical operators are AND, OR, and NOT. Their priority order is NOT > AND > OR.

+

Table 2 lists the calculation rules. A and B indicate logical expressions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Logical Operators

Operator

+

Result Type

+

Description

+

A OR B

+

BOOLEAN

+

If A or B is TRUE, TRUE is returned. Three-valued logic is supported.

+

A AND B

+

BOOLEAN

+

If both A and B are TRUE, TRUE is returned. Three-valued logic is supported.

+

NOT A

+

BOOLEAN

+

If A is FALSE, TRUE is returned. If A is UNKNOWN, UNKNOWN is returned.

+

A IS FALSE

+

BOOLEAN

+

If A is FALSE, TRUE is returned. If A is UNKNOWN, FALSE is returned.

+

A IS NOT FALSE

+

BOOLEAN

+

If A is not FALSE, TRUE is returned. If A is UNKNOWN, TRUE is returned.

+

A IS TRUE

+

BOOLEAN

+

If A is TRUE, TRUE is returned. If A is UNKNOWN, FALSE is returned.

+

A IS NOT TRUE

+

BOOLEAN

+

If A is not TRUE, TRUE is returned. If A is UNKNOWN, TRUE is returned.

+

A IS UNKNOWN

+

BOOLEAN

+

If A is UNKNOWN, TRUE is returned.

+

A IS NOT UNKNOWN

+

BOOLEAN

+

If A is not UNKNOWN, TRUE is returned.

+
+
+

Only data of the Boolean type can be used for calculation using logical operators. Implicit type conversion is not supported.

+
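A short sketch of three-valued logic, assuming a hypothetical table t with a BOOLEAN column flag (a NULL flag evaluates to UNKNOWN):

SELECT * FROM t WHERE flag IS NOT TRUE; -- returns rows where flag is FALSE or UNKNOWN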
+
+

Arithmetic Operators

Arithmetic operators include binary and unary operators, all of which return results of the numeric type. Table 3 lists arithmetic operators supported by Flink SQL.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Arithmetic Operators

Operator

+

Result Type

+

Description

+

+ numeric

+

All numeric types

+

Returns numbers.

+

- numeric

+

All numeric types

+

Returns negative numbers.

+

A + B

+

All numeric types

+

A plus B. The result type is associated with the operands' data types. For example, if a floating-point number is added to an integer, the result is a floating-point number.

+

A - B

+

All numeric types

+

A minus B. The result type is associated with the operation data type.

+

A * B

+

All numeric types

+

Multiply A and B. The result type is associated with the operation data type.

+

A / B

+

All numeric types

+

Divide A by B. The result is a number of the double type (double-precision number).

+

POWER(A, B)

+

All numeric types

+

Returns the value of A raised to the power B.

+

ABS(numeric)

+

All numeric types

+

Returns the absolute value of a specified value.

+

MOD(A, B)

+

All numeric types

+

Returns the remainder (modulus) of A divided by B. A negative value is returned only when A is a negative value.

+

SQRT(A)

+

All numeric types

+

Returns the square root of A.

+

LN(A)

+

All numeric types

+

Returns the natural logarithm of A (base e).

+

LOG10(A)

+

All numeric types

+

Returns the base-10 logarithm of A.

+

EXP(A)

+

All numeric types

+

Returns the value of e raised to the power of A.

+

CEIL(A)

+

CEILING(A)

+

All numeric types

+

Returns the smallest integer that is greater than or equal to A. For example: ceil(21.2) = 22.

+

FLOOR(A)

+

All numeric types

+

Returns the largest integer that is less than or equal to A. For example: floor(21.2) = 21.

+

SIN(A)

+

All numeric types

+

Returns the sine value of A.

+

COS(A)

+

All numeric types

+

Returns the cosine value of A.

+

TAN(A)

+

All numeric types

+

Returns the tangent value of A.

+

COT(A)

+

All numeric types

+

Returns the cotangent value of A.

+

ASIN(A)

+

All numeric types

+

Returns the arc sine value of A.

+

ACOS(A)

+

All numeric types

+

Returns the arc cosine value of A.

+

ATAN(A)

+

All numeric types

+

Returns the arc tangent value of A.

+

DEGREES(A)

+

All numeric types

+

Converts the value of A from radians to degrees.

+

RADIANS(A)

+

All numeric types

+

Converts the value of A from degrees to radians.

+

SIGN(A)

+

All numeric types

+

Returns the sign of A: 1 if A is positive, -1 if A is negative, and 0 otherwise.

+

ROUND(A, d)

+

All numeric types

+

Rounds A to d decimal places. d is of the int type. For example: round(21.263, 2) = 21.26.

+

PI()

+

All numeric types

+

Returns the value of pi.

+
+
+

Data of the string type is not allowed in arithmetic operations.

+
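A quick sketch exercising several of the functions above with literal arguments (whether a FROM-less SELECT is accepted depends on the SQL dialect):

SELECT ABS(-5), MOD(10, 3), ROUND(21.263, 2), CEIL(21.2), FLOOR(21.2), POWER(2, 10);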
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0192.html b/docs/dli/sqlreference/dli_08_0192.html new file mode 100644 index 00000000..bf54f783 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0192.html @@ -0,0 +1,19 @@ + + +

Creating a Datasource Connection with a DWS table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0193.html b/docs/dli/sqlreference/dli_08_0193.html new file mode 100644 index 00000000..fd27ab90 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0193.html @@ -0,0 +1,146 @@ + + +

Creating a DLI Table and Associating It with DWS

+

Function

This statement is used to create a DLI table and associate it with an existing DWS table.

+
+

Prerequisites

Before creating a DLI table and associating it with DWS, you need to create a datasource connection. For details about operations on the management console, see

+
+

Syntax

1
+2
+3
+4
+5
+6
CREATE TABLE [IF NOT EXISTS] TABLE_NAME
+  USING JDBC OPTIONS (
+  'url'='xx',
+  'dbtable'='db_name_in_DWS.table_name_in_DWS',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true');
+
+ +
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 CREATE TABLE parameter description

Parameter

+

Description

+

url

+

To obtain the DWS IP address, you need to create a datasource connection first.

+

If you have created an enhanced datasource connection, you can use the JDBC Connection String (intranet) provided by DWS or the intranet address and port number to access DWS. The format is protocol header://internal IP address:internal network port/database name, for example: jdbc:postgresql://192.168.0.77:8000/postgres.

+
NOTE:

The DWS IP address is in the following format: protocol header://IP address:port number/database name

+

The following is an example:

+

jdbc:postgresql://to-dws-1174405119-ihlUr78j.datasource.com:8000/postgres

+

If you want to connect to a database created in DWS, change postgres to the corresponding database name in this connection.

+
+

dbtable

+

Specifies the DWS table to associate with, in the format Schema name.Table name. For example: public.table_name.

+

user

+

(Deprecated) DWS username.

+

password

+

User password of the DWS cluster.

+

passwdauth

+

Datasource password authentication name. For details about how to create datasource authentication, see Datasource Authentication in the Data Lake Insight User Guide.

+

encryption

+

Set this parameter to true when datasource password authentication is used.

+

partitionColumn

+

Specifies the numeric column used to partition concurrent reads.

+
NOTE:
  • The partitionColumn, lowerBound, upperBound, and numPartitions parameters must be set at the same time.
  • To improve the concurrent read performance, you are advised to use auto-increment columns.
+
+

lowerBound

+

Minimum value of a column specified by partitionColumn. The value is contained in the returned result.

+

upperBound

+

Maximum value of a column specified by partitionColumn. The value is not contained in the returned result.

+

numPartitions

+

Number of concurrent read operations.

+
NOTE:

When data is read, the concurrent tasks evenly divide the range between lowerBound and upperBound to obtain data. The following is an example:

+
'partitionColumn'='id',
+'lowerBound'='0',
+'upperBound'='100',
+'numPartitions'='2'
+

Two concurrent tasks are started in DLI: one task reads rows whose id is greater than or equal to 0 and less than 50, and the other reads rows whose id is greater than or equal to 50 and less than 100.

+
+

fetchsize

+

Number of data records obtained in each batch during data reading. The default value is 1000. A larger value improves performance but occupies more memory; if the value is too large, memory overflow may occur.

+

batchsize

+

Number of data records written in each batch. The default value is 1000. A larger value improves performance but occupies more memory; if the value is too large, memory overflow may occur.

+

truncate

+

Indicates whether to clear the table without deleting the original table when overwrite is executed. The options are as follows:

+
  • true
  • false
+

The default value is false, indicating that the original table is deleted and then a new table is created when the overwrite operation is performed.

+

isolationLevel

+

Transaction isolation level. The options are as follows:

+
  • NONE
  • READ_UNCOMMITTED
  • READ_COMMITTED
  • REPEATABLE_READ
  • SERIALIZABLE
+

The default value is READ_UNCOMMITTED.

+
+
+
+

Precautions

When creating a table associated with DWS, you do not need to specify the Schema of the associated table. DLI automatically obtains the schema of the table in the dbtable parameter of DWS.

+
+

Example

1
+2
+3
+4
+5
+6
CREATE TABLE IF NOT EXISTS dli_to_dws
+  USING JDBC OPTIONS (
+  'url'='jdbc:postgresql://to-dws-1174405119-ih1Ur78j.datasource.com:8000/postgres',
+  'dbtable'='test_dws',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true');
+
+ +
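A sketch combining the example above with the concurrent-read options from Table 1 (the table name dli_to_dws_partitioned and the id bounds are illustrative):

CREATE TABLE IF NOT EXISTS dli_to_dws_partitioned
  USING JDBC OPTIONS (
  'url'='jdbc:postgresql://to-dws-1174405119-ih1Ur78j.datasource.com:8000/postgres',
  'dbtable'='test_dws',
  'passwdauth' = 'xxx',
  'encryption' = 'true',
  'partitionColumn'='id',
  'lowerBound'='0',
  'upperBound'='100',
  'numPartitions'='2');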
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0194.html b/docs/dli/sqlreference/dli_08_0194.html new file mode 100644 index 00000000..5d401935 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0194.html @@ -0,0 +1,99 @@ + + +

Inserting Data to the DWS Table

+

Function

This statement is used to insert data in a DLI table to the associated DWS table.

+
+

Syntax

  • Insert the SELECT query result into a table.
    1
    +2
    +3
    +4
    +5
    +6
    +7
    INSERT INTO DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
    +  [LIMIT num]
    +  [GROUP BY field]
    +  [ORDER BY field] ...;
    +
    + +
    +
  • Insert a data record into a table.
    1
    +2
    INSERT INTO DLI_TABLE
    +  VALUES values_row [, values_row ...];
    +
    + +
    +
+
+

Keywords

For details about the SELECT keywords, see Basic SELECT Statements.

+
+

Parameter description

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

DLI_TABLE

+

Name of the DLI table for which a datasource connection has been created.

+

DLI_TEST

+

Indicates the table that contains the data to be queried.

+

field1,field2..., field

+

Column values in the DLI_TEST table must match the column values and types in the DLI_TABLE table.

+

where_condition

+

Query condition.

+

num

+

Limit the query result. The num parameter supports only the INT type.

+

values_row

+

Value to be inserted to a table. Use commas (,) to separate columns.

+
+
+
+

Precautions

  • The target DLI table must exist.
  • When creating the DLI table, you do not need to specify the Schema information. The Schema information complies with that in the DWS table. If the number and type of fields selected in the SELECT clause do not match the Schema information in the DWS table, the system reports an error.
  • You are advised not to concurrently insert data into a table. If you concurrently insert data into a table, there is a possibility that conflicts occur, leading to failed data insertion.
+
+

Example

  • Query data in the user table and insert the data into the test table.
    1
    +2
    +3
    +4
    +5
    +6
    INSERT INTO test
    +  SELECT ATTR_EXPR
    +  FROM user
    +  WHERE user_name='cyz'
    +  LIMIT 3
    +  GROUP BY user_age
    +
    + +
    +
  • Insert data 1 into the test table.
    INSERT INTO test 
    +  VALUES (1);
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0195.html b/docs/dli/sqlreference/dli_08_0195.html new file mode 100644 index 00000000..82204650 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0195.html @@ -0,0 +1,26 @@ + + +

Querying the DWS Table

+

This statement is used to query data in a DWS table.

+

Syntax

1
SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Precautions

The table to be queried must exist. Otherwise, an error is reported.

+
+

Example

To query data in the dli_to_dws table, enter the following statement:

+
1
SELECT * FROM dli_to_dws limit 100;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0196.html b/docs/dli/sqlreference/dli_08_0196.html new file mode 100644 index 00000000..b0655872 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0196.html @@ -0,0 +1,19 @@ + + +

Creating a Datasource Connection with an RDS Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0197.html b/docs/dli/sqlreference/dli_08_0197.html new file mode 100644 index 00000000..a0069259 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0197.html @@ -0,0 +1,174 @@ + + +

Creating a DLI Table and Associating It with RDS

+

Function

This statement is used to create a DLI table and associate it with an existing RDS table. This function supports access to the MySQL and PostgreSQL clusters of RDS.

+
+

Prerequisites

Before creating a DLI table and associating it with RDS, you need to create a datasource connection. For details about operations on the management console, see

+
+

Syntax

1
+2
+3
+4
+5
+6
+7
CREATE TABLE [IF NOT EXISTS] TABLE_NAME
+  USING JDBC OPTIONS (
+  'url'='xx',
+  'driver'='DRIVER_NAME',
+  'dbtable'='db_name_in_RDS.table_name_in_RDS',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true');
+
+ +
+
+

Keywords

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 CREATE TABLE parameter description

Parameter

+

Description

+

url

+

To obtain the RDS IP address, you need to create a datasource connection first.

+

After an enhanced datasource connection is created, use the internal network domain name or internal IP address and database port number provided by RDS to access RDS. If MySQL is used, the format is protocol header://internal IP address:internal network port number. If PostgreSQL is used, the format is protocol header://internal IP address:internal network port number/database name.

+

For example: jdbc:mysql://192.168.0.193:3306 or jdbc:postgresql://192.168.0.193:3306/postgres.

+

driver

+

JDBC driver class name. To connect to a MySQL cluster, enter com.mysql.jdbc.Driver. To connect to a PostgreSQL cluster, enter org.postgresql.Driver.

+

dbtable

+
  • To access the MySQL cluster, enter Database name.Table name.
    CAUTION:

    The name of the RDS database cannot contain hyphens (-) or ^. Otherwise, the table fails to be created.

    +
    +
  • To access the PostgreSQL cluster, enter Schema name.Table name
    NOTE:

    The schema name is the name of the database schema. A schema is a collection of database objects, including tables and views.

    +
    +
+

user

+

(Deprecated) RDS username.

+

password

+

(Deprecated) RDS user password.

+

passwdauth

+

Datasource password authentication name. For details about how to create datasource authentication, see Datasource Authentication in the Data Lake Insight User Guide.

+

encryption

+

Set this parameter to true when datasource password authentication is used.

+

partitionColumn

+

Specifies the numeric column used to partition concurrent reads.

+
NOTE:
  • The partitionColumn, lowerBound, upperBound, and numPartitions parameters must be set at the same time.
  • To improve the concurrent read performance, you are advised to use auto-increment columns.
+
+

lowerBound

+

Minimum value of a column specified by partitionColumn. The value is contained in the returned result.

+

upperBound

+

Maximum value of a column specified by partitionColumn. The value is not contained in the returned result.

+

numPartitions

+

Number of concurrent read operations.

+
NOTE:

When data is read, the concurrent tasks evenly divide the range between lowerBound and upperBound to obtain data. The following is an example:

+
'partitionColumn'='id',
+'lowerBound'='0',
+'upperBound'='100',
+'numPartitions'='2'
+

Two concurrent tasks are started in DLI: one task reads rows whose id is greater than or equal to 0 and less than 50, and the other reads rows whose id is greater than or equal to 50 and less than 100.

+
+

fetchsize

+

Number of data records obtained in each batch during data reading. The default value is 1000. A larger value improves performance but occupies more memory; if the value is too large, memory overflow may occur.

+

batchsize

+

Number of data records written in each batch. The default value is 1000. A larger value improves performance but occupies more memory; if the value is too large, memory overflow may occur.

+

truncate

+

Indicates whether to clear the table without deleting the original table when overwrite is executed. The options are as follows:

+
  • true
  • false
+

The default value is false, indicating that the original table is deleted and then a new table is created when the overwrite operation is performed.

+

isolationLevel

+

Transaction isolation level. The options are as follows:

+
  • NONE
  • READ_UNCOMMITTED
  • READ_COMMITTED
  • REPEATABLE_READ
  • SERIALIZABLE
+

The default value is READ_UNCOMMITTED.

+
+
+
+

Precautions

When creating a table associated with RDS, you do not need to specify the Schema of the associated table. DLI automatically obtains the schema of the table in the dbtable parameter of RDS.

+
+

Example

Accessing MySQL:

+
1
+2
+3
+4
+5
+6
+7
CREATE TABLE IF NOT EXISTS dli_to_rds
+  USING JDBC OPTIONS (
+  'url'='jdbc:mysql://to-rds-117405104-3eAHxnlz.datasource.com:3306',
+  'driver'='com.mysql.jdbc.Driver',
+  'dbtable'='rds_test.test1',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true');
+
+ +
+

+

Accessing PostgreSQL:

+
1
+2
+3
+4
+5
+6
+7
CREATE TABLE IF NOT EXISTS dli_to_rds
+  USING JDBC OPTIONS (
+  'url'='jdbc:postgresql://to-rds-1174405119-oLRHAGE7.datasource.com:3306/postgreDB',
+  'driver'='org.postgresql.Driver',
+  'dbtable'='pg_schema.test1',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true');
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0198.html b/docs/dli/sqlreference/dli_08_0198.html new file mode 100644 index 00000000..609559c5 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0198.html @@ -0,0 +1,99 @@ + + +

Inserting Data to the RDS Table

+

Function

This statement is used to insert data in a DLI table to the associated RDS table.

+
+

Syntax

  • Insert the SELECT query result into a table.
    1
    +2
    +3
    +4
    +5
    +6
    +7
    INSERT INTO DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
    +  [LIMIT num]
    +  [GROUP BY field]
    +  [ORDER BY field] ...;
    +
    + +
    +
  • Insert a data record into a table.
    1
    +2
    INSERT INTO DLI_TABLE
    +  VALUES values_row [, values_row ...];
    +
    + +
    +
+
+

Keywords

For details about the SELECT keywords, see Basic SELECT Statements.

+
+

Parameter description

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

DLI_TABLE

+

Name of the DLI table for which a datasource connection has been created.

+

DLI_TEST

+

Indicates the table that contains the data to be queried.

+

field1,field2..., field

+

Column values in the DLI_TEST table must match the column values and types in the DLI_TABLE table.

+

where_condition

+

Query condition.

+

num

+

Limit the query result. The num parameter supports only the INT type.

+

values_row

+

Value to be inserted to a table. Use commas (,) to separate columns.

+
+
+
+

Precautions

  • The target DLI table must exist.
  • When creating the DLI table, you do not need to specify the Schema information. The Schema information complies with that in the RDS table. If the number and type of fields selected in the SELECT clause do not match the Schema information in the RDS table, the system reports an error.
  • You are advised not to concurrently insert data into a table. If you concurrently insert data into a table, there is a possibility that conflicts occur, leading to failed data insertion.
+
+

Example

  • Query data in the user table and insert the data into the test table.
    1
    +2
    +3
    +4
    +5
    +6
    INSERT INTO test
    +  SELECT ATTR_EXPR
    +  FROM user
    +  WHERE user_name='cyz'
    +  LIMIT 3
    +  GROUP BY user_age
    +
    + +
    +
  • Insert data 1 into the test table.
    INSERT INTO test 
    +  VALUES (1);
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0199.html b/docs/dli/sqlreference/dli_08_0199.html new file mode 100644 index 00000000..c3a650de --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0199.html @@ -0,0 +1,26 @@ + + +

Querying the RDS Table

+

This statement is used to query data in an RDS table.

+

Syntax

1
SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Precautions

The table to be queried must exist. Otherwise, an error is reported.

+
+

Example

To query data in the dli_to_rds table, enter the following statement:

+
1
SELECT * FROM dli_to_rds limit 100;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0200.html b/docs/dli/sqlreference/dli_08_0200.html new file mode 100644 index 00000000..36684622 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0200.html @@ -0,0 +1,19 @@ + + +

Creating a Datasource Connection with a CSS Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0201.html b/docs/dli/sqlreference/dli_08_0201.html new file mode 100644 index 00000000..436988a7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0201.html @@ -0,0 +1,128 @@ + + +

Creating a DLI Table and Associating It with CSS

+

Function

This statement is used to create a DLI table and associate it with an existing CSS table.

+
+

Prerequisites

Before creating a DLI table and associating it with CSS, you need to create a datasource connection. For details about operations on the management console, see

+
+

Syntax

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
CREATE TABLE [IF NOT EXISTS] TABLE_NAME(
+  FIELDNAME1 FIELDTYPE1,
+  FIELDNAME2 FIELDTYPE2)
+  USING CSS OPTIONS (
+  'es.nodes'='xx',
+  'resource'='type_path_in_CSS',
+  'pushdown'='true',
+  'strict'='false',
+  'batch.size.entries'= '1000',
+  'batch.size.bytes'= '1mb',
+  'es.nodes.wan.only' = 'true',
+  'es.mapping.id' = 'FIELDNAME');
+
+ +
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 CREATE TABLE parameter description

Parameter

+

Description

+

es.nodes

+

To obtain the CSS IP address, you need to create a datasource connection first.

+

If you have created an enhanced datasource connection, you can use the internal IP address provided by CSS. The format is IP1:PORT1,IP2:PORT2.

+

resource

+

The resource is used to specify the resource location in CSS, in the /index/type format (for easier understanding, the index can be seen as a database and the type as a table).

+
NOTE:
  • In ES 6.X, a single index supports only one type, and the type name can be customized.
  • In ES 7.X, a single index uses _doc as the type name and cannot be customized. To access ES 7.X, set this parameter to index.
+
+

pushdown

+

Indicates whether the pushdown function of CSS is enabled. The default value is true. If there are a large number of I/O requests, enabling pushdown reduces I/O when the WHERE filtering conditions are met.

+

strict

+

Indicates whether the CSS pushdown is strict. The default value is false. In exact-match scenarios, strict pushdown reduces more I/O than regular pushdown.

+

batch.size.entries

+

Maximum number of entries in a single batch. The default value is 1000. If a single record is so large that the batch reaches the data-volume limit first, the system stops buffering and submits the batch based on batch.size.bytes.

+

batch.size.bytes

+

Maximum amount of data in a single batch. The default value is 1 MB. If single records are so small that the batch reaches the entry-count limit first, the system stops buffering and submits the batch based on batch.size.entries.

+

es.nodes.wan.only

+

Indicates whether to access the Elasticsearch node using only the domain name. The default value is false. If the original internal IP address provided by CSS is used as the es.nodes, you do not need to set this parameter or set it to false.

+

es.mapping.id

+

Specifies a field whose value is used as the document ID in the Elasticsearch node.

+
NOTE:
  • The document ID must be unique within the same /index/type. If the field used as the document ID contains duplicate values, documents with the same ID are overwritten when data is written to Elasticsearch.
  • This feature can serve as a fault-tolerance mechanism. If a DLI job fails after some data has already been inserted into Elasticsearch, that data is redundant. If the document ID is set, the redundant data is overwritten when the DLI job is executed again.
+
+

es.net.ssl

+

Whether to connect to the secure CSS cluster. The default value is false.

+

es.certificate.name

+

Name of the datasource authentication used to connect to the secure CSS cluster. For details about how to create datasource authentication, see Datasource Authentication in the Data Lake Insight User Guide.

+
+
+

batch.size.entries and batch.size.bytes limit the number of data records and data volume respectively.

+
+
+

Example

1
+2
+3
+4
+5
+6
+7
+8
CREATE TABLE IF NOT EXISTS dli_to_css (doc_id String, name string, age int)
+  USING CSS OPTIONS (
+  es.nodes 'to-css-1174404703-LzwpJEyx.datasource.com:9200',
+  resource '/dli_index/dli_type',
+  pushdown 'false',
+  strict 'true',
+  es.nodes.wan.only 'true',
+  es.mapping.id 'doc_id');
+
+ +
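For a secure CSS cluster, the es.net.ssl and es.certificate.name options from Table 1 would be added. A sketch, where the datasource authentication name xxx is illustrative:

CREATE TABLE IF NOT EXISTS dli_to_secure_css (doc_id String, name string, age int)
  USING CSS OPTIONS (
  es.nodes 'to-css-1174404703-LzwpJEyx.datasource.com:9200',
  resource '/dli_index/dli_type',
  es.net.ssl 'true',
  es.certificate.name 'xxx',
  es.mapping.id 'doc_id');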
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0202.html b/docs/dli/sqlreference/dli_08_0202.html new file mode 100644 index 00000000..02e3e32d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0202.html @@ -0,0 +1,99 @@ + + +

Inserting Data to the CSS Table

+

Function

This statement is used to insert data in a DLI table to the associated CSS table.

+
+

Syntax

  • Insert the SELECT query result into a table.
    1
    +2
    +3
    +4
    +5
    +6
    +7
    INSERT INTO DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
    +  [LIMIT num]
    +  [GROUP BY field]
    +  [ORDER BY field] ...;
    +
    + +
    +
  • Insert a data record into a table.
    1
    +2
    INSERT INTO DLI_TABLE
    +  VALUES values_row [, values_row ...];
    +
    + +
    +
+
+

Keywords

For details about the SELECT keywords, see Basic SELECT Statements.

+
+

Parameter description

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

DLI_TABLE

+

Name of the DLI table for which a datasource connection has been created.

+

DLI_TEST

+

Indicates the table that contains the data to be queried.

+

field1,field2..., field

+

Column values in the DLI_TEST table must match the column values and types in the DLI_TABLE table.

+

where_condition

+

Query condition.

+

num

+

Limit the query result. The num parameter supports only the INT type.

+

values_row

+

Value to be inserted to a table. Use commas (,) to separate columns.

+
+
+
+

Precautions

  • The target DLI table must exist.
  • When creating the DLI table, you need to specify the schema information. If the number and type of fields selected in the SELECT clause or in Values do not match the Schema information in the CSS table, the system reports an error.
  • Inconsistent types do not always cause errors. For example, if data of the int type is inserted but the CSS schema stores the text type, the int value is converted to text and no error is reported.
  • You are advised not to concurrently insert data into a table. If you concurrently insert data into a table, there is a possibility that conflicts occur, leading to failed data insertion.
+
+

Example

  • Query data in the user table and insert the data into the test table.
    1
    +2
    +3
    +4
    +5
    +6
    INSERT INTO test
    +  SELECT ATTR_EXPR
    +  FROM user
    +  WHERE user_name='cyz'
    +  LIMIT 3
    +  GROUP BY user_age
    +
    + +
    +
  • Insert data 1 into the test table.
    INSERT INTO test 
    +  VALUES (1);
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0203.html b/docs/dli/sqlreference/dli_08_0203.html new file mode 100644 index 00000000..91afb0dd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0203.html @@ -0,0 +1,26 @@ + + +

Querying the CSS Table

+

This statement is used to query data in a CSS table.

+

Syntax

1
SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Precautions

The table to be queried must exist. Otherwise, an error is reported.

+
+

Example

To query data in the dli_to_css table, enter the following statement:

+
1
SELECT * FROM dli_to_css limit 100;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0204.html b/docs/dli/sqlreference/dli_08_0204.html new file mode 100644 index 00000000..95310f11 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0204.html @@ -0,0 +1,138 @@ + + +

Creating a DLI Table Using the Hive Syntax

+

Function

This Hive syntax is used to create a DLI table. The main differences between the DataSource and the Hive syntax lie in the supported data formats and the number of supported partitions. For details, see syntax and precautions.

+
+

Syntax

 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
CREATE TABLE [IF NOT EXISTS] [db_name.]table_name 
+  [(col_name1 col_type1 [COMMENT col_comment1], ...)]
+  [COMMENT table_comment] 
+  [PARTITIONED BY (col_name2 col_type2, [COMMENT col_comment2], ...)] 
+  [ROW FORMAT row_format]
+  STORED AS file_format 
+  [TBLPROPERTIES (key1=val1, key2=val2, ...)]
+  [AS select_statement];
+
+row_format:
+  : SERDE serde_cls [WITH SERDEPROPERTIES (key1=val1, key2=val2, ...)]
+  | DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
+      [COLLECTION ITEMS TERMINATED BY char]
+      [MAP KEYS TERMINATED BY char]
+      [LINES TERMINATED BY char]
+      [NULL DEFINED AS char]
+
+ +
+
+

Keyword

  • IF NOT EXISTS: Prevents errors if a table with the same name already exists.
  • COMMENT: Field or table description.
  • PARTITIONED BY: Partition field.
  • ROW FORMAT: Row data format.
  • STORED AS: Specifies the format of the file to be stored. Currently, only the TEXTFILE, AVRO, ORC, SEQUENCEFILE, RCFILE, and PARQUET formats are supported. This keyword is mandatory when you create DLI tables.
  • TBLPROPERTIES: The TBLPROPERTIES clause allows you to add the key/value attribute to a table.

    For example, if the table storage format is Parquet, you can use TBLPROPERTIES(parquet.compression = 'zstd') to set the table compression format to zstd.

    +
  • AS: Run the CREATE TABLE AS statement to create a table.
+
+

Parameter Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). The value cannot contain only digits and cannot start with a digit or underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). The value cannot contain only digits and cannot start with a digit or underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+

col_name

+

Column names with data types separated by commas (,). The column name contains letters, digits, and underscores (_). It cannot contain only digits and must contain at least one letter.

+

col_type

+

Field type

+

col_comment

+

Field description

+

row_format

+

Line data format

+

file_format

+

Data storage format: TEXTFILE, AVRO, ORC, SEQUENCEFILE, RCFILE, PARQUET.

+

table_comment

+

Table description

+

select_statement

+

The CREATE TABLE AS statement is used to insert the SELECT query result of the source table or a data record to a newly created DLI table.

+
+
+
+

Precautions

  • When you create a partitioned table, ensure that the specified column in PARTITIONED BY is not a column in the table and the data type is specified. The partition column supports only the open-source Hive table types including string, boolean, tinyint, smallint, short, int, bigint, long, decimal, float, double, date, and timestamp.
  • Multiple partition fields can be specified. The partition fields need to be specified after the PARTITIONED BY keyword, instead of the table name. Otherwise, an error occurs.
  • A maximum of 100,000 partitions can be created in a single table.
  • The CREATE TABLE AS statement cannot specify table attributes or create partitioned tables.
+
+

Example

  • Create a src table that has key and value columns in INT and STRING types respectively, and specify a property as required.
    1
    +2
    +3
    +4
    CREATE TABLE src
    +  (key INT, value STRING)
    +  STORED AS PARQUET
    +  TBLPROPERTIES('key1' = 'value1');
    +
    + +
    +
  • Create a student table that has name, score, and classNo columns, and partition the table by classNo.
    1
    +2
    +3
    +4
    CREATE TABLE student
    +  (name STRING, score INT)
    +  STORED AS PARQUET
    +  TBLPROPERTIES(parquet.compression = 'zstd') PARTITIONED BY(classNo INT);
    +
    + +
    +
  • Create table t1 and insert t2 data into table t1.
    1
    +2
    +3
    CREATE TABLE t1
    +  STORED AS PARQUET
    +  AS select * from t2;
    +
    + +
    +
+
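The ROW FORMAT clause from the syntax above is typically paired with TEXTFILE storage. A minimal sketch creating a comma-delimited text table (the table name csv_src is illustrative):

CREATE TABLE csv_src
  (key INT, value STRING)
  ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
  STORED AS TEXTFILE;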
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0205.html b/docs/dli/sqlreference/dli_08_0205.html new file mode 100644 index 00000000..3248d403 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0205.html @@ -0,0 +1,64 @@ + + +

Exporting Search Results

+

Function

This statement is used to directly write query results to a specified directory. The query results can be stored in CSV, Parquet, ORC, JSON, or Avro format.

+
+

Syntax

1
+2
+3
+4
INSERT OVERWRITE DIRECTORY path
+  USING file_format
+  [OPTIONS(key1=value1)]
+  select_statement;
+
+ +
+
+

Keyword

  • USING: Specifies the storage format.
  • OPTIONS: Specifies the list of attributes to be exported. This parameter is optional.
+
+

Parameter

+
+ + + + + + + + + + +
Table 1 INSERT OVERWRITE DIRECTORY parameter description

Parameter

+

Description

+

path

+

The OBS path to which the query result is to be written.

+

file_format

+

Format of the file to be written. The value can be CSV, Parquet, ORC, JSON, or Avro.

+
+
+

If the file format is set to CSV, see Table 3 for the OPTIONS parameters.

+
+
+

Precautions

  • You can configure the spark.sql.shuffle.partitions parameter to set the number of files to be inserted into the OBS bucket in the non-DLI table. In addition, to avoid data skew, you can add distribute by rand() to the end of the INSERT statement to increase the number of concurrent jobs. The following is an example:
    insert into table table_target select * from table_source distribute by cast(rand() * N as int);
    +
  • When the configuration item is OPTIONS('DELIMITER'=','), you can specify a separator. The default value is ,.

    For CSV data, the following delimiters are supported:

    +
    • Tab character, for example, 'DELIMITER'='\t'.
    • Any binary character, for example, 'DELIMITER'='\u0001(^A)'.
    • Single quotation mark ('). A single quotation mark must be enclosed in double quotation marks (" "). For example, 'DELIMITER'= "'".
    • \001(^A) and \017(^Q) are also supported, for example, 'DELIMITER'='\001(^A)' and 'DELIMITER'='\017(^Q)'.
    +
+
+

Example

1
+2
+3
+4
INSERT OVERWRITE DIRECTORY 'obs://bucket/dir'
+  USING csv
+  OPTIONS(key1=value1)
+  select * from db1.tb1;
+
+ +
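To write tab-separated output instead, the DELIMITER option described in Precautions would be set. A sketch reusing the path from the example above:

INSERT OVERWRITE DIRECTORY 'obs://bucket/dir'
  USING csv
  OPTIONS('DELIMITER'='\t')
  select * from db1.tb1;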
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0206.html b/docs/dli/sqlreference/dli_08_0206.html new file mode 100644 index 00000000..00debeb8 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0206.html @@ -0,0 +1,34 @@ + + +

Table-Valued Functions

+

Table-valued functions can convert one row of records into multiple rows or convert one column of records into multiple columns. Table-valued functions can only be used in JOIN LATERAL TABLE.

+ +
+ + + + + + + + + +
Table 1 Table-valued functions

Function

+

Return Data Type

+

Description

+

split_cursor(value, delimiter)

+

cursor

+

Separates the "value" string into multiple rows of strings by using the delimiter.

+
+
+

Example

Input one record ("student1", "student2, student3") and output two records ("student1", "student2") and ("student1", "student3").

+
create source stream s1(attr1 string, attr2 string) with (......);
+insert into s2 select  attr1, b1 from s1 left join lateral table(split_cursor(attr2, ',')) as T(b1) on true;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0207.html b/docs/dli/sqlreference/dli_08_0207.html new file mode 100644 index 00000000..d127ad78 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0207.html @@ -0,0 +1,368 @@ + + +

Data Type

+

Overview

Data type is a basic attribute of data, used to distinguish different types of data. Different data types occupy different storage space and support different operations. Data is stored in tables in the database, and each column of a table defines a data type. During storage, data must be stored according to its data type.

+

Similar to the open source community, Flink SQL of the big data platform supports both native data types and complex data types.

+
+

Primitive Data Types

Table 1 lists native data types supported by Flink SQL.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Primitive Data Types

Data Type

+

Description

+

Storage Space

+

Value Range

+

VARCHAR

+

Character with a variable length

+

-

+

-

+

BOOLEAN

+

Boolean

+

-

+

TRUE/FALSE

+

TINYINT

+

Signed integer

+

1 byte

+

-128 to 127

+

SMALLINT

+

Signed integer

+

2 bytes

+

-32768 to 32767

+

INT

+

Signed integer

+

4 bytes

+

-2147483648 to 2147483647

+

INTEGER

+

Signed integer

+

4 bytes

+

-2147483648 to 2147483647

+

BIGINT

+

Signed integer

+

8 bytes

+

-9223372036854775808 to 9223372036854775807

+

REAL

+

Single-precision floating point

+

4 bytes

+

-

+

FLOAT

+

Single-precision floating point

+

4 bytes

+

-

+

DOUBLE

+

Double-precision floating-point

+

8 bytes

+

-

+

DECIMAL

+

Data type of valid fixed places and decimal places

+

-

+

-

+

DATE

+

Date type in the format of yyyy-MM-dd, for example, 2014-05-29

+

-

+

DATE does not contain time information. Its value ranges from 0000-01-01 to 9999-12-31.

+

TIME

+

Time type in the format of HH:MM:SS

+

For example, 20:17:40

+

-

+

-

+

TIMESTAMP(3)

+

Timestamp of date and time

+

For example, 1969-07-20 20:17:40

+

-

+

-

+

INTERVAL timeUnit [TO timeUnit]

+

Time interval

+

For example, INTERVAL '1:5' YEAR TO MONTH, INTERVAL '45' DAY

+

-

+

-

+
+
+
+

Complex Data Types

Flink SQL supports complex data types and complex type nesting. Table 2 describes complex data types.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Complex Data Types

Data Type

+

Description

+

Declaration Method

+

Reference Method

+

Construction Method

+

ARRAY

+

Indicates a group of ordered fields that are of the same data type.

+

ARRAY[TYPE]

+

Variable name [subscript]. The subscript starts from 1, for example, v1[1].

+

Array[value1, value2, ...] as v1

+

MAP

+

Indicates a group of unordered key/value pairs. The key must be of a native data type, while the value can be of either a native or a complex data type. All keys in a MAP must be of the same type, as must all values.

+

MAP [TYPE, TYPE]

+

Variable name [key], for example, v1[key]

+

Map[key, value, key2, value2, key3, value3.......] as v1

+

ROW

+

Indicates a group of named fields. The data types of the fields can be different.

+

ROW<a1 TYPE1, a2 TYPE2>

+

Variable name. Field name, for example, v1.a1.

+

Row('1',2) as v1

+
+
+
Here is sample code:
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
CREATE SOURCE STREAM car_infos (
+  car_id STRING,
+  address ROW<city STRING, province STRING, country STRING>,
+  average_speed MAP[STRING, LONG],
+  speeds ARRAY[LONG]
+) 
+  WITH (
+    type = "dis",
+    region = "xxx",
+    channel = "dliinput",
+    encode = "json"
+);
+
+CREATE temp STREAM car_speed_infos (
+  car_id STRING,
+  province STRING,
+  average_speed LONG,
+  start_speed LONG
+);
+
+INSERT INTO car_speed_infos SELECT
+   car_id,
+   address.province,
+   average_speed[address.city],
+   speeds[1]
+FROM car_infos;
+
+ +
+
+
+

Complex Type Nesting

  • JSON format enhancement

    The following uses Source as an example. The method of using Sink is the same.

    +
    • json_schema can be configured.
After json_schema is configured, fields in the DDL can be automatically generated from json_schema without declaration. Here is sample code:
      CREATE SOURCE STREAM data_with_schema WITH (
      +       type = "dis",
      +       region = "xxx",
      +       channel = "dis-in",
      +       encode = "json",
      +       json_schema = '{"definitions":{"address":{"type":"object","properties":{"street_address":{"type":"string"},"city":{"type":"string"},"state":{"type":"string"}},"required":["street_address","city","state"]}},"type":"object","properties":{"billing_address":{"$ref":"#/definitions/address"},"shipping_address":{"$ref":"#/definitions/address"},"optional_address":{"oneOf":[{"type":"null"},{"$ref":"#/definitions/address"}]}}}'
      +     );
      +
      +     CREATE SINK STREAM buy_infos (
      +       billing_address_city STRING,
      +       shipping_address_state string
      +     ) WITH (
      +       type = "obs",
      +       encode = "csv",
      +       region = "xxx" ,
      +       field_delimiter = ",",
      +       row_delimiter = "\n",
      +       obs_dir = "bucket/car_infos",
      +       file_prefix = "over",
      +       rolling_size = "100m"
      +     );
      +
      +     insert into buy_infos select billing_address.city, shipping_address.state from data_with_schema;
      +
      +

      Example data

      +
      {
      + "billing_address":
      +  {
      +   "street_address":"xxx",
      +   "city":"xxx",
      +   "state":"xxx"
      +   },
      + "shipping_address":
      +  {
      +   "street_address":"xxx",
      +   "city":"xxx",
      +   "state":"xxx"
      +  }
      +}
      +
    • The json_schema and json_config parameters can be left empty. For details about how to use json_config, see the example in Open-Source Kafka Source Stream.

      In this case, the attribute name in the DDL is used as the JSON key for parsing by default.

      +

      The following is example data. It contains nested JSON fields, such as billing_address and shipping_address, and non-nested fields id and type2.

      +
      {
      + "id":"1",
      + "type2":"online",
      + "billing_address":
      +  {
      +   "street_address":"xxx",
      +   "city":"xxx",
      +   "state":"xxx"
      +   },
      + "shipping_address":
      +  {
      +   "street_address":"xxx",
      +   "city":"xxx",
      +   "state":"xxx"
      +  }
      +}
      +
      The table creation and usage examples are as follows:
      CREATE SOURCE STREAM car_info_data (
      +       id STRING,
      +       type2 STRING,
      +       billing_address Row<street_address string, city string, state string>,
      +       shipping_address Row<street_address string, city string, state string>,
      +       optional_address Row<street_address string, city string, state string>
      +     ) WITH (
      +       type = "dis",
      +       region = "xxx",
      +       channel = "dis-in",
      +       encode = "json"
      +	 );
      +	
      +    CREATE SINK STREAM buy_infos (
      +       id STRING,
      +       type2 STRING,
      +       billing_address_city STRING,
      +       shipping_address_state string
      +     ) WITH (
      +       type = "obs",
      +       encode = "csv",
      +       region = "xxx",
      +       field_delimiter = ",",
      +       row_delimiter = "\n",
      +       obs_dir = "bucket/car_infos",
      +       file_prefix = "over",
      +       rolling_size = "100m"
      +     );
      +
      +     insert into buy_infos select id, type2, billing_address.city, shipping_address.state from car_info_data;   
      +
      +
    +
  • Complex data types supported by sink serialization
    • Currently, only the CSV and JSON formats support complex data types.
    • For details about the JSON format, see Json format enhancement.
  • There is no standard format for complex types in CSV files. Therefore, only sink serialization is supported.
  • Output format: It is recommended that the output format be the same as that of the native Flink (see the sink sketch after this list).

      Map: {key1=Value1, key2=Value2}

      +

      Row: Attributes are separated by commas (,), for example, Row(1,'2') => 1,'2'.

      +
    +
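A minimal sink sketch (assuming the car_infos schema above; the OBS parameters are illustrative) that serializes a ROW and a MAP column to CSV in the formats described above:

CREATE SINK STREAM car_infos_out (
  car_id STRING,
  address ROW<city STRING, province STRING, country STRING>,
  average_speed MAP[STRING, LONG]
) WITH (
  type = "obs",
  encode = "csv",
  region = "xxx",
  field_delimiter = ",",
  row_delimiter = "\n",
  obs_dir = "bucket/car_infos_out",
  file_prefix = "over",
  rolling_size = "100m"
);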
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0209.html b/docs/dli/sqlreference/dli_08_0209.html new file mode 100644 index 00000000..2b48273f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0209.html @@ -0,0 +1,359 @@ + + +

Geographical Functions

+

Function description

Table 1 describes the basic geospatial geometric elements.

Table 1 Basic geospatial geometric element table

Geospatial geometric elements

+

Description

+

Example Value

+

ST_POINT(latitude, longitude)

+

Indicates a geographical point, including the longitude and latitude.

+

ST_POINT(1.12012, 1.23401)

+

ST_LINE(array[point1...pointN])

+

Indicates a geographical line formed by connecting multiple geographical points (ST_POINT) in sequence. The line can be a polygonal line or a straight line.

+

ST_LINE(ARRAY[ST_POINT(1.12, 2.23), ST_POINT(1.13, 2.44), ST_POINT(1.13, 2.44)])

+

ST_POLYGON(array[point1...point1])

+

Indicates a geographical polygon, which is a closed polygon area formed by connecting multiple geographical points (ST_POINT) with the same start and end points in sequence.

+

ST_POLYGON(ARRAY[ST_POINT(1.0, 1.0), ST_POINT(2.0, 1.0), ST_POINT(2.0, 2.0), ST_POINT(1.0, 1.0)])

+

ST_CIRCLE(point, radius)

+

Indicates a geographical circle that consists of ST_POINT and a radius.

+

ST_CIRCLE(ST_POINT(1.0, 1.0), 1.234)

+
+
+

You can build complex geospatial geometries based on basic geospatial geometric elements. Table 2 describes the related transformation methods.

Table 2 Transformation methods for building complex geometric elements based on basic geospatial geometric elements

Transformation Method

+

Description

+

Example Value

+

ST_BUFFER(geometry, distance)

+

Creates a polygon that surrounds the geospatial geometric elements at a given distance. Generally, this function is used to build the road area of a certain width for yaw detection.

+

ST_BUFFER(ST_LINE(ARRAY[ST_POINT(1.12, 2.23), ST_POINT(1.13, 2.44), ST_POINT(1.13, 2.44)]),1.0)

+

ST_INTERSECTION(geometry, geometry)

+

Creates a polygon that delimits the overlapping area of two given geospatial geometric elements.

+

ST_INTERSECTION(ST_CIRCLE(ST_POINT(1.0, 1.0), 2.0), ST_CIRCLE(ST_POINT(3.0, 1.0), 1.234))

+

ST_ENVELOPE(geometry)

+

Creates the minimal rectangle polygon including the given geospatial geometric elements.

+

ST_ENVELOPE(ST_CIRCLE(ST_POINT(1.0, 1.0), 2.0))

+
+
+

DLI provides multiple functions used for performing operations on and determining locations of geospatial geometric elements. Table 3 describes the SQL scalar functions.

Table 3 SQL scalar function table

Function

+

Return Type

+

Description

+

ST_DISTANCE(point_1, point_2)

+

DOUBLE

+

Calculates the Euclidean distance between the two geographical points.

+

The following provides an example:

+

Select ST_DISTANCE(ST_POINT(x1, y1), ST_POINT(x2, y2)) FROM input

+

ST_GEODESIC_DISTANCE(point_1, point_2)

+

DOUBLE

+

Calculates the shortest distance along the surface between two geographical points.

+

The following provides an example:

+

Select ST_GEODESIC_DISTANCE(ST_POINT(x1, y1), ST_POINT(x2, y2)) FROM input

+

ST_PERIMETER(polygon)

+

DOUBLE

+

Calculates the circumference of a polygon.

+

The following provides an example:

+

Select ST_PERIMETER(ST_POLYGON(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12), ST_POINT(x11, y11)])) FROM input

+

ST_AREA(polygon)

+

DOUBLE

+

Calculates the area of a polygon.

+

The following provides an example:

+

Select ST_AREA(ST_POLYGON(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12), ST_POINT(x11, y11)])) FROM input

+

ST_OVERLAPS(polygon_1, polygon_2)

+

BOOLEAN

+

Checks whether one polygon overlaps with another.

+

The following provides an example:

+

SELECT ST_OVERLAPS(ST_POLYGON(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12), ST_POINT(x11, y11)]), ST_POLYGON(ARRAY[ST_POINT(x21, y21), ST_POINT(x22, y22), ST_POINT(x23, y23), ST_POINT(x21, y21)])) FROM input

+

ST_INTERSECT(line1, line2)

+

BOOLEAN

+

Checks whether two line segments intersect each other. The check applies to the line segments themselves, not to the straight lines on which they lie.

+

The following provides an example:

+

SELECT ST_INTERSECT(ST_LINE(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12)]), ST_LINE(ARRAY[ST_POINT(x21, y21), ST_POINT(x22, y22), ST_POINT(x23, y23)])) FROM input

+

ST_WITHIN(point, polygon)

+

BOOLEAN

+

Checks whether one point is contained inside a geometry (polygon or circle).

+

The following provides an example:

+

SELECT ST_WITHIN(ST_POINT(x11, y11), ST_POLYGON(ARRAY[ST_POINT(x21, y21), ST_POINT(x22, y22), ST_POINT(x23, y23), ST_POINT(x21, y21)])) FROM input

+

ST_CONTAINS(polygon_1, polygon_2)

+

BOOLEAN

+

Checks whether the first geometry contains the second geometry.

+

The following provides an example:

+

SELECT ST_CONTAINS(ST_POLYGON(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12), ST_POINT(x11, y11)]), ST_POLYGON(ARRAY[ST_POINT(x21, y21), ST_POINT(x22, y22), ST_POINT(x23, y23), ST_POINT(x21, y21)])) FROM input

+

ST_COVERS(polygon_1, polygon_2)

+

BOOLEAN

+

Checks whether the first geometry covers the second geometry. This function is similar to ST_CONTAINS, except when judging the relationship between a polygon and its boundary line: ST_COVERS returns TRUE, whereas ST_CONTAINS returns FALSE.

+

The following provides an example:

+

SELECT ST_COVERS(ST_POLYGON(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12), ST_POINT(x11, y11)]), ST_POLYGON(ARRAY[ST_POINT(x21, y21), ST_POINT(x22, y22), ST_POINT(x23, y23), ST_POINT(x21, y21)])) FROM input

+

ST_DISJOINT(polygon_1, polygon_2)

+

BOOLEAN

+

Checks whether one polygon is disjoint (not overlapped) with the other polygon.

+

The following provides an example:

+

SELECT ST_DISJOINT(ST_POLYGON(ARRAY[ST_POINT(x11, y11), ST_POINT(x12, y12), ST_POINT(x11, y11)]), ST_POLYGON(ARRAY[ST_POINT(x21, y21), ST_POINT(x22, y22), ST_POINT(x23, y23), ST_POINT(x21, y21)])) FROM input

+
+
+

The World Geodetic System 1984 (WGS84) is used as the reference coordinate system for geographical functions. Due to offsets, the GPS coordinates cannot be directly used in the Baidu Map (compliant with BD09) and the Google Map (compliant with GCJ02). To implement switchover between different geographical coordinate systems, DLI provides a series of functions related to coordinate system conversion as well as functions related to conversion between geographical distances and the unit meter. For details, see Table 4.

Table 4 Functions for geographical coordinate system conversion and distance-unit conversion

Function

+

Return Type

+

Description

+

WGS84_TO_BD09(geometry)

+

Geospatial geometric elements in the Baidu Map coordinate system

+

Converts the geospatial geometric elements in the GPS coordinate system into those in the Baidu Map coordinate system. The following provides an example:

+

WGS84_TO_BD09(ST_CIRCLE(ST_POINT(x, y), r))

+

WGS84_TO_CJ02(geometry)

+

Geospatial geometric elements in the Google Map coordinate system

+

Converts the geospatial geometric elements in the GPS coordinate system into those in the Google Map coordinate system. The following provides an example:

+

WGS84_TO_CJ02(ST_CIRCLE(ST_POINT(x, y), r))

+

BD09_TO_WGS84(geometry)

+

Geospatial geometric elements in the GPS coordinate system

+

Converts the geospatial geometric elements in the Baidu Map coordinate system into those in the GPS coordinate system. The following provides an example:

+

BD09_TO_WGS84(ST_CIRCLE(ST_POINT(x, y), r))

+

BD09_TO_CJ02(geometry)

+

Geospatial geometric elements in the Google Map coordinate system

+

Converts the geospatial geometric elements in the Baidu Map coordinate system into those in the Google Map coordinate system. The following provides an example:

+

BD09_TO_CJ02(ST_CIRCLE(ST_POINT(x, y), r))

+

CJ02_TO_WGS84(geometry)

+

Geospatial geometric elements in the GPS coordinate system

+

Converts the geospatial geometric elements in the Google Map coordinate system into those in the GPS coordinate system. The following provides an example:

+

CJ02_TO_WGS84(ST_CIRCLE(ST_POINT(x, y), r))

+

CJ02_TO_BD09(geometry)

+

Geospatial geometric elements in the Baidu Map coordinate system

+

Converts the geospatial geometric elements in the Google Map coordinate system into those in the Baidu Map coordinate system. The following provides an example:

+

CJ02_TO_BD09(ST_CIRCLE(ST_POINT(x, y), r))

+

DEGREE_TO_METER(distance)

+

DOUBLE

+

Converts a distance value returned by a geographical function to a value in meters. The following example calculates the perimeter of a triangle in meters.

+

DEGREE_TO_METER(ST_PERIMETER(ST_POLYGON(ARRAY[ST_POINT(x1,y1), ST_POINT(x2,y2), ST_POINT(x3,y3), ST_POINT(x1,y1)])))

+

METER_TO_DEGREE(numerical_value)

+

DOUBLE

+

Converts a value in meters to a distance value that can be used by geographical functions. The following example draws a circle that takes a specified geographical point as the center and has a radius of 1 km.

+

ST_CIRCLE(ST_POINT(x,y), METER_TO_DEGREE(1000))

+
+
+
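For instance, to plot GPS points on a Baidu Map layer, a query of the following shape converts the coordinates on the fly (a sketch; the input table and its Longitude/Latitude columns are illustrative):

SELECT WGS84_TO_BD09(ST_POINT(cast(Longitude as DOUBLE), cast(Latitude as DOUBLE))) FROM input;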

DLI also provides window-based SQL geographical aggregation functions specific for scenarios where SQL logic involves windows and aggregation. For details about the functions, see Table 5.

Table 5 Time-related SQL geographical aggregation function table

Function

+

Description

+

Example Value

+

AGG_DISTANCE(point)

+

Distance aggregation function, which is used to calculate the total distance of all adjacent geographical points in the window.

+

SELECT AGG_DISTANCE(ST_POINT(x,y)) FROM input GROUP BY HOP(rowtime, INTERVAL '1' HOUR, INTERVAL '1' DAY)

+

AVG_SPEED(point)

+

Average speed aggregation function, which is used to calculate the average speed of moving tracks formed by all geographical points in a window. The average speed is in the unit of m/s.

+

SELECT AVG_SPEED(ST_POINT(x,y)) FROM input GROUP BY TUMBLE(proctime, INTERVAL '1' DAY)

+
+
+
+

Precautions

None

+
+

Example

Example of yaw detection:

+
INSERT INTO yaw_warning
+SELECT "The car is yawing"
+FROM driver_behavior
+WHERE NOT ST_WITHIN(ST_POINT(cast(Longitude as DOUBLE), cast(Latitude as DOUBLE)), ST_BUFFER(ST_LINE(ARRAY[ST_POINT(34.585555,105.725221),ST_POINT(34.586729,105.735974),ST_POINT(34.586492,105.740538),ST_POINT(34.586388,105.741651),ST_POINT(34.586135,105.748712),ST_POINT(34.588691,105.74997)]),0.001));
+
+ +
+
+

IP Functions

Currently, only IPv4 addresses are supported.

+
Table 6 IP functions

Function

+

Return Type

+

Description

+

IP_TO_COUNTRY

+

STRING

+

Obtains the name of the country where the IP address is located.

+

IP_TO_PROVINCE

+

STRING

+

Obtains the province where the IP address is located.

+

Usage:

+
  • IP_TO_PROVINCE(STRING ip): Determines the province where the IP address is located and returns the province name.
  • IP_TO_PROVINCE(STRING ip, STRING lang): Determines the province where the IP address is located and returns the province name in the specified language.
    NOTE:
    • If the province where the IP address is located cannot be obtained through IP address parsing, the country where the IP address is located is returned. If the IP address cannot be parsed, Unknown is returned.
    • The function returns the short name of the province.
    +
    +
+

IP_TO_CITY

+

STRING

+

Obtains the name of the city where the IP address is located.

+
NOTE:

If the city where the IP address is located cannot be obtained through IP address parsing, the province or the country where the IP address is located is returned. If the IP address cannot be parsed, Unknown is returned.

+
+

IP_TO_CITY_GEO

+

STRING

+

Obtains the longitude and latitude of the city where the IP address is located. The return value is in the following format: Latitude, Longitude.

+

Usage:

+

IP_TO_CITY_GEO(STRING ip): Returns the longitude and latitude of the city where the IP address is located.

+
+
+
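A combined query of the following shape illustrates typical usage of these functions (a sketch; the input table and its ip column are illustrative):

SELECT
  IP_TO_COUNTRY(ip),
  IP_TO_PROVINCE(ip),
  IP_TO_CITY(ip),
  IP_TO_CITY_GEO(ip)
FROM input;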

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0216.html b/docs/dli/sqlreference/dli_08_0216.html new file mode 100644 index 00000000..6d5966b1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0216.html @@ -0,0 +1,76 @@ + + +

Real-Time Clustering

+

Clustering algorithms are unsupervised algorithms. K-Means, a classic clustering algorithm, partitions data points into clusters by computing the distances between them, given a predefined cluster quantity. For offline static datasets, we can determine the cluster quantity based on domain knowledge and run K-Means to achieve a good clustering result. However, online real-time streaming data keeps changing and evolving, and the cluster quantity is likely to change. To address clustering of online real-time streaming data, DLI provides a low-latency online clustering algorithm that does not require a predefined cluster quantity.

+

The algorithm works as follows: Given a distance function, if the distance between two data points is less than a threshold, both data points are assigned to the same cluster. If the distances between a data point and the centers of several clusters are all less than the threshold, the related clusters are merged. When data in a data stream arrives, the algorithm computes the distance between each data point and the centers of all existing clusters to determine whether the data point is assigned to an existing cluster or to a new one.

+

Syntax

CENTROID(ARRAY[field_names], distance_threshold): Compute the centroid of the cluster where the current data point is assigned.
+CLUSTER_CENTROIDS(ARRAY[field_names], distance_threshold): Compute all centroids after the data point is assigned.
+ALL_POINTS_OF_CLUSTER(ARRAY[field_names], distance_threshold): Compute all data points in the cluster where the current data point is assigned.
+ALL_CLUSTERS_POINTS(ARRAY[field_names], distance_threshold): Compute all data points in each cluster after the current data point is assigned.
+
+ +
+
  • Clustering algorithms can be applied in unbounded streams.
+
+
+

Parameter Description

+
Table 1 Parameter Description

Parameter

+

Mandatory

+

Description

+

field_names

+

Yes

+

Name of the field where the data is located in the data stream. Multiple fields are separated by commas (,). For example, ARRAY[a, b, c].

+

distance_threshold

+

Yes

+

Distance threshold. When the distance between two data points is less than the threshold, both data points are placed in the same cluster.

+
+
+
+

Example

Use four functions to compute information related to clusters over windows.

+
SELECT 
+  CENTROID(ARRAY[c,e], 1.0) OVER (ORDER BY proctime RANGE UNBOUNDED PRECEDING) AS centroid,
+  CLUSTER_CENTROIDS(ARRAY[c,e], 1.0) OVER (ORDER BY proctime RANGE UNBOUNDED PRECEDING) AS centroids
+FROM MyTable
+
+SELECT 
+  CENTROID(ARRAY[c,e], 1.0) OVER (ORDER BY proctime RANGE BETWEEN INTERVAL '60' MINUTE PRECEDING AND CURRENT ROW) AS centroidCE, 
+  ALL_POINTS_OF_CLUSTER(ARRAY[c,e], 1.0) OVER (ORDER BY proctime RANGE BETWEEN INTERVAL '60' MINUTE PRECEDING AND CURRENT ROW) AS itemList,
+  ALL_CLUSTERS_POINTS(ARRAY[c,e], 1.0) OVER (ORDER BY proctime RANGE  BETWEEN INTERVAL '60' MINUTE PRECEDING AND CURRENT ROW) AS listoflistofpoints
+FROM MyTable
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0217.html b/docs/dli/sqlreference/dli_08_0217.html new file mode 100644 index 00000000..eb462306 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0217.html @@ -0,0 +1,45 @@ + + +

Clearing Data

+

Function

This statement is used to delete data from a DLI or OBS table.

+
+

Syntax

TRUNCATE TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)];
+
+ +
+
+

Keyword

+
Table 1 Parameter

Parameter

+

Description

+

tablename

+

Name of the target DLI or OBS table on which the TRUNCATE statement runs.

+

partcol1

+

Partition column name that identifies the partition of the DLI or OBS table whose data is to be deleted.

+
+
+
+

Precautions

Only data in the DLI or OBS table can be deleted.

+
+

Example

truncate table test PARTITION (class = 'test');
+
+ +
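To clear an entire table rather than a single partition, omit the PARTITION clause (a sketch, assuming the same test table):

truncate table test;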
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0218.html b/docs/dli/sqlreference/dli_08_0218.html new file mode 100644 index 00000000..5f003702 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0218.html @@ -0,0 +1,222 @@ + + +

Window

+

GROUP WINDOW

Description

+

Group Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:

+
  • time_attr can be processing-time or event-time.
    • event-time: Specify the data type as bigint or timestamp.
    • processing-time: No need to specify the type.
    +
  • interval specifies the window period.
+
+
  • Group window functions +
    Table 1 Group window functions

    Function Name

    +

    Description

    +

    TUMBLE(time_attr, interval)

    +

    Indicates the tumble window.

    +

    HOP(time_attr, interval, interval)

    +

    Indicates the extended tumble window (similar to the datastream sliding window). You can set the output triggering cycle and window period.

    +

    SESSION(time_attr, interval)

    +

    Indicates the session window. A session window closes if no data arrives within the duration specified by interval.

    +
    +
    +
  • Window functions +
    Table 2 Window functions

    Function Name

    +

    Description

    +

    TUMBLE_START(time_attr, interval)

    +

    Returns the start time of the tumble window. The return value is in the UTC time zone.

    +

    TUMBLE_END(time_attr, interval)

    +

    Returns the end time of the tumble window. The return value is in the UTC time zone.

    +

    HOP_START(time_attr, interval, interval)

    +

    Returns the start time of the extended tumble window. The return value is in the UTC time zone.

    +

    HOP_END(time_attr, interval, interval)

    +

    Returns the end time of the extended tumble window. The return value is in the UTC time zone.

    +

    SESSION_START(time_attr, interval)

    +

    Returns the start time of the session window. The return value is in the UTC time zone.

    +

    SESSION_END(time_attr, interval)

    +

    Returns the end time of the session window. The return value is in the UTC time zone.

    +
    +
    +
+

Example

+
//Calculate the SUM every day (event time).
+insert into temp SELECT name,
+    TUMBLE_START(ts, INTERVAL '1' DAY) as wStart,
+    SUM(amount)
+    FROM Orders
+    GROUP BY TUMBLE(ts, INTERVAL '1' DAY), name;
+
+//Calculate the SUM every day (processing time). 
+insert into temp SELECT name, 
+    SUM(amount) 
+    FROM Orders 
+    GROUP BY TUMBLE(proctime, INTERVAL '1' DAY), name;
+
+//Calculate the SUM over the recent 24 hours every hour (event time).
+insert into temp SELECT product, 
+    SUM(amount) 
+    FROM Orders 
+    GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '1' DAY), product;
+
+//Calculate the SUM of each session with a 12-hour inactivity gap (event time).
+insert into temp SELECT name, 
+    SESSION_START(ts, INTERVAL '12' HOUR) AS sStart,
+    SESSION_END(ts, INTERVAL '12' HOUR) AS sEnd,
+    SUM(amount)
+    FROM Orders
+    GROUP BY SESSION(ts, INTERVAL '12' HOUR), name;
+
+ +
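Similarly, HOP_START and HOP_END expose the bounds of the extended tumble window (a sketch based on the Orders table used above):

//Output the bounds of a 1-day window triggered every hour (event time).
insert into temp SELECT product,
    HOP_START(ts, INTERVAL '1' HOUR, INTERVAL '1' DAY) as wStart,
    HOP_END(ts, INTERVAL '1' HOUR, INTERVAL '1' DAY) as wEnd,
    SUM(amount)
    FROM Orders
    GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '1' DAY), product;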
+

+
+

OVER WINDOW

The difference between Over Window and Group Window is that Over Window generates one output record for each input row.

+

Syntax

+
OVER (
+  [PARTITION BY partition_name]
+  ORDER BY proctime|rowtime (ROWS number PRECEDING) | (RANGE (BETWEEN INTERVAL '1' SECOND PRECEDING AND CURRENT ROW | UNBOUNDED PRECEDING))
+)
+
+ +
+

Description

Table 3 Parameter description

Parameter

+

Parameter Description

+

PARTITION BY

+

Specifies the grouping key. Each group performs its calculation separately.

+

ORDER BY

+

Specifies the processing time or event time as the timestamp used to order the data.

+

ROWS

+

Indicates the count window.

+

RANGE

+

Indicates the time window.

+
+
+

Precautions

+
  • In the same SELECT statement, windows defined by aggregate functions must be the same.
  • Currently, Over Window only supports forward calculation (preceding).
  • The value of ORDER BY must be specified as processing time or event time.
  • Constants do not support aggregation, such as sum(2).
+

Example

+
//Calculate the count and total amount from the start of the job to now (in proctime).
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY proctime RANGE UNBOUNDED preceding) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY proctime RANGE UNBOUNDED preceding) as cnt2
+    FROM Orders;
+  
+//Calculate the count and total number of the recent four records (in proctime).
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY proctime ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY proctime ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) as cnt2
+    FROM Orders;
+
+//Calculate the count and total amount over the last 60 seconds (in eventtime). Process the events based on event time, which is the timeattr field in Orders.
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY timeattr RANGE BETWEEN INTERVAL '60' SECOND PRECEDING AND CURRENT ROW) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY timeattr RANGE BETWEEN INTERVAL '60' SECOND PRECEDING AND CURRENT ROW) as cnt2
+    FROM Orders;
+
+ +
+

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0219.html b/docs/dli/sqlreference/dli_08_0219.html new file mode 100644 index 00000000..fbcfbfac --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0219.html @@ -0,0 +1,269 @@ + + +

SQL Syntax Overview of Batch Jobs

+

This section describes the Spark SQL syntax list provided by DLI. For details about the parameters and examples, see the syntax description.

Table 1 SQL syntax of batch jobs

Classification

+

Function

+

Database-related Syntax

+

Creating a Database

+

Deleting a Database

+

Viewing a Specified Database

+

Viewing All Databases

+

Syntax for Creating an OBS Table

+

Creating an OBS Table Using the Datasource Syntax

+

Creating an OBS Table Using the Hive Syntax

+

Syntax for Creating a DLI Table

+

Creating a DLI Table Using the Datasource Syntax

+

Creating a DLI Table Using the Hive Syntax

+

Syntax for Deleting a Table

+

Deleting a Table

+

Syntax for Viewing a Table

+

Viewing All Tables

+

Viewing Table Creation Statements

+

Viewing Table Properties

+

Viewing All Columns in a Specified Table

+

Viewing All Partitions in a Specified Table

+

Viewing Table Statistics

+

Syntax for Modifying a Table

+

Adding a Column

+

Syntax for Partitioning a Table

+

Adding a Partition (Only OBS Tables Supported)

+

Renaming a Partition

+

Deleting a Partition

+

Altering the Partition Location of a Table (Only OBS Tables Supported)

+

Updating Partitioned Table Data (Only OBS Tables Supported)

+

Syntax for Importing Data

+

Importing Data

+

Syntax for Inserting Data

+

Inserting Data

+

Syntax for Clearing Data

+

Clearing Data

+

Syntax for Exporting Query Results

+

Exporting Query Result

+

Syntax for Datasource Connection to an HBase Table

+

Creating a Table and Associating It with HBase

+

Inserting Data to an HBase Table

+

Querying an HBase Table

+

Syntax for Datasource Connection to an OpenTSDB Table

+

Creating a Table and Associating It with OpenTSDB

+

Inserting Data to an OpenTSDB Table

+

Querying an OpenTSDB Table

+

Syntax for Datasource Connection to a DWS Table

+

Creating a Table and Associating It with DWS

+

Inserting Data to a DWS Table

+

Querying a DWS Table

+

Syntax for Datasource Connection to an RDS Table

+

Creating a Table and Associating It with RDS

+

Inserting Data to an RDS Table

+

Querying an RDS Table

+

Syntax for Datasource Connection to a CSS Table

+

Creating a Table and Associating It with CSS

+

Inserting Data to a CSS Table

+

Querying a CSS Table

+

Syntax for Datasource Connection to a DCS Table

+

+

+

Creating a Table and Associating It with DCS

+

Inserting Data to a DCS Table

+

Querying a DCS Table

+

Syntax for Datasource Connection to a DDS Table

+

+

+

Creating a Table and Associating It with DDS

+

Inserting Data to a DDS Table

+

Querying a DDS Table

+

View-related Syntax

+

Creating a View

+

Deleting a View

+

Syntax for Viewing the Execution Plan

+

Viewing the Execution Plan

+

Syntax Related to Data Permissions

+

Creating a Role

+

Deleting a Role

+

Binding a Role

+

Unbinding a Role

+

Displaying a Role

+

Granting a Permission

+

Revoking a Permission

+

Displaying the Granted Permissions

+

Displaying the Binding Relationship Between All Roles and Users

+

UDF-related Syntax

+

+

+

+

Creating a Function

+

Deleting a Function

+

Displaying Function Details

+

Displaying All Functions

+

Multiversion-related Syntax

+

Enabling Multiversion Backup When Creating an OBS Table

+

Enabling or Disabling Multiversion Backup When Modifying a Table

+

Setting the Retention Period for Multiversion Backup Data

+

Viewing Multiversion Backup Data

+

Restoring Multiversion Backup Data

+

Configuring the Trash Bin for Expired Multiversion Data

+

Deleting Multiversion Backup Data

+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0220.html b/docs/dli/sqlreference/dli_08_0220.html new file mode 100644 index 00000000..81ad9292 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0220.html @@ -0,0 +1,20 @@ + + +

Creating a Datasource Connection with an OpenTSDB Table

+

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0221.html b/docs/dli/sqlreference/dli_08_0221.html new file mode 100644 index 00000000..e4c5a683 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0221.html @@ -0,0 +1,86 @@ + + +

Spark SQL Syntax Reference

+

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0223.html b/docs/dli/sqlreference/dli_08_0223.html new file mode 100644 index 00000000..594aec57 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0223.html @@ -0,0 +1,18 @@ + + +

Creating an OBS Table

+

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0224.html b/docs/dli/sqlreference/dli_08_0224.html new file mode 100644 index 00000000..fafa74dc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0224.html @@ -0,0 +1,18 @@ + + +

Creating a DLI Table

+

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0225.html b/docs/dli/sqlreference/dli_08_0225.html new file mode 100644 index 00000000..e8a7b6c8 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0225.html @@ -0,0 +1,19 @@ + + +

Creating a Datasource Connection with a DCS Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0226.html b/docs/dli/sqlreference/dli_08_0226.html new file mode 100644 index 00000000..6c40086d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0226.html @@ -0,0 +1,180 @@ + + +

Creating a DLI Table and Associating It with DCS

+

Function

This statement is used to create a DLI table and associate it with an existing DCS key.

+
+

Prerequisites

Before creating a DLI table and associating it with DCS, you need to create a datasource connection and bind it to a queue. For details about operations on the management console, see

+
+

Syntax

  • Specified key
    CREATE TABLE [IF NOT EXISTS] TABLE_NAME(
    +    FIELDNAME1 FIELDTYPE1,
    +    FIELDNAME2 FIELDTYPE2)
    +  USING REDIS OPTIONS (
    +  'host'='xx',
    +  'port'='xx',
    +  'passwdauth' = 'xxx',
    +  'encryption' = 'true',
    +  'table'='namespace_in_redis:key_in_redis',
    +  'key.column'= 'FIELDNAME1'
    +);
    +
    + +
    +
  • Wildcard key
    CREATE TABLE [IF NOT EXISTS] TABLE_NAME(
    +    FIELDNAME1 FIELDTYPE1,
    +    FIELDNAME2 FIELDTYPE2)
    +  USING REDIS OPTIONS (
    +  'host'='xx',
    +  'port'='xx',
    +  'passwdauth' = 'xxx',
    +  'encryption' = 'true',
    +  'keys.pattern'='key*:*',
    +  'key.column'= 'FIELDNAME1'
    +);
    +
    + +
    +
+
+

Keyword

+
Table 1 CREATE TABLE parameter description

Parameter

+

Description

+

host

+

To connect to DCS, you need to create a datasource connection first.

+

After creating an enhanced datasource connection, use the connection address provided by DCS. If there are multiple connection addresses, select one of them.

+
NOTE:

Currently, only enhanced datasource is supported.

+
+

port

+

DCS connection port, for example, 6379.

+

password

+

Password entered during DCS cluster creation. You do not need to set this parameter when accessing a non-secure Redis cluster.

+

passwdauth

+

Datasource password authentication name. For details about how to create datasource authentication, see Datasource Authentication in the Data Lake Insight User Guide.

+

encryption

+

Set this parameter to true when datasource password authentication is used.

+

table

+

The key or hash key in Redis.

+
  • This parameter is mandatory when Redis data is inserted.
  • Either this parameter or the keys.pattern parameter must be specified when Redis data is queried.
+

keys.pattern

+

Use a regular expression to match multiple keys or hash keys. This parameter is used only for query. Either this parameter or table is used to query Redis data.

+

key.column

+

(Optional) Specifies a field in the schema as the key ID in Redis. This parameter is used together with the table parameter when data is inserted.

+

partitions.number

+

Number of concurrent tasks during data reading.

+

scan.count

+

Number of data records read in each batch. The default value is 100. If the CPU usage of the Redis cluster still needs to be improved during data reading, increase the value of this parameter.

+

iterator.grouping.size

+

Number of data records inserted in each batch. The default value is 100. If the CPU usage of the Redis cluster still needs to be improved during the insertion, increase the value of this parameter.

+

timeout

+

Timeout interval for connecting to Redis, in milliseconds. The default value is 2000 (2 seconds).

+
+
+

When connecting to DCS, complex data types such as Array, Struct, and Map are not supported.

+

The following methods can be used to process complex data:

+
  • Place the next-level fields at the same level in the schema (that is, flatten the nested structure).
  • Write and read data in binary mode, and encode and decode it using user-defined functions.
+
+
+

Example

  • Specified table
+
create table test_redis(name string, age int) using redis options(
+  'host' = '192.168.4.199',
+  'port' = '6379',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true',
+  'table' = 'person'
+);
+
+ +
+
  • Wildcard table name
+
create table test_redis_keys_patten(id string, name string, age int) using redis options(
+  'host' = '192.168.4.199',
+  'port' = '6379',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true',
+  'keys.pattern' = 'p*:*',
+  'key.column' = 'id'
+);
+
+ +
+
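The optional tuning parameters in Table 1 can be combined with either form. A sketch with illustrative values (not tuning recommendations):

create table test_redis_tuned(name string, age int) using redis options(
  'host' = '192.168.4.199',
  'port' = '6379',
  'passwdauth' = 'xxx',
  'encryption' = 'true',
  'table' = 'person',
  'partitions.number' = '10',
  'scan.count' = '1000',
  'timeout' = '5000'
);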
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0227.html b/docs/dli/sqlreference/dli_08_0227.html new file mode 100644 index 00000000..57c03bac --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0227.html @@ -0,0 +1,128 @@ + + +

Inserting Data to a DCS Table

+

Function

This statement is used to insert data from a DLI table into the associated DCS key.

+
+

Syntax

  • Insert the SELECT query result into a table.
    INSERT INTO DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
    +  [LIMIT num]
    +  [GROUP BY field]
    +  [ORDER BY field] ...;
    +
    + +
    +
  • Insert a data record into a table.
    INSERT INTO DLI_TABLE
    +  VALUES values_row [, values_row ...];
    +
    + +
    +
+
+

Keywords

For details about the SELECT keywords, see Basic SELECT Statements.

+
+

Parameter description

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

DLI_TABLE

+

Name of the DLI table for which a datasource connection has been created.

+

DLI_TEST

+

Indicates the table that contains the data to be queried.

+

field1,field2..., field

+

Column values in the DLI_TEST table must match the column values and types in the DLI_TABLE table.

+

where_condition

+

Query condition.

+

num

+

Limits the number of query results. The num parameter supports only the INT type.

+

values_row

+

Value to be inserted into a table. Use commas (,) to separate columns.

+
+
+
+

Precautions

  • The target DLI table must exist.
  • When creating a DLI table, you need to specify the schema information.
  • If key.column is specified during table creation, the value of the specified field is used as a part of the Redis key name. The following is an example:
    create table test_redis(name string, age int) using redis options(
    +  'host' = '192.168.4.199',
    +  'port' = '6379',
    +  'password' = '******',
    +  'table' = 'test_with_key_column',
    +  'key.column' = 'name'
    +);
    +insert into test_redis values("James", 35), ("Michael", 22);
    +
    + +
    +

    The Redis database then contains two tables, named test_with_key_column:James and test_with_key_column:Michael respectively.

    +

    +

    +
  • If key.column is not specified during table creation, the key name in Redis uses the UUID. The following is an example:
    create table test_redis(name string, age int) using redis options(
    +  'host' = '192.168.7.238',
    +  'port' = '6379',
    +  'password' = '******',
    +  'table' = 'test_without_key_column'
    +);
    +insert into test_redis values("James", 35), ("Michael", 22);
    +
    + +
    +

    In Redis, there are two tables named test_without_key_column:uuid, where uuid is generated randomly for each row.

    +

    +

    +
+
+

Example

INSERT INTO test_redis
+  VALUES("James", 35), ("Michael", 22);
+
+ +
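The SELECT-based form works the same way. A sketch, assuming a DLI table dli_person with matching columns (the table name is illustrative):

INSERT INTO test_redis
  SELECT name, age
  FROM dli_person;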
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0228.html b/docs/dli/sqlreference/dli_08_0228.html new file mode 100644 index 00000000..1dbb8bc1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0228.html @@ -0,0 +1,24 @@ + + +

Querying the DCS Table

+

This statement is used to query data in a DCS table.

+

Syntax

SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Example

Query data in the test_redis table.

+
SELECT * FROM test_redis limit 100;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0229.html b/docs/dli/sqlreference/dli_08_0229.html new file mode 100644 index 00000000..327199ed --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0229.html @@ -0,0 +1,19 @@ + + +

Creating a Datasource Connection with a DDS Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0230.html b/docs/dli/sqlreference/dli_08_0230.html new file mode 100644 index 00000000..4bcdb6e6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0230.html @@ -0,0 +1,103 @@ + + +

Creating a DLI Table and Associating It with DDS

+

Function

This statement is used to create a DLI table and associate it with an existing DDS collection.

+
+

Prerequisites

Before creating a DLI table and associating it with DDS, you need to create a datasource connection and bind it to a queue. For details about operations on the management console, see

+
+

Syntax

CREATE TABLE [IF NOT EXISTS] TABLE_NAME(
+    FIELDNAME1 FIELDTYPE1,
+    FIELDNAME2 FIELDTYPE2)
+  USING MONGO OPTIONS (
+  'url'='IP:PORT[,IP:PORT]/[DATABASE][.COLLECTION][AUTH_PROPERTIES]',
+  'database'='xx',
+  'collection'='xx',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true'
+);
+
+ +
+
+

Keyword

+
Table 1 CREATE TABLE parameter description

Parameter

+

Description

+

url

+

To obtain the DDS IP address, you need to create a datasource connection first.

+

After creating an enhanced datasource connection, use the random connection address provided by DDS. The format is as follows:

+

"IP:PORT[,IP:PORT]/[DATABASE][.COLLECTION][AUTH_PROPERTIES]"

+

Example: "192.168.4.62:8635,192.168.5.134:8635/test?authSource=admin"

+

database

+

DDS database name. If a database name is also specified in the URL, the database name in the URL does not take effect.

+

collection

+

Collection name in the DDS. If a collection is also specified in the URL, the collection in the URL does not take effect.

+

user

+

(Deprecated) Username for accessing the DDS cluster.

+

password

+

(Deprecated) Password for accessing the DDS cluster.

+

passwdauth

+

Datasource password authentication name. For details about how to create datasource authentication, see Datasource Authentication in the Data Lake Insight User Guide.

+

encryption

+

Set this parameter to true when datasource password authentication is used.

+
+
+

If a collection already exists in DDS, you do not need to specify schema information when creating a table. DLI automatically generates schema information based on data in the collection.

+
+
+

Example

create table 1_datasource_mongo.test_mongo(id string, name string, age int) using mongo options(
+  'url' = '192.168.4.62:8635,192.168.5.134:8635/test?authSource=admin',
+  'database' = 'test',
+  'collection' = 'test',
+  'passwdauth' = 'xxx',
+  'encryption' = 'true');
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0231.html b/docs/dli/sqlreference/dli_08_0231.html new file mode 100644 index 00000000..3f66569e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0231.html @@ -0,0 +1,116 @@ + + +

Inserting Data to the DDS Table

+

Function

This statement is used to insert data from a DLI table into the associated DDS collection.

+
+

Syntax

  • Insert the SELECT query result into a table.
    INSERT INTO DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
    +  [LIMIT num]
    +  [GROUP BY field]
    +  [ORDER BY field] ...;
    +
    + +
    +
  • Insert a data record into a table.
    INSERT INTO DLI_TABLE
    +  VALUES values_row [, values_row ...];
    +
    + +
    +
+
  • Overwriting the inserted data
    INSERT OVERWRITE TABLE DLI_TABLE
    +  SELECT field1,field2...
    +  [FROM DLI_TEST]
    +  [WHERE where_condition]
    +  [LIMIT num]
    +  [GROUP BY field]
    +  [ORDER BY field] ...;
    +
    + +
    +
+
+

Keywords

For details about the SELECT keywords, see Basic SELECT Statements.

+
+

Parameter description

+
Table 1 Parameter description

Parameter

+

Description

+

DLI_TABLE

+

Name of the DLI table for which a datasource connection has been created.

+

DLI_TEST

+

Indicates the table that contains the data to be queried.

+

field1,field2..., field

+

Column values in the DLI_TEST table must match the column values and types in the DLI_TABLE table.

+

where_condition

+

Query condition.

+

num

+

Limits the number of query results. The num parameter supports only the INT type.

+

values_row

+

Value to be inserted into a table. Use commas (,) to separate columns.

+
+
+
+

Precautions

The target DLI table must exist.

+
+

Example

  • Query data in the user table and insert the data into the test table.
    INSERT INTO test
    +  SELECT ATTR_EXPR
    +  FROM user
    +  WHERE user_name='cyz'
    +  LIMIT 3
    +  GROUP BY user_age
    +
    + +
    +
  • Insert data 1 into the test table.
    INSERT INTO test 
    +  VALUES (1);
    +
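A sketch of the overwrite form, which replaces the existing data in the target table (reusing the test and user tables above):

INSERT OVERWRITE TABLE test
  SELECT ATTR_EXPR
  FROM user
  WHERE user_name='cyz';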
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0232.html b/docs/dli/sqlreference/dli_08_0232.html new file mode 100644 index 00000000..063a7b29 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0232.html @@ -0,0 +1,26 @@ + + +

Querying the DDS Table

+

This statement is used to query data in a DDS table.

+

Syntax

SELECT * FROM table_name LIMIT number;
+
+ +
+
+

Keyword

LIMIT is used to limit the query results. Only INT type is supported by the number parameter.

+
+

Precautions

If no schema information is specified during table creation, the query result contains an _id field, which stores the _id value of each document.

+
+

Example

Query data in the test_mongo table.

+
SELECT * FROM test_mongo limit 100;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0233.html b/docs/dli/sqlreference/dli_08_0233.html new file mode 100644 index 00000000..cc2a0b08 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0233.html @@ -0,0 +1,52 @@ + + +

Flink SQL Syntax

+

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0234.html b/docs/dli/sqlreference/dli_08_0234.html new file mode 100644 index 00000000..1da0b7e3 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0234.html @@ -0,0 +1,26 @@ + + +

Creating a Source Stream

+

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0235.html b/docs/dli/sqlreference/dli_08_0235.html new file mode 100644 index 00000000..4d73ef49 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0235.html @@ -0,0 +1,387 @@ + + +

DIS Source Stream

+

Function

Create a source stream to read data from DIS. DIS ingests user data, and the Flink job reads data from the DIS stream as input for the job. Flink jobs can quickly consume data from producers through DIS sources for continuous processing. They are applicable to scenarios where data outside the cloud service is imported into the cloud service for filtering, real-time analysis, monitoring reports, and dumping.

+

DIS addresses the challenge of transmitting data outside cloud services to cloud services. DIS builds data intake streams for custom applications capable of processing or analyzing streaming data. DIS continuously captures, transmits, and stores terabytes of data from hundreds of thousands of sources every hour, such as logs, Internet of Things (IoT) data, social media feeds, website clickstreams, and location-tracking events. For more information about DIS, see the Data Ingestion Service User Guide.

+
+

Syntax

CREATE SOURCE STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
+  WITH (
+    type = "dis",
+    region = "",
+    channel = "",
+    partition_count = "",
+    encode = "",
+    field_delimiter = "",
+    offset= "");
+
+

Keyword

+
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Data source type. dis indicates that the data source is DIS.

+

region

+

Yes

+

Region where the DIS stream that stores the data is located.

+

ak

+

No

+

Access Key ID (AK).

+

sk

+

No

+

Specifies the secret access key used together with the ID of the access key.

+

channel

+

Yes

+

Name of the DIS stream where data is located.

+

partition_count

+

No

+

Number of partitions of the DIS stream where data is located. This parameter and partition_range cannot be configured at the same time. If this parameter is not specified, data of all partitions is read by default.

+

partition_range

+

No

+

Range of partitions of a DIS stream, data in which is ingested by the DLI job. This parameter and partition_count cannot be configured at the same time. If this parameter is not specified, data of all partitions is read by default.

+

If you set this parameter to [0:2], data will be read from partitions 1, 2, and 3.

+

encode

+

Yes

+

Data encoding format. The value can be csv, json, xml, email, blob, or user_defined.

+
  • field_delimiter must be specified if this parameter is set to csv.
  • json_config must be specified if this parameter is set to json.
  • xml_config must be specified if this parameter is set to xml.
  • email_key must be specified if this parameter is set to email.
  • If this parameter is set to blob, the received data is not parsed, only one stream attribute exists, and the data format is ARRAY[TINYINT].
  • encode_class_name and encode_class_parameter must be specified if this parameter is set to user_defined.
+

field_delimiter

+

No

+

Attribute delimiter. This parameter is mandatory only when the CSV encoding format is used. You can set this parameter, for example, to a comma (,).

+

quote

+

No

+

Quoted symbol in a data format. The attribute delimiters between two quoted symbols are treated as common characters.

+
  • If double quotation marks are used as the quoted symbol, set this parameter to \u005c\u0022 for character conversion.
  • If a single quotation mark is used as the quoted symbol, set this parameter to a single quotation mark (').
+
NOTE:
  • Currently, only the CSV format is supported.
  • After this parameter is specified, ensure that each field does not contain quoted symbols or contains an even number of quoted symbols. Otherwise, parsing will fail.
+
+

json_config

+

No

+

When the encoding format is JSON, you need to use this parameter to specify the mapping between JSON fields and stream definition fields. The format is field1=data_json.field1; field2=data_json.field2; field3=$, where field3=$ indicates that the content of field3 is the entire JSON string.

+

xml_config

+

No

+

If encode is set to xml, you need to set this parameter to specify the mapping between the xml field and the stream definition field. An example of the format is as follows: field1=data_xml.field1; field2=data_xml.field2.

+

email_key

+

No

+

If encode is set to email, you need to set the parameter to specify the information to be extracted. You need to list the key values that correspond to stream definition fields. Multiple key values are separated by commas (,), for example, "Message-ID, Date, Subject, body". There is no keyword in the email body and DLI specifies "body" as the keyword.

+

encode_class_name

+

No

+

If encode is set to user_defined, you need to set this parameter to the name of the user-defined decoding class (including the complete package path). The class must inherit the DeserializationSchema class.

+

encode_class_parameter

+

No

+

If encode is set to user_defined, you can set this parameter to specify the input parameter of the user-defined decoding class. Only one parameter of the string type is supported.

+

offset

+

No

+
  • If data is imported to the DIS stream after the job is started, this parameter will become invalid.
  • If the job is started after data is imported to the DIS stream, you can set the parameter as required.

    For example, if offset is set to 100, DLI starts from the 100th data record in DIS.

    +
+

start_time

+

No

+

Start time for reading DIS data.

+
  • If this parameter is specified, DLI reads data starting from the specified time. The format is yyyy-MM-dd HH:mm:ss.
  • If neither start_time nor offset is specified, DLI reads the latest data.
  • If start_time is not specified but offset is specified, DLI reads data from the data record specified by offset.
+

enable_checkpoint

+

No

+

Whether to enable the checkpoint function. The value can be true (enabled) or false (disabled). The default value is false.

+

checkpoint_app_name

+

No

+

ID of a DIS consumer. If a DIS stream is consumed by different jobs, you need to configure the consumer ID for each job to avoid checkpoint confusion.

+

checkpoint_interval

+

No

+

Interval of checkpoint operations on the DIS source operator. The value is in the unit of seconds. The default value is 60.

+
+
+
+

Precautions

When creating a source stream, you can specify a time model for subsequent calculation. Currently, DLI supports two time models: Processing Time and Event Time. For details about the syntax, see Configuring Time Models.

+
+

Example

  • In CSV encoding format, DLI reads data from the DIS stream, where records are encoded in CSV format and fields are separated by commas (,).
    CREATE SOURCE STREAM car_infos (
    +  car_id STRING,
    +  car_owner STRING,
    +  car_age INT,
    +  average_speed INT,
    +  total_miles INT,
    +  car_timestamp LONG
    +)
    +  WITH (
    +    type = "dis",
    +    region = "xxx",
    +    channel = "dliinput",
    +    encode = "csv",
    +    field_delimiter = ","
    +);
    +
    + +
    +
  • In JSON encoding format, DLI reads data from the DIS stream, where records are encoded in JSON format. For example, {"car":{"car_id":"ZJA710XC", "car_owner":"coco", "car_age":5, "average_speed":80, "total_miles":15000, "car_timestamp":1526438880}}
    CREATE SOURCE STREAM car_infos (
    +  car_id STRING,
    +  car_owner STRING,
    +  car_age INT,
    +  average_speed INT,
    +  total_miles INT,
    +  car_timestamp LONG
    +)
    +  WITH (
    +    type = "dis",
    +    region = "xxx",
    +    channel = "dliinput",
    +    encode = "json",
    +    json_config = "car_id=car.car_id;car_owner =car.car_owner;car_age=car.car_age;average_speed =car.average_speed ;total_miles=car.total_miles;"
    +);
    +
    + +
    +
  • In XML encoding format, DLI reads data from the DIS stream, where records are encoded in XML format.
    CREATE SOURCE STREAM person_infos (
    +    pid BIGINT,
    +    pname STRING,
    +    page int,
    +    plocation STRING,
    +    pbir DATE,
    +    phealthy BOOLEAN,
    +    pgrade ARRAY[STRING]
    +)
    +  WITH (
    +    type = "dis",
    +    region = "xxx",
    +    channel = "dis-dli-input",
    +    encode = "xml",
    +    field_delimiter = ",",
    +    xml_config = "pid=person.pid;page=person.page;pname=person.pname;plocation=person.plocation;pbir=person.pbir;pgrade=person.pgrade;phealthy=person.phealthy"
    +);
    +
    + +
    +
    An example of XML data is as follows:
    <?xml version="1.0" encoding="utf-8"?>
    +
    +<root>
    +  <person>
    +    <pid>362305199010025042</pid>
    +    <pname>xiaoming</pname>
    +    <page>28</page>
    +    <plocation>xxx</plocation>
    +    <pbir>1990-10-02</pbir>
    +    <phealthy>true</phealthy>
    +    <pgrade>[A,B,C]</pgrade>
    +  </person>
    +</root>
    +
    + +
    +
    +
  • In EMAIL encoding format, DLI reads data from the DIS stream, where each record is a complete email.
    CREATE SOURCE STREAM email_infos (  
    +  Event_ID String,  
    +  Event_Time Date,  
    +  Subject String,  
    +  From_Email String,  
    +  To_EMAIL String,  
    +  CC_EMAIL Array[String],  
    +  BCC_EMAIL String,  
    +  MessageBody String,  
    +  Mime_Version String,  
    +  Content_Type String,  
    +  charset String,  
    +  Content_Transfer_Encoding String
    +)
    +  WITH (  
    +    type = "dis",  
    +    region = "xxx",
    +    channel = "dliinput",  
    +    encode = "email",  
    +    email_key = "Message-ID, Date, Subject, From, To, CC, BCC, Body, Mime-Version, Content-Type, charset, Content_Transfer_Encoding"  
    +);
    +
    + +
    +

    An example of email data is as follows:

    +
    Message-ID: <200906291839032504254@sample.com>
    +Date: Fri, 11 May 2001 09:54:00 -0700 (PDT)
    +From: zhangsan@sample.com
    +To: lisi@sample.com, wangwu@sample.com 
    +Subject:  "Hello World" 
    +Cc: lilei@sample.com, hanmei@sample.com
    +Mime-Version: 1.0
    +Content-Type: text/plain; charset=us-ascii
    +Content-Transfer-Encoding: 7bit
    +Bcc: jack@sample.com, lily@sample.com
    +X-From: Zhang San
    +X-To: Li Si, Wang Wu
    +X-cc: Li Lei, Han Mei
    +X-bcc: 
    +X-Folder: \Li_Si_June2001\Notes Folders\Notes inbox
    +X-Origin: Lucy
    +X-FileName: sample.nsf
    +
    +Dear Associate / Analyst Committee:
    +
    +Hello World! 
    +
    +Thank you,
    +
    +Associate / Analyst Program 
    +zhangsan
    +
+
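The offset and checkpoint keywords from Table 1 can be added to any of the examples above. A CSV sketch (parameter values are illustrative):

CREATE SOURCE STREAM car_infos (
  car_id STRING,
  car_owner STRING
)
  WITH (
    type = "dis",
    region = "xxx",
    channel = "dliinput",
    encode = "csv",
    field_delimiter = ",",
    offset = "100",
    enable_checkpoint = "true",
    checkpoint_app_name = "dli_app_1",
    checkpoint_interval = "60"
);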
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0236.html b/docs/dli/sqlreference/dli_08_0236.html new file mode 100644 index 00000000..9690f527 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0236.html @@ -0,0 +1,181 @@ + + +

OBS Source Stream

+

Function

Create a source stream to obtain data from OBS. DLI reads data stored by users in OBS as input data for jobs. OBS applies to various scenarios, such as big data analysis, cloud-native application program data, static website hosting, backup/active archive, and deep/cold archive.

+

OBS is an object-based storage service. It provides massive, secure, highly reliable, and low-cost data storage capabilities. For more information about OBS, see the Object Storage Service Console Operation Guide.

+
+

Syntax

CREATE SOURCE STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "obs",
    region = "",
    bucket = "",
    object_name = "",
    row_delimiter = "\n",
    field_delimiter = '',
    version_id = ""
  );
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Data source type. obs indicates that the data source is OBS.

+

region

+

Yes

+

Region to which OBS belongs.

+

encode

+

No

+

Data encoding format. The value can be csv or json. The default value is csv.

+

ak

+

No

+

Access Key ID (AK).

+

sk

+

No

+

Secret access key used together with the ID of the access key.

+

bucket

+

Yes

+

Name of the OBS bucket where data is located.

+

object_name

+

Yes

+

Name of the object stored in the OBS bucket where data is located. If the object is not in the OBS root directory, you need to specify the folder name, for example, test/test.csv. For the object file format, see the encode parameter.

+

row_delimiter

+

Yes

+

Separator used to separate every two rows.

+

field_delimiter

+

No

+

Separator used to separate every two attributes.

+
  • This parameter is mandatory when encode is csv. A custom attribute separator can be used.
  • If encode is json, you do not need to set this parameter.
+

quote

+

No

+

Quoted symbol in a data format. The attribute delimiters between two quoted symbols are treated as common characters.

+
  • If double quotation marks are used as the quoted symbol, set this parameter to \u005c\u0022 for character conversion.
  • If a single quotation mark is used as the quoted symbol, set this parameter to a single quotation mark (').
+
NOTE:
  • Currently, only the CSV format is supported.
  • After this parameter is specified, ensure that each field does not contain quoted symbols or contains an even number of quoted symbols. Otherwise, parsing will fail.
+
+

version_id

+

No

+

Version ID. This parameter is required only when versioning is enabled for the OBS bucket or object.

+
+
+
+

Precautions

When creating a source stream, you can specify a time model for subsequent calculation. Currently, DLI supports two time models: Processing Time and Event Time. For details about the syntax, see Configuring Time Models.

+
+

Example

  • The input.csv file is read from the OBS bucket. Rows are separated by '\n' and columns are separated by ','.

    To use the test data, create a text file, paste the following data into it, and save it as input.csv. Then upload input.csv to the target OBS bucket directory, for example, the dli-test-obs01 bucket.

    +
    1,2,3,4,1403149534
    5,6,7,8,1403149535
    +
    The following is an example for creating the table:
    CREATE SOURCE STREAM car_infos (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT,
      car_timestamp LONG
    )
      WITH (
        type = "obs",
        bucket = "dli-test-obs01",
        region = "xxx",
        object_name = "input.csv",
        row_delimiter = "\n",
        field_delimiter = ","
    );
    +
    +
  • The input.json file is read from the OBS bucket. Rows are separated by '\n'.
    CREATE SOURCE STREAM obs_source (
      str STRING
    )
      WITH (
        type = "obs",
        bucket = "obssource",
        region = "xxx",
        encode = "json",
        row_delimiter = "\n",
        object_name = "input.json"
    );
    +
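  • The quote parameter in Table 1 is useful when CSV field values contain the attribute delimiter. The following is a minimal sketch, assuming a hypothetical quoted.csv object whose fields are enclosed in double quotation marks:

    CREATE SOURCE STREAM car_infos_quoted (
      car_id STRING,
      car_owner STRING,
      car_price INT
    )
      WITH (
        type = "obs",
        bucket = "dli-test-obs01",
        region = "xxx",
        object_name = "quoted.csv",
        row_delimiter = "\n",
        field_delimiter = ",",
        quote = "\u005c\u0022"
    );

    With this setting, a row such as "ZJA710XC","li,lei",700000 is parsed into three fields, because the delimiter between the quoted symbols is treated as a common character.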
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0237.html b/docs/dli/sqlreference/dli_08_0237.html
new file mode 100644
index 00000000..652ba7c3
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0237.html
@@ -0,0 +1,119 @@

CloudTable HBase Source Stream

+

Function

Create a source stream to obtain data from HBase of CloudTable as input data of the job. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scalability. It applies to the storage of massive amounts of data and distributed computing. You can use HBase to build a storage system capable of storing TB- or even PB-level data. With HBase, you can filter and analyze data with ease and get responses in milliseconds, rapidly mining data value. DLI can read data from HBase for filtering, analysis, and data dumping.

+

CloudTable is a distributed, scalable, and fully-hosted key-value data storage service based on Apache HBase. It provides DLI with high-performance random read and write capabilities, which are helpful when applications need to store and query a massive amount of structured data, semi-structured data, and time series data. CloudTable applies to IoT scenarios and storage and query of massive volumes of key-value data. For more information about CloudTable, see the CloudTable Service User Guide.

+
+

Prerequisites

In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with CloudTable HBase. You can also set the security group rules as required.

+

For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

+

For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

+
+

Syntax

CREATE SOURCE STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "cloudtable",
    region = "",
    cluster_id = "",
    table_name = "",
    table_columns = ""
  );
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Data source type. cloudtable indicates that the data source is CloudTable.

+

region

+

Yes

+

Region to which CloudTable belongs.

+

cluster_id

+

Yes

+

ID of the cluster to which the data table to be read belongs.

+

For details about how to view the ID of the CloudTable cluster, see section "Viewing Basic Cluster Information" in the CloudTable Service User Guide.

+

table_name

+

Yes

+

Name of the table from which data is to be read. If a namespace needs to be specified, set it to namespace_name:table_name.

+

table_columns

+

Yes

+

Column to be read. The format is rowKey,f1:c1,f1:c2,f2:c1. The number of columns must be the same as the number of attributes specified in the source stream.

+
+
+
+

Precautions

When creating a source stream, you can specify a time model for subsequent calculation. Currently, DLI supports two time models: Processing Time and Event Time. For details about the syntax, see Configuring Time Models.

+
+

Example

Read the car_infos table from HBase of CloudTable.

+
CREATE SOURCE STREAM car_infos (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT
)
  WITH (
    type = "cloudtable",
    region = "xxx",
    cluster_id = "209ab1b6-de25-4c48-8e1e-29e09d02de28",
    table_name = "carinfo",
    table_columns = "rowKey,info:owner,info:age,car:speed,car:miles"
);
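If the table resides in a non-default HBase namespace, prefix the namespace to the table name, as described for table_name in Table 1. A minimal variation of the example above, assuming a hypothetical namespace test_ns:

CREATE SOURCE STREAM car_infos (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT
)
  WITH (
    type = "cloudtable",
    region = "xxx",
    cluster_id = "209ab1b6-de25-4c48-8e1e-29e09d02de28",
    table_name = "test_ns:carinfo",
    table_columns = "rowKey,info:owner,info:age,car:speed,car:miles"
);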
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0238.html b/docs/dli/sqlreference/dli_08_0238.html
new file mode 100644
index 00000000..7b990eb5
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0238.html
@@ -0,0 +1,211 @@

MRS Kafka Source Stream

+

Function

Create a source stream to obtain data from Kafka as input data for jobs.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages. In this case, Kafka clusters are deployed and hosted on MRS, which is powered by Apache Kafka.

+
+

Prerequisites

  • If the Kafka server listens on the port using hostname, you need to add the mapping between the hostname and IP address of the Kafka Broker node to the DLI queue. Contact the Kafka service deployment personnel to obtain the hostname and IP address of the Kafka Broker node. For details about how to add an IP-domain mapping, see Enhanced Datasource Connections > Modifying the Host Information in the Data Lake Insight User Guide.
  • Kafka is an offline cluster. You need to use the enhanced datasource connection function to connect Flink jobs to Kafka. You can also set security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    +

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

    +
+
+

Syntax

CREATE SOURCE STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "kafka",
    kafka_bootstrap_servers = "",
    kafka_group_id = "",
    kafka_topic = "",
    encode = "json"
  );
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Data source type. kafka indicates that the data source is Kafka.

+

kafka_bootstrap_servers

+

Yes

+

Addresses and ports of the Kafka brokers that DLI connects to. Use enhanced datasource connections to connect DLI queues with Kafka clusters.

+

kafka_group_id

+

No

+

Group ID

+

kafka_topic

+

Yes

+

Kafka topic to be read. Currently, only one topic can be read at a time.

+

encode

+

Yes

+

Data encoding format. The value can be csv, json, blob, or user_defined.

+
  • field_delimiter must be specified if this parameter is set to csv.
  • json_config must be specified if this parameter is set to json.
  • If this parameter is set to blob, the received data is not parsed, only one stream attribute exists, and the stream attribute is of the Array[TINYINT] type.
  • encode_class_name and encode_class_parameter must be specified if this parameter is set to user_defined.
+

encode_class_name

+

No

+

If encode is set to user_defined, you need to set this parameter to the name of the user-defined decoding class (including the complete package path). The class must inherit the DeserializationSchema class.

+

encode_class_parameter

+

No

+

If encode is set to user_defined, you can set this parameter to specify the input parameter of the user-defined decoding class. Only one parameter of the string type is supported.

+

krb_auth

+

No

+

Name of the datasource authentication to be used. This parameter is mandatory when Kerberos authentication is enabled.

+
NOTE:

Ensure that the /etc/hosts information of the master node in the MRS cluster is added to the host file of the DLI queue.

+
+

json_config

+

No

+

If encode is set to json, you can use this parameter to specify the mapping between JSON fields and stream attributes.

+

The format is field1=json_field1;field2=json_field2.

+

field1 and field2 indicate the names of the created table fields. json_field1 and json_field2 are key fields of the JSON strings in the Kafka input data.

+

For details, see the example.

+

field_delimiter

+

No

+

If encode is set to csv, you can use this parameter to specify the separator between CSV fields. By default, the comma (,) is used.

+

quote

+

No

+

Quoted symbol in a data format. The attribute delimiters between two quoted symbols are treated as common characters.

+
  • If double quotation marks are used as the quoted symbol, set this parameter to \u005c\u0022 for character conversion.
  • If a single quotation mark is used as the quoted symbol, set this parameter to a single quotation mark (').
+
NOTE:
  • Currently, only the CSV format is supported.
  • After this parameter is specified, ensure that each field does not contain quoted symbols or contains an even number of quoted symbols. Otherwise, parsing will fail.
+
+

start_time

+

No

+

Start time when Kafka data is ingested.

+

If this parameter is specified, DLI reads data starting from the specified time. The format is yyyy-MM-dd HH:mm:ss. Ensure that the value of start_time is not later than the current time. Otherwise, no data will be obtained.

+

kafka_properties

+

No

+

This parameter is used to configure the native attributes of Kafka. The format is key1=value1;key2=value2.

+

kafka_certificate_name

+

No

+

Specifies the name of the datasource authentication information. This parameter is valid only when the datasource authentication type is set to Kafka_SSL.

+
NOTE:
  • If this parameter is specified, the service loads only the file and password specified under the authentication and automatically adds the related configuration to kafka_properties.
  • Other configuration information required for Kafka SSL authentication needs to be manually configured in the kafka_properties attribute.
+
+
+
+
+

Precautions

When creating a source stream, you can specify a time model for subsequent calculation. Currently, DLI supports two time models: Processing Time and Event Time. For details about the syntax, see Configuring Time Models.

+
+

Example

  • Read data from the Kafka topic test.
    CREATE SOURCE STREAM kafka_source (
      name STRING,
      age int
    )
      WITH (
        type = "kafka",
        kafka_bootstrap_servers = "ip1:port1,ip2:port2",
        kafka_group_id = "sourcegroup1",
        kafka_topic = "test",
        encode = "json"
    );
    +
+
  • Read the topic named test from Kafka and use json_config to map JSON data to table fields.

    The data encoding format is non-nested JSON.

    +
    {"attr1": "lilei", "attr2": 18}
    +
    The table creation statement is as follows:
    CREATE SOURCE STREAM kafka_source (name STRING, age int)
    WITH (
      type = "kafka",
      kafka_bootstrap_servers = "ip1:port1,ip2:port2",
      kafka_group_id = "sourcegroup1",
      kafka_topic = "test",
      encode = "json",
      json_config = "name=attr1;age=attr2"
    );
    +
    +
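  • For the csv encoding format, field_delimiter is specified instead of json_config, as described in Table 1. A minimal sketch, assuming a hypothetical topic testcsv whose records look like lilei,18:

    CREATE SOURCE STREAM kafka_csv_source (
      name STRING,
      age INT
    )
      WITH (
        type = "kafka",
        kafka_bootstrap_servers = "ip1:port1,ip2:port2",
        kafka_group_id = "sourcegroup1",
        kafka_topic = "testcsv",
        encode = "csv",
        field_delimiter = ","
    );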
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0239.html b/docs/dli/sqlreference/dli_08_0239.html
new file mode 100644
index 00000000..8d43f192
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0239.html
@@ -0,0 +1,220 @@

Open-Source Kafka Source Stream

+

Function

Create a source stream to obtain data from Kafka as input data for jobs.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+
+

Prerequisites

  • If the Kafka server listens on the port using hostname, you need to add the mapping between the hostname and IP address of the Kafka Broker node to the DLI queue. Contact the Kafka service deployment personnel to obtain the hostname and IP address of the Kafka Broker node. For details about how to add an IP-domain mapping, see Enhanced Datasource Connections > Modifying the Host Information in the Data Lake Insight User Guide.
  • Kafka is an offline cluster. You need to use the enhanced datasource connection function to connect Flink jobs to Kafka. You can also set security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    +

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

    +
+
+

Syntax

CREATE SOURCE STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "kafka",
    kafka_bootstrap_servers = "",
    kafka_group_id = "",
    kafka_topic = "",
    encode = "json",
    json_config = ""
  );
+
+

Keywords

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Data source type. kafka indicates that the data source is Kafka.

+

kafka_bootstrap_servers

+

Yes

+

Addresses and ports of the Kafka brokers that DLI connects to. Use enhanced datasource connections to connect DLI queues with Kafka clusters.

+

kafka_group_id

+

No

+

Group ID.

+

kafka_topic

+

Yes

+

Kafka topic to be read. Currently, only one topic can be read at a time.

+

encode

+

Yes

+

Data encoding format. The value can be csv, json, blob, or user_defined.

+
  • field_delimiter must be specified if this parameter is set to csv.
  • json_config must be specified if this parameter is set to json.
  • If this parameter is set to blob, the received data will not be parsed, and only one Array[TINYINT] field exists in the table.
  • encode_class_name and encode_class_parameter must be specified if this parameter is set to user_defined.
+

encode_class_name

+

No

+

If encode is set to user_defined, you need to set this parameter to the name of the user-defined decoding class (including the complete package path). The class must inherit the DeserializationSchema class.

+

encode_class_parameter

+

No

+

If encode is set to user_defined, you can set this parameter to specify the input parameter of the user-defined decoding class. Only one parameter of the string type is supported.

+

json_config

+

No

+

If encode is set to json, you can use this parameter to specify the mapping between JSON fields and stream attributes.

+

The format is field1=json_field1;field2=json_field2.

+

field1 and field2 indicate the names of the created table fields. json_field1 and json_field2 are key fields of the JSON strings in the Kafka input data.

+

For details, see Example.

+
NOTE:

If the attribute names in the source stream are the same as those in JSON fields, you do not need to set this parameter.

+
+

field_delimiter

+

No

+

If encode is set to csv, you can use this parameter to specify the separator between CSV fields. By default, the comma (,) is used.

+

quote

+

No

+

Quoted symbol in a data format. The attribute delimiters between two quoted symbols are treated as common characters.

+
  • If double quotation marks are used as the quoted symbol, set this parameter to \u005c\u0022 for character conversion.
  • If a single quotation mark is used as the quoted symbol, set this parameter to a single quotation mark (').
+
NOTE:
  • Currently, only the CSV format is supported.
  • After this parameter is specified, ensure that each field does not contain quoted symbols or contains an even number of quoted symbols. Otherwise, parsing will fail.
+
+

start_time

+

No

+

Start time when Kafka data is ingested.

+

If this parameter is specified, DLI reads data starting from the specified time. The format is yyyy-MM-dd HH:mm:ss. Ensure that the value of start_time is not later than the current time. Otherwise, no data will be obtained.

+

If you set this parameter, only the data generated after the specified time for the Kafka topic will be read.

+

kafka_properties

+

No

+

Native properties of Kafka. The format is key1=value1;key2=value2. For details about the property values, see the description in Apache Kafka.

+

kafka_certificate_name

+

No

+

Name of the datasource authentication information. This parameter is valid only when the datasource authentication type is set to Kafka_SSL.

+
NOTE:
  • If this parameter is specified, the service loads only the file and password specified under the authentication and automatically adds the related configuration to kafka_properties.
  • Other configuration information required for Kafka SSL authentication needs to be manually configured in the kafka_properties attribute.
+
+
+
+
+

Precautions

When creating a source stream, you can specify a time model for subsequent calculation. Currently, DLI supports two time models: Processing Time and Event Time. For details about the syntax, see Configuring Time Models.

+
+

Example

  • Read Kafka topic test. The data encoding format is non-nested JSON, for example, {"attr1": "lilei", "attr2": 18}.
    CREATE SOURCE STREAM kafka_source (name STRING, age int)
    WITH (
      type = "kafka",
      kafka_bootstrap_servers = "ip1:port1,ip2:port2",
      kafka_group_id = "sourcegroup1",
      kafka_topic = "test",
      encode = "json",
      json_config = "name=attr1;age=attr2"
    );
    +
  • Read Kafka topic test. The data is encoded in JSON format and nested. This example uses the complex data type ROW. For details about the syntax of ROW, see Data Type.

    The test data is as follows:

    +
    {
        "id":"1",
        "type2":"online",
        "data":{
            "patient_id":1234,
            "name":"bob1234"
        }
    }
    +
    An example of the table creation statements is as follows:
    CREATE SOURCE STREAM kafka_source
    (
      id STRING,
      type2 STRING,
      data ROW<
        patient_id STRING,
        name STRING>
    )
    WITH (
      type = "kafka",
      kafka_bootstrap_servers = "ip1:port1,ip2:port2",
      kafka_group_id = "sourcegroup1",
      kafka_topic = "test",
      encode = "json"
    );

    CREATE SINK STREAM kafka_sink
    (
      id STRING,
      type2 STRING,
      patient_id STRING,
      name STRING
    )
      WITH (
        type = "kafka",
        kafka_bootstrap_servers = "ip1:port1,ip2:port2",
        kafka_topic = "testsink",
        encode = "csv"
      );

    INSERT INTO kafka_sink select id, type2, data.patient_id, data.name from kafka_source;
    +
    +
+
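The start_time and kafka_properties parameters in Table 1 can be combined with either encoding format. The following is a minimal sketch, assuming a hypothetical requirement to replay the test topic from a fixed point in time while tuning one native consumer property:

CREATE SOURCE STREAM kafka_replay_source (name STRING, age int)
WITH (
  type = "kafka",
  kafka_bootstrap_servers = "ip1:port1,ip2:port2",
  kafka_group_id = "sourcegroup2",
  kafka_topic = "test",
  encode = "json",
  json_config = "name=attr1;age=attr2",
  start_time = "2023-11-01 00:00:00",
  kafka_properties = "max.poll.records=500"
);

Because start_time must not be later than the current time, a timestamp in the past is used here.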
+
+
diff --git a/docs/dli/sqlreference/dli_08_0240.html b/docs/dli/sqlreference/dli_08_0240.html
new file mode 100644
index 00000000..082f726b
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0240.html
@@ -0,0 +1,48 @@

Creating a Sink Stream

+

+
+
diff --git a/docs/dli/sqlreference/dli_08_0241.html b/docs/dli/sqlreference/dli_08_0241.html
new file mode 100644
index 00000000..8550f079
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0241.html
@@ -0,0 +1,198 @@

DIS Sink Stream

+

Function

DLI writes the Flink job output data into DIS. This sink is applicable to scenarios where data is filtered and imported to the DIS stream for future processing.

+

DIS addresses the challenge of transmitting data outside cloud services to cloud services. DIS builds data intake streams for custom applications capable of processing or analyzing streaming data. DIS continuously captures, transmits, and stores terabytes of data from hundreds of thousands of sources every hour, such as logs, Internet of Things (IoT) data, social media feeds, website clickstreams, and location-tracking events. For more information about DIS, see the Data Ingestion Service User Guide.

+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "dis",
    region = "",
    channel = "",
    partition_key = "",
    encode = "",
    field_delimiter = ""
  );
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Output channel type. dis indicates that data is exported to DIS.

+

region

+

Yes

+

Region where the DIS stream for storing the data is located.

+

ak

+

No

+

Access Key ID (AK).

+

sk

+

No

+

Specifies the secret access key used together with the ID of the access key.

+

channel

+

Yes

+

DIS stream.

+

partition_key

+

No

+

Group primary key. Multiple primary keys are separated by commas (,). If this parameter is not specified, data is randomly written to DIS partitions.

+

encode

+

Yes

+

Data encoding format. The value can be csv, json, or user_defined.

+
NOTE:
  • field_delimiter must be specified if this parameter is set to csv.
  • If the encoding format is json, you need to configure enable_output_null to determine whether to generate an empty field. For details, see the examples.
  • encode_class_name and encode_class_parameter must be specified if this parameter is set to user_defined.
+
+

field_delimiter

+

Yes

+

Separator used to separate every two attributes.

+
  • This parameter needs to be configured if the CSV encoding format is adopted. It can be user-defined, for example, a comma (,).
  • This parameter is not required if the JSON encoding format is adopted.
+

json_config

+

No

+

If encode is set to json, you can set this parameter to specify the mapping between the JSON field and the stream definition field. An example of the format is as follows: field1=data_json.field1; field2=data_json.field2.

+

enable_output_null

+

No

+

If encode is set to json, you need to specify this parameter to determine whether to generate an empty field.

+

If this parameter is set to true, an empty field (the value is null) is generated. If set to false, no empty field is generated. The default value is true.

+

encode_class_name

+

No

+

If encode is set to user_defined, you need to set this parameter to the name of the user-defined decoding class (including the complete package path). The class must inherit the DeserializationSchema class.

+

encode_class_parameter

+

No

+

If encode is set to user_defined, you can set this parameter to specify the input parameter of the user-defined decoding class. Only one parameter of the string type is supported.

+
+
+
+

Precautions

None

+
+

Example

  • CSV: Data is written to the DIS stream and encoded using CSV. CSV fields are separated by commas (,). If there are multiple partitions, car_owner is used as the key to distribute data to different partitions. An example is as follows: "ZJA710XC", "lilei", "BMW", 700000.
    CREATE SINK STREAM audi_cheaper_than_30w (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT
    )
      WITH (
        type = "dis",
        region = "xxx",
        channel = "dlioutput",
        encode = "csv",
        field_delimiter = ","
    );
    +
+
  • JSON: Data is written to the DIS stream and encoded using JSON. If there are multiple partitions, car_owner and car_brand are used as the keys to distribute data to different partitions. If enable_output_null is set to true, an empty field (the value is null) is generated. If set to false, no empty field is generated. An example is as follows: "car_id":"ZJA710XC", "car_owner":"lilei", "car_brand":"BMW", "car_price":700000.
    CREATE SINK STREAM audi_cheaper_than_30w (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT
    )
      WITH (
        type = "dis",
        channel = "dlioutput",
        region = "xxx",
        partition_key = "car_owner,car_brand",
        encode = "json",
        enable_output_null = "false"
    );
    +
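  • The json_config parameter in Table 1 can additionally reshape the JSON output by mapping stream definition fields to JSON keys. A minimal sketch, assuming hypothetical target keys car.id and car.price:

    CREATE SINK STREAM car_info_json (
      car_id STRING,
      car_price INT
    )
      WITH (
        type = "dis",
        region = "xxx",
        channel = "dlioutput",
        encode = "json",
        json_config = "car_id=car.id;car_price=car.price"
    );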
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0242.html b/docs/dli/sqlreference/dli_08_0242.html
new file mode 100644
index 00000000..82374f80
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0242.html
@@ -0,0 +1,284 @@

OBS Sink Stream

+

Function

Create a sink stream to export DLI data to OBS. DLI can export the job analysis results to OBS. OBS applies to various scenarios, such as big data analysis, cloud-native application program data, static website hosting, backup/active archive, and deep/cold archive.

+

OBS is an object-based storage service. It provides massive, secure, highly reliable, and low-cost data storage capabilities. For more information about OBS, see the Object Storage Service Console Operation Guide.

+

You are advised to use the File System Sink Stream (Recommended).

+
+
+

Prerequisites

Before data exporting, check the version of the OBS bucket. The OBS sink stream supports data exporting to an OBS bucket running OBS 3.0 or a later version.

+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "obs",
    region = "",
    encode = "",
    field_delimiter = "",
    row_delimiter = "",
    obs_dir = "",
    file_prefix = "",
    rolling_size = "",
    rolling_interval = "",
    quote = "",
    array_bracket = "",
    append = "",
    max_record_num_per_file = "",
    dump_interval = "",
    dis_notice_channel = ""
  )
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Output channel type. obs indicates that data is exported to OBS.

+

region

+

Yes

+

Region to which OBS belongs.

+

ak

+

No

+

Access Key ID (AK).

+

sk

+

No

+

Secret access key used together with the ID of the access key.

+

encode

+

Yes

+

Encoding format. Currently, formats CSV, JSON, ORC, Avro, Avro-Merge, and Parquet are supported.

+

field_delimiter

+

No

+

Separator used to separate every two attributes.

+

This parameter takes effect only when the CSV encoding format is adopted. If it is not specified, the default separator comma (,) is used.

+

row_delimiter

+

No

+

Row delimiter. This parameter does not need to be configured if the CSV or JSON encoding format is adopted.

+

json_config

+

No

+

If encode is set to json, you can set this parameter to specify the mapping between the JSON field and the stream definition field. An example of the format is as follows: field1=data_json.field1;field2=data_json.field2.

+

obs_dir

+

Yes

+

Directory for storing files. The directory is in the format of {Bucket name}/{Directory name}, for example, obs-a1/dir1/subdir. If encode is set to csv (append is false), json (append is false), avro_merge, or parquet, parameterization is supported.

+

file_prefix

+

No

+

Prefix of the data export file name. The generated file is named in the format of file_prefix.x, for example, file_prefix.1 and file_prefix.2. If this parameter is not specified, the file prefix is temp by default.

+

rolling_size

+

No

+

Maximum size of a file.

+
NOTE:
  • One or both of rolling_size and rolling_interval must be configured.
  • When the size of a file exceeds the specified size, a new file is generated.
  • The unit can be KB, MB, or GB. If no unit is specified, the byte unit is used.
  • This parameter does not need to be configured if the ORC encoding format is adopted.
+
+

rolling_interval

+

No

+

Time mode, in which data is saved to the corresponding directory.

+
NOTE:
  • One or both of rolling_size and rolling_interval must be configured.
  • After this parameter is specified, data is written to the corresponding directories according to the output time.
  • The parameter value can be in the format of yyyy/MM/dd/HH/mm, which is case sensitive. The minimum unit is minute. If this parameter is set to yyyy/MM/dd/HH, data is written to the directory that is generated at the hour time. For example, data generated at 2018-09-10 16:00 will be written to the {obs_dir}/2018-09-10_16 directory.
  • If both rolling_size and rolling_interval are set, a new file is generated when the size of a single file exceeds the specified size in the directory corresponding to each time point.
+
+

quote

+

No

+

Modifier, which is added before and after each attribute only when the CSV encoding format is adopted. You are advised to use invisible characters, such as \u0007, as the parameter value.

+

array_bracket

+

No

+

Array bracket, which can be configured only when the CSV encoding format is adopted. The available options are (), {}, and []. For example, if you set this parameter to {}, the array output format is {a1, a2}.

+

append

+

No

+

The value can be true or false. The default value is true.

+

If OBS does not support the append mode and the encoding format is CSV or JSON, set this parameter to false. If append is set to false, max_record_num_per_file and dump_interval must be set, as shown in the sketch after the examples below.

+

max_record_num_per_file

+

No

+

Maximum number of records in a file. This parameter needs to be set if encode is csv (append is false), json (append is false), orc, avro, avro_merge, or parquet. If the maximum number of records has been reached, a new file is generated.

+

dump_interval

+

No

+

Triggering period. This parameter needs to be configured when the ORC encoding format is adopted or notification to DIS is enabled.

+
  • If the ORC encoding format is specified, this parameter indicates that files will be uploaded to OBS when the triggering period arrives even if the number of file records does not reach the maximum value.
  • If notification to DIS is enabled, this parameter specifies that a notification is sent to DIS every period to indicate that no more files will be generated in the directory.
+

dis_notice_channel

+

No

+

DIS channel to which DLI periodically sends a record containing the OBS directory, indicating that no more new files will be generated in that directory.

+

encoded_data

+

No

+

Data to be encoded. This parameter is set if encode is json (append is false), avro_merge, or parquet. The format is ${field_name}, indicating that the stream field content is encoded as a complete record.

+
+
+
+

Precautions

If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW in the record.

+
+

Example

  • Export the car_infos data to the obs-sink bucket in OBS. The output directory is car_infos. The output file uses greater_30 as the file name prefix. The maximum size of a single file is 100 MB. If the data size exceeds 100 MB, another new file is generated. The data is encoded in CSV format, the comma (,) is used as the attribute delimiter, and the line break is used as the line separator.
    CREATE SINK STREAM car_infos (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT,
      car_timestamp LONG
    )
      WITH (
        type = "obs",
        encode = "csv",
        region = "xxx",
        field_delimiter = ",",
        row_delimiter = "\n",
        obs_dir = "obs-sink/car_infos",
        file_prefix = "greater_30",
        rolling_size = "100m"
    );
    +
+
  • Example of the ORC encoding format
    CREATE SINK STREAM car_infos (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT,
      car_timestamp LONG
    )
      WITH (
        type = "obs",
        region = "xxx",
        encode = "orc",
        obs_dir = "dli-append-2/obsorc",
        FILE_PREFIX = "es_info",
        max_record_num_per_file = "100000",
        dump_interval = "60"
    );
    +
  • For details about the parquet encoding example, see the example in File System Sink Stream (Recommended).
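  • When append is set to false for the CSV or JSON encoding format, max_record_num_per_file and dump_interval must be configured, as noted in Table 1. A minimal sketch with hypothetical values:

    CREATE SINK STREAM car_infos_json (
      car_id STRING,
      car_price INT
    )
      WITH (
        type = "obs",
        region = "xxx",
        encode = "json",
        obs_dir = "obs-sink/car_infos_json",
        append = "false",
        max_record_num_per_file = "100000",
        dump_interval = "60"
    );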
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0243.html b/docs/dli/sqlreference/dli_08_0243.html
new file mode 100644
index 00000000..ccd0b678
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0243.html
@@ -0,0 +1,147 @@

CloudTable HBase Sink Stream

+

Function

DLI exports the job output data to HBase of CloudTable. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scalability. It applies to the storage of massive amounts of data and distributed computing. You can use HBase to build a storage system capable of storing TB- or even PB-level data. With HBase, you can filter and analyze data with ease and get responses in milliseconds, rapidly mining data value. Structured and semi-structured key-value data can be stored, including messages, reports, recommendation data, risk control data, logs, and orders. With DLI, you can write massive volumes of data to HBase at a high speed and with low latency.

+

CloudTable is a distributed, scalable, and fully-hosted key-value data storage service based on Apache HBase. It provides DLI with high-performance random read and write capabilities, which are helpful when applications need to store and query a massive amount of structured data, semi-structured data, and time series data. CloudTable applies to IoT scenarios and storage and query of massive volumes of key-value data. For more information about CloudTable, see the CloudTable Service User Guide.

+
+

Prerequisites

In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with CloudTable HBase. You can also set the security group rules as required.

+

For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

+

For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "cloudtable",
    region = "",
    cluster_id = "",
    table_name = "",
    table_columns = "",
    create_if_not_exist = ""
  )
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Output channel type. cloudtable indicates that data is exported to CloudTable (HBase).

+

region

+

Yes

+

Region to which CloudTable belongs.

+

cluster_id

+

Yes

+

ID of the cluster to which the data you want to insert belongs.

+

table_name

+

Yes

+

Name of the table, into which data is to be inserted. It can be specified through parameter configurations. For example, if you want one or more certain columns as part of the table name, use car_pass_inspect_with_age_${car_age}, where car_age is the column name.

+

table_columns

+

Yes

+

Columns to be inserted. The format is rowKey, f1:c1, f1:c2, f2:c1, where rowKey must be specified. If you do not want to add a column (for example, the third column) to the database, set this parameter to rowKey,f1:c1,,f2:c1.

+

illegal_data_table

+

No

+

If this parameter is specified, abnormal data (for example, rowKey does not exist) will be written into the table. If not specified, abnormal data will be discarded. The rowKey value is a timestamp followed by six random digits, and the schema is info:data, info:reason.

+

create_if_not_exist

+

No

+

Whether to create a table or column into which the data is written when this table or column does not exist. The value can be true or false. The default value is false.

+

batch_insert_data_num

+

No

+

Number of data records to be written in batches at a time. The value must be a positive integer. The upper limit is 100. The default value is 10.

+
+
+
+

Precautions

  • If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW in the record.
  • Data is written to CloudTable HBase at a limited speed, so the dedicated resource mode is recommended.
+
+

Example

Output data of stream qualified_cars to CloudTable (HBase).

+
CREATE SINK STREAM qualified_cars (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT
)
  WITH (
    type = "cloudtable",
    region = "xxx",
    cluster_id = "209ab1b6-de25-4c48-8e1e-29e09d02de28",
    table_name = "car_pass_inspect_with_age_${car_age}",
    table_columns = "rowKey,info:owner,,car:speed,car:miles",
    illegal_data_table = "illegal_data",
    create_if_not_exist = "true",
    batch_insert_data_num = "20"
);
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0244.html b/docs/dli/sqlreference/dli_08_0244.html
new file mode 100644
index 00000000..d827986b
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0244.html
@@ -0,0 +1,158 @@

CloudTable OpenTSDB Sink Stream

+

Function

DLI exports the job output data to OpenTSDB of CloudTable. OpenTSDB is a distributed, scalable time series database based on HBase. Time series data refers to data collected at different time points; it reflects the change status or degree of an object over time. OpenTSDB supports data collection and monitoring in seconds, permanent storage, indexing, and queries. It can be used for system monitoring and measurement as well as collection and monitoring of IoT data, financial data, and scientific experimental results.

+

CloudTable is a distributed, scalable, and fully-hosted key-value data storage service based on Apache HBase. It provides DLI with high-performance random read and write capabilities, which are helpful when applications need to store and query a massive amount of structured data, semi-structured data, and time series data. CloudTable applies to IoT scenarios and storage and query of massive volumes of key-value data. For more information about CloudTable, see the CloudTable Service User Guide.

+
+

Prerequisites

  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with CloudTable HBase. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    +

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

    +
+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "opentsdb",
    region = "",
    cluster_id = "",
    tsdb_metrics = "",
    tsdb_timestamps = "",
    tsdb_values = "",
    tsdb_tags = "",
    batch_insert_data_num = ""
  )
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Output channel type. opentsdb indicates that data is exported to CloudTable (OpenTSDB).

+

region

+

Yes

+

Region to which CloudTable belongs.

+

cluster_id

+

No

+

ID of the cluster to which the data to be inserted belongs. Either this parameter or tsdb_link_address must be specified.

+

tsdb_metrics

+

Yes

+

Metric of a data point, which can be specified through parameter configurations.

+

tsdb_timestamps

+

Yes

+

Timestamp of a data point. The data type can be LONG, INT, SHORT, or STRING. Only dynamic columns are supported.

+

tsdb_values

+

Yes

+

Value of a data point. The data type can be SHORT, INT, LONG, FLOAT, DOUBLE, or STRING. Dynamic columns or constant values are supported.

+

tsdb_tags

+

Yes

+

Tags of a data point. Each data point contains at least one tag and at most eight tags. Tags can be specified through parameter configurations.

+

batch_insert_data_num

+

No

+

Number of data records to be written in batches at a time. The value must be a positive integer. The upper limit is 65536. The default value is 8.

+

tsdb_link_address

+

No

+

OpenTSDB link of the cluster to which the data to be inserted belongs. If this parameter is used, the job must run in a dedicated DLI queue, and the DLI queue must be connected to the CloudTable cluster through an enhanced datasource connection. Either this parameter or cluster_id must be specified.

+
NOTE:

For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

+
+
+
+
+

Precautions

If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW in the record.

+
+

Example

Output data of stream weather_out to CloudTable (OpenTSDB).

+
CREATE SINK STREAM weather_out (
  timestamp_value LONG, /* Time */
  temperature FLOAT, /* Temperature value */
  humidity FLOAT, /* Humidity */
  location STRING /* Location */
)
  WITH (
    type = "opentsdb",
    region = "xxx",
    cluster_id = "e05649d6-00e2-44b4-b0ff-7194adaeab3f",
    tsdb_metrics = "weather",
    tsdb_timestamps = "${timestamp_value}",
    tsdb_values = "${temperature}; ${humidity}",
    tsdb_tags = "location:${location},signify:temperature; location:${location},signify:humidity",
    batch_insert_data_num = "10"
);
+
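As an alternative to cluster_id, tsdb_link_address can point directly to the OpenTSDB endpoint of the cluster, as described in Table 1. A minimal sketch with a hypothetical endpoint:

CREATE SINK STREAM weather_out2 (
  timestamp_value LONG,
  temperature FLOAT,
  location STRING
)
  WITH (
    type = "opentsdb",
    region = "xxx",
    tsdb_link_address = "https://ip:port",
    tsdb_metrics = "weather",
    tsdb_timestamps = "${timestamp_value}",
    tsdb_values = "${temperature}",
    tsdb_tags = "location:${location},signify:temperature"
);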
+
+
diff --git a/docs/dli/sqlreference/dli_08_0245.html b/docs/dli/sqlreference/dli_08_0245.html
new file mode 100644
index 00000000..a33e06c8
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0245.html
@@ -0,0 +1,164 @@

RDS Sink Stream

+

Function

DLI outputs the Flink job output data to RDS. Currently, PostgreSQL and MySQL databases are supported. The PostgreSQL database can store data of more complex types and delivers space information services, multi-version concurrent control (MVCC), and high concurrency. It applies to location applications, financial insurance, and e-commerce. The MySQL database reduces IT deployment and maintenance costs in various scenarios, such as web applications, e-commerce, enterprise applications, and mobile applications.

+

RDS is a cloud-based web service.

+

+

For more information about RDS, see the Relational Database Service User Guide.

+
+

Prerequisites

  • Ensure that you have created a PostgreSQL or MySQL RDS instance in RDS.

    For details about how to create an RDS instance, see Creating an Instance in the Relational Database Service User Guide.

    +
  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with RDS instance. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    +

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

    +
+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "rds",
    username = "",
    password = "",
    db_url = "",
    table_name = ""
  );
+

+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Output channel type. rds indicates that data is exported to RDS.

+

username

+

Yes

+

Username for connecting to a database.

+

password

+

Yes

+

Password for connecting to a database.

+

db_url

+

Yes

+

Database connection address, for example, {database_type}://ip:port/database.

+

Currently, two types of database connections are supported: MySQL and PostgreSQL.

+
  • MySQL: 'mysql://ip:port/database'
  • PostgreSQL: 'postgresql://ip:port/database'
+

table_name

+

Yes

+

Name of the table where data will be inserted.

+

db_columns

+

No

+

Mapping between attributes in the output stream and those in the database table. This parameter must be configured based on the sequence of attributes in the output stream.

+

Example:

+
create sink stream a3(student_name string, student_age int) with (
  type = "rds",
  username = "root",
  password = "xxxxxxxx",
  db_url = "mysql://192.168.0.102:8635/test1",
  db_columns = "name,age",
  table_name = "t1"
);
+

In the example, student_name corresponds to the name attribute in the database, and student_age corresponds to the age attribute in the database.

+
NOTE:
  • If db_columns is not configured, the number of attributes in the output stream can be less than the number of attributes in the database table, provided that the extra attributes in the database table are all nullable or have default values.
+
+

primary_key

+

No

+

To update data in the table in real time by using the primary key, add the primary_key configuration item (c_timeminute in the following example) when creating a table. During the data writing operation, data is updated if the specified primary_key exists. Otherwise, data is inserted.

+

Example:

+
CREATE SINK STREAM test(c_timeminute LONG, c_cnt LONG)
WITH (
  type = "rds",
  username = "root",
  password = "xxxxxxxx",
  db_url = "mysql://192.168.0.12:8635/test",
  table_name = "test",
  primary_key = "c_timeminute"
);
+

operation_field

+

No

+

Processing method of specified data, in the format of ${field_name}. The value of field_name must be a string. If the value of the field is D or DELETE, the record is deleted from the database; otherwise, the record is inserted by default. A usage sketch follows the example below.

+
+
+
+

Precautions

The stream format defined by stream_id must be the same as the table format.

+
+

Example

Data of stream audi_cheaper_than_30w is exported to the audi_cheaper_than_30w table in the test database.

+
CREATE SINK STREAM audi_cheaper_than_30w (
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_price INT
)
  WITH (
    type = "rds",
    username = "root",
    password = "xxxxxx",
    db_url = "mysql://192.168.1.1:8635/test",
    table_name = "audi_cheaper_than_30w"
);
+
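The primary_key and operation_field parameters in Table 1 enable upserts and per-record deletes. A minimal sketch, assuming a hypothetical stream attribute op that carries D or DELETE for records to be removed:

CREATE SINK STREAM car_ops (
  car_id STRING,
  car_price INT,
  op STRING
)
  WITH (
    type = "rds",
    username = "root",
    password = "xxxxxxxx",
    db_url = "mysql://192.168.1.1:8635/test",
    table_name = "car_ops",
    primary_key = "car_id",
    operation_field = "${op}"
);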
+
+
diff --git a/docs/dli/sqlreference/dli_08_0247.html b/docs/dli/sqlreference/dli_08_0247.html
new file mode 100644
index 00000000..aa8f0e78
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0247.html
@@ -0,0 +1,180 @@

DWS Sink Stream (JDBC Mode)

+

Function

DLI outputs the Flink job output data to Data Warehouse Service (DWS). DWS database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex types and delivers space information services, multi-version concurrent control (MVCC), and high concurrency. It applies to location applications, financial insurance, and e-commerce.

+

DWS is an online data processing database based on the cloud infrastructure and platform and helps you mine and analyze massive sets of data. For more information about DWS, see the Data Warehouse Service documentation.

+
+

Prerequisites

  • Ensure that you have created a DWS cluster on DWS using your account.

    For details about how to create a DWS cluster, see Creating a Cluster in the Data Warehouse Service Management Guide.

    +
  • Ensure that a DWS database table has been created.
  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with DWS clusters. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    +

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

    +
+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "rds",
    username = "",
    password = "",
    db_url = "",
    table_name = ""
  );
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Output channel type. rds indicates that data is exported to RDS or DWS.

+

username

+

Yes

+

Username for connecting to a database.

+

password

+

Yes

+

Password for connecting to a database.

+

db_url

+

Yes

+

Database connection address, for example, postgresql://ip:port/database.

+

table_name

+

Yes

+

Name of the table where data will be inserted. You need to create the database table in advance.

+

db_columns

+

No

+

Mapping between attributes in the output stream and those in the database table. This parameter must be configured based on the sequence of attributes in the output stream.

+

Example:

+
create sink stream a3(student_name string, student_age int)
  with (
    type = "rds",
    username = "root",
    password = "xxxxxxxx",
    db_url = "postgresql://192.168.0.102:8000/test1",
    db_columns = "name,age",
    table_name = "t1"
  );
+

In the example, student_name corresponds to the name attribute in the database, and student_age corresponds to the age attribute in the database.

+
NOTE:
  • If db_columns is not configured, the number of attributes in the output stream can be less than the number of attributes in the database table, provided that the extra attributes in the database table are all nullable or have default values.
+
+

primary_key

+

No

+

To update data in the table in real time by using the primary key, add the primary_key configuration item (c_timeminute in the following example) when creating a table. During the data writing operation, data is updated if the specified primary_key exists. Otherwise, data is inserted.

+

Example:

+
CREATE SINK STREAM test(c_timeminute LONG, c_cnt LONG)
  WITH (
    type = "rds",
    username = "root",
    password = "xxxxxxxx",
    db_url = "postgresql://192.168.0.12:8000/test",
    table_name = "test",
    primary_key = "c_timeminute"
  );
+
+
+
+

Precautions

The stream format defined by stream_id must be the same as the table format.

+
+

Example

Data of stream audi_cheaper_than_30w is exported to the audi_cheaper_than_30w table in the test database.

+
CREATE SINK STREAM audi_cheaper_than_30w (
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_price INT
)
  WITH (
    type = "rds",
    username = "root",
    password = "xxxxxx",
    db_url = "postgresql://192.168.1.1:8000/test",
    table_name = "audi_cheaper_than_30w"
  );

insert into audi_cheaper_than_30w select "1","2","3",4;
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0248.html b/docs/dli/sqlreference/dli_08_0248.html
new file mode 100644
index 00000000..98379b5b
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0248.html
@@ -0,0 +1,290 @@

DWS Sink Stream (OBS-based Dumping)

Function

Create a sink stream to export Flink job data to DWS through OBS-based dumping: Flink job data is first output to OBS and then imported from OBS to DWS. For details about how to import OBS data to DWS, see Concurrently Importing Data from OBS in the Data Warehouse Service Development Guide.

DWS is an online data processing database built on the cloud infrastructure and platform. It helps you mine and analyze massive sets of data. For more information about DWS, see the Data Warehouse Service User Guide.

Precautions

  • OBS-based dumping supports intermediate files of the following two types:
    • ORC: The ORC format does not support the array data type. If the ORC format is used, create a foreign server in DWS. For details, see Creating a Foreign Server in the Data Warehouse Service Development Guide.
    • CSV: By default, the line break is used as the record separator. If line breaks are contained in the attribute content, you are advised to configure quote. For details, see Table 1.
  • If the target table does not exist, a table is automatically created. DLI SQL data types do not support text; if long text exists, you are advised to create the table in the database in advance.
  • When the ORC encoding format is used and a DWS table is auto-created, a stream field defined as the String type cannot map to the varchar type in the DWS table; a specific text type must be used instead. A stream field defined as the Integer type must map to the Integer type in the DWS table.

Prerequisites

  • Ensure that OBS buckets and folders have been created.

    For details about how to create an OBS bucket, see Creating a Bucket in the Object Storage Service User Guide.

    For details about how to create a folder, see Creating a Folder in the Object Storage Service User Guide.

  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with DWS clusters. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "dws",
    region = "",
    ak = "",
    sk = "",
    encode = "",
    field_delimiter = "",
    quote = "",
    db_obs_server = "",
    obs_dir = "",
    username = "",
    password = "",
    db_url = "",
    table_name = "",
    max_record_num_per_file = "",
    dump_interval = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. dws indicates that data is exported to DWS.

  • region (Mandatory: Yes): Region where DWS is located.

  • ak (Mandatory: Yes): Access key ID (AK).

  • sk (Mandatory: Yes): Secret access key used together with the AK.

  • encode (Mandatory: Yes): Encoding format. Currently, CSV and ORC are supported.

  • field_delimiter (Mandatory: No): Separator used to separate every two attributes. This parameter needs to be configured if the CSV encoding format is used. You are advised to use invisible characters as separators, for example, \u0006\u0002.

  • quote (Mandatory: No): Single byte. You are advised to use invisible characters, for example, \u0007.

  • db_obs_server (Mandatory: No): Foreign server (for example, obs_server) that has been created in the database. You need to specify this parameter if the ORC encoding format is used.

  • obs_dir (Mandatory: Yes): Directory for storing intermediate files, in the format {Bucket name}/{Directory name}, for example, obs-a1/dir1/subdir.

  • username (Mandatory: Yes): Username for connecting to the database.

  • password (Mandatory: Yes): Password for connecting to the database.

  • db_url (Mandatory: Yes): Database connection address, in the format ip:port/database, for example, 192.168.1.21:8000/test1.

  • table_name (Mandatory: Yes): Data table name. If no table is available, a table is automatically created.

  • max_record_num_per_file (Mandatory: Yes): Maximum number of records that can be stored in a file. If the number of records in a file is less than the maximum value, the file will be dumped to OBS after one dumping period.

  • dump_interval (Mandatory: Yes): Dumping period, in seconds.

  • delete_obs_temp_file (Mandatory: No): Whether to delete temporary files on OBS. The default value is true. If this parameter is set to false, files on OBS will not be deleted and you need to clear them manually.

  • max_dump_file_num (Mandatory: No): Maximum number of files that can be dumped at a time. If the number of files to be dumped is less than the maximum value, the files will be dumped to OBS after one dumping period.

Example

  • Dump files in CSV format.

    CREATE SINK STREAM car_infos (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT,
      car_timestamp LONG
    )
      WITH (
        type = "dws",
        region = "xxx",
        ak = "",
        sk = "",
        encode = "csv",
        field_delimiter = "\u0006\u0006\u0002",
        quote = "\u0007",
        obs_dir = "dli-append-2/dws",
        username = "",
        password = "",
        db_url = "192.168.1.12:8000/test1",
        table_name = "table1",
        max_record_num_per_file = "100",
        dump_interval = "10"
      );

  • Dump files in ORC format.

    CREATE SINK STREAM car_infos (
      car_id STRING,
      car_owner STRING,
      car_brand STRING,
      car_price INT,
      car_timestamp LONG
    )
      WITH (
        type = "dws",
        region = "xxx",
        ak = "",
        sk = "",
        encode = "orc",
        db_obs_server = "obs_server",
        obs_dir = "dli-append-2/dws",
        username = "",
        password = "",
        db_url = "192.168.1.12:8000/test1",
        table_name = "table1",
        max_record_num_per_file = "100",
        dump_interval = "10"
      );

DDS Sink Stream


Function

DLI outputs the job output data to Document Database Service (DDS).


DDS is compatible with the MongoDB protocol and is secure, highly available, reliable, scalable, and easy to use. It provides DB instance creation, scaling, redundancy, backup, restoration, monitoring, and alarm reporting functions with just a few clicks on the DDS console.


For more information about DDS, see the Document Database Service User Guide.


Prerequisites

  • Ensure that you have created a DDS instance on DDS using your account.

    For details about how to create a DDS instance, see Buying a DDS DB Instance in the Document Database Service Getting Started.

  • Currently, only cluster instances with SSL authentication disabled are supported. Replica set and single-node instances are not supported.
  • In this scenario, jobs must run on the dedicated queue of DLI. Ensure that the dedicated queue of DLI has been created.
  • Ensure that a datasource connection has been set up between the DLI dedicated queue and the DDS cluster, and security group rules have been configured based on the site requirements.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "dds",
    username = "",
    password = "",
    db_url = "",
    field_names = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. dds indicates that data is exported to DDS.

  • username (Mandatory: Yes): Username for connecting to the database.

  • password (Mandatory: Yes): Password for connecting to the database.

  • db_url (Mandatory: Yes): DDS instance access address, for example, ip1:port,ip2:port/database/collection.

  • field_names (Mandatory: Yes): Keys of the data fields to be inserted, in the format f1,f2,f3. Ensure that the keys correspond to the data columns in the sink stream.

  • batch_insert_data_num (Mandatory: No): Amount of data to be written in batches at a time. The value must be a positive integer. The default value is 10.

Example

Output data in the qualified_cars stream to the collectionTest collection of the dbtest database in DDS.

CREATE SINK STREAM qualified_cars (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT
)
  WITH (
    type = "dds",
    region = "xxx",
    db_url = "192.168.0.8:8635,192.168.0.130:8635/dbtest/collectionTest",
    username = "xxxxxxxxxx",
    password = "xxxxxxxxxx",
    field_names = "car_id,car_owner,car_age,average_speed,total_miles",
    batch_insert_data_num = "10"
  );
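With this configuration, each record of qualified_cars is written as one document to the collectionTest collection of the dbtest database, with the five keys in field_names mapped to the corresponding stream columns in order, and records are flushed in batches of ten.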

SMN Sink Stream


Function

DLI exports Flink job output data to SMN.

SMN provides reliable and flexible large-scale message notification services to DLI. It significantly simplifies system coupling and pushes messages to subscription endpoints based on requirements. SMN can be connected to other cloud services or integrated with any application that uses or generates message notifications to push messages over multiple protocols.

For more information about SMN, see the Simple Message Notification User Guide.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* ) 
  WITH (
    type = "smn",
    region = "",
    topic_urn = "",
    urn_column = "",
    message_subject = "",
    message_column = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. smn indicates that data is exported to SMN.

  • region (Mandatory: Yes): Region to which SMN belongs.

  • topic_urn (Mandatory: No): URN of an SMN topic, used for the static topic URN configuration. The SMN topic serves as the destination for short message notification and needs to be created in SMN. Either topic_urn or urn_column must be configured. If both are configured, the topic_urn setting takes precedence.

  • urn_column (Mandatory: No): Name of the field whose content is the topic URN, used for the dynamic topic URN configuration. Either topic_urn or urn_column must be configured. If both are configured, the topic_urn setting takes precedence.

  • message_subject (Mandatory: Yes): Subject of the message sent to SMN. This parameter is user-defined.

  • message_column (Mandatory: Yes): Name of the field in the sink stream whose contents serve as the message contents, which are user-defined. Currently, only text messages (default) are supported.

Precautions

None


Example

Data of stream over_speed_warning is exported to SMN.

// Static topic configuration
CREATE SINK STREAM over_speed_warning (
  over_speed_message STRING /* over speed message */
)
  WITH (
    type = "smn",
    region = "xxx",
    topic_urn = "xxx",
    message_subject = "message title",
    message_column = "over_speed_message"
  );

// Dynamic topic configuration
CREATE SINK STREAM over_speed_warning2 (
  over_speed_message STRING, /* over speed message */
  over_speed_urn STRING
)
  WITH (
    type = "smn",
    region = "xxx",
    urn_column = "over_speed_urn",
    message_subject = "message title",
    message_column = "over_speed_message"
  );
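In the dynamic configuration, the destination is resolved per record: the content of over_speed_message is pushed to the topic whose URN is carried in the over_speed_urn field of that record.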

CSS Elasticsearch Sink Stream

Function

DLI exports Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server that provides distributed multi-user capabilities. It delivers multiple functions, including full-text retrieval, structured search, analytics, aggregation, and highlighting. With Elasticsearch, you can achieve stable, reliable, real-time search. Elasticsearch applies to diversified scenarios, such as log analysis and site search.

CSS is a fully managed, distributed search service. It is fully compatible with open-source Elasticsearch and provides DLI with structured and unstructured data search, statistics, and report capabilities.

For more information about CSS, see the Cloud Search Service User Guide.

If the security mode is enabled when you create a CSS cluster, it cannot be disabled later.

Prerequisites

  • Ensure that you have created a cluster on CSS using your account. For details about how to create a cluster on CSS, see Creating a Cluster in the Cloud Search Service User Guide.
  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with CSS. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "es",
    region = "",
    cluster_address = "",
    es_index = "",
    es_type = "",
    es_fields = "",
    batch_insert_data_num = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. es indicates that data is exported to CSS.

  • region (Mandatory: Yes): Region where CSS is located.

  • cluster_address (Mandatory: Yes): Private access address of the CSS cluster, for example, x.x.x.x:x. Use commas (,) to separate multiple addresses.

  • es_index (Mandatory: Yes): Index of the data to be inserted. This parameter corresponds to the CSS index.

  • es_type (Mandatory: Yes): Type of the document to which data is to be inserted. This parameter corresponds to the CSS type. If the Elasticsearch version is 6.x, the value cannot start with an underscore (_). If the Elasticsearch version is 7.x and the CSS type is preset, the value must be _doc; otherwise, the value must comply with CSS specifications.

  • es_fields (Mandatory: Yes): Keys of the data fields to be inserted, in the format id,f1,f2,f3,f4. Ensure that the keys correspond to the data columns in the sink stream. If a random attribute field instead of a key is used, the keyword id does not need to be used, for example, f1,f2,f3,f4,f5. This parameter corresponds to the CSS field.

  • batch_insert_data_num (Mandatory: Yes): Amount of data to be written in batches at a time. The value must be a positive integer. The unit is 10 records. The maximum value allowed is 65536, and the default value is 10.

  • action (Mandatory: No): If the value is add, data is forcibly overwritten when the same ID is encountered. If the value is upsert, data is updated when the same ID is encountered. (If upsert is selected, id must be specified in the es_fields field.) The default value is add.

  • enable_output_null (Mandatory: No): Whether to generate an empty field. If this parameter is set to true, an empty field (with value null) is generated. If it is set to false, no empty field is generated. The default value is false.

  • max_record_num_cache (Mandatory: No): Maximum number of records that can be cached.

  • es_certificate_name (Mandatory: No): Name of the datasource authentication information. If the security mode is enabled and HTTPS is used by the Elasticsearch cluster, a certificate is required for access; in this case, set the datasource authentication type to CSS. If the security mode is enabled for the Elasticsearch cluster but HTTPS is disabled, the certificate, username, and password are required for access; in this case, set the datasource authentication type to Password.

Precautions

If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW for that record.

Example

Data of stream qualified_cars is exported to the cluster on CSS.
CREATE SINK STREAM qualified_cars (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT
)
  WITH (
    type = "es",
    region = "xxx",
    cluster_address = "192.168.0.212:9200",
    es_index = "car",
    es_type = "information",
    es_fields = "id,owner,age,speed,miles",
    batch_insert_data_num = "10"
  );
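For update-in-place semantics, the action parameter described in Table 1 can be set to upsert, in which case id must be included in es_fields. The following is a hedged sketch of such a variant; the stream and field names are illustrative:

CREATE SINK STREAM qualified_cars_upsert (
  car_id STRING,
  car_owner STRING,
  average_speed INT
)
  WITH (
    type = "es",
    region = "xxx",
    cluster_address = "192.168.0.212:9200",
    es_index = "car",
    es_type = "information",
    es_fields = "id,owner,speed",
    batch_insert_data_num = "10",
    action = "upsert"
  );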

DCS Sink Stream

Function

DLI exports Flink job output data to Redis of DCS. Redis is a storage system that supports multiple types of data structures, such as key-value pairs. It can be used in scenarios such as caching, event publish/subscribe, and high-speed queuing. Redis supports direct read/write of strings, hashes, lists, queues, and sets. Redis works with an in-memory dataset and provides persistence. For more information about Redis, visit https://redis.io/.

DCS provides Redis-compatible, secure, reliable, out-of-the-box, distributed cache capabilities that allow elastic scaling and convenient management. It meets users' requirements for high concurrency and fast data access.

For more information about DCS, see the Distributed Cache Service User Guide.

Prerequisites

  • Ensure that you have created a Redis cache instance on DCS using your account.

    For details about how to create a Redis cache instance, see Creating a DCS Instance in the Distributed Cache Service User Guide.

  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must be interconnected with the DCS clusters. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

  • If you use a VPC peering connection to access a DCS instance, the following restrictions also apply:
    • If network segment 172.16.0.0/12~24 is used during DCS instance creation, the DLI queue cannot be in any of the following network segments: 192.168.1.0/24, 192.168.2.0/24, and 192.168.3.0/24.
    • If network segment 192.168.0.0/16~24 is used during DCS instance creation, the DLI queue cannot be in any of the following network segments: 172.31.1.0/24, 172.31.2.0/24, and 172.31.3.0/24.
    • If network segment 10.0.0.0/8~24 is used during DCS instance creation, the DLI queue cannot be in any of the following network segments: 172.31.1.0/24, 172.31.2.0/24, and 172.31.3.0/24.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "dcs_redis",
    region = "",
    cluster_address = "",
    password = "",
    value_type = "",
    key_value = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. dcs_redis indicates that data is exported to DCS Redis.

  • region (Mandatory: Yes): Region where the DCS instance for storing the data is located.

  • cluster_address (Mandatory: Yes): Redis instance connection address.

  • password (Mandatory: No): Redis instance connection password. This parameter is not required if password-free access is used.

  • value_type (Mandatory: Yes): This parameter can be set to any one or a combination of the following options:
    • Data types, including string, list, hash, set, and zset
    • Commands used to set the expiration time of a key, including expire, pexpire, expireAt, and pexpireAt
    • Commands used to delete a key, including del and hdel

    Use commas (,) to separate multiple commands.

  • key_value (Mandatory: Yes): Key and value. The number of key_value pairs must be the same as the number of types specified by value_type, and key_value pairs are separated by semicolons (;). Both key and value can be specified through parameter configurations. The dynamic column name is represented by ${column name}.

Precautions

  • If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW for that record.
  • The characters ":", ",", ";", "$", "{", and "}" are used as special separators and cannot be escaped. Do not use them as common characters in keys or values; otherwise, parsing will be affected and program exceptions may occur.

Example

Data of stream qualified_cars is exported to the Redis cache instance on DCS.
CREATE SINK STREAM qualified_cars (
  car_id STRING, 
  car_owner STRING, 
  car_age INT, 
  average_speed DOUBLE, 
  total_miles DOUBLE
)
  WITH (
    type = "dcs_redis",
    cluster_address = "192.168.0.34:6379",
    password = "xxxxxxxx",
    value_type = "string; list; hash; set; zset",
    key_value = "${car_id}_str: ${car_owner}; name_list: ${car_owner}; ${car_id}_hash: {name:${car_owner}, age: ${car_age}}; name_set: ${car_owner}; math_zset: {${car_owner}:${average_speed}}"
  );
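For example, for a record with car_id A1, car_owner Tom, car_age 30, and average_speed 80.5, this definition writes the string key A1_str with value Tom, pushes Tom onto the list name_list, sets the name and age fields of the hash A1_hash, adds Tom to the set name_set, and adds the member Tom with score 80.5 to the sorted set math_zset.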

MRS Kafka Sink Stream

Function

DLI exports the output data of the Flink job to Kafka.

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput, built-in partitioning, data replicas, and fault tolerance, and is applicable to scenarios involving massive messages. In this scenario, Kafka clusters are deployed and hosted on MRS, which is powered by Apache Kafka.

Prerequisites

  • If the Kafka server listens on the port using hostname, you need to add the mapping between the hostname and IP address of the Kafka Broker node to the DLI queue. Contact the Kafka service deployment personnel to obtain the hostname and IP address of the Kafka Broker node. For details about how to add an IP-domain mapping, see Enhanced Datasource Connections > Modifying the Host Information in the Data Lake Insight User Guide.
  • Kafka is an offline cluster. You need to use the enhanced datasource connection function to connect Flink jobs to Kafka. You can also set security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "kafka",
    kafka_bootstrap_servers = "",
    kafka_topic = "",
    encode = "json"
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. kafka indicates that data is exported to Kafka.

  • kafka_bootstrap_servers (Mandatory: Yes): Port that connects DLI to Kafka. Use enhanced datasource connections to connect DLI queues with Kafka clusters.

  • kafka_topic (Mandatory: Yes): Kafka topic into which DLI writes data.

  • encode (Mandatory: Yes): Encoding format. Currently, json and user_defined are supported. encode_class_name and encode_class_parameter must be specified if this parameter is set to user_defined.

  • encode_class_name (Mandatory: No): If encode is set to user_defined, set this parameter to the name of the user-defined decoding class (including the complete package path). The class must inherit the DeserializationSchema class.

  • encode_class_parameter (Mandatory: No): If encode is set to user_defined, you can set this parameter to specify the input parameter of the user-defined decoding class. Only one parameter of the string type is supported.

  • krb_auth (Mandatory: No): Authentication name for creating a datasource connection authentication. This parameter is mandatory when Kerberos authentication is enabled. If Kerberos authentication is not enabled for the created MRS cluster, ensure that the /etc/hosts information of the master node in the MRS cluster is added to the host file of the DLI queue.

  • kafka_properties (Mandatory: No): Native Kafka properties, in the format key1=value1;key2=value2.

  • kafka_certificate_name (Mandatory: No): Name of the datasource authentication information. This parameter is valid only when the datasource authentication type is set to Kafka_SSL.

    NOTE:
    • If this parameter is specified, the service loads only the specified file and password under the authentication and automatically sets them in kafka_properties.
    • Other configuration information required for Kafka SSL authentication needs to be manually configured in the kafka_properties attribute.
+

Precautions

None


Example

Output data to Kafka.

  • Example 1:

    CREATE SINK STREAM kafka_sink (name STRING) 
      WITH (
        type = "kafka",
        kafka_bootstrap_servers = "ip1:port1,ip2:port2",
        kafka_topic = "testsink",
        encode = "json" 
      );

  • Example 2:

    CREATE SINK STREAM kafka_sink ( 
      a1 string,
      a2 string,
      a3 string,
      a4 INT
    ) // Output fields
      WITH (
        type = "kafka",
        kafka_bootstrap_servers = "192.x.x.x:9093, 192.x.x.x:9093, 192.x.x.x:9093",
        kafka_topic = "testflink", // Target topic
        encode = "csv", // Encoding format, which can be JSON or CSV.
        kafka_certificate_name = "Flink",
        kafka_properties_delimiter = ",",
        kafka_properties = "sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xxx\" password=\"xxx\";,sasl.mechanism=PLAIN,security.protocol=SASL_SSL"
      );
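  • Example 3: a hedged sketch for an MRS cluster with Kerberos authentication enabled. Per the krb_auth description in Table 1, a datasource connection authentication must be created beforehand; the name KRB_AUTH_NAME below is illustrative.

    CREATE SINK STREAM kafka_sink_krb (name STRING)
      WITH (
        type = "kafka",
        kafka_bootstrap_servers = "ip1:port1,ip2:port2",
        kafka_topic = "testsink",
        encode = "json",
        krb_auth = "KRB_AUTH_NAME"
      );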

MRS HBase Sink Stream


Function

DLI exports the output data of the Flink job to HBase of MRS.


Prerequisites

  • An MRS cluster has been created by using your account. DLI can interconnect with HBase clusters with Kerberos enabled.
  • In this scenario, jobs must run on the dedicated queue of DLI. Ensure that the dedicated queue of DLI has been created.
  • Ensure that a datasource connection has been set up between the DLI dedicated queue and the MRS cluster, and security group rules have been configured based on the site requirements.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

  • If you use MRS HBase, ensure that you have added the IP addresses of all hosts in the MRS cluster to the enhanced datasource connection.

    For details about how to add an IP-domain mapping, see Datasource Connections > Enhanced Datasource Connections > Modifying the Host Information in the Data Lake Insight User Guide.
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "mrs_hbase",
    region = "",
    cluster_address = "",
    table_name = "",
    table_columns = "",
    illegal_data_table = "",
    batch_insert_data_num = "",
    action = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. mrs_hbase indicates that data is exported to HBase of MRS.

  • region (Mandatory: Yes): Region where MRS resides.

  • cluster_address (Mandatory: Yes): ZooKeeper address of the cluster to which the data table to be inserted belongs, in the format ip1,ip2:port.

  • table_name (Mandatory: Yes): Name of the table where data is to be inserted. It can be specified through parameter configurations. For example, if you want one or more certain columns as part of the table name, use car_pass_inspect_with_age_${car_age}, where car_age is the column name.

  • table_columns (Mandatory: Yes): Columns to be inserted, in the format rowKey,f1:c1,f1:c2,f2:c1, where rowKey must be specified. If you do not want to add a column (for example, the third column) to the database, set this parameter to rowKey,f1:c1,,f2:c1.

  • illegal_data_table (Mandatory: No): If this parameter is specified, abnormal data (for example, data whose rowKey does not exist) will be written into that table. If it is not specified, abnormal data will be discarded. The rowKey value is taskNo_Timestamp followed by six random digits, and the schema is info:data, info:reason.

  • batch_insert_data_num (Mandatory: No): Number of data records to be written in batches at a time. The value must be a positive integer. The upper limit is 1000. The default value is 10.

  • action (Mandatory: No): Whether data is added or deleted. Available options include add and delete. The default value is add.

  • krb_auth (Mandatory: No): Authentication name for creating a datasource connection authentication. This parameter is mandatory when Kerberos authentication is enabled. Set this parameter to the corresponding cross-source authentication name.

    NOTE: Ensure that the /etc/hosts information of the master node in the MRS cluster is added to the host file of the DLI queue.

Precautions

None


Example

Output data to HBase of MRS.

CREATE SINK STREAM qualified_cars (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT
)
  WITH (
    type = "mrs_hbase",
    region = "xxx",
    cluster_address = "192.16.0.88,192.87.3.88:2181",
    table_name = "car_pass_inspect_with_age_${car_age}",
    table_columns = "rowKey,info:owner,,car:speed,car:miles",
    illegal_data_table = "illegal_data",
    batch_insert_data_num = "20",
    action = "add",
    krb_auth = "KRB_AUTH_NAME"
  );

Open-Source Kafka Sink Stream


Function

DLI exports the output data of the Flink job to Kafka.

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput, built-in partitioning, data replicas, and fault tolerance, and is applicable to scenarios involving massive messages.

Prerequisites

  • If the Kafka server listens on the port using hostname, you need to add the mapping between the hostname and IP address of the Kafka Broker node to the DLI queue. Contact the Kafka service deployment personnel to obtain the hostname and IP address of the Kafka Broker node. For details about how to add an IP-domain mapping, see Enhanced Datasource Connections > Modifying the Host Information in the Data Lake Insight User Guide.
  • Kafka is an offline cluster. You need to use the enhanced datasource connection function to connect Flink jobs to Kafka. You can also set security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  WITH (
    type = "kafka",
    kafka_bootstrap_servers = "",
    kafka_topic = "",
    encode = "json"
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. kafka indicates that data is exported to Kafka.

  • kafka_bootstrap_servers (Mandatory: Yes): Port that connects DLI to Kafka. Use enhanced datasource connections to connect DLI queues with Kafka clusters.

  • kafka_topic (Mandatory: Yes): Kafka topic into which DLI writes data.

  • encode (Mandatory: Yes): Data encoding format. The value can be csv, json, or user_defined.
    • field_delimiter must be specified if this parameter is set to csv.
    • encode_class_name and encode_class_parameter must be specified if this parameter is set to user_defined.

  • field_delimiter (Mandatory: No): If encode is set to csv, you can use this parameter to specify the separator between CSV fields. By default, the comma (,) is used.

  • encode_class_name (Mandatory: No): If encode is set to user_defined, set this parameter to the name of the user-defined decoding class (including the complete package path). The class must inherit the DeserializationSchema class.

  • encode_class_parameter (Mandatory: No): If encode is set to user_defined, you can set this parameter to specify the input parameter of the user-defined decoding class. Only one parameter of the string type is supported.

  • kafka_properties (Mandatory: No): Native Kafka properties, in the format key1=value1;key2=value2.

  • kafka_certificate_name (Mandatory: No): Name of the datasource authentication information. This parameter is valid only when the datasource authentication type is set to Kafka_SSL.

    NOTE:
    • If this parameter is specified, the service loads only the specified file and password under the authentication and automatically sets them in kafka_properties.
    • Other configuration information required for Kafka SSL authentication needs to be manually configured in the kafka_properties attribute.
+

Precautions

None


Example

Output the data in the kafka_sink stream to Kafka.

CREATE SINK STREAM kafka_sink (name STRING) 
  WITH (
    type = "kafka",
    kafka_bootstrap_servers = "ip1:port1,ip2:port2",
    kafka_topic = "testsink",
    encode = "json" 
  );
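Because csv encoding requires field_delimiter (see Table 1), a hedged CSV variant of the same sink might look as follows; the stream name, topic, and delimiter are illustrative:

CREATE SINK STREAM kafka_sink_csv (name STRING, age INT)
  WITH (
    type = "kafka",
    kafka_bootstrap_servers = "ip1:port1,ip2:port2",
    kafka_topic = "testsink_csv",
    encode = "csv",
    field_delimiter = ","
  );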

Creating a Temporary Stream

Function

A temporary stream is used to simplify SQL logic. If the SQL logic is complex, you can split it into multiple SQL statements concatenated by temporary streams. A temporary stream is just a logical concept and does not generate any data.

Syntax

CREATE TEMP STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* );

Example

create temp stream a2(attr1 int, attr2 string);
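A hedged sketch of how the temporary stream above could concatenate two SQL statements; source_stream, sink_stream, and the selected attributes are illustrative:

insert into a2
select attr_a, attr_b from source_stream where attr_a > 10;

insert into sink_stream
select attr1, attr2 from a2;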

Creating a Dimension Table


Creating a Redis Table

Create a Redis table to connect to the source stream.

For details about the JOIN syntax, see JOIN Between Stream Data and Table Data.

Syntax

CREATE TABLE table_id (key_attr_name STRING(, hash_key_attr_name STRING)?, value_attr_name STRING)
  WITH (
    type = "dcs_redis",
    cluster_address = ""(, password = "")?,
    value_type = "",
    key_column = ""(, hash_key_column = "")?
  );
+

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. The value dcs_redis indicates that data is exported to DCS Redis.

  • cluster_address (Mandatory: Yes): Redis instance connection address.

  • password (Mandatory: No): Redis instance connection password. This parameter is not required if password-free access is used.

  • value_type (Mandatory: Yes): Field data type. Supported data types include string, list, hash, set, and zset.

  • key_column (Mandatory: Yes): Column name of the Redis key attribute.

  • hash_key_column (Mandatory: No): If value_type is set to hash, this parameter must be specified as the column name of the level-2 key attribute.

  • cache_max_num (Mandatory: No): Maximum number of cached query results. The default value is 32768.

  • cache_time (Mandatory: No): Maximum duration for caching database query results in the memory, in milliseconds. The default value is 10000. The value 0 indicates that caching is disabled.

Precautions

  • Redis clusters are not supported.
  • Ensure that you have created a Redis cache instance on DCS using your account.
  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with the DCS instance. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Example

The Redis table is used to connect to the source stream.
CREATE TABLE table_a (attr1 string, attr2 string, attr3 string)
  WITH (
    type = "dcs_redis",
    value_type = "hash",
    key_column = "attr1",
    hash_key_column = "attr2",
    cluster_address = "192.168.1.238:6379",
    password = "xxxxxxxx"
  );
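A hedged sketch of joining a source stream with table_a, following the JOIN Between Stream Data and Table Data syntax referenced above; the car_infos source stream, the sink stream, and the join keys are illustrative and assume the join condition references the key columns:

INSERT INTO sink_stream
SELECT s.attr1, t.attr3
FROM car_infos AS s
  JOIN table_a AS t ON s.attr1 = t.attr1 AND s.attr2 = t.attr2;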

Creating an RDS Table

Create an RDS/DWS table to connect to the source stream.

For details about the JOIN syntax, see JOIN.

Prerequisites

  • Ensure that you have created a PostgreSQL or MySQL RDS instance in RDS.

    For details about how to create an RDS instance, see Creating an Instance in the Relational Database Service User Guide.

  • In this scenario, jobs must run on the dedicated queue of DLI. Therefore, DLI must interconnect with the enhanced datasource connection that has been connected with the RDS instance. You can also set the security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

Syntax

CREATE TABLE table_id (
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_price INT
)
  WITH (
    type = "rds",
    username = "",
    password = "",
    db_url = "",
    table_name = ""
  );

Keyword

Table 1 Keyword description

  • type (Mandatory: Yes): Output channel type. The value rds indicates that data is stored to RDS.

  • username (Mandatory: Yes): Username for connecting to the database.

  • password (Mandatory: Yes): Password for connecting to the database.

  • db_url (Mandatory: Yes): Database connection address, for example, {database_type}://ip:port/database. Currently, two types of database connections are supported: MySQL and PostgreSQL.
    • MySQL: 'mysql://ip:port/database'
    • PostgreSQL: 'postgresql://ip:port/database'

    NOTE: To create a DWS dimension table, set the database connection address to a DWS database address. If the DWS database version is later than 8.1.0, the open-source PostgreSQL driver cannot be used for the connection; use the GaussDB driver instead.

  • table_name (Mandatory: Yes): Name of the database table for data query.

  • db_columns (Mandatory: No): Mapping of stream attribute fields between the sink stream and the database table. This parameter is mandatory when the stream attribute fields in the sink stream do not match those in the database table. The parameter value is in the format dbtable_attr1,dbtable_attr2,dbtable_attr3.

  • cache_max_num (Mandatory: No): Maximum number of cached query results. The default value is 32768.

  • cache_time (Mandatory: No): Maximum duration for caching database query results in the memory, in milliseconds. The default value is 10000. The value 0 indicates that caching is disabled.

Example

The RDS table is used to connect to the source stream.
CREATE SOURCE STREAM car_infos (
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_price INT
)
  WITH (
    type = "dis",
    region = "",
    channel = "dliinput",
    encode = "csv",
    field_delimiter = ","
  );

CREATE TABLE db_info (
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_price INT
)
  WITH (
    type = "rds",
    username = "root",
    password = "******",
    db_url = "postgresql://192.168.0.0:2000/test1",
    table_name = "car"
  );

CREATE SINK STREAM audi_cheaper_than_30w (
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_price INT
)
  WITH (
    type = "dis",
    region = "",
    channel = "dlioutput",
    partition_key = "car_owner",
    encode = "csv",
    field_delimiter = ","
  );

INSERT INTO audi_cheaper_than_30w
SELECT a.car_id, b.car_owner, b.car_brand, b.car_price 
FROM car_infos as a join db_info as b on a.car_id = b.car_id;

To create a DWS dimension table, set the database connection address to a DWS database address. If the DWS database version is later than 8.1.0, the open-source PostgreSQL driver cannot be used for connection. You need to use the GaussDB driver for connection.


Modifying a Table


Adding a Column


Function

This statement is used to add one or more new columns to a table.


Syntax

ALTER TABLE [db_name.]table_name ADD COLUMNS (col_name1 col_type1 [COMMENT col_comment1], ...);

Keyword

  • ADD COLUMNS: columns to add
  • COMMENT: column description

Parameters

Table 1 Parameter description

  • db_name: Database name, which consists of letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_).

  • table_name: Table name.

  • col_name: Column name.

  • col_type: Field type.

  • col_comment: Column description.

Precautions

Do not run this SQL statement concurrently. Otherwise, columns may be overwritten.


Example

ALTER TABLE t1 ADD COLUMNS (column2 int, column3 string);
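Because the syntax allows an optional COMMENT for each new column, a variant with column descriptions looks like this (the table and column names are illustrative):

ALTER TABLE t1 ADD COLUMNS (column4 int COMMENT 'vehicle age', column5 string COMMENT 'owner name');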

Common Configuration Items of Batch SQL Jobs


This section describes the common configuration items of the SQL syntax for DLI batch jobs.

Table 1 Common configuration items

  • spark.sql.files.maxRecordsPerFile (default value: 0): Maximum number of records to be written into a single file. If the value is zero or negative, there is no limit.

  • spark.sql.autoBroadcastJoinThreshold (default value: 209715200): Maximum size, in bytes, of a table that is broadcast to all worker nodes when a join is executed. You can set this parameter to -1 to disable broadcasting.

    NOTE: Currently, only the configuration unit metastore table that runs the ANALYZE TABLE COMPUTE statistics noscan command and the file-based data source table that directly calculates statistics based on data files are supported.

  • spark.sql.shuffle.partitions (default value: 200): Default number of partitions used when shuffling data for joins or aggregations.

  • spark.sql.dynamicPartitionOverwrite.enabled (default value: false): Whether DLI overwrites only the partitions into which data is actually written at runtime. If this parameter is set to false, all partitions that meet the specified condition are deleted before the overwrite starts; for example, using INSERT OVERWRITE to write partition 2021-02 to a partitioned table that has the 2021-01 partition would also delete the 2021-01 partition. If this parameter is set to true, DLI does not delete partitions before the overwrite starts.

  • spark.sql.files.maxPartitionBytes (default value: 134217728): Maximum number of bytes to be packed into a single partition when a file is read.

  • spark.sql.badRecordsPath (default value: -): Path of bad records.
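These items follow standard Spark SQL configuration semantics. As a hedged sketch, a job might tune them before an overwrite as follows; whether your DLI environment accepts SET statements in the SQL script, rather than as job-level parameters, is an assumption to verify, and the table names are illustrative:

SET spark.sql.shuffle.partitions=400;
SET spark.sql.dynamicPartitionOverwrite.enabled=true;

-- With dynamic partition overwrite enabled, only the partitions
-- produced by this query are replaced.
INSERT OVERWRITE TABLE sales PARTITION (dt)
SELECT id, amount, dt FROM sales_staging;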

File System Sink Stream (Recommended)


Function

You can create a sink stream to export data to a file system such as HDFS or OBS. After the data is generated, a non-DLI table can be created directly according to the generated directory. The table can be processed through DLI SQL, and the output data directory can be stored in partitioned tables. It is applicable to scenarios such as data dumping, big data analysis, data backup, and active, deep, or cold archiving.


OBS is an object-based storage service. It provides massive, secure, highly reliable, and low-cost data storage capabilities.


Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
  [PARTITIONED BY (attr_name (',' attr_name)*)]
  WITH (
    type = "filesystem",
    file.path = "obs://bucket/xx",
    encode = "parquet",
    ak = "",
    sk = ""
  );

Keywords

Table 1 Keyword description

  • type (Mandatory: Yes): Output stream type. If type is set to filesystem, data is exported to the file system.

  • file.path (Mandatory: Yes): Output directory, in the form schema://file.path. Currently, the schema supports only OBS and HDFS.
    • If schema is set to obs, data is exported to OBS.
    • If schema is set to hdfs, data is exported to HDFS. A proxy user needs to be configured for HDFS. For details, see HDFS Proxy User Configuration. Example: hdfs://node-master1sYAx:9820/user/car_infos, where node-master1sYAx:9820 is the name of the node where the NameNode is located.

  • encode (Mandatory: Yes): Output data encoding format. Currently, only the parquet and csv formats are supported.
    • When schema is set to obs, the encoding format of the output data can only be parquet.
    • When schema is set to hdfs, the output data can be encoded in Parquet or CSV format.

  • ak (Mandatory: No): Access key ID. This parameter is mandatory when data is exported to OBS. Global variables can be used to mask the access key used for OBS authentication.

  • sk (Mandatory: No): Secret access key. This parameter is mandatory when data is exported to OBS. Global variables can be used to mask this sensitive information.

  • krb_auth (Mandatory: No): Authentication name for creating a datasource connection authentication. This parameter is mandatory when Kerberos authentication is enabled. If Kerberos authentication is not enabled for the created MRS cluster, ensure that the /etc/hosts information of the master node in the MRS cluster is added to the host file of the DLI queue.

  • field_delimiter (Mandatory: No): Separator used to separate every two attributes. This parameter needs to be configured if the CSV encoding format is adopted. It can be user-defined, for example, a comma (,).

Precautions

  • To ensure job consistency, enable checkpointing if the Flink job uses a file system output stream.
  • To avoid data loss or data coverage, enable automatic or manual restart upon job exceptions, and enable Restore Job from Checkpoint.
  • Set the checkpoint interval after weighing the trade-off among real-time file output, file size, and recovery time, for example, 10 minutes.
  • Two modes are supported.
    • At least once: Events are processed at least once.
    • Exactly once: Events are processed only once.
  • When you use file system sink streams to write data into OBS, do not use multiple jobs for the same directory.
    • The default behavior of an OBS bucket is overwriting, which may cause data loss.
    • The default behavior of an OBS parallel file system bucket is appending, which may cause data confusion.

    You should carefully select the OBS bucket because of the preceding behavior differences. Data exceptions may occur after an abnormal job restart.

HDFS Proxy User Configuration

  1. Log in to the MRS management page.
  2. Select the HDFS NameNode configuration of MRS and add configuration parameters in the Customization area.

    In the preceding configuration, myname in the core-site values hadoop.proxyuser.myname.hosts and hadoop.proxyuser.myname.groups is the name of the krb authentication user.

    Ensure that the permission on the HDFS data write path is 777.

  3. After the configuration is complete, click Save.

Example

  • Example 1:

    The following example dumps the car_infos data to OBS, with the buyday field as the partition field and parquet as the encoding format.

    create sink stream car_infos (
      carId string,
      carOwner string,
      average_speed double,
      buyday string
      ) partitioned by (buyday)
      with (
        type = "filesystem",
        file.path = "obs://obs-sink/car_infos",
        encode = "parquet",
        ak = "{{myAk}}",
        sk = "{{mySk}}"
    );

    The data is ultimately stored in OBS, in the directory obs://obs-sink/car_infos/buyday=xx/part-x-x.

    After the data is generated, an OBS partitioned table can be created for subsequent batch processing through the following SQL statements:

    1. Create an OBS partitioned table.

      create table car_infos (
        carId string,
        carOwner string,
        average_speed double
      )
        partitioned by (buyday string)
        stored as parquet
        location 'obs://obs-sink/car_infos';

    2. Restore partition information from the associated OBS path.

      alter table car_infos recover partitions;

  • Example 2:

    The following example dumps the car_infos data to HDFS, with the buyday field as the partition field and csv as the encoding format.

    create sink stream car_infos (
      carId string,
      carOwner string,
      average_speed double,
      buyday string
      ) partitioned by (buyday)
      with (
        type = "filesystem",
        file.path = "hdfs://node-master1sYAx:9820/user/car_infos",
        encode = "csv",
        field_delimiter = ","
    );

    The data is ultimately stored in HDFS, in the directory /user/car_infos/buyday=xx/part-x-x.
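    Paralleling the OBS table creation in Example 1, the sketch below shows a table over the HDFS CSV output; the delimited-text clauses are standard Spark/Hive DDL and are an assumption to verify against your environment.

    create table car_infos_csv (
      carId string,
      carOwner string,
      average_speed double
    )
      partitioned by (buyday string)
      row format delimited fields terminated by ','
      stored as textfile
      location 'hdfs://node-master1sYAx:9820/user/car_infos';

    alter table car_infos_csv recover partitions;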


DMS Source Stream

DMS (Distributed Message Service) is a message middleware service based on distributed, high-availability clustering technology. It provides reliable, scalable, fully managed queues for sending, receiving, and storing messages. DMS for Kafka is a message queuing service based on Apache Kafka. This service provides Kafka premium instances.

The source stream can read data from a Kafka instance as the input data of jobs. The syntax for creating a Kafka source stream is the same as that for creating an open-source Apache Kafka source stream. For details, see Open-Source Kafka Source Stream.

DMS Sink Stream

+

DMS (Distributed Message Service) is a message middleware service based on distributed, high-availability clustering technology. It provides reliable, scalable, fully managed queues for sending, receiving, and storing messages. DMS for Kafka is a message queuing service based on Apache Kafka. This service provides Kafka premium instances.

+

DLI can write the job output data into the Kafka instance. The syntax for creating a Kafka sink stream is the same as that for creating an open source Apache Kafka sink stream. For details, see MRS Kafka Sink Stream.

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0272.html b/docs/dli/sqlreference/dli_08_0272.html new file mode 100644 index 00000000..035546fe --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0272.html @@ -0,0 +1,21 @@ + + + +

Custom Stream Ecosystem

+ +

+
+ +
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0273.html b/docs/dli/sqlreference/dli_08_0273.html new file mode 100644 index 00000000..ba9d7fbe --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0273.html @@ -0,0 +1,101 @@ + + +

Custom Source Stream

+

Compile code to obtain data from the desired cloud ecosystem or open-source ecosystem as the input data of Flink jobs.

+

Syntax

CREATE SOURCE STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
+  WITH (
+    type = "user_defined",
+    type_class_name = "",
+    type_class_parameter = ""
+  )
+  (TIMESTAMP BY timeindicator (',' timeindicator)?);timeindicator:PROCTIME '.' PROCTIME| ID '.' ROWTIME

Keyword

+
+ + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Data source type. The value user_defined indicates that the data source is a user-defined data source.

+

type_class_name

+

Yes

+

Name of the source class for obtaining source data. The value must contain the complete package path.

+

type_class_parameter

+

Yes

+

Input parameter of the user-defined source class. Only one parameter of the string type is supported.

+
+
+
+

Precautions

The user-defined source class must inherit the RichParallelSourceFunction class and specify the data type as Row, for example, public class MySource extends RichParallelSourceFunction<Row>{}. You need to implement its open, run, and close methods.
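
A minimal sketch of such a class, assuming DLI passes type_class_parameter to a single-String constructor (the package, class name, and period semantics are illustrative; cancel() is also implemented because the SourceFunction interface requires it):

package mySourceSink;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.types.Row;

public class MySource extends RichParallelSourceFunction<Row> {
    private volatile boolean running = true;
    private final long periodMillis;

    // type_class_parameter arrives as a single string; this sketch treats it
    // as the emission period in seconds.
    public MySource(String parameter) {
        this.periodMillis = Long.parseLong(parameter) * 1000L;
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);   // initialize any clients or state here
    }

    @Override
    public void run(SourceFunction.SourceContext<Row> ctx) throws Exception {
        int value = 1;            // the single INT field, starting at 1
        while (running) {
            Row row = new Row(1); // arity must match the stream definition
            row.setField(0, value++);
            ctx.collect(row);
            Thread.sleep(periodMillis);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }

    @Override
    public void close() throws Exception {
        super.close();            // release resources here
    }
}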

+

Dependency pom:

+
<dependency>
+	<groupId>org.apache.flink</groupId>
+	<artifactId>flink-streaming-java_2.11</artifactId>
+	<version>${flink.version}</version>
+	<scope>provided</scope>
+</dependency>
+<dependency>
+	<groupId>org.apache.flink</groupId>
+	<artifactId>flink-core</artifactId>
+	<version>${flink.version}</version>
+	<scope>provided</scope>
+</dependency>
+
+

Example

A data record is generated in each period. Each record contains a single field of the INT type, starting at 1. The period is 60 seconds and is specified by the input parameter.

+
+
CREATE SOURCE STREAM user_in_data (
+	count INT
+     )
+  WITH (  
+	type = "user_defined", 
+	type_class_name = "mySourceSink.MySource", 
+	type_class_parameter = "60"
+      )
+      TIMESTAMP BY car_timestamp.rowtime;		

To customize the implementation of the source class, package the class into a JAR file and upload it as a UDF on the SQL editing page.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0274.html b/docs/dli/sqlreference/dli_08_0274.html new file mode 100644 index 00000000..4bb9610b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0274.html @@ -0,0 +1,89 @@ + + +

Custom Sink Stream

+

Compile code to write the data processed by DLI to a specified cloud ecosystem or open-source ecosystem.

+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
+  WITH (
+    type = "user_defined",
+    type_class_name = "",
+    type_class_parameter = ""
+  );
+
+

Keyword

+
+ + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Sink type. The value user_defined indicates that the sink is a user-defined sink.

+

type_class_name

+

Yes

+

Name of the sink class for writing result data. The value must contain the complete package path.

+

type_class_parameter

+

Yes

+

Input parameter of the user-defined sink class. Only one parameter of the string type is supported.

+
+
+
+

Precautions

The user-defined sink class must inherit the RichSinkFunction class and specify the data type as Row, for example, public class MySink extends RichSinkFunction<Row>{}. You need to implement its open, invoke, and close methods.
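
A minimal sketch of such a class, again assuming a single-String constructor for type_class_parameter (package and class name illustrative; this sketch only prints each record instead of writing to an external system):

package mySourceSink;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.types.Row;

public class MySink extends RichSinkFunction<Row> {
    private final String parameter;

    // type_class_parameter arrives as a single string (see Table 1).
    public MySink(String parameter) {
        this.parameter = parameter;
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);   // create the client for the target system here
    }

    @Override
    public void invoke(Row value, Context context) throws Exception {
        // Encode and emit one record; a real sink would write to its target.
        System.out.println(value.toString());
    }

    @Override
    public void close() throws Exception {
        super.close();            // release client resources here
    }
}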

+

Dependency pom:

+
<dependency>
+	<groupId>org.apache.flink</groupId>
+	<artifactId>flink-streaming-java_2.11</artifactId>
+	<version>${flink.version}</version>
+	<scope>provided</scope>
+</dependency>
+<dependency>
+	<groupId>org.apache.flink</groupId>
+	<artifactId>flink-core</artifactId>
+	<version>${flink.version}</version>
+	<scope>provided</scope>
+</dependency>
+
+

Example

The following example writes CSV-encoded data to a DIS stream through a custom sink.

+
+
CREATE SINK STREAM user_out_data (
+	count INT
+)
+  WITH (  
+	type = "user_defined", 
+	type_class_name = "mySourceSink.MySink", 
+	type_class_parameter = ""
+      );

To customize the implementation of the sink class, package the class into a JAR file and upload it as a UDF on the SQL editing page.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0275.html b/docs/dli/sqlreference/dli_08_0275.html new file mode 100644 index 00000000..c8eaaaa9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0275.html @@ -0,0 +1,132 @@ + + +

SQL Syntax Overview of Stream Jobs

+

This section lists the Flink SQL syntax provided by DLI. For details about the parameters and examples, see the syntax description of each statement.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 SQL Syntax of stream jobs

Classification

+

Function

+

Creating a Source Stream

+

CloudTable HBase Source Stream

+

Creating a Source Stream

+

DIS Source Stream

+

DMS Source Stream

+

Creating a Source Stream

+

MRS Kafka Source Stream

+

Open-Source Kafka Source Stream

+

OBS Source Stream

+

Creating a Sink Stream

+

CloudTable HBase Sink Stream

+

Creating a Sink Stream

+

CloudTable OpenTSDB Sink Stream

+

Creating a Sink Stream

+

+

+

+

+

+

+

CSS Elasticsearch Sink Stream

+

DCS Sink Stream

+

DDS Sink Stream

+

DIS Sink Stream

+

DMS Sink Stream

+

DWS Sink Stream (JDBC Mode)

+

DWS Sink Stream (OBS-based Dumping)

+

Creating a Sink Stream

+

MRS HBase Sink Stream

+

MRS Kafka Sink Stream

+

Open-Source Kafka Sink Stream

+

OBS Sink Stream

+

RDS Sink Stream

+

Creating a Sink Stream

+

SMN Sink Stream

+

File System Sink Stream (Recommended)

+

Creating a Temporary Stream

+

Creating a Temporary Stream

+

Creating a Dimension Table

+

+

Creating a Redis Table

+

Creating an RDS Table

+

Custom Stream Ecosystem

+

+

Custom Source Stream

+

Custom Sink Stream

+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0281.html b/docs/dli/sqlreference/dli_08_0281.html new file mode 100644 index 00000000..8428e83b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0281.html @@ -0,0 +1,27 @@ + + +

Displaying Function Details

+

Function

Displays information about a specified function.

+
+

Syntax

DESCRIBE FUNCTION [EXTENDED] [db_name.] function_name;
+

Keywords

EXTENDED: displays extended usage information.

+
+

Precautions

The metadata (implementation class and usage) of an existing function is returned. If the function does not exist, the system reports an error.

+
+

Example

Displays information about the mergeBill function.

+
DESCRIBE FUNCTION mergeBill;
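
To also display extended usage information, add the EXTENDED keyword from the syntax above:

DESCRIBE FUNCTION EXTENDED mergeBill;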
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0282.html b/docs/dli/sqlreference/dli_08_0282.html new file mode 100644 index 00000000..c0f469e0 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0282.html @@ -0,0 +1,25 @@ + + + +

User-Defined Functions

+ +

+
+ +
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0283.html b/docs/dli/sqlreference/dli_08_0283.html new file mode 100644 index 00000000..92f8b2e7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0283.html @@ -0,0 +1,37 @@ + + +

Creating a Function

+

Function

DLI allows you to create and use user-defined functions (UDFs) and user-defined table functions (UDTFs) in Spark jobs.

+
+

Syntax

CREATE [TEMPORARY] FUNCTION [db_name.]function_name AS class_name
+  [USING resource,...]
+
+resource: 
+  : (JAR|FILE|ARCHIVE) file_uri
+

Precautions

  • If a function with the same name exists in the database, the system reports an error.
  • Only the Hive syntax can be used to create functions.
  • If two UDFs are created with the same class name, they conflict even though their package names differ. Avoid this, because it causes job execution failures.
+
+

Keywords

  • TEMPORARY: The created function is available only in the current session and is not persisted to the underlying metastore, if any. The database name cannot be specified for a temporary function.
  • USING <resources>: resources to be loaded. It can be a list of JARs, files, or URIs.
+
+

Example

Create the mergeBill function.

+
CREATE FUNCTION mergeBill AS 'com.xxx.hiveudf.MergeBill'
+  using jar 'obs://onlyci-7/udf/MergeBill.jar';
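
A session-scoped variant using TEMPORARY (per the keywords above, no database name may be specified):

CREATE TEMPORARY FUNCTION mergeBill AS 'com.xxx.hiveudf.MergeBill'
  using jar 'obs://onlyci-7/udf/MergeBill.jar';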
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0284.html b/docs/dli/sqlreference/dli_08_0284.html new file mode 100644 index 00000000..07f32b28 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0284.html @@ -0,0 +1,27 @@ + + +

Deleting a Function

+

Function

This statement is used to delete functions.

+
+

Syntax

DROP [TEMPORARY] FUNCTION [IF EXISTS] [db_name.] function_name;
+
+

Keywords

  • TEMPORARY: Indicates whether the function to be deleted is a temporary function.
  • IF EXISTS: Prevents an error from being reported if the function to be deleted does not exist.
+
+

Precautions

  • An existing function is deleted. If the function to be deleted does not exist, the system reports an error.
  • Only the Hive syntax is supported.
+
+

Example

Delete the mergeBill function.

+
DROP FUNCTION mergeBill;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0285.html b/docs/dli/sqlreference/dli_08_0285.html new file mode 100644 index 00000000..a7b45259 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0285.html @@ -0,0 +1,57 @@ + + +

Displaying All Functions

+

Function

View all functions in the current project.

+
+

Syntax

SHOW [USER|SYSTEM|ALL] FUNCTIONS ([LIKE] regex | [db_name.] function_name);

+

In the preceding statement, regex is a regular expression. For details about its parameters, see Table 1.

+ + + + + + + + + + + + + +
Table 1 Parameter examples

Expression

+

Description

+

'xpath*'

+

Matches all functions whose names start with xpath.

+

Example: SHOW FUNCTIONS LIKE 'xpath*';

+

Matches functions whose names start with xpath, including xpath, xpath_int, and xpath_string.

+

'x[a-z]+'

+

Matches functions whose names start with x and are followed by one or more characters from a to z. For example, xpath and xtest can be matched.

+

'x.*h'

+

Matches functions whose names start with x, end with h, and contain one or more characters in the middle. For example, xpath and xtesth can be matched.

+
+
+

For details about other expressions, see the official website.

+
+

Keywords

LIKE: This qualifier is used only for compatibility and has no actual effect.

+
+

Precautions

Functions that match the given regular expression or function name are displayed. If no regular expression or name is provided, all functions are displayed. If USER or SYSTEM is specified, user-defined or system-defined Spark SQL functions are displayed, respectively.

+
+

Example

This statement is used to view all functions.

+
SHOW FUNCTIONS;
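
A filtered variant reusing the regular expression from Table 1:

SHOW FUNCTIONS LIKE 'xpath*';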
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0286.html b/docs/dli/sqlreference/dli_08_0286.html new file mode 100644 index 00000000..662c75ac --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0286.html @@ -0,0 +1,148 @@ + + +

MRS OpenTSDB Sink Stream

+

Function

DLI exports the output data of the Flink job to OpenTSDB of MRS.

+
+

Prerequisites

  • OpenTSDB has been installed in the MRS cluster.
  • In this scenario, jobs must run on a dedicated DLI queue, so an enhanced datasource connection between DLI and the MRS cluster is required. You can also set security group rules as required.

    For details about how to create an enhanced datasource connection, see Enhanced Datasource Connections in the Data Lake Insight User Guide.

    +

    For details about how to configure security group rules, see Security Group in the Virtual Private Cloud User Guide.

    +
+
+

Syntax

CREATE SINK STREAM stream_id (attr_name attr_type (',' attr_name attr_type)* )
+  WITH (
+    type = "opentsdb",
+    region = "",
+    tsdb_metrics = "",
+    tsdb_timestamps = "",
+    tsdb_values = "",
+    tsdb_tags = "",
+    batch_insert_data_num = ""
+  )
+
+

Keywords

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Keyword description

Parameter

+

Mandatory

+

Description

+

type

+

Yes

+

Sink channel type. opentsdb indicates that data is exported to OpenTSDB of MRS.

+

region

+

Yes

+

Region where MRS resides.

+

tsdb_link_address

+

Yes

+

Service address of the OpenTSDB instance in MRS. The format is http://ip:port or https://ip:port.

+
NOTE:

If tsd.https.enabled is set to true, HTTPS must be used. Note that HTTPS does not support certificate authentication.

+
+

tsdb_metrics

+

Yes

+

Metric of a data point, which can be specified through parameter configurations.

+

tsdb_timestamps

+

Yes

+

Timestamp of a data point. The data type can be LONG, INT, SHORT, or STRING. Only dynamic columns are supported.

+

tsdb_values

+

Yes

+

Value of a data point. The data type can be SHORT, INT, LONG, FLOAT, DOUBLE, or STRING. Dynamic columns or constant values are supported.

+

tsdb_tags

+

Yes

+

Tags of a data point. A data point carries at least one and at most eight tags, which can be specified through parameter configurations.

+

batch_insert_data_num

+

No

+

Number of data records to be written in batches at a time. The value must be a positive integer. The upper limit is 65536. The default value is 8.

+
+
+
+

Precautions

If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW for that record.

+
+

Example

Output data of stream weather_out to OpenTSDB of MRS.

+
CREATE SINK STREAM weather_out (
+  timestamp_value LONG, /* Time */
+  temperature FLOAT, /* Temperature value */
+  humidity FLOAT, /* Humidity */
+  location STRING /* Location */
+)
+  WITH (
+    type = "opentsdb",
+    region = "xxx",
+    tsdb_link_address = "https://x.x.x.x:4242",
+    tsdb_metrics = "weather",
+    tsdb_timestamps = "${timestamp_value}",
+    tsdb_values = "${temperature}; ${humidity}",
+    tsdb_tags = "location:${location},signify:temperature; location:${location},signify:humidity",
+    batch_insert_data_num = "10"
+);
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0289.html b/docs/dli/sqlreference/dli_08_0289.html new file mode 100644 index 00000000..e1973ed2 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0289.html @@ -0,0 +1,20 @@ + + +

Flink Opensource SQL 1.10 Syntax Reference

+

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0290.html b/docs/dli/sqlreference/dli_08_0290.html new file mode 100644 index 00000000..72908259 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0290.html @@ -0,0 +1,17 @@ + + +

Constraints and Definitions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0291.html b/docs/dli/sqlreference/dli_08_0291.html new file mode 100644 index 00000000..6c00f550 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0291.html @@ -0,0 +1,11 @@ + + +

Supported Data Types

+

STRING, BOOLEAN, BYTES, DECIMAL, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE, INTERVAL, ARRAY, MULTISET, MAP, ROW

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0292.html b/docs/dli/sqlreference/dli_08_0292.html new file mode 100644 index 00000000..15139873 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0292.html @@ -0,0 +1,17 @@ + + +

Syntax Definition

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0293.html b/docs/dli/sqlreference/dli_08_0293.html new file mode 100644 index 00000000..05d54931 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0293.html @@ -0,0 +1,19 @@ + + +

Data Definition Language (DDL)

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0294.html b/docs/dli/sqlreference/dli_08_0294.html new file mode 100644 index 00000000..c0edbb13 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0294.html @@ -0,0 +1,75 @@ + + +

CREATE TABLE

+

Syntax

CREATE TABLE table_name
+  (
+    { <column_definition> | <computed_column_definition> }[ , ...n]
+    [ <watermark_definition> ]
+    [ <table_constraint> ][ , ...n]
+  )
+  [COMMENT table_comment]
+  [PARTITIONED BY (partition_column_name1, partition_column_name2, ...)]
+  WITH (key1=val1, key2=val2, ...)
+
+<column_definition>:
+  column_name column_type [ <column_constraint> ] [COMMENT column_comment]
+
+<column_constraint>:
+  [CONSTRAINT constraint_name] PRIMARY KEY NOT ENFORCED
+
+<table_constraint>:
+  [CONSTRAINT constraint_name] PRIMARY KEY (column_name, ...) NOT ENFORCED
+
+<computed_column_definition>:
+  column_name AS computed_column_expression [COMMENT column_comment]
+
+<watermark_definition>:
+  WATERMARK FOR rowtime_column_name AS watermark_strategy_expression
+
+<source_table>:
+  [catalog_name.][db_name.]table_name
+
+

Function

This clause is used to create a table with a specified name.

+
+

Description

COMPUTED COLUMN

+

A computed column is a virtual column generated using column_name AS computed_column_expression. A computed column evaluates an expression that can reference other columns declared in the same table. The column itself is not physically stored within the table. A computed column could be defined using cost AS price * quantity. This expression can contain any combination of physical columns, constants, functions, or variables, but cannot contain any subquery.

+

In Flink, a computed column is used to define the time attribute in CREATE TABLE statements. A processing time attribute can be defined easily via proc AS PROCTIME() using the system's PROCTIME() function. The event time column may be obtained from an existing field. In this case, you can use the computed column to obtain event time. For example, if the original field is not of the TIMESTAMP(3) type or is nested in a JSON string, you can use computed columns.
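
A hedged sketch combining both cases (table and field names are illustrative; TO_TIMESTAMP derives a TIMESTAMP(3) event-time column from a STRING field):

CREATE TABLE MyTable (
  log_ts STRING,
  ts AS TO_TIMESTAMP(log_ts),  -- event time computed from an existing field
  proc AS PROCTIME()           -- processing time attribute
) WITH ( . . . );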

+

Notes:

+
  • An expression that defines a computed column in a source table is calculated after data is read from the data source. The column can then be used in SELECT statements.
  • A computed column cannot be the target of an INSERT statement. In an INSERT statement, the schema of the SELECT statement must match the schema of the target table excluding its computed columns.
+
+

WATERMARK

+

The WATERMARK clause defines the event time attribute of a table and takes the form WATERMARK FOR rowtime_column_name AS watermark_strategy_expression.

+

rowtime_column_name defines an existing column that is marked as the event time attribute of the table. The column must be of the TIMESTAMP(3) type and must be the top-level column in the schema. It can also be a computed column.

+

watermark_strategy_expression defines the watermark generation strategy. It allows arbitrary non-query expression, including computed columns, to calculate the watermark. The expression return type must be TIMESTAMP(3), which represents the timestamp since the Epoch. The returned watermark will be emitted only if it is non-null and its value is larger than the previously emitted local watermark (to preserve the contract of ascending watermarks). The watermark generation expression is evaluated by the framework for every record. The framework will periodically emit the largest generated watermark. If the current watermark is still identical to the previous one, or is null, or the value of the returned watermark is smaller than that of the last emitted one, then no new watermark will be emitted. Watermark is emitted in an interval defined by pipeline.auto-watermark-interval configuration. If watermark interval is 0 ms, the generated watermarks will be emitted per-record if it is not null and greater than the last emitted one.

+

When using event time semantics, tables must contain an event time attribute and watermarking strategy.

+

Flink provides several commonly used watermark strategies.

+
  • Strictly ascending timestamps: WATERMARK FOR rowtime_column AS rowtime_column.

    Emits a watermark of the maximum observed timestamp so far. Rows that have a timestamp bigger than the max timestamp are not late.

    +
  • Ascending timestamps: WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '0.001' SECOND.

    Emits a watermark of the maximum observed timestamp so far minus 1. Rows that have a timestamp bigger than or equal to the max timestamp are not late.

    +
  • Bounded out of orderness timestamps: WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL 'string' timeUnit.

    Emits watermarks, which are the maximum observed timestamp minus the specified delay, for example, WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '5' SECOND is a 5 seconds delayed watermark strategy.

    +
    CREATE TABLE Orders (
    +    user BIGINT,
    +    product STRING,
    +    order_time TIMESTAMP(3),
    +    WATERMARK FOR order_time AS order_time - INTERVAL '5' SECOND
    +) WITH ( . . . );
    +
+

PRIMARY KEY

+

Primary key constraint is a hint for Flink to leverage for optimizations. It tells that a column or a set of columns of a table or a view are unique and do not contain null. None of the columns in a primary key can be nullable. The primary key therefore uniquely identifies a row in a table.

+

Primary key constraint can be declared either along with a column definition (a column constraint) or as a single line (a table constraint). In both cases, it should be declared only as a singleton. If you define multiple primary key constraints at the same time, an exception is thrown.

+

Validity Check

+

SQL standard specifies that a constraint can be either ENFORCED or NOT ENFORCED. This controls whether constraint checks are performed on incoming/outgoing data. Flink does not own the data; therefore, the only supported mode is NOT ENFORCED. It is up to the user to ensure that the query enforces key integrity.

+

Flink assumes correctness of the primary key by assuming that the columns' nullability is aligned with the columns in the primary key. Connectors should ensure those are aligned.

+

Notes: In a CREATE TABLE statement, creating a primary key constraint alters the columns' nullability; that is, a column with a primary key constraint is not nullable.
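
A hedged sketch of the two declaration forms (names illustrative; per the note above, id becomes NOT NULL in both cases):

-- As a column constraint
CREATE TABLE T1 (
  id BIGINT PRIMARY KEY NOT ENFORCED,
  name STRING
) WITH ( . . . );

-- As a table constraint
CREATE TABLE T2 (
  id BIGINT,
  name STRING,
  PRIMARY KEY (id) NOT ENFORCED
) WITH ( . . . );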

+

PARTITIONED BY

+

Partition the created table by the specified columns. A directory is created for each partition if this table is used as a filesystem sink.

+

WITH OPTIONS

+

Table properties used to create a table source/sink. The properties are usually used to find and create the underlying connector.

+

The key and value of the expression key1=val1 should both be string literals.

+

Notes: A table registered with the CREATE TABLE statement can be used as both a table source and a table sink. Whether it is used as a source or a sink is not determined until it is referenced in a DML statement.

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0295.html b/docs/dli/sqlreference/dli_08_0295.html new file mode 100644 index 00000000..336cf90b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0295.html @@ -0,0 +1,22 @@ + + +

CREATE VIEW

+

Syntax

CREATE VIEW [IF NOT EXISTS] view_name
+  [{columnName [, columnName ]* }] [COMMENT view_comment]
+  AS query_expression
+
+

Function

Create a view with multiple layers nested in it to simplify the development process.

+
+

Description

IF NOT EXISTS

+

If the view already exists, nothing happens.

+
+

Example

Create a view named viewName.

+
create view viewName as select * from dataSource
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0296.html b/docs/dli/sqlreference/dli_08_0296.html new file mode 100644 index 00000000..6dc733bd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0296.html @@ -0,0 +1,24 @@ + + +

CREATE FUNCTION

+

Syntax

CREATE FUNCTION
+  [IF NOT EXISTS] function_name
+  AS identifier [LANGUAGE JAVA|SCALA]
+
+

Function

Create a user-defined function.

+
+

Description

IF NOT EXISTS

+

If the function already exists, nothing happens.

+

LANGUAGE JAVA|SCALA

+

The language tag instructs the Flink runtime how to execute the function. Currently, only JAVA and SCALA are supported; the default language for a function is JAVA.

+
+

Example

Create a function named STRINGBACK.

+
create function STRINGBACK as 'com.dli.StringBack'
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0297.html b/docs/dli/sqlreference/dli_08_0297.html new file mode 100644 index 00000000..8300b27a --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0297.html @@ -0,0 +1,138 @@ + + +

Data Manipulation Language (DML)

+

Statements

Syntax

+
+
INSERT INTO table_name [PARTITION part_spec] query
+
+part_spec:  (part_col_name1=val1 [, part_col_name2=val2, ...])
+
+query:
+  values
+  | {
+      select
+      | selectWithoutFrom
+      | query UNION [ ALL ] query
+      | query EXCEPT query
+      | query INTERSECT query
+    }
+    [ ORDER BY orderItem [, orderItem ]* ]
+    [ LIMIT { count | ALL } ]
+    [ OFFSET start { ROW | ROWS } ]
+    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY]
+
+orderItem:
+  expression [ ASC | DESC ]
+
+select:
+  SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+  [ WINDOW windowName AS windowSpec [, windowName AS windowSpec ]* ]
+
+selectWithoutFrom:
+  SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+
+projectItem:
+  expression [ [ AS ] columnAlias ]
+  | tableAlias . *
+
+tableExpression:
+  tableReference [, tableReference ]*
+  | tableExpression [ NATURAL ] [ LEFT | RIGHT | FULL ] JOIN tableExpression [ joinCondition ]
+
+joinCondition:
+  ON booleanExpression
+  | USING '(' column [, column ]* ')'
+
+tableReference:
+  tablePrimary
+  [ matchRecognize ]
+  [ [ AS ] alias [ '(' columnAlias [, columnAlias ]* ')' ] ]
+
+tablePrimary:
+  [ TABLE ] [ [ catalogName . ] schemaName . ] tableName
+  | LATERAL TABLE '(' functionName '(' expression [, expression ]* ')' ')'
+  | UNNEST '(' expression ')'
+
+values:
+  VALUES expression [, expression ]*
+
+groupItem:
+  expression
+  | '(' ')'
+  | '(' expression [, expression ]* ')'
+  | CUBE '(' expression [, expression ]* ')'
+  | ROLLUP '(' expression [, expression ]* ')'
+  | GROUPING SETS '(' groupItem [, groupItem ]* ')'
+
+windowRef:
+    windowName
+  | windowSpec
+
+windowSpec:
+    [ windowName ]
+    '('
+    [ ORDER BY orderItem [, orderItem ]* ]
+    [ PARTITION BY expression [, expression ]* ]
+    [
+        RANGE numericOrIntervalExpression {PRECEDING}
+      | ROWS numericExpression {PRECEDING}
+    ]
+    ')'
+
+matchRecognize:
+      MATCH_RECOGNIZE '('
+      [ PARTITION BY expression [, expression ]* ]
+      [ ORDER BY orderItem [, orderItem ]* ]
+      [ MEASURES measureColumn [, measureColumn ]* ]
+      [ ONE ROW PER MATCH ]
+      [ AFTER MATCH
+            ( SKIP TO NEXT ROW
+            | SKIP PAST LAST ROW
+            | SKIP TO FIRST variable
+            | SKIP TO LAST variable
+            | SKIP TO variable )
+      ]
+      PATTERN '(' pattern ')'
+      [ WITHIN intervalLiteral ]
+      DEFINE variable AS condition [, variable AS condition ]*
+      ')'
+
+measureColumn:
+      expression AS alias
+
+pattern:
+      patternTerm [ '|' patternTerm ]*
+
+patternTerm:
+      patternFactor [ patternFactor ]*
+
+patternFactor:
+      variable [ patternQuantifier ]
+
+patternQuantifier:
+      '*'
+  |   '*?'
+  |   '+'
+  |   '+?'
+  |   '?'
+  |   '??'
+  |   '{' { [ minRepeat ], [ maxRepeat ] } '}' ['?']
+  |   '{' repeat '}'
+

Precautions

+

Flink SQL uses a lexical policy for identifiers (table, attribute, and function names) similar to Java:

+
  • The case of identifiers is preserved whether or not they are quoted.
  • Identifiers are matched case-sensitively.
  • Unlike Java, back-ticks allow identifiers to contain non-alphanumeric characters (for example SELECT a AS `my field` FROM t).
+

String literals must be enclosed in single quotes (for example, SELECT 'Hello World'). Two single quotation marks are used for escaping (for example, SELECT 'It''s me.'). Unicode characters are supported in string literals. If explicit Unicode code points are required, use the following syntax:

+
  • Use the backslash (\) as escaping character (default): SELECT U&'\263A'
  • Use a custom escaping character: SELECT U&'#263A' UESCAPE '#'
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0298.html b/docs/dli/sqlreference/dli_08_0298.html new file mode 100644 index 00000000..4d700de6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0298.html @@ -0,0 +1,107 @@ + + +

Flink OpenSource SQL 1.10 Syntax

+

This section describes the Flink OpenSource SQL syntax supported by DLI. For details about the parameters and examples, see the syntax description.

+

Creating Tables

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Syntax for creating tables

Classification

+

Function

+

Creating a Source Table

+

+

+

+

+

+

+

Kafka Source Table

+

DIS Source Table

+

JDBC Source Table

+

GaussDB(DWS) Source Table

+

Redis Source Table

+

HBase Source Table

+

userDefined Source Table

+

Creating a Result Table

+

+

+

+

+

+

+

+

+

ClickHouse Result Table

+

Kafka Result Table

+

Upsert Kafka Result Table

+

DIS Result Table

+

JDBC Result Table

+

GaussDB(DWS) Result Table

+

Redis Result Table

+

SMN Result Table

+

HBase Result Table

+

Elasticsearch Result Table

+

User-defined Result Table

+

Creating a Dimension Table

+

+

+

JDBC Dimension Table

+

GaussDB(DWS) Dimension Table

+

HBase Dimension Table

+
+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0299.html b/docs/dli/sqlreference/dli_08_0299.html new file mode 100644 index 00000000..1f341f87 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0299.html @@ -0,0 +1,19 @@ + + +

Data Definition Language (DDL)

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0300.html b/docs/dli/sqlreference/dli_08_0300.html new file mode 100644 index 00000000..9693018a --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0300.html @@ -0,0 +1,27 @@ + + +

Creating a Source Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0301.html b/docs/dli/sqlreference/dli_08_0301.html new file mode 100644 index 00000000..33775000 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0301.html @@ -0,0 +1,251 @@ + + +

Kafka Source Table

+

Function

Create a source stream to obtain data from Kafka as input data for jobs.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+
+

Prerequisites

Kafka is an offline cluster. You have built an enhanced datasource connection to connect Flink jobs to Kafka and have set security group rules as required.

+
+

Precautions

SASL_SSL cannot be enabled for the interconnected Kafka cluster.

+
+

Syntax

create table kafkaSource(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' WATERMARK FOR rowtime_column_name AS watermark-strategy_expression)
+)
+with (
+  'connector.type' = 'kafka',
+  'connector.version' = '',
+  'connector.topic' = '',
+  'connector.properties.bootstrap.servers' = '',
+  'connector.properties.group.id' = '',
+  'connector.startup-mode' = '',
+  'format.type' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to kafka.

+

connector.version

+

Yes

+

Kafka version. The value can be '0.10' or '0.11', which corresponds to Kafka 2.11 to 2.4.0 and other historical versions, respectively.

+

format.type

+

Yes

+

Data deserialization format. The value can be csv, json, or avro.

+

format.field-delimiter

+

No

+

Attribute delimiter. You can customize the attribute delimiter only when the encoding format is CSV. The default delimiter is a comma (,).

+

connector.topic

+

Yes

+

Kafka topic name. Either this parameter or connector.topic-pattern is used.

+

connector.topic-pattern

+

No

+

Regular expression for matching the Kafka topic name. Either this parameter or connector.topic is used.

+

Example:

+

'topic.*'

+

'(topic-c|topic-d)'

+

'(topic-a|topic-b|topic-\\d*)'

+

'(topic-a|topic-b|topic-[0-9]*)'

+

connector.properties.bootstrap.servers

+

Yes

+

Kafka broker addresses. Use commas (,) to separate them.

+

connector.properties.group.id

+

No

+

Consumer group name

+

connector.startup-mode

+

No

+

Consumer startup mode. The value can be earliest-offset, latest-offset, group-offsets, specific-offsets or timestamp. The default value is group-offsets.

+

connector.specific-offsets

+

No

+

Consumption offset. This parameter is mandatory when startup-mode is specific-offsets. The value is in the 'partition:0,offset:42;partition:1,offset:300' format.

+

connector.startup-timestamp-millis

+

No

+

Consumption start timestamp. This parameter is mandatory when startup-mode is timestamp.

+

connector.properties.*

+

No

+

Native Kafka property

+
+
+
+
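
To subscribe by pattern instead of a fixed topic, replace connector.topic with connector.topic-pattern in the WITH clause. A sketch reusing one of the regular expressions from Table 1 (all other values are illustrative):

create table kafkaPatternSource(
  car_id STRING,
  car_owner STRING)
with (
  'connector.type' = 'kafka',
  'connector.version' = '0.11',
  'connector.topic-pattern' = '(topic-a|topic-b|topic-[0-9]*)',
  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
  'connector.properties.group.id' = 'test-group',
  'connector.startup-mode' = 'latest-offset',
  'format.type' = 'csv'
);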

Example

  • Create table kafkaSource and read data encoded in CSV format from Kafka.
    create table kafkaSource(
    +  car_id STRING,
    +  car_owner STRING,
    +  car_brand STRING,
    +  car_speed INT)
    +with (
    +  'connector.type' = 'kafka',
    +  'connector.version' = '0.11',
    +  'connector.topic' = 'test-topic',
    +  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
    +  'connector.properties.group.id' = 'test-group',
    +  'connector.startup-mode' = 'latest-offset',
    +  'format.type' = 'csv'
    +);
    +
  • Create table kafkaSource and read data in non-nested JSON strings from Kafka.
    Assume that the non-nested JSON strings are as follows:
    {"car_id": 312, "car_owner": "wang", "car_brand": "tang"}
    +{"car_id": 313, "car_owner": "li", "car_brand": "lin"}
    +{"car_id": 314, "car_owner": "zhao", "car_brand": "han"}
    +
    +
    You can create the table as follows:
    create table kafkaSource(
    +  car_id STRING,
    +  car_owner STRING,
    +  car_brand STRING
    +)
    +with (
    +  'connector.type' = 'kafka',
    +  'connector.version' = '0.11',
    +  'connector.topic' = 'test-topic',
    +  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
    +  'connector.properties.group.id' = 'test-group',
    +  'connector.startup-mode' = 'latest-offset',
    +  'format.type' = 'json'
    +);
    +
    +
  • Create table kafkaSource and read the nested JSON data from Kafka.

    Assume that the JSON data is as follows:

    +
    {
    +    "id":"1",
    +    "type":"online",
    +    "data":{
    +        "patient_id":1234,
    +        "name":"bob1234",
    +        "age":"Bob",
    +        "gmt_create":"Bob",
    +        "gmt_modify":"Bob"
    +    }
    +}
    +
    You can create the table as follows:
    CREATE table kafkaSource(
    +  id STRING,
    +  type STRING,
    +  data ROW(
    +    patient_id STRING, 
    +    name STRING, 
    +    age STRING, 
    +    gmt_create STRING, 
    +    gmt_modify STRING)
    +) 
    +with (
    +  'connector.type' = 'kafka',
    +  'connector.version' = '0.11',
    +  'connector.topic' = 'test-topic',
    +  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
    +  'connector.properties.group.id' = 'test-group',
    +  'connector.startup-mode' = 'latest-offset',
    +  'format.type' = 'json'
    +);
    +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0302.html b/docs/dli/sqlreference/dli_08_0302.html new file mode 100644 index 00000000..7acb4a5c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0302.html @@ -0,0 +1,180 @@ + + +

DIS Source Table

+

Function

Create a source stream to read data from DIS. DIS accesses user data, and the Flink job reads data from the DIS stream as its input. Flink jobs can quickly consume data from producers through the DIS source for continuous processing. They are applicable to scenarios where data outside the cloud service is imported to the cloud service for filtering, real-time analysis, monitoring reports, and dumping.

+

DIS addresses the challenge of transmitting data outside cloud services to cloud services. DIS builds data intake streams for custom applications capable of processing or analyzing streaming data. DIS continuously captures, transmits, and stores terabytes of data from hundreds of thousands of sources every hour, such as logs, Internet of Things (IoT) data, social media feeds, website clickstreams, and location-tracking events. For more information about DIS, see the Data Ingestion Service User Guide.

+
+

Syntax

create table disSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+)
+with (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = '',
+  'format-type' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Data source type. Set this parameter to dis.

+

connector.region

+

Yes

+

Region where the DIS stream that stores the data is located.

+

connector.ak

+

No

+

Access key ID. This parameter must be set in pair with sk.

+

connector.sk

+

No

+

Secret access key. This parameter must be set in pair with ak.

+

connector.channel

+

Yes

+

Name of the DIS stream where data is located.

+

connector.partition-count

+

No

+

Number of partitions where data will be read. Data in partition 0 to partition-count will be read.

+

+

This parameter and partition-range cannot both be configured.

+

If neither of the two parameters is set, all partition data will be read by default.

+

connector.partition-range

+

No

+

Range of partitions from which data will be read. This parameter and partition-count cannot both be configured. If neither of the two parameters is set, all partition data will be read by default.

+

+

For example, if you set partition-range to [0:2], data in partitions 1, 2, and 3 will be read. The range must be within the DIS stream.

+

connector.offset

+

No

+

Start position from which data will be read. Either this parameter or start-time can be configured.

+

connector.start-time

+

No

+

Time from which DLI reads data

+

+

If this parameter is specified, DLI reads data starting from the specified time. The format is yyyy-MM-dd HH:mm:ss.

+

If neither start-time nor offset is specified, the latest data is read.

+

connector.enable-checkpoint

+

No

+

Whether to enable the checkpoint function. The value can be true (enabled) or false (disabled). The default value is false.

+

+

Do not set this parameter when offset or start-time is set. If this parameter is set to true, checkpoint-app-name must be configured.

+

connector.checkpoint-app-name

+

No

+

ID of a DIS consumer. If a DIS stream is consumed by different jobs, you need to configure the consumer ID for each job to avoid checkpoint confusion.

+

+

Do not set this parameter when offset or start-time is set. If enable-checkpoint is set to true, this parameter is mandatory.

+

connector.checkpoint-interval

+

No

+

Interval of checkpoint operations on the DIS source operator. The default value is 60s. Available units: d (day), h (hour), min (minute), and s, sec, or second.

+

+

Do not set this parameter when offset or start-time is configured.

+

format.type

+

Yes

+

Data coding format. The value can be csv or json.

+

format.field-delimiter

+

No

+

Attribute delimiter. You can customize the attribute delimiter only when the encoding format is CSV. The default delimiter is a comma (,).

+
+
+
+

Precautions

None

+
+

Example

create table disCsvSource (
+  car_id STRING,
+  car_owner STRING,
+  car_age INT,
+  average_speed INT,
+  total_miles INT)
+with (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = 'disInput',
+  'format.type' = 'csv'
+);
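
To replay data from a point in time rather than reading only the latest data, connector.start-time can be added (a sketch; the timestamp is illustrative, and this option must not be combined with connector.offset):

create table disCsvSource (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT)
with (
  'connector.type' = 'dis',
  'connector.region' = '',
  'connector.channel' = 'disInput',
  'connector.start-time' = '2020-01-01 12:00:00',
  'format.type' = 'csv'
);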
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0303.html b/docs/dli/sqlreference/dli_08_0303.html new file mode 100644 index 00000000..2c5c65c3 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0303.html @@ -0,0 +1,145 @@ + + +

JDBC Source Table

+

Function

The JDBC connector is Flink's built-in connector for reading data from a database.

+
+

Prerequisites

  • An enhanced datasource connection with the database has been established, so that you can configure security group rules as required.
+
+

Syntax

create table jdbcSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+)
+with (
+  'connector.type' = 'jdbc',
+  'connector.url' = '',
+  'connector.table' = '',
+  'connector.username' = '',
+  'connector.password' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Data source type. Set this parameter to jdbc.

+

connector.url

+

Yes

+

Database URL

+

connector.table

+

Yes

+

Name of the table where the data to be read from the database is located

+

connector.driver

+

No

+

Driver required for connecting to the database. If you do not set this parameter, the driver is automatically extracted from the URL.

+

connector.username

+

No

+

Database authentication username. This parameter must be configured in pair with connector.password.

+

connector.password

+

No

+

Database authentication password. This parameter must be configured in pair with connector.username.

+

connector.read.partition.column

+

No

+

Name of the column used to partition the input

+

+

This parameter is mandatory if connector.read.partition.lower-bound, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.lower-bound

+

No

+

Lower bound of values to be fetched for the first partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.upper-bound

+

No

+

Upper bound of values to be fetched for the last partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.lower-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.num

+

No

+

Number of partitions to be created

+

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.lower-bound, and

+

connector.read.partition.upper-bound are configured.

+

connector.read.fetch-size

+

No

+

Number of rows fetched from the database each time. The default value is 0, indicating that the hint is ignored.

+
+
+
+

Precautions

None

+
+

Example

create table jdbcSource (
+  car_id STRING,
+  car_owner STRING,
+  car_age INT,
+  average_speed INT,
+  total_miles INT)
+with (
+  'connector.type' = 'jdbc',
+  'connector.url' = 'jdbc:mysql://xx.xx.xx.xx:3306/xx',
+  'connector.table' = 'jdbc_table_name',
+  'connector.driver' = 'com.mysql.jdbc.Driver',
+  'connector.username' = 'xxx',
+  'connector.password' = 'xxxxxx'
+);
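
A hedged variant that enables parallel, partitioned reading (the partition column and bounds are illustrative; as Table 1 notes, the four connector.read.partition.* options must be configured together):

create table jdbcSourcePartitioned (
  car_id STRING,
  car_owner STRING,
  car_age INT,
  average_speed INT,
  total_miles INT)
with (
  'connector.type' = 'jdbc',
  'connector.url' = 'jdbc:mysql://xx.xx.xx.xx:3306/xx',
  'connector.table' = 'jdbc_table_name',
  'connector.driver' = 'com.mysql.jdbc.Driver',
  'connector.username' = 'xxx',
  'connector.password' = 'xxxxxx',
  'connector.read.partition.column' = 'car_age',
  'connector.read.partition.lower-bound' = '0',
  'connector.read.partition.upper-bound' = '100',
  'connector.read.partition.num' = '4'
);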
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0304.html b/docs/dli/sqlreference/dli_08_0304.html new file mode 100644 index 00000000..4b9c0111 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0304.html @@ -0,0 +1,200 @@ + + +

GaussDB(DWS) Source Table

+

Function

DLI reads data of Flink jobs from GaussDB(DWS). GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex types and delivers space information services, multi-version concurrent control (MVCC), and high concurrency. It applies to location applications, financial insurance, and e-commerce.

+

GaussDB(DWS) is an online data processing database based on the cloud infrastructure and platform and helps you mine and analyze massive sets of data.

+
+

Prerequisites

  • Ensure that you have created a GaussDB(DWS) cluster using your account.

    For details about how to create a GaussDB(DWS) cluster, see "Creating a Cluster" in Data Warehouse Service Management Guide.

    +
  • A GaussDB(DWS) database table has been created.
  • An enhanced datasource connection has been created for DLI to connect to GaussDB(DWS) clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required.
+
+

Syntax

create table dwsSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+)
+with (
+  'connector.type' = 'gaussdb',
+  'connector.url' = '',
+  'connector.table' = '',
+  'connector.username' = '',
+  'connector.password' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to gaussdb.

+

connector.url

+

Yes

+

JDBC connection address. The format is jdbc:postgresql://${ip}:${port}/${dbName}. If the database version is later than 8.1.0, the value format is jdbc:gaussdb://${ip}:${port}/${dbName}.

+

connector.table

+

Yes

+

Name of the table to be operated. If the GaussDB(DWS) table is in a schema, the format is schema\".\"table_name, as shown in the Example.

+

connector.driver

+

No

+

JDBC connection driver. The default value is org.postgresql.Driver.

+

connector.username

+

No

+

Database authentication user name. This parameter must be configured in pair with connector.password.

+

connector.password

+

No

+

Database authentication password. This parameter must be configured in pair with connector.username.

+

connector.read.partition.column

+

No

+

Name of the column used to partition the input

+

This parameter is mandatory if connector.read.partition.lower-bound, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.lower-bound

+

No

+

Lower bound of values to be fetched for the first partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.upper-bound

+

No

+

Upper bound of values to be fetched for the last partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.lower-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.num

+

No

+

Number of partitions to be created

+

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.lower-bound, and

+

connector.read.partition.upper-bound are configured.

+

connector.read.fetch-size

+

No

+

Number of rows fetched from the database each time. The default value is 0, indicating that the hint is ignored.

+
+
+
+

Example

  • If you use the gsjdbc4 driver for connection, set connector.driver to org.postgresql.Driver. You can omit this parameter because the gsjdbc4 driver is the default one.

    Create table dwsSource with data fetched from the car_info table that is not in a schema:

    +
    create table dwsSource(
    +  car_id STRING,
    +  car_owner STRING,
    +  car_brand STRING,
    +  car_speed INT
    +) with (
    +  'connector.type' = 'gaussdb',
    +  'connector.url' = 'jdbc:postgresql://xx.xx.xx.xx:8000/xx',
    +  'connector.table' = 'car_info',
    +  'connector.username' = 'xx',
    +  'connector.password' = 'xx'
    +);
    +

    Create table dwsSource with data fetched from GaussDB(DWS) table test that is in a schema named test_schema:

    +
    create table dwsSource(
    +  car_id STRING,
    +  car_owner STRING,
    +  car_brand STRING,
    +  car_speed INT
    +) with (
    +  'connector.type' = 'gaussdb',
    +  'connector.url' = 'jdbc:postgresql://xx.xx.xx.xx:8000/xx',
    +  'connector.table' = 'test_schema\".\"test',
    +  'connector.username' = 'xx',
    +  'connector.password' = 'xx'
    +);
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0305.html b/docs/dli/sqlreference/dli_08_0305.html new file mode 100644 index 00000000..e801132e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0305.html @@ -0,0 +1,123 @@ + + +

Redis Source Table

+

Function

Create a source stream to obtain data from Redis as input for jobs.

+
+

Prerequisites

An enhanced datasource connection with Redis has been established, so that you can configure security group rules as required.

+

Syntax

create table redisSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+)
+with (
+  'connector.type' = 'redis',
+  'connector.host' = '',
+  'connector.port' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to redis.

+

connector.host

+

Yes

+

Redis connector address

+

connector.port

+

Yes

+

Redis connector port

+

connector.password

+

No

+

Redis authentication password

+

connector.deploy-mode

+

No

+

Redis deployment mode. The value can be standalone or cluster. The default value is standalone.

+

connector.table-name

+

No

+

Name of the table stored in the Redis. This parameter is mandatory in the Redis Hashmap storage pattern. In this pattern, data is stored to Redis in hashmaps. The hash key is ${table-name}:${ext-key}, and the field name is the column name.

+
NOTE:

Table storage pattern: connector.table-name and connector.key-column are used as Redis keys. For the Redis hash type, each key corresponds to a hashmap. A hash key is a field name of the source table, and a hash value is a field value of the source table.

+
+

connector.use-internal-schema

+

No

+

Whether to use the existing schema in the Redis. This parameter is optional in the Redis Hashmap storage pattern. The default value is false.

+

connector.key-column

+

No

+

This parameter is optional in the table storage pattern. The value is used as the value of ext-key in Redis. If this parameter is not set, a generated UUID is used as the ext-key value.

+
+
+
+

Example

Reads data from Redis.

+
create table redisSource(
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_speed INT
+) with (
+ 'connector.type' = 'redis',
+  'connector.host' = 'xx.xx.xx.xx',
+  'connector.port' = '6379',
+  'connector.password' = 'xx',
+  'connector.table-name' = 'car_info'
+);
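
Under the hashmap pattern described in Table 1, each row read by this source is expected to live in a Redis hash whose key is ${table-name}:${ext-key} and whose fields are the column names. A hypothetical redis-cli view of one such row (the key suffix and values are illustrative):

HGETALL car_info:1
1) "car_id"
2) "A123"
3) "car_owner"
4) "lilei"
5) "car_brand"
6) "bmw320i"
7) "car_speed"
8) "30"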
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0306.html b/docs/dli/sqlreference/dli_08_0306.html new file mode 100644 index 00000000..fb01d083 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0306.html @@ -0,0 +1,98 @@ + + +

HBase Source Table

+

Function

Create a source stream to obtain data from HBase as input for jobs. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scalability. It applies to the storage of massive amounts of data and distributed computing. You can use HBase to build a storage system capable of storing TB- or even PB-level data. With HBase, you can filter and analyze data with ease and get responses in milliseconds, rapidly mining data value. DLI can read data from HBase for filtering, analysis, and data dumping.

+
+

Prerequisites

  • An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required.
  • If MRS HBase is used, IP addresses of all hosts in the MRS cluster have been added to host information of the enhanced datasource connection.


    +

    For details, see section "Modifying the Host Information" in the Data Lake Insight User Guide.

    +
+
+

Syntax

create table hbaseSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+)
+with (
+  'connector.type' = 'hbase',
+  'connector.version' = '1.4.3',
+  'connector.table-name' = '',
+  'connector.zookeeper.quorum' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to hbase.

+

connector.version

+

Yes

+

The value must be 1.4.3.

+

connector.table-name

+

Yes

+

HBase table name

+

connector.zookeeper.quorum

+

Yes

+

ZooKeeper address

+

connector.zookeeper.znode.parent

+

No

+

Root directory for ZooKeeper. The default value is /hbase.

+

connector.rowkey

+

No

+

Content of a compound rowkey to be assigned. The content is assigned to a new field based on the configuration.

+

Example: rowkey1:3,rowkey2:3,...

+

The value 3 indicates the first three bytes of the field. The number cannot be greater than the byte size of the field and cannot be less than 1. rowkey1:3,rowkey2:3 indicates that the first three bytes of the compound rowkey are assigned to rowkey1, and the last three bytes are assigned to rowkey2.

+
+
+
+

Example

create table hbaseSource(
+  rowkey1 string,
+  rowkey2 string,
+  info Row<owner string>,
+  car ROW<miles string, speed string>
+ ) with (
+   'connector.type' = 'hbase',
+   'connector.version' = '1.4.3',
+   'connector.table-name' = 'carinfo',
+   'connector.rowkey' = 'rowkey1:1,rowkey2:3',
+   'connector.zookeeper.quorum' = 'xxxx:2181'
+ );
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0307.html b/docs/dli/sqlreference/dli_08_0307.html new file mode 100644 index 00000000..00110bc4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0307.html @@ -0,0 +1,41 @@ + + +

Creating a Result Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0308.html b/docs/dli/sqlreference/dli_08_0308.html new file mode 100644 index 00000000..e11aa540 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0308.html @@ -0,0 +1,157 @@ + + +

Kafka Result Table

+

Function

DLI exports the output data of the Flink job to Kafka.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+
+

Prerequisites

Kafka is an offline cluster. An enhanced datasource connection has been created to connect Flink jobs to Kafka, and security group rules have been set as required.

+
+

Precautions

SASL_SSL cannot be enabled for the interconnected Kafka cluster.

+
+

Syntax

create table kafkaSink(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'kafka',
+  'connector.version' = '',
+  'connector.topic' = '',
+  'connector.properties.bootstrap.servers' = '',
+  'format.type' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to kafka.

+

connector.version

+

No

+

Kafka version. The value can be '0.10' or '0.11', which correspond to Kafka 2.11–2.4.0 and other historical versions, respectively.

+

format.type

+

Yes

+

Data serialization format. The value can be csv, json, or avro.

+

format.field-delimiter

+

No

+

Attribute delimiter. You can customize the attribute delimiter only when the encoding format is CSV. The default delimiter is a comma (,).

+

connector.topic

+

Yes

+

Kafka topic name.

+

connector.properties.bootstrap.servers

+

Yes

+

Kafka broker addresses. Use commas (,) to separate them.

+

connector.sink-partitioner

+

No

+

Partitioner type. The value can be fixed, round-robin, or custom.

+

connector.sink-partitioner-class

+

No

+

Custom partitioner. This parameter is mandatory when sink-partitioner is custom, for example, org.mycompany.MyPartitioner.

+

update-mode

+

No

+

Data update mode. Three write modes are supported: append, retract, and upsert.

+

connector.properties.*

+

No

+

Native properties of Kafka

+
+
+
+

Example

Output the data in kafkaSink to Kafka.
create table kafkaSink(
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_speed INT)
+with (
+  'connector.type' = 'kafka',
+  'connector.version' = '0.10',
+  'connector.topic' = 'test-topic',
+  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
+  'connector.sink-partitioner' = 'round-robin',
+  'format.type' = 'csv'
+);
+
+ +
+
+
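Because connector.properties.* passes native Kafka properties through to the client, the example above can also be tuned without any DLI-specific options. The following sketch is one possible variant (the broker address is a placeholder; acks and retries are standard Kafka producer properties, not connector options):

create table kafkaSink(
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_speed INT)
with (
  'connector.type' = 'kafka',
  'connector.version' = '0.10',
  'connector.topic' = 'test-topic',
  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
  -- native Kafka producer properties, forwarded verbatim to the Kafka client
  'connector.properties.acks' = 'all',
  'connector.properties.retries' = '3',
  'format.type' = 'csv'
);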
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0309.html b/docs/dli/sqlreference/dli_08_0309.html new file mode 100644 index 00000000..3cecdd74 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0309.html @@ -0,0 +1,142 @@ + + +

Upsert Kafka Result Table

+

Function

DLI exports the output data of the Flink job to Kafka in upsert mode.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+
+

Prerequisites

Kafka is an offline cluster. An enhanced datasource connection has been created to connect Flink jobs to Kafka, and security group rules have been set as required.

+
+

Precautions

SASL_SSL cannot be enabled for the interconnected Kafka cluster.

+
+

Syntax

create table kafkaSink(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'upsert-kafka',
+  'connector.version' = '',
+  'connector.topic' = '',
+  'connector.properties.bootstrap.servers' = '',
+   'format.type' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to upsert-kafka.

+

connector.version

+

No

+

Kafka version. The value can only be 0.11.

+

format.type

+

Yes

+

Data serialization format. The value can be csv, json, or avro.

+

connector.topic

+

Yes

+

Kafka topic name

+

connector.properties.bootstrap.servers

+

Yes

+

Kafka broker addresses. Use commas (,) to separate them.

+

connector.sink-partitioner

+

No

+

Partitioner type. The value can be fixed, round-robin, or custom.

+

connector.sink-partitioner-class

+

No

+

Custom partitioner. This parameter is mandatory when sink-partitioner is custom, for example, org.mycompany.MyPartitioner.

+

connector.sink.ignore-retraction

+

No

+

Whether to ignore the retraction message. The default value is false, indicating that the retraction message is written to Kafka as null.

+

update-mode

+

No

+

Data update mode. Three write modes are supported: append, retract, and upsert.

+

connector.properties.*

+

No

+

Native properties of Kafka

+
+
+
+

Example

create table upsertKafkaSink(
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_speed INT,
+  primary key (car_id) not enforced
+)
+with (
+  'connector.type' = 'upsert-kafka',
+  'connector.version' = '0.11',
+  'connector.topic' = 'test-topic',
+  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
+  'format.type' = 'csv'
+);
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0310.html b/docs/dli/sqlreference/dli_08_0310.html new file mode 100644 index 00000000..c580673c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0310.html @@ -0,0 +1,139 @@ + + +

DIS Result Table

+

Function

DLI writes the Flink job output data into DIS. The data is filtered and imported to the DIS stream for future processing.

+

DIS addresses the challenge of transmitting data outside cloud services to cloud services. DIS builds data intake streams for custom applications capable of processing or analyzing streaming data. DIS continuously captures, transmits, and stores terabytes of data from hundreds of thousands of sources every hour, such as logs, Internet of Things (IoT) data, social media feeds, website clickstreams, and location-tracking events. For more information about DIS, see the Data Ingestion Service User Guide.

+
+

Syntax

create table disSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = '',
+  'format.type' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Data source type. Set this parameter to dis.

+

connector.region

+

Yes

+

Region where the DIS stream for storing the data is located.

+

connector.ak

+

No

+

Access key ID. This parameter must be configured together with connector.sk.

+

connector.sk

+

No

+

Secret access key. This parameter must be configured together with connector.ak.

+

connector.channel

+

Yes

+

Name of the DIS stream where data is located.

+

format.type

+

Yes

+

Data coding format. The value can be csv or json.

+

format.field-delimiter

+

No

+

Attribute delimiter. You can customize the attribute delimiter only when the encoding format is CSV. The default delimiter is a comma (,).

+

connector.partition-key

+

No

+

Group primary key. Multiple primary keys are separated by commas (,). If this parameter is not specified, data is randomly written to DIS partitions.

+
+
+
+

Precautions

None

+
+

Example

Output the data in the disSink stream to DIS.

+
create table disSink(
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_speed INT
+)
+with (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = 'disOutput',
+  'connector.partition-key' = 'car_id,car_owner',
+  'format.type' = 'csv'
+);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0311.html b/docs/dli/sqlreference/dli_08_0311.html new file mode 100644 index 00000000..28506a8b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0311.html @@ -0,0 +1,160 @@ + + +

JDBC Result Table

+

Function

DLI exports the output data of the Flink job to RDS.

+
+

Prerequisites

  • An enhanced datasource connection with the database has been established, so that you can configure security group rules as required.
+
+

Syntax

create table jdbcSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'jdbc',
+  'connector.url' = '',
+  'connector.table' = '',
+  'connector.driver' = '',
+  'connector.username' = '',
+  'connector.password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Data source type. Set this parameter to jdbc.

+

connector.url

+

Yes

+

Database URL

+

connector.table

+

Yes

+

Name of the table in the database to which data is to be written

+

connector.driver

+

No

+

Driver required for connecting to the database. If you do not set this parameter, the driver is automatically extracted from the URL.

+

connector.username

+

No

+

Username for accessing the database

+

connector.password

+

No

+

Password for accessing the database

+

connector.write.flush.max-rows

+

No

+

Maximum number of rows to be updated when data is written. The default value is 5000.

+

connector.write.flush.interval

+

No

+

Interval for data update. The unit can be ms, milli, or millisecond; s, sec, or second; min or minute. If this parameter is not set, data is not flushed based on an interval by default.

+

connector.write.max-retries

+

No

+

Maximum number of retries if writing data fails. The default value is 3.

+

connector.write.exclude-update-columns

+

No

+

Columns to be excluded from updates. When data with the same primary key is updated, the specified columns are not updated. The default value is empty. The primary key column is always excluded.

+
+
+
+

Precautions

None

+
+

Example

Output data from stream jdbcSink to the MySQL database.

+
create table jdbcSink(
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_speed INT
+)
+with (
+  'connector.type' = 'jdbc',
+  'connector.url' = 'jdbc:mysql://xx.xx.xx.xx:3306/xx',
+  'connector.table' = 'jdbc_table_name',
+  'connector.driver' = 'com.mysql.jdbc.Driver',
+  'connector.username' = 'xxx',
+  'connector.password' = 'xxxxxx'
+);
+
+ +
+
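The flush parameters in Table 1 control how writes are batched. As a hedged sketch (connection details are placeholders), the example above could be extended so that buffered rows are flushed every 1000 rows or every 2 seconds, whichever comes first, with up to 5 retries on failure:

create table jdbcSink(
  car_id STRING,
  car_owner STRING,
  car_brand STRING,
  car_speed INT
)
with (
  'connector.type' = 'jdbc',
  'connector.url' = 'jdbc:mysql://xx.xx.xx.xx:3306/xx',
  'connector.table' = 'jdbc_table_name',
  'connector.driver' = 'com.mysql.jdbc.Driver',
  'connector.username' = 'xxx',
  'connector.password' = 'xxxxxx',
  -- flush after 1000 buffered rows or after 2 seconds, whichever comes first
  'connector.write.flush.max-rows' = '1000',
  'connector.write.flush.interval' = '2s',
  'connector.write.max-retries' = '5'
);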
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0312.html b/docs/dli/sqlreference/dli_08_0312.html new file mode 100644 index 00000000..3ffccda5 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0312.html @@ -0,0 +1,198 @@ + + +

GaussDB(DWS) Result Table

+

Function

DLI outputs the Flink job output data to GaussDB(DWS). The GaussDB(DWS) database kernel is compliant with PostgreSQL. The PostgreSQL database can store data of more complex types and delivers spatial information services, multi-version concurrency control (MVCC), and high concurrency. It applies to location applications, financial insurance, and e-commerce.

+

GaussDB(DWS) is an online data processing database based on the cloud infrastructure and platform and helps you mine and analyze massive sets of data.

+
+

Prerequisites

  • Ensure that you have created a GaussDB(DWS) cluster using your account.

    For details about how to create a GaussDB(DWS) cluster, see "Creating a Cluster" in Data Warehouse Service Management Guide.

    +
  • A GaussDB(DWS) database table has been created.
  • An enhanced datasource connection has been created for DLI to connect to GaussDB(DWS) clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required.
+
+

Syntax

create table dwsSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'gaussdb',
+  'connector.url' = '',
+  'connector.table' = '',
+  'connector.driver' = '',
+  'connector.username' = '',
+  'connector.password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to gaussdb.

+

connector.url

+

Yes

+

JDBC connection address. The format is jdbc:postgresql://${ip}:${port}/${dbName}.

+

connector.table

+

Yes

+

Name of the table to be operated on. If the GaussDB(DWS) table is in a schema, the format is schema_name\".\"table_name. For details, see the example.

+

connector.driver

+

No

+

JDBC connection driver. The default value is org.postgresql.Driver.

+

connector.username

+

No

+

Database authentication user name. This parameter must be configured in pair with connector.password.

+

connector.password

+

No

+

Database authentication password. This parameter must be configured in pair with connector.username.

+

connector.write.mode

+

No

+

Data write mode. The value can be copy, insert, or upsert. The default value is upsert.

+

This parameter must be configured depending on primary key.

+
  • If primary key is not configured, data can be appended in copy and insert modes.
  • If primary key is configured, all the three modes are available.
+

Note: GaussDB(DWS) does not support the update of distribution columns. The primary keys of columns to be updated must cover all distribution columns defined in the GaussDB(DWS) table.

+

connector.write.flush.max-rows

+

No

+

Maximum rows allowed for data flush. If the data size exceeds the value, data flush is triggered. The default value is 5000.

+

connector.write.flush.interval

+

No

+

Data flush period. Data flush is triggered periodically. The format is {length value}{time unit label}, for example, 123ms, 321s. The supported time units include d, h, min, s, and ms (default unit). If this parameter is not set, data is not flushed based on an interval by default.

+

connector.write.max-retries

+

No

+

Maximum number of attempts to write data. The default value is 3.

+

connector.write.merge.filter-key

+

No

+

Column to be merged. This parameter takes effect only when PRIMARY KEY is configured and connector.write.mode is set to copy.

+

connector.write.escape-string-value

+

No

+

Whether to escape values of the string type. The default value is false.

+
+
+
+

Precautions

None

+
+

Example

  • If you use the gsjdbc4 driver for connection, set connector.driver to org.postgresql.Driver. You can omit this parameter because the gsjdbc4 driver is the default one.
    • Write data to GaussDB(DWS) in upsert mode.
      create table dwsSink(
      +  car_id STRING,
      +  car_owner STRING,
      +  car_brand STRING,
      +  car_speed INT
      +) with (
      +  'connector.type' = 'gaussdb',
      +  'connector.url' = 'jdbc:postgresql://xx.xx.xx.xx:8000/xx',
      +  'connector.table' = 'car_info',
      +  'connector.username' = 'xx',
      +  'connector.password' = 'xx',
      +  'connector.write.mode' = 'upsert',
      +  'connector.write.flush.interval' = '30s'
      +);
      +
      + +
      +
Write data to the GaussDB(DWS) table test in the schema named ads_game_sdk_base:
      CREATE TABLE ads_rpt_game_sdk_realtime_ada_reg_user_pay_mm (
      +  ddate DATE,
      +  dmin TIMESTAMP(3),
      +  game_appkey VARCHAR,
      +  channel_id VARCHAR,
      +  pay_user_num_1m bigint,
      +  pay_amt_1m bigint,
      +  PRIMARY KEY (ddate, dmin, game_appkey, channel_id) NOT ENFORCED
      +) WITH (
      +  'connector.type' = 'gaussdb',
      +  'connector.url' = 'jdbc:postgresql://xx.xx.xx.xx:8000/dws_bigdata_db',
      +  'connector.table' = 'ads_game_sdk_base\".\"test',
      +  'connector.username' = 'xxxx',
      +  'connector.password' = 'xxxxx',
      +  'connector.write.mode' = 'upsert',
      +  'connector.write.flush.interval' = '30s'
      +);
      +
      +
    +
+
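Because copy mode appends data and does not require a primary key (see connector.write.mode above), a minimal hedged sketch for plain high-throughput loading could look as follows; the connection details are placeholders:

create table dwsCopySink(
  car_id STRING,
  car_owner STRING,
  car_speed INT
) with (
  'connector.type' = 'gaussdb',
  'connector.url' = 'jdbc:postgresql://xx.xx.xx.xx:8000/xx',
  'connector.table' = 'car_info',
  'connector.username' = 'xx',
  'connector.password' = 'xx',
  -- copy mode appends rows; no PRIMARY KEY is declared on the table
  'connector.write.mode' = 'copy'
);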
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0313.html b/docs/dli/sqlreference/dli_08_0313.html new file mode 100644 index 00000000..96a15133 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0313.html @@ -0,0 +1,235 @@ + + +

Redis Result Table

+

Function

DLI exports the output data of the Flink job to Redis. Redis is a storage system that supports multiple types of data structures such as key-value. It can be used in scenarios such as caching, event pub/sub, and high-speed queuing. Redis supports direct read/write of strings, hashes, lists, queues, and sets. Redis works with in-memory dataset and provides persistence. For more information about Redis, visit https://redis.io/.

+
+

Prerequisites

An enhanced datasource connection with Redis has been established, so that you can configure security group rules as required.

+ +
+

Syntax

create table redisSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'redis',
+  'connector.host' = '',
+  'connector.port' = '',
+  'connector.password' = '',
+  'connector.table-name' = '',
+  'connector.key-column' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to redis.

+

connector.host

+

Yes

+

Redis connector address

+

connector.port

+

Yes

+

Redis connector port

+

connector.password

+

No

+

Redis authentication password

+

connector.deploy-mode

+

No

+

Redis deployment mode. The value can be standalone or cluster. The default value is standalone.

+

connector.table-name

+

No

+

Name of the table stored in Redis. This parameter is mandatory in the Redis Hashmap storage pattern. In this pattern, data is stored to Redis in hashmaps. The hash key is ${table-name}:${ext-key}, and the field name is the column name.

+
NOTE:

Table storage pattern: connector.table-name and connector.key-column are used as Redis keys. For the Redis hash type, each key corresponds to a hashmap. A hash key is a field name of the source table, and a hash value is a field value of the source table.

+
+

connector.key-column

+

No

+

This parameter is optional in the table storage pattern. The value is used as ext-key in Redis. If this parameter is not set, a generated UUID is used as ext-key.

+

connector.write-schema

+

No

+

Whether to write the current schema to Redis. This parameter is available in the table storage pattern. The default value is false.

+

connector.data-type

+

No

+

Data type for storage. This parameter is mandatory for the custom storage pattern. Supported values include string, list, hash, and set. For the string, list, and set types, the table schema must contain two fields; for the hash type, it must contain three fields.

+

connector.ignore-retraction

+

No

+

Whether to ignore the retraction message. The default value is false.

+
+
+
+

Precautions

Either connector.table-name or connector.data-type must be set.

+
+

Example

  • Configure the table storage pattern when you configure connector.table-name.
    In table storage pattern, data is stored in hash mode, which is different from the basic hash pattern in which the three fields of a table are used as the key, hash_key, and hash_value. The key in table pattern can be specified by connector.table-name and connector.key-column parameters, all field names in the table are used as hash_key, and the field values are written to the hash table as hash_value.
    create table redisSink(
    +  car_id STRING,
    +  car_owner STRING,
    +  car_brand STRING,
    +  car_speed INT
    +) with (
    +  'connector.type' = 'redis',
    +  'connector.host' = 'xx.xx.xx.xx',
    +  'connector.port' = '6379',
    +  'connector.password' = 'xx',
    +  'connector.table-name'='car_info',
    +  'connector.key-column'='car_id'
    +);
    +
    +insert into redisSink
    +  (car_id,car_owner,car_brand,car_speed)
    +  VALUES
    +  ("A1234","OwnA","A1234",30);
    +
    +
  • The following example shows how to create a table when connector.data-type is set to string, list, hash, or set, respectively.
    • String type
      The table contains two columns: key and value.
      create table redisSink(
      +  attr1 STRING,
      +  attr2 STRING
      +) with (
      +  'connector.type' = 'redis',
      +  'connector.host' = 'xx.xx.xx.xx',
      +  'connector.port' = '6379',
      +  'connector.password' = 'xx',
      +  'connector.data-type' = 'string'
      +);
      +
      +insert into redisSink
      +  (attr1,attr2)
      +  VALUES
      +  ("car_id","A1234");
      +
      +
    • List type
      The table contains two columns: key and value.
      create table redisSink(
      +  attr1 STRING,
      +  attr2 STRING
      +) with (
      +  'connector.type' = 'redis',
      +  'connector.host' = 'xx.xx.xx.xx',
      +  'connector.port' = '6379',
      +  'connector.password' = 'xx',
      +  'connector.data-type' = 'list'
      +);
      +
      +insert into redisSink
      +  (attr1,attr2)
      +  VALUES
      +  ("car_id","A1234");
      +
      + +
      +
      +
    • Set type
      The table contains two columns: key and value.
      create table redisSink(
      +  attr1 STRING,
      +  attr2 STRING
      +) with (
      +  'connector.type' = 'redis',
      +  'connector.host' = 'xx.xx.xx.xx',
      +  'connector.port' = '6379',
      +  'connector.password' = 'xx',
      +  'connector.data-type' = 'set'
      +);
      +
      +insert into redisSink
      +  (attr1,attr2)
      +  VALUES
      +  ("car_id","A1234");
      +
      +
    • Hash type
      The table contains three columns: key, hash_key, and hash_value.
      create table redisSink(
      +  attr1 STRING,
      +  attr2 STRING,
      +  attr3 STRING
      +) with (
      +  'connector.type' = 'redis',
      +  'connector.host' = 'xx.xx.xx.xx',
      +  'connector.port' = '6379',
      +  'connector.password' = 'xx',
      +  'connector.data-type' = 'hash'
      +);
      +
      +insert into redisSink
      +  (attr1,attr2,attr3)
      +  VALUES
      +  ("car_info","car_id","A1234");
      +
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0314.html b/docs/dli/sqlreference/dli_08_0314.html new file mode 100644 index 00000000..e0ab29dc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0314.html @@ -0,0 +1,100 @@ + + +

SMN Result Table

+

Function

DLI exports Flink job output data to SMN.

+

SMN provides reliable and flexible large-scale message notification services to DLI. It significantly simplifies system coupling and pushes messages to subscription endpoints based on requirements. SMN can be connected to other cloud services or integrated with any application that uses or generates message notifications to push messages over multiple protocols.

+
+

Syntax

create table smnSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'smn',
+  'connector.region' = '',
+  'connector.topic-urn' = '',
+  'connector.message-subject' = '',
+  'connector.message-column' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Sink data type. Set this parameter to smn, which means that data is stored to SMN.

+

connector.region

+

Yes

+

Region where SMN belongs

+

connector.topic-urn

+

No

+

URN of an SMN topic, which is used for the static topic URN configuration. The SMN topic serves as the destination for short message notification and needs to be created in SMN.

+

+

Either connector.topic-urn or connector.urn-column must be configured. If both are configured, the connector.topic-urn setting takes precedence.

+

connector.urn-column

+

No

+

Field name of the topic URN content, which is used for the dynamic topic URN configuration.

+

+

Either connector.topic-urn or connector.urn-column must be configured. If both are configured, the connector.topic-urn setting takes precedence.

+

connector.message-subject

+

Yes

+

Message subject sent by SMN. This parameter can be customized.

+

connector.message-column

+

Yes

+

Name of a column in the current table. Data in this column is used as the message content. Currently, only text messages are supported.

+
+
+
+

Precautions

None

+
+

Example

Write data to the target SMN topic. The subject of the message sent by SMN is test, and the message content is the data in the attr1 column.

+
create table smnSink (
+  attr1 STRING,
+  attr2 STRING
+)
+with (
+  'connector.type' = 'smn',
+  'connector.region' = '',
+  'connector.topic-urn' = 'xxxxxx',
+  'connector.message-subject' = 'test',
+  'connector.message-column' = 'attr1'
+);
+
+
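For the dynamic configuration described by connector.urn-column, the topic URN is read from a column of each record instead of being fixed in the DDL. A hedged sketch (the region value and column names are illustrative):

create table smnDynamicSink (
  message_content STRING,
  target_urn STRING
)
with (
  'connector.type' = 'smn',
  'connector.region' = '',
  -- assumption: each record carries its destination topic URN in the target_urn column
  'connector.urn-column' = 'target_urn',
  'connector.message-subject' = 'test',
  'connector.message-column' = 'message_content'
);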
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0315.html b/docs/dli/sqlreference/dli_08_0315.html new file mode 100644 index 00000000..308384cc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0315.html @@ -0,0 +1,119 @@ + + +

HBase Result Table

+

Function

DLI outputs the job data to HBase. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scalability. It applies to the storage of massive amounts of data and distributed computing. You can use HBase to build a storage system capable of storing TB- or even PB-level data. With HBase, you can filter and analyze data with ease and get responses in milliseconds, rapidly mining data value. Structured and semi-structured key-value data can be stored, including messages, reports, recommendation data, risk control data, logs, and orders. With DLI, you can write massive volumes of data to HBase at a high speed and with low latency.

+
+

Prerequisites

An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required.

+
  • If MRS HBase is used, IP addresses of all hosts in the MRS cluster have been added to host information of the enhanced datasource connection.


    +
+
+

Syntax

create table hbaseSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'hbase',
+  'connector.version' = '1.4.3',
+  'connector.table-name' = '',
+  'connector.zookeeper.quorum' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to hbase.

+

connector.version

+

Yes

+

The value must be 1.4.3.

+

connector.table-name

+

Yes

+

HBase table name

+

connector.zookeeper.quorum

+

Yes

+

ZooKeeper address

+

connector.zookeeper.znode.parent

+

No

+

Root directory for ZooKeeper. The default value is /hbase.

+

connector.write.buffer-flush.max-size

+

No

+

Maximum buffer size for each data write. The default value is 2 MB. The unit is MB.

+

connector.write.buffer-flush.max-rows

+

No

+

Maximum number of data records that can be updated each time

+

connector.write.buffer-flush.interval

+

No

+

Flush interval. The default value is 0s. Example value: 2s.

+

connector.rowkey

+

No

+

Content of a compound rowkey to be assigned. The content is assigned to a new field based on the configuration.

+

Example: rowkey1:3,rowkey2:3, ...

+

The value 3 indicates the first three bytes of the field. The number cannot be greater than the byte size of the field and cannot be less than 1.

+
+
+
+

Example

 create table hbaseSink(
+  rowkey string,
+  name string,
+  i Row<gender string, age int>,
+  j Row<address string>
+ ) with (
+   'connector.type' = 'hbase',
+   'connector.version' = '1.4.3',
+   'connector.table-name' = 'sink',
+   'connector.rowkey' = 'rowkey:1,name:3',
+   'connector.write.buffer-flush.max-rows' = '5',
+   'connector.zookeeper.quorum' = 'xxxx:2181'
+ );
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0316.html b/docs/dli/sqlreference/dli_08_0316.html new file mode 100644 index 00000000..bd95b726 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0316.html @@ -0,0 +1,188 @@ + + +

Elasticsearch Result Table

+

Function

DLI exports Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server and provides the distributed multi-user capabilities. It delivers multiple functions, including full-text retrieval, structured search, analytics, aggregation, and highlighting. With Elasticsearch, you can achieve stable, reliable, real-time search. Elasticsearch applies to diversified scenarios, such as log analysis and site search.

+

CSS is a fully managed, distributed search service. It is fully compatible with open-source Elasticsearch and provides DLI with structured and unstructured data search, statistics, and report capabilities. For more information about CSS, see the Cloud Search Service documentation.

+
+

Prerequisites

  • Ensure that you have created a cluster on CSS using your account.

    If you need to access Elasticsearch using the cluster username and password, enable the security mode and disable HTTPS for the created CSS cluster.

    +
  • In this scenario, jobs must run on the dedicated queue of DLI, so DLI must interconnect with an enhanced datasource connection that has been connected to CSS. You can also set the security group rules as required. +
+
+

Precautions

  • Currently, only CSS 7.X and later versions are supported. Version 7.6.2 is recommended.
  • Do not enable the security mode for the CSS cluster if connector.username and connector.password are not configured.
  • ICMP must be enabled for the security group inbound rule of the CSS cluster.
+
+

Syntax

create table esSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector.type' = 'elasticsearch',
+  'connector.version' = '7',
+  'connector.hosts' = 'http://xxxx:9200',
+  'connector.index' = '',
+  'connector.document-type' = '',
+  'update-mode' = '',
+  'format.type' = 'json'
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to elasticsearch.

+

connector.version

+

Yes

+

Elasticsearch version

+

Currently, only version 7 can be used. That is, the value of this parameter can only be 7.

+

connector.hosts

+

Yes

+

Host name of the cluster where Elasticsearch is located. Use semicolons (;) to separate multiple host names. Ensure that the host name starts with http, for example, http://x.x.x.x:9200.

+

connector.index

+

Yes

+

Elasticsearch index name

+

connector.document-type

+

Yes

+

Elasticsearch type name

+

This attribute is invalid because Elasticsearch 7 uses the default _doc type.

+

update-mode

+

Yes

+

Data update mode of the sink. The value can be append or upsert.

+

connector.key-delimiter

+

No

+

Delimiter of compound primary keys. The default value is _.

+

connector.key-null-literal

+

No

+

Character used to replace null in keys.

+

connector.failure-handler

+

No

+

Policy used when an Elasticsearch request fails. The default value is fail.

+

fail: An exception is thrown when the request fails and the job fails.

+

ignore: The failed request is ignored.

+

retry-rejected: If the request fails because the queue running the Elasticsearch node is full, the request is resent and no failure is reported.

+

custom: A custom policy is used.

+

connector.failure-handler-class

+

No

+

Custom processing mode used to handle a failure

+

connector.flush-on-checkpoint

+

No

+

Whether the connector waits for all pending action requests to be acknowledged by Elasticsearch on checkpoints.

+

The default value is true, indicating that the connector waits for all pending action requests on checkpoints. If this parameter is set to false, the connector does not wait for the requests.

+

connector.bulk-flush.max-actions

+

No

+

Maximum number of records that can be written in a batch

+

connector.bulk-flush.max-size

+

No

+

Maximum total amount of data to be written in batches. The unit is MB and must be specified when you configure this parameter.

+

connector.bulk-flush.interval

+

No

+

Update interval for batch writing, in milliseconds. The unit does not need to be specified.

+

format.type

+

Yes

+

Data format. Currently, only JSON is supported.

+

connector.username

+

No

+

Account of the cluster where Elasticsearch is located. This parameter must be configured together with connector.password.

+

If the account and password are used, the security mode must be enabled and HTTPS must be disabled for the created CSS cluster.

+

connector.password

+

No

+

Password of the cluster where Elasticsearch is located. This parameter must be configured together with connector.username.

+
+
+
+

+

Example

create table sink1(
+  attr1 string,
+  attr2 int
+) with (
+  'connector.type' = 'elasticsearch',
+  'connector.version' = '7', 
+  'connector.hosts' = 'http://xxxx:9200',
+  'connector.index' = 'es',
+  'connector.document-type' = 'one',
+  'update-mode' = 'append',
+  'format.type' = 'json'
+);
+
+
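When update-mode is set to upsert, the syntax above allows a primary key to be declared so that documents with the same key are updated in place. A hedged sketch based on the example (the host is a placeholder):

create table esUpsertSink(
  attr1 string,
  attr2 int,
  primary key (attr1) not enforced
) with (
  'connector.type' = 'elasticsearch',
  'connector.version' = '7',
  'connector.hosts' = 'http://xxxx:9200',
  'connector.index' = 'es',
  'connector.document-type' = 'one',
  -- documents sharing the same attr1 value are overwritten rather than appended
  'update-mode' = 'upsert',
  'format.type' = 'json'
);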
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0317.html b/docs/dli/sqlreference/dli_08_0317.html new file mode 100644 index 00000000..0bcd2581 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0317.html @@ -0,0 +1,19 @@ + + +

Creating a Dimension Table

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0318.html b/docs/dli/sqlreference/dli_08_0318.html new file mode 100644 index 00000000..0ee1840d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0318.html @@ -0,0 +1,255 @@ + + +

JDBC Dimension Table

+

Create a JDBC dimension table to connect to the source stream.

+

Prerequisites

  • You have created a JDBC instance for your account.
+
+

Syntax

CREATE TABLE  table_id (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+  WITH (
+  'connector.type' = 'jdbc',
+  'connector.url' = '',
+  'connector.table' = '',
+  'connector.username' = '',
+  'connector.password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Data source type. Set this parameter to jdbc.

+

connector.url

+

Yes

+

Database URL

+

connector.table

+

Yes

+

Name of the table where the data to be read from the database is located

+

connector.driver

+

No

+

Driver required for connecting to the database. If you do not set this parameter, the driver is automatically extracted from the URL.

+

connector.username

+

No

+

Database authentication user name. This parameter must be configured in pair with connector.password.

+

connector.password

+

No

+

Database authentication password. This parameter must be configured in pair with connector.username.

+

connector.read.partition.column

+

No

+

Name of the column used to partition the input

+

+

This parameter is mandatory if connector.read.partition.lower-bound, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.lower-bound

+

No

+

Lower bound of values to be fetched for the first partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.upper-bound

+

No

+

Upper bound of values to be fetched for the last partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.lower-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.num

+

No

+

Number of partitions

+

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.upper-bound, and

+

connector.read.partition.lower-bound are configured.

+

connector.read.fetch-size

+

No

+

Number of rows fetched from the database each time. The default value is 0, indicating the hint is ignored.

+

connector.lookup.cache.max-rows

+

No

+

Maximum number of cached rows in a dimension table. If the number of cached rows exceeds this value, the oldest data is deleted. The value -1 indicates that data caching is disabled.

+

connector.lookup.cache.ttl

+

No

+

Time To Live (TTL) of dimension table cache. Caches exceeding the TTL will be deleted. The format is {length value}{time unit label}, for example, 123ms, 321s. The supported time units include d, h, min, s, and ms (default unit).

+

connector.lookup.max-retries

+

No

+

Maximum number of attempts to obtain data from the dimension table. The default value is 3.

+
+
+
+
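The four connector.read.partition.* parameters work as a group: the value range [lower-bound, upper-bound] of the partition column is split into num slices that are scanned in parallel. A hedged sketch combining them with the lookup cache (connection details and bounds are placeholders):

CREATE TABLE db_info (
  car_id STRING,
  car_price INT
)
  WITH (
  'connector.type' = 'jdbc',
  'connector.url' = 'jdbc:mysql://xx.xx.xx.xx:3306/xx',
  'connector.table' = 'jdbc_table_name',
  'connector.username' = 'xxx',
  'connector.password' = 'xxxxx',
  -- split the scan of car_price values 0..100000 into 4 parallel partitions
  'connector.read.partition.column' = 'car_price',
  'connector.read.partition.lower-bound' = '0',
  'connector.read.partition.upper-bound' = '100000',
  'connector.read.partition.num' = '4',
  -- cache up to 10000 dimension rows for one hour between lookups
  'connector.lookup.cache.max-rows' = '10000',
  'connector.lookup.cache.ttl' = '1h'
);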

Example

The RDS table is used to connect to the source stream.
CREATE TABLE car_infos (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_price INT,
+  proctime as PROCTIME()
+)
+  WITH (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = 'disInput',
+  'format.type' = 'csv'
+  );
+
+CREATE TABLE  db_info (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_price INT
+)
+  WITH (
+  'connector.type' = 'jdbc',
+  'connector.url' = 'jdbc:mysql://xx.xx.xx.xx:3306/xx',
+  'connector.table' = 'jdbc_table_name',
+  'connector.driver' = 'com.mysql.jdbc.Driver',
+  'connector.username' = 'xxx',
+  'connector.password' = 'xxxxx'
+);
+
+CREATE TABLE audi_cheaper_than_30w (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_price INT
+)
+  WITH (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = 'disOutput',
+  'connector.partition-key' = 'car_id,car_owner',
+  'format.type' = 'csv'
+  );
+
+INSERT INTO audi_cheaper_than_30w
+SELECT a.car_id, b.car_owner, b.car_brand, b.car_price 
+FROM car_infos as a join db_info FOR SYSTEM_TIME AS OF a.proctime AS b on a.car_id = b.car_id;
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0319.html b/docs/dli/sqlreference/dli_08_0319.html new file mode 100644 index 00000000..739359be --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0319.html @@ -0,0 +1,259 @@ + + +

GaussDB(DWS) Dimension Table

+

Create a GaussDB(DWS) dimension table to connect to the input stream.

+

Prerequisites

  • You have created a GaussDB(DWS) instance for your account.
+
+

Syntax

create table dwsSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'gaussdb',
+  'connector.url' = '',
+  'connector.table' = '',
+  'connector.username' = '',
+  'connector.password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to gaussdb.

+

connector.url

+

Yes

+

JDBC connection address. The format is jdbc:postgresql://${ip}:${port}/${dbName}.

+

connector.table

+

Yes

+

Name of the table where the data to be read from the database is located

+

connector.driver

+

No

+

JDBC connection driver. The default value is org.postgresql.Driver.

+

connector.username

+

No

+

Database authentication user name. This parameter must be configured in pair with connector.password.

+

connector.password

+

No

+

Database authentication password. This parameter must be configured in pair with connector.username.

+

connector.read.partition.column

+

No

+

Name of the column used to partition the input

+

This parameter is mandatory if connector.read.partition.lower-bound, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.lower-bound

+

No

+

Lower bound of values to be fetched for the first partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.upper-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.upper-bound

+

No

+

Upper bound of values to be fetched for the last partition

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.lower-bound, and

+

connector.read.partition.num are configured.

+

connector.read.partition.num

+

No

+

Number of partitions

+

+

This parameter is mandatory if connector.read.partition.column, connector.read.partition.upper-bound, and

+

connector.read.partition.lower-bound are configured.

+

connector.read.fetch-size

+

No

+

Number of rows fetched from the database each time. The default value is 0, indicating the hint is ignored.

+

connector.lookup.cache.max-rows

+

No

+

Maximum number of cached rows in a dimension table. If the number of cached rows exceeds this value, the oldest data is deleted. The value -1 indicates that data caching is disabled.

+

connector.lookup.cache.ttl

+

No

+

+

Time To Live (TTL) of dimension table cache. Caches exceeding the TTL will be deleted. The format is {length value}{time unit label}, for example, 123ms, 321s. The supported time units include d, h, min, s, and ms (default unit).

+

connector.lookup.max-retries

+

No

+

Maximum number of attempts to obtain data from the dimension table. The default value is 3.

+
+
+
+

Example

Use a GaussDB(DWS) table to connect to the source stream.
CREATE TABLE car_infos (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_price INT,
+  proctime as PROCTIME()
+)
+  WITH (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = 'disInput',
+  'format.type' = 'csv'
+  );
+
+CREATE TABLE  db_info (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_price INT
+)
+  WITH (
+  'connector.type' = 'gaussdb',
+  'connector.driver' = 'org.postgresql.Driver',
+  'connector.url' = 'jdbc:gaussdb://xx.xx.xx.xx:8000/xx',
+  'connector.table' = 'car_info',
+  'connector.username' = 'xx',
+  'connector.password' = 'xx',
+  'connector.lookup.cache.max-rows' = '10000',
+  'connector.lookup.cache.ttl' = '24h'
+);
+
+CREATE TABLE audi_cheaper_than_30w (
+  car_id STRING,
+  car_owner STRING,
+  car_brand STRING,
+  car_price INT
+)
+  WITH (
+  'connector.type' = 'dis',
+  'connector.region' = '',
+  'connector.channel' = 'disOutput',
+  'connector.partition-key' = 'car_id,car_owner',
+  'format.type' = 'csv'
+  );
+
+INSERT INTO audi_cheaper_than_30w
+SELECT a.car_id, b.car_owner, b.car_brand, b.car_price 
+FROM car_infos as a join db_info FOR SYSTEM_TIME AS OF a.proctime AS b on a.car_id = b.car_id;
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0320.html b/docs/dli/sqlreference/dli_08_0320.html new file mode 100644 index 00000000..b418787e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0320.html @@ -0,0 +1,114 @@ + + +

HBase Dimension Table

+

Function

Create an HBase dimension table to connect to the source stream.

+
+

Prerequisites

  • An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
  • If MRS HBase is used, IP addresses of all hosts in the MRS cluster have been added to host information of the enhanced datasource connection.


    +

    For details, see section "Modifying the Host Information" in the Data Lake Insight User Guide.

    +
+
+

Syntax

create table hbaseSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+ )
+with (
+  'connector.type' = 'hbase',
+  'connector.version' = '1.4.3',
+  'connector.table-name' = '',
+  'connector.zookeeper.quorum' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to hbase.

+

connector.version

+

Yes

+

The value must be 1.4.3.

+

connector.table-name

+

Yes

+

Table name in HBase

+

connector.zookeeper.quorum

+

Yes

+

ZooKeeper address

+

connector.zookeeper.znode.parent

+

No

+

Root directory for ZooKeeper. The default value is /hbase.

+
+
+
+

Example

create table hbaseSource(
+  id string,
+  i Row<score string>
+ ) with (
+   'connector.type' = 'hbase',
+   'connector.version' = '1.4.3',
+   'connector.table-name' = 'user',
+   'connector.zookeeper.quorum' = 'xxxx:2181'
+ );
+create table source1(
+  id string,
+  name string,
+  gender string,
+  age int,
+  address string,
+  proctime as PROCTIME()
+) with (
+  "connector.type" = "dis",
+  "connector.region" = "",
+  "connector.channel" = "read",
+  "connector.ak" = "xxxxxx",
+  "connector.sk" = "xxxxxx",
+  "format.type" = 'csv'
+);
+
+ create table hbaseSink(
+  rowkey string,
+  i Row<name string, gender string, age int, address string>,
+  j ROW<score string>
+ ) with (
+   'connector.type' = 'hbase',
+   'connector.version' = '1.4.3',
+   'connector.table-name' = 'score',
+   'connector.write.buffer-flush.max-rows' = '1',
+   'connector.zookeeper.quorum' = 'xxxx:2181'
+ );
+ insert into hbaseSink select d.id, ROW(name, gender, age, address), ROW(score) from source1 as d join hbaseSource for system_time as of d.proctime as h on d.id = h.id;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0321.html b/docs/dli/sqlreference/dli_08_0321.html new file mode 100644 index 00000000..51b2799b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0321.html @@ -0,0 +1,27 @@ + + +

Data Manipulation Language (DML)

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0322.html b/docs/dli/sqlreference/dli_08_0322.html new file mode 100644 index 00000000..795e749c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0322.html @@ -0,0 +1,197 @@ + + +

SELECT

+

SELECT

Syntax

+
SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

This clause is used to select data from a table.

+

ALL indicates that all results are returned.

+

DISTINCT indicates that the duplicated results are removed.

+

Precautions

+
  • The to-be-queried table must exist. Otherwise, an error is reported.
  • WHERE is used to specify the filtering condition, which can use arithmetic, relational, or logical operators.
  • GROUP BY is used to specify the grouping fields, which can be one or more fields.
+

Example

+

Select the orders that contain more than 3 units.

+
insert into temp SELECT  * FROM Orders WHERE units > 3; 
+
+ +
+

Insert a group of constant data.

+
insert into temp select 'Lily', 'male', 'student', 17;
+
+ +
+
+

WHERE Filtering Clause

Syntax

+
SELECT   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+
+ +
+

Description

+

This clause is used to filter the query results using the WHERE clause.

+

Precautions

+
  • The to-be-queried table must exist.
  • WHERE filters the records that do not meet the requirements.
+

Example

+

Select the orders that contain more than 3 and fewer than 10 units.

+
insert into temp SELECT  * FROM Orders
+  WHERE units > 3 and units < 10; 
+
+ +
+
+

HAVING Filtering Clause

Function

+

This clause is used to filter the query results using the HAVING clause.

+

Syntax

+
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

Generally, HAVING and GROUP BY are used together. GROUP BY applies first for grouping and HAVING then applies for filtering. The arithmetic operation and aggregate function are supported by the HAVING clause.

+

Precautions

+

If the filtering condition is subject to the query results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for filtering.

+

Example

+

Group the student table according to the name field and filter the records in which the maximum score is higher than 95 based on groups.

+
insert into temp SELECT name, max(score) FROM student
+  GROUP BY name
+  HAVING max(score) >95;
+
+ +
+
+

Column-Based GROUP BY

Function

+

This clause is used to group a table based on columns.

+

Syntax

+
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+
+ +
+

Description

+

Column-based GROUP BY can be categorized into single-column GROUP BY and multi-column GROUP BY.

+
  • Single-column GROUP BY indicates that the GROUP BY clause contains only one column.
  • Multi-column GROUP BY indicates that the GROUP BY clause contains multiple columns. The table will be grouped according to all fields in the GROUP BY clause. The records whose fields are the same are grouped into one group.
+

Precautions

+

GROUP BY generates updating results in stream processing.

+

Example

+

Group the student table according to the score and name fields and return the grouping results.

+
insert into temp SELECT name,score, max(score) FROM student 
+  GROUP BY name,score;
+
+ +
+
+

Expression-Based GROUP BY

Function

+

This clause is used to group a table according to expressions.

+

Syntax

+
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+
+ +
+

Description

+

groupItem can contain one or more fields. The fields can be processed by string functions, but not by aggregate functions.

+

Precautions

+

None

+

Example

+

Use the substring function to obtain a substring from the name field, group the student table by the obtained substring, and return each substring and the number of records.

+
insert into temp SELECT substring(name,6),count(name) FROM student
+  GROUP BY substring(name,6);
+
+ +
+
+

Grouping sets, Rollup, Cube

Function

+
  • GROUP BY GROUPING SETS generates a result set equivalent to that generated by multiple simple GROUP BY ... UNION ALL statements, but using GROUPING SETS is more efficient.
  • ROLLUP and CUBE generate multiple groups based on certain rules and then collect statistics by group.
  • The result set generated by CUBE contains all the combinations of values in the selected columns.
  • The result set generated by ROLLUP contains the combinations of a certain layer structure in the selected columns.
+
Syntax
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY groupingItem]
+
+

Description

+

Values of groupingItem can be Grouping sets(columnName [, columnName]*), Rollup(columnName [, columnName]*), and Cube(columnName [, columnName]*).

+

Precautions

+

None

+

Example

+

Return the aggregation results grouped separately by user and by product.

+
INSERT INTO temp SELECT SUM(amount)
+FROM Orders
+GROUP BY GROUPING SETS ((user), (product));
+
+
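For comparison, ROLLUP and CUBE over the same two columns differ only in which combinations they produce; the sketches below reuse the Orders table from the example above:

-- ROLLUP produces (user, product), (user), and the grand total
INSERT INTO temp SELECT SUM(amount)
FROM Orders
GROUP BY ROLLUP (user, product);

-- CUBE additionally produces (product), i.e. every combination of the selected columns
INSERT INTO temp SELECT SUM(amount)
FROM Orders
GROUP BY CUBE (user, product);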

GROUP BY Using HAVING

Function

+

This statement filters a table after grouping it using the HAVING clause.

+

Syntax

+
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

Generally, HAVING and GROUP BY are used together. GROUP BY applies first for grouping and HAVING then applies for filtering.

+

Precautions

+
  • If the filtering condition is subject to the query results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for filtering. HAVING and GROUP BY are used together. GROUP BY applies first for grouping and HAVING then applies for filtering.
  • Fields used in HAVING, except for those used for aggregate functions, must exist in GROUP BY.
  • The arithmetic operation and aggregate function are supported by the HAVING clause.
+

Example

+

Group the transactions according to num, use the HAVING clause to filter the records in which the maximum value derived from multiplying price with amount is higher than 5000, and return the filtered results.

+
insert into temp SELECT num, max(price*amount) FROM transactions
+  WHERE time > '2016-06-01'
+  GROUP BY num
+  HAVING max(price*amount)>5000;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0323.html b/docs/dli/sqlreference/dli_08_0323.html new file mode 100644 index 00000000..56de6382 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0323.html @@ -0,0 +1,56 @@ + + +

Set Operations

+

UNION/UNION ALL/INTERSECT/EXCEPT

Syntax

+
1
query UNION [ ALL ] | Intersect | Except query
+
+ +
+

Description

+
  • UNION is used to return the union set of multiple query results.
  • INTERSECT is used to return the intersection of multiple query results.
  • EXCEPT is used to return the difference set of multiple query results.
+

Precautions

+
  • A set operation concatenates query results vertically under certain conditions. Each SELECT statement must return the same number of columns, and the column types must match; column names can be different.
  • By default, the duplicate records returned by UNION are removed. The duplicate records returned by UNION ALL are not removed.
+

Example

+

Output the union set of Orders1 and Orders2 without duplicate records.

+
insert into temp SELECT  * FROM Orders1
+  UNION SELECT  * FROM Orders2;
+
+ +
+
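Analogously, the following sketches return the records present in both Orders1 and Orders2, and the records present in Orders1 but not in Orders2:

insert into temp SELECT * FROM Orders1
  INTERSECT SELECT * FROM Orders2;

insert into temp SELECT * FROM Orders1
  EXCEPT SELECT * FROM Orders2;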
+

IN

Syntax

+
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  WHERE column_name IN (value (, value)* ) | query
+
+ +
+

Description

+

The IN operator allows multiple values to be specified in the WHERE clause. It returns true if the expression exists in the given table subquery.

+

Precautions

+

The subquery table must consist of a single column, and the data type of the column must be the same as that of the expression.

+

Example

+

Return the user and amount information for the orders whose product appears in the NewProducts table.

+
1
+2
+3
+4
+5
insert into temp SELECT user, amount
+FROM Orders
+WHERE product IN (
+    SELECT product FROM NewProducts
+);
+
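The value-list form of the syntax can be sketched as follows; the product values 'milk' and 'bread' are hypothetical.

insert into temp SELECT user, amount
FROM Orders
WHERE product IN ('milk', 'bread');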
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0324.html b/docs/dli/sqlreference/dli_08_0324.html new file mode 100644 index 00000000..a5cc4e30 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0324.html @@ -0,0 +1,293 @@ + + +

Window

+

GROUP WINDOW

Description

+

Group Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:

+
  • Group window functions
    Table 1 Group window functions

    Grouping Window Function

    +

    Description

    +

    TUMBLE(time_attr, interval)

    +

    Defines a tumbling time window. A tumbling time window assigns rows to non-overlapping, continuous windows with a fixed duration (interval). For example, a tumbling window of 5 minutes groups rows in 5 minutes intervals. Tumbling windows can be defined on event-time (stream + batch) or processing-time (stream).

    +

    HOP(time_attr, interval, interval)

    +

    Defines a hopping time window (called sliding window in the Table API). A hopping time window has a fixed duration (second interval parameter) and hops by a specified hop interval (first interval parameter). If the hop interval is smaller than the window size, hopping windows are overlapping. Thus, rows can be assigned to multiple windows. For example, a hopping window of 15 minutes size and 5 minute hop interval assigns each row to 3 different windows of 15 minute size, which are evaluated in an interval of 5 minutes. Hopping windows can be defined on event-time (stream + batch) or processing-time (stream).

    +

    SESSION(time_attr, interval)

    +

    Defines a session time window. Session time windows do not have a fixed duration but their bounds are defined by a time interval of inactivity, i.e., a session window is closed if no event appears for a defined gap period. For example a session window with a 30 minute gap starts when a row is observed after 30 minutes inactivity (otherwise the row would be added to an existing window) and is closed if no row is added within 30 minutes. Session windows can work on event-time (stream + batch) or processing-time (stream).

    +
    +
    +

    Notes:

    +

    In streaming mode, the time_attr argument of the group window function must refer to a valid time attribute that specifies the processing time or event time of rows.

    +

    In batch mode, the time_attr argument of the group window function must be an attribute of type TIMESTAMP.

    +
  • Window auxiliary functions
    The start and end timestamps of group windows as well as time attributes can be selected with the following auxiliary functions. +
    Table 2 Window auxiliary functions

    Auxiliary Function

    +

    Description

    +

    TUMBLE_START(time_attr, interval)

    +

    HOP_START(time_attr, interval, interval)

    +

    SESSION_START(time_attr, interval)

    +

    Returns the timestamp of the inclusive lower bound of the corresponding tumbling, hopping, or session window.

    +

    TUMBLE_END(time_attr, interval)

    +

    HOP_END(time_attr, interval, interval)

    +

    SESSION_END(time_attr, interval)

    +

    Returns the timestamp of the exclusive upper bound of the corresponding tumbling, hopping, or session window.

    +

    Note: The exclusive upper bound timestamp cannot be used as a rowtime attribute in subsequent time-based operations, such as interval joins and group window or over window aggregations.

    +

    TUMBLE_ROWTIME(time_attr, interval)

    +

    HOP_ROWTIME(time_attr, interval, interval)

    +

    SESSION_ROWTIME(time_attr, interval)

    +

    Returns the timestamp of the inclusive upper bound of the corresponding tumbling, hopping, or session window. The resulting attribute is a rowtime attribute that can be used in subsequent time-based operations such as interval joins and group window or over window aggregations.

    +

    TUMBLE_PROCTIME(time_attr, interval)

    +

    HOP_PROCTIME(time_attr, interval, interval)

    +

    SESSION_PROCTIME(time_attr, interval)

    +

    Returns a proctime attribute that can be used in subsequent time-based operations such as interval joins and group window or over window aggregations.

    +
    +
    +
    +

    Note: Auxiliary functions must be called with exactly the same arguments as the group window function in the GROUP BY clause.

    +
+

Example

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
// Calculate the SUM every day (event time).
+insert into temp SELECT name,
+    TUMBLE_START(ts, INTERVAL '1' DAY) as wStart,
+    SUM(amount)
+    FROM Orders
+    GROUP BY TUMBLE(ts, INTERVAL '1' DAY), name;
+
+//Calculate the SUM every day (processing time). 
+insert into temp SELECT name, 
+    SUM(amount) 
+    FROM Orders 
+    GROUP BY TUMBLE(proctime, INTERVAL '1' DAY), name;
+
+//Calculate the SUM over the recent 24 hours every hour (event time).
+insert into temp SELECT product, 
+    SUM(amount) 
+    FROM Orders 
+    GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '1' DAY), product;
+
+//Calculate the SUM of each session and an inactive interval every 12 hours (event time).
+insert into temp SELECT name, 
+    SESSION_START(ts, INTERVAL '12' HOUR) AS sStart,
+    SESSION_END(ts, INTERVAL '12' HOUR) AS sEnd,
+    SUM(amount)
+    FROM Orders
+    GROUP BY SESSION(ts, INTERVAL '12' HOUR), name;
+
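A sketch (assuming the same Orders table with event-time attribute ts) of how TUMBLE_ROWTIME from Table 2 propagates a rowtime attribute, so that a second window can be applied on the result of the first:

// Pre-aggregate per minute, then aggregate the minute results per hour (event time).
insert into temp SELECT product, SUM(cnt)
    FROM (
        SELECT product,
            COUNT(*) AS cnt,
            TUMBLE_ROWTIME(ts, INTERVAL '1' MINUTE) AS rt
        FROM Orders
        GROUP BY TUMBLE(ts, INTERVAL '1' MINUTE), product
    )
    GROUP BY TUMBLE(rt, INTERVAL '1' HOUR), product;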
+ +
+
+

TUMBLE WINDOW Extension

Function

+
The extension functions of the DLI tumbling window are as follows:
  • Periodical tumbling windows for lower latency

    Before the tumbling window ends, the window can be periodically triggered based on the configured frequency. The compute result from the start to the current time is output, which does not affect the final output. The latest result can be viewed in each period before the window ends.

    +
  • Custom latency for higher data accuracy

You can set a latency for the end of the window. The output of the window is updated according to the configured latency each time a piece of late data arrives.

    +
+
+

Precautions

+

If you use insert to write results into the sink, the sink must support the upsert mode.

+

Syntax

+
TUMBLE(time_attr, window_interval, period_interval, lateness_interval)
+

Example

+
If the current time_attr attribute column is testtime and the window interval is 10 seconds, the statement is as follows:
TUMBLE(testtime, INTERVAL '10' SECOND, INTERVAL '10' SECOND, INTERVAL '10' SECOND)
+
+
+

Description

+ +
Table 3 Parameter description

Parameter

+

Description

+

Format

+

time_attr

+

Event time or processing time attribute column

+

-

+

window_interval

+

Duration of the window

+
  • Format 1: INTERVAL '10' SECOND

    The window interval is 10 seconds. You can change the value as needed.

    +
  • Format 2: INTERVAL '10' MINUTE

    The window interval is 10 minutes. You can change the value as needed.

    +
  • Format 3: INTERVAL '10' DAY

    The window interval is 10 days. You can change the value as needed.

    +
+

period_interval

+

Frequency of periodic triggering within the window range. That is, before the window ends, the output result is updated at an interval specified by period_interval from the time when the window starts. If this parameter is not set, the periodic triggering policy is not used by default.

+

lateness_interval

+

Time to postpone the end of the window. The system continues to collect the data that reaches the window within lateness_interval after the window ends. The output is updated for each piece of data that arrives within lateness_interval.

+
NOTE:

If the time window is for processing time, lateness_interval does not take effect.

+
+
+
+
Values of period_interval and lateness_interval cannot be negative.
  • If period_interval is set to 0, periodic triggering is disabled for the window.
  • If lateness_interval is set to 0, the latency after the window ends is disabled.
  • If neither parameter is set, both periodic triggering and latency are disabled and only the regular tumbling window functions are available.
  • If only the latency function is needed, set period_interval to INTERVAL '0' SECOND (see the sketch below).
+
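A sketch combining the parameters above, assuming an Orders source with event-time attribute ts: the window lasts 1 hour, a partial result is emitted every 1 minute, and data arriving up to 10 seconds after the window ends still updates the output (the sink must support upsert).

insert into temp SELECT name, SUM(amount)
FROM Orders
GROUP BY TUMBLE(ts, INTERVAL '1' HOUR, INTERVAL '1' MINUTE, INTERVAL '10' SECOND), name;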
+
+

OVER WINDOW

The difference between Over Window and Group Window is that one record is generated from one row in Over Window.

+

Syntax

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
SELECT agg1(attr1) OVER (
+  [PARTITION BY partition_name]
+  ORDER BY proctime|rowtime
+  ROWS
+  BETWEEN (UNBOUNDED|rowCOUNT) PRECEDING AND CURRENT ROW) FROM TABLENAME
+
+SELECT agg1(attr1) OVER (
+  [PARTITION BY partition_name]
+  ORDER BY proctime|rowtime
+  RANGE
+  BETWEEN (UNBOUNDED|timeInterval) PRECEDING AND CURRENT ROW) FROM TABLENAME
+
+ +
+

Description

+ +
Table 4 Parameter description

Parameter

+

Parameter Description

+

PARTITION BY

+

Indicates the partitioning key. Each group performs its calculation separately.

+

ORDER BY

+

Indicates the processing time or event time as the timestamp for data.

+

ROWS

+

Indicates the count window.

+

RANGE

+

Indicates the time window.

+
+
+

Precautions

+
  • All aggregates must be defined in the same window, that is, in the same partition, sort, and range.
  • Currently, only windows from PRECEDING (unbounded or bounded) to CURRENT ROW are supported. The range described by FOLLOWING is not supported.
  • ORDER BY must be specified for a single time attribute.
+

Example

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
// Calculate the count and sum from the time the job is enabled to the current time (in proctime).
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY proctime RANGE UNBOUNDED preceding) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY proctime RANGE UNBOUNDED preceding) as cnt2
+    FROM Orders;
+  
+//Calculate the count and sum over the current row and the four preceding rows (in proctime).
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY proctime ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY proctime ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) as cnt2
+    FROM Orders;
+
+//Calculate the count and sum over the last 60 seconds (in event time). Events are processed based on event time, which is the timeattr field in Orders.
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY timeattr RANGE BETWEEN INTERVAL '60' SECOND PRECEDING AND CURRENT ROW) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY timeattr RANGE BETWEEN INTERVAL '60' SECOND PRECEDING AND CURRENT ROW) as cnt2
+    FROM Orders;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0325.html b/docs/dli/sqlreference/dli_08_0325.html new file mode 100644 index 00000000..dbc57033 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0325.html @@ -0,0 +1,102 @@ + + +

JOIN

+

Equi-join

Syntax

+
1
+2
FROM tableExpression INNER | LEFT | RIGHT | FULL JOIN tableExpression
+  ON value11 = value21 [ AND value12 = value22]
+
+ +
+
+

Precautions

+
  • Currently, only equi-joins are supported, that is, joins with at least one conjunctive condition containing an equality predicate. Arbitrary cross or theta joins are not supported.
  • Tables are joined in the order in which they are specified in the FROM clause. Make sure to specify tables in an order that does not yield a cross join (Cartesian product), which is not supported and would cause a query to fail.
  • For streaming queries, the state required to compute the query result might grow infinitely, depending on the type of aggregation and the number of distinct grouping keys. Provide a query configuration with a valid retention interval to prevent excessive state size.
+

Example

+
SELECT *
+FROM Orders INNER JOIN Product ON Orders.productId = Product.id;
+
+SELECT *
+FROM Orders LEFT JOIN Product ON Orders.productId = Product.id;
+
+SELECT *
+FROM Orders RIGHT JOIN Product ON Orders.productId = Product.id;
+
+SELECT *
+FROM Orders FULL OUTER JOIN Product ON Orders.productId = Product.id;
+

Time-Windowed Join

Function

+

Each piece of data in a stream is joined with data within a specified time range in another stream.

+

Syntax

+
from t1 JOIN t2 ON t1.key = t2.key AND TIMEBOUND_EXPRESSION
+

Description

+

TIMEBOUND_EXPRESSION can be in either of the following formats:

+
  • L.time between LowerBound(R.time) and UpperBound(R.time)
  • R.time between LowerBound(L.time) and UpperBound(L.time)
  • Comparison expression with the time attributes (L.time/R.time)
+

Precautions

+

A time-windowed join requires at least one equi-join predicate and a join condition that bounds the time of both streams.

+

Such a condition can be defined by two range predicates (<, <=, >=, or >), a BETWEEN predicate, or a single equality predicate that compares time attributes of the same type (processing time or event time) of the two input tables.

+

For example, the following predicates are valid window join conditions:

+
  • ltime = rtime
  • ltime >= rtime AND ltime < rtime + INTERVAL '10' MINUTE
  • ltime BETWEEN rtime - INTERVAL '10' SECOND AND rtime + INTERVAL '5' SECOND
+
+

Example

+

Join all orders shipped within 4 hours with their associated shipments.

+
SELECT *
+FROM Orders o, Shipments s
+WHERE o.id = s.orderId AND
+      o.ordertime BETWEEN s.shiptime - INTERVAL '4' HOUR AND s.shiptime;
+

Array Expansion

Precautions

+

This clause is used to return a new row for each element in the given array. Unnesting WITH ORDINALITY is not yet supported.

+

Example

+
SELECT users, tag
+FROM Orders CROSS JOIN UNNEST(tags) AS t (tag);
+
+

User-Defined Table Functions

Function

+

This clause is used to join a table with the results of a table function. Each row of the left (outer) table is joined with all rows produced by the corresponding call of the table function.

+

Precautions

+

A left outer join against a lateral table requires a TRUE literal in the ON clause.

+

Example

+

A row of the left (outer) table is dropped if its table function call returns an empty result.

+
SELECT users, tag
+FROM Orders, LATERAL TABLE(unnest_udtf(tags)) t AS tag;
+

If a table function call returns an empty result, the corresponding outer row is preserved and the result is padded with null values.

+
SELECT users, tag
+FROM Orders LEFT JOIN LATERAL TABLE(unnest_udtf(tags)) t AS tag ON TRUE;
+
+

Temporal Table Function Join

Function

This clause joins each record of an append-only stream with the version of a temporal table function result that corresponds to the record's time attribute.

Precautions

+

Currently only inner join and left outer join with temporal tables are supported.

+

Example

+

Assuming Rates is a temporal table function, the join can be expressed in SQL as follows:

+
SELECT
+  o_amount, r_rate
+FROM
+  Orders,
+  LATERAL TABLE (Rates(o_proctime))
+WHERE
+  r_currency = o_currency;
+
+

Join Temporal Tables

Function

+

This clause is used to join a temporal table.

+

Syntax

+
SELECT column-names
+FROM table1  [AS <alias1>]
+[LEFT] JOIN table2 FOR SYSTEM_TIME AS OF table1.proctime [AS <alias2>]
+ON table1.column-name1 = table2.key-name1
+

Description

+
  • table1.proctime indicates the processing time attribute (computed column) of table1.
  • FOR SYSTEM_TIME AS OF table1.proctime indicates that when the records in the left table are joined with the dimension table on the right, only the snapshot data is used for matching the current processing time dimension table.
+

Precautions

+

Only inner and left joins are supported for temporal tables with processing time attributes.

+

Example

+

LatestRates is a temporal table that is materialized with the latest rate.

+
SELECT
+  o.amount, o.currency, r.rate, o.amount * r.rate
+FROM
+  Orders AS o
+  JOIN LatestRates FOR SYSTEM_TIME AS OF o.proctime AS r
+  ON r.currency = o.currency;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0326.html b/docs/dli/sqlreference/dli_08_0326.html new file mode 100644 index 00000000..008f65a1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0326.html @@ -0,0 +1,30 @@ + + +

OrderBy & Limit

+

OrderBy

Function

+

This clause is used to sort data in ascending order on a time attribute.

+

Precautions

+

Currently, only sorting by time attribute is supported.

+

Example

+

Sort data in ascending order on the time attribute.

+
SELECT *
+FROM Orders
+ORDER BY orderTime;
+
+

Limit

Function

+

This clause is used to constrain the number of rows returned.

+

Precautions

+

This clause is used in conjunction with ORDER BY to ensure that the results are deterministic.

+

Example

+
SELECT *
+FROM Orders
+ORDER BY orderTime
+LIMIT 3;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0327.html b/docs/dli/sqlreference/dli_08_0327.html new file mode 100644 index 00000000..ba59216b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0327.html @@ -0,0 +1,32 @@ + + +

Top-N

+

Function

Top-N queries ask for the N smallest or largest values ordered by columns. Both smallest and largest value sets are considered Top-N queries. Top-N queries are useful where you need to display only the N bottom-most or top-most records from a batch/streaming table on a condition.

+
+

Syntax

SELECT [column_list]
+FROM (
+   SELECT [column_list],
+     ROW_NUMBER() OVER ([PARTITION BY col1[, col2...]]
+       ORDER BY col1 [asc|desc][, col2 [asc|desc]...]) AS rownum
+   FROM table_name)
+WHERE rownum <= N [AND conditions]
+
+

Description

  • ROW_NUMBER(): Assigns a unique, consecutive number to each row within the current partition, starting from one. Currently, ROW_NUMBER is the only supported over window function; RANK() and DENSE_RANK() may be supported in the future.
  • PARTITION BY col1[, col2...]: Specifies the partition columns. Each partition has its own Top-N result.
  • ORDER BY col1 [asc|desc][, col2 [asc|desc]...]: Specifies the ordering columns. The ordering directions can differ across columns.
  • WHERE rownum <= N: The rownum <= N condition is required for Flink to recognize the query as a Top-N query. N represents the number of smallest or largest records to retain.
  • [AND conditions]: Other conditions can be added to the WHERE clause, but they can only be combined with rownum <= N using the AND conjunction.
+
+

Important Notes

  • The Top-N query is result-updating.
  • Flink SQL sorts the input data stream according to the order key; if the top N records change, the changed records are sent to downstream operators as retraction/update records.
  • If the Top-N records need to be stored in external storage, the result table should have the same unique key as the Top-N query.
+
+

Example

The following example obtains the top five products per category that have the maximum sales in real time.

+
SELECT * 
+  FROM ( 
+     SELECT *,
+         ROW_NUMBER() OVER (PARTITION BY category ORDER BY sales DESC) as row_num
+     FROM ShopSales)
+  WHERE row_num <= 5;
+
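A sketch of the bottom-N variant, assuming the same ShopSales table: ordering in ascending order keeps the three products with the minimum sales per category.

SELECT *
  FROM (
     SELECT *,
         ROW_NUMBER() OVER (PARTITION BY category ORDER BY sales ASC) as row_num
     FROM ShopSales)
  WHERE row_num <= 3;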
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0328.html b/docs/dli/sqlreference/dli_08_0328.html new file mode 100644 index 00000000..9df115e3 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0328.html @@ -0,0 +1,32 @@ + + +

Deduplication

+

Function

Deduplication removes rows that are duplicated over a set of columns, keeping only the first or the last one.

+
+

Syntax

SELECT [column_list]
+FROM (
+   SELECT [column_list],
+     ROW_NUMBER() OVER ([PARTITION BY col1[, col2...]]
+       ORDER BY time_attr [asc|desc]) AS rownum
+   FROM table_name)
+WHERE rownum = 1
+
+

Description

  • ROW_NUMBER(): Assigns a unique, sequential number to each row, starting with one.
  • PARTITION BY col1[, col2...]: Specifies the partition columns, that is, the deduplication key.
  • ORDER BY time_attr [asc|desc]: Specifies the ordering column; it must be a time attribute. Currently, Flink supports proctime only. Ordering by ASC keeps the first row; ordering by DESC keeps the last row.
  • WHERE rownum = 1: The rownum = 1 condition is required for Flink to recognize the query as a deduplication query.
+
+

Precautions

None

+
+

Example

The following example shows how to remove duplicate rows on order_id. proctime is a processing time attribute.

+
SELECT order_id, user, product, number
+  FROM (
+     SELECT *,
+         ROW_NUMBER() OVER (PARTITION BY order_id ORDER BY proctime ASC) as row_num
+     FROM Orders)
+  WHERE row_num = 1;
+
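A sketch of the keep-last variant, assuming the same Orders table: ordering by DESC keeps the most recent row for each order_id.

SELECT order_id, user, product, number
  FROM (
     SELECT *,
         ROW_NUMBER() OVER (PARTITION BY order_id ORDER BY proctime DESC) as row_num
     FROM Orders)
  WHERE row_num = 1;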
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0329.html b/docs/dli/sqlreference/dli_08_0329.html new file mode 100644 index 00000000..c551439e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0329.html @@ -0,0 +1,17 @@ + + +

Functions

+
+
+ diff --git a/docs/dli/sqlreference/dli_08_0330.html b/docs/dli/sqlreference/dli_08_0330.html new file mode 100644 index 00000000..5ff303ec --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0330.html @@ -0,0 +1,187 @@ + + +

User-Defined Functions

+

Overview

DLI supports the following three types of user-defined functions (UDFs):

+
+
  • Regular UDF: takes in one or more input parameters and returns a single result.
  • User-defined table-generating function (UDTF): takes in one or more input parameters and returns multiple rows or columns.
  • User-defined aggregate function (UDAF): aggregates multiple records into one value.
+

UDFs can only be used in dedicated queues.

+
+

POM Dependency

<dependency>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-table-common</artifactId>
+        <version>1.10.0</version>
+        <scope>provided</scope>
+</dependency>
+
+

Important Notes

  • Currently, Python is not supported for programming UDFs, UDTFs, and UDAFs.
  • If you use IntelliJ IDEA to debug the created UDF, select include dependencies with "Provided" scope. Otherwise, the dependency packages in the POM file cannot be loaded for local debugging.

    The following uses IntelliJ IDEA 2020.2 as an example:

    +
    1. On the IntelliJ IDEA page, select the configuration file you need to debug and click Edit Configurations.

      +
    2. On the Run/Debug Configurations page, select include dependencies with "Provided" scope.

      +
    3. Click OK.
    +
+
+

Using UDFs

  1. Encapsulate the implemented UDFs into a JAR package and upload the package to OBS.
  2. In the navigation pane of the DLI management console, choose Data Management > Package Management. On the displayed page, click Create and use the JAR package uploaded to OBS to create a package.
  3. In the left navigation, choose Job Management and click Flink Jobs. Locate the row where the target resides and click Edit in the Operation column to switch to the page where you can edit the job.
  4. Click the Running Parameters tab of your job, select the UDF JAR and click Save.
  5. Add a statement in the following form to your SQL statements to use the functions (udf_name and class_path are placeholders; see the CREATE FUNCTION examples in the sections below): CREATE FUNCTION udf_name AS 'class_path';
+
+

UDF

The regular UDF must inherit the ScalarFunction function and implement the eval method. The open and close functions are optional.

+
+

Example code

+
import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.ScalarFunction;
+public class UdfScalarFunction extends ScalarFunction {
+  private int factor = 12;
+  public UdfScalarFunction() {
+    this.factor = 12;
+  }
+  /**
+   * (optional) Initialization
+   * @param context
+   */
+  @Override
+  public void open(FunctionContext context) {}
+  /**
+   * Custom logic
+   * @param s
+   * @return
+   */
+   public int eval(String s) {
+     return s.hashCode() * factor;
+   }
+   /**
+    * Optional
+    */
+   @Override
+   public void close() {}
+}
+

Example

+
1
+2
CREATE FUNCTION udf_test AS 'com.company.udf.UdfScalarFunction';
+INSERT INTO sink_stream select udf_test(attr) FROM source_stream;
+
+ +
+

UDTF

The UDTF must inherit the TableFunction function and implement the eval method. The open and close functions are optional. If the UDTF needs to return multiple columns, you only need to declare the returned value as Tuple or Row. If Row is used, you need to overload the getResultType method to declare the returned field type.

+
+

Example code

+
import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.TableFunction;
+import org.apache.flink.types.Row;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+public class UdfTableFunction extends TableFunction<Row> {
+  private Logger log = LoggerFactory.getLogger(TableFunction.class);
+  /**
+   * (optional) Initialization
+   * @param context
+   */
+  @Override
+  public void open(FunctionContext context) {}
+  public void eval(String str, String split) {
+    for (String s : str.split(split)) {
+      Row row = new Row(2);
+      row.setField(0, s);
+      row.setField(1, s.length());
+      collect(row);
+    }
+  }
+  /**
+   * Declare the type returned by the function
+   * @return
+   */
+  @Override
+  public TypeInformation<Row> getResultType() {
+  return Types.ROW(Types.STRING, Types.INT);
+  }
+  /**
+    * Optional
+   */
+  @Override
+  public void close() {}
+ }
+

Example

+

The UDTF supports CROSS JOIN and LEFT JOIN. When the UDTF is used, the LATERAL and TABLE keywords must be included.

+
  • CROSS JOIN: does not output the data of a row in the left table if the UDTF does not output the result for the data of the row.
  • LEFT JOIN: outputs the data of a row in the left table even if the UDTF does not output the result for the data of the row, but pads null with UDTF-related fields.
+
1
+2
+3
+4
+5
+6
+7
CREATE FUNCTION udtf_test AS 'com.company.udf.TableFunction';
+// CROSS JOIN
+INSERT INTO sink_stream select subValue, length FROM source_stream, LATERAL
+TABLE(udtf_test(attr, ',')) as T(subValue, length);
+// LEFT JOIN
+INSERT INTO sink_stream select subValue, length FROM source_stream LEFT JOIN LATERAL
+TABLE(udtf_test(attr, ',')) as T(subValue, length) ON TRUE;
+
+ +
+

UDAF

The UDAF must inherit the AggregateFunction function. You need to create an accumulator for storing the computing result, for example, WeightedAvgAccum in the following example code.

+
+

Example code

+
public class WeightedAvgAccum {
+public long sum = 0;
+public int count = 0;
+}
+

+
import org.apache.flink.table.functions.AggregateFunction;
+import java.util.Iterator;
+/**
+* The first type variable is the type returned by the aggregation function, and the second type variable is of the Accumulator type.
+* Weighted Average user-defined aggregate function.
+*/
+public class UdfAggFunction extends AggregateFunction<Long, WeightedAvgAccum> {
+// Initialize the accumulator.
+  @Override
+  public WeightedAvgAccum createAccumulator() {
+    return new WeightedAvgAccum();
+  }
+// Return the intermediate computing value stored in the accumulator.
+  @Override
+  public Long getValue(WeightedAvgAccum acc) {
+    if (acc.count == 0) {
+       return null;
+    } else {
+      return acc.sum / acc.count;
+ }
+}
+// Update the intermediate computing value according to the input.
+public void accumulate(WeightedAvgAccum acc, long iValue) {
+acc.sum += iValue;
+acc.count += 1;
+}
+// Perform the retraction operation, which is opposite to the accumulate operation.
+public void retract(WeightedAvgAccum acc, long iValue) {
+acc.sum -= iValue;
+acc.count -= 1;
+}
+// Combine multiple accumulator values.
+public void merge(WeightedAvgAccum acc, Iterable<WeightedAvgAccum> it) {
+Iterator<WeightedAvgAccum> iter = it.iterator();
+while (iter.hasNext()) {
+WeightedAvgAccum a = iter.next();
+acc.count += a.count;
+acc.sum += a.sum;
+}
+}
+// Reset the intermediate computing value.
+public void resetAccumulator(WeightedAvgAccum acc) {
+acc.count = 0;
+acc.sum = 0L;
+}
+}
+

Example

+
1
+2
CREATE FUNCTION udaf_test AS 'com.company.udf.UdfAggFunction';
+INSERT INTO sink_stream SELECT udaf_test(attr2) FROM source_stream GROUP BY attr1;
+
+ +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0331.html b/docs/dli/sqlreference/dli_08_0331.html new file mode 100644 index 00000000..75de0d7f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0331.html @@ -0,0 +1,35 @@ + + +

Built-In Functions

+
+
+ diff --git a/docs/dli/sqlreference/dli_08_0332.html b/docs/dli/sqlreference/dli_08_0332.html new file mode 100644 index 00000000..4f855083 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0332.html @@ -0,0 +1,573 @@ + + +

Mathematical Operation Functions

+

Relational Operators

All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.

+

Relational operators are binary operators. The two compared data types must be the same or must support implicit conversion.

+

Table 1 lists all relational operators supported by Flink SQL.

+ +
Table 1 Relational Operators

Operator

+

Returned Data Type

+

Description

+

A = B

+

BOOLEAN

+

If A is equal to B, then TRUE is returned. Otherwise, FALSE is returned.

+

A <> B

+

BOOLEAN

+

If A is not equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned. This operator follows the standard SQL syntax.

+

A < B

+

BOOLEAN

+

If A is less than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A <= B

+

BOOLEAN

+

If A is less than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A > B

+

BOOLEAN

+

If A is greater than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A >= B

+

BOOLEAN

+

If A is greater than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A IS NULL

+

BOOLEAN

+

If A is NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS NOT NULL

+

BOOLEAN

+

If A is not NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS DISTINCT FROM B

+

BOOLEAN

+

If A is not equal to B, TRUE is returned. NULL values are treated as identical.

+

A IS NOT DISTINCT FROM B

+

BOOLEAN

+

If A is equal to B, TRUE is returned. NULL values are treated as identical.

+

A BETWEEN [ASYMMETRIC | SYMMETRIC] B AND C

+

BOOLEAN

+

If A is greater than or equal to B but less than or equal to C, TRUE is returned.

+
  • ASYMMETRIC: indicates that B and C are location-related.

    For example, "A BETWEEN ASYMMETRIC B AND C" is equivalent to "A BETWEEN B AND C".

    +
  • SYMMETRIC: indicates that B and C are not location-related.

    For example, "A BETWEEN SYMMETRIC B AND C" is equivalent to "A BETWEEN B AND C) OR (A BETWEEN C AND B".

    +
+

A NOT BETWEEN B [ASYMMETRIC | SYMMETRIC]AND C

+

BOOLEAN

+

If A is less than B or greater than C, TRUE is returned.

+
  • ASYMMETRIC: indicates that B and C are location-related.

    For example, "A NOT BETWEEN ASYMMETRIC B AND C" is equivalent to "A NOT BETWEEN B AND C".

    +
  • SYMMETRIC: indicates that B and C are not location-related.

    For example, "A NOT BETWEEN SYMMETRIC B AND C" is equivalent to "(A NOT BETWEEN B AND C) OR (A NOT BETWEEN C AND B)".

    +
+

A LIKE B [ ESCAPE C ]

+

BOOLEAN

+

If A matches pattern B, TRUE is returned. The escape character C can be defined as required.

+

A NOT LIKE B [ ESCAPE C ]

+

BOOLEAN

+

If A does not match pattern B, TRUE is returned. The escape character C can be defined as required.

+

A SIMILAR TO B [ ESCAPE C ]

+

BOOLEAN

+

If A matches regular expression B, TRUE is returned. The escape character C can be defined as required.

+

A NOT SIMILAR TO B [ ESCAPE C ]

+

BOOLEAN

+

If A does not match regular expression B, TRUE is returned. The escape character C can be defined as required.

+

value IN (value [, value]* )

+

BOOLEAN

+

If the value is equal to any value in the list, TRUE is returned.

+

value NOT IN (value [, value]* )

+

BOOLEAN

+

If the value is not equal to any value in the list, TRUE is returned.

+

EXISTS (sub-query)

+

BOOLEAN

+

If sub-query returns at least one row, TRUE is returned.

+

value IN (sub-query)

+

BOOLEAN

+

If value is equal to a row returned by subquery, TRUE is returned.

+

value NOT IN (sub-query)

+

BOOLEAN

+

If value is not equal to a row returned by subquery, TRUE is returned.

+
+
+

Precautions

+
  • Values of the double, real, and float types may differ in precision. Using the equal sign (=) to compare two values of the double type is not recommended. Instead, subtract the two values and take the absolute value of the difference; if the absolute value is small enough, the two values can be regarded as equal. For example:
    abs(0.9999999999 - 1.0000000000) < 0.000000001 // 0.9999999999 and 1.0000000000 have ten decimal places of precision, while 0.000000001 has nine. Therefore, 0.9999999999 can be regarded as equal to 1.0000000000.
    +
+
  • Comparison between data of the numeric type and character strings is allowed. During comparison using relational operators, including >, <, ≤, and ≥, data of the string type is converted to numeric type by default. No characters other than numeric characters are allowed.
  • Character strings can be compared using relational operators.
+
+
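A brief sketch of several operators from Table 1; the table T and its columns a and b are hypothetical.

SELECT
    a = b,
    a IS DISTINCT FROM b,
    a BETWEEN 1 AND 10,
    a IN (1, 2, 3)
FROM T;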

Logical Operators

Common logical operators are AND, OR, and NOT. Their priority order is NOT > AND > OR.

+

Table 2 lists the calculation rules. A and B indicate logical expressions.

+ +
Table 2 Logical Operators

Operator

+

Result Type

+

Description

+

A OR B

+

BOOLEAN

+

If A or B is TRUE, TRUE is returned. Three-valued logic is supported.

+

A AND B

+

BOOLEAN

+

If both A and B are TRUE, TRUE is returned. Three-valued logic is supported.

+

NOT A

+

BOOLEAN

+

If A is FALSE, TRUE is returned. If A is UNKNOWN, UNKNOWN is returned.

+

A IS FALSE

+

BOOLEAN

+

If A is TRUE, TRUE is returned. If A is UNKNOWN, FALSE is returned.

+

A IS NOT FALSE

+

BOOLEAN

+

If A is not FALSE, TRUE is returned. If A is UNKNOWN, TRUE is returned.

+

A IS TRUE

+

BOOLEAN

+

If A is TRUE, TRUE is returned. If A is UNKNOWN, FALSE is returned.

+

A IS NOT TRUE

+

BOOLEAN

+

If A is not TRUE, TRUE is returned. If A is UNKNOWN, TRUE is returned.

+

A IS UNKNOWN

+

BOOLEAN

+

If A is UNKNOWN, TRUE is returned.

+

A IS NOT UNKNOWN

+

BOOLEAN

+

If A is not UNKNOWN, TRUE is returned.

+
+
+

Precautions

+

Only data of the Boolean type can be used for calculation using logical operators. Implicit type conversion is not supported.

+
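A brief sketch of the three-valued logic described in Table 2; the table T and its nullable BOOLEAN column flag are hypothetical.

SELECT
    flag OR TRUE,      -- TRUE even when flag is NULL
    flag AND TRUE,     -- UNKNOWN (NULL) when flag is NULL
    flag IS UNKNOWN    -- TRUE when flag is NULL
FROM T;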
+

Arithmetic Operators

Arithmetic operators include binary operators and unary operators, for all of which, the returned results are of the numeric type. Table 3 lists arithmetic operators supported by Flink SQL.

+ +
Table 3 Arithmetic Operators

Operator

+

Result Type

+

Description

+

+ numeric

+

All numeric types

+

Returns numbers.

+

- numeric

+

All numeric types

+

Returns negative numbers.

+

A + B

+

All numeric types

+

A plus B. The result type is associated with the operation data type. For example, if floating-point number is added to an integer, the result will be a floating-point number.

+

A - B

+

All numeric types

+

A minus B. The result type is associated with the operation data type.

+

A * B

+

All numeric types

+

Multiply A and B. The result type is associated with the operation data type.

+

A / B

+

All numeric types

+

Divide A by B. The result is a double-precision number.

+

POWER(A, B)

+

All numeric types

+

Returns the value of A raised to the power B.

+

ABS(numeric)

+

All numeric types

+

Returns the absolute value of a specified value.

+

MOD(A, B)

+

All numeric types

+

Returns the remainder (modulus) of A divided by B. A negative value is returned only when A is a negative value.

+

SQRT(A)

+

All numeric types

+

Returns the square root of A.

+

LN(A)

+

All numeric types

+

Returns the natural logarithm of A (base e).

+

LOG10(A)

+

All numeric types

+

Returns the base 10 logarithms of A.

+

LOG2(A)

+

All numeric types

+

Returns the base 2 logarithm of A.

+

LOG(B)

+

LOG(A, B)

+

All numeric types

+

When called with one argument, returns the natural logarithm of B.

+

When called with two arguments, this function returns the logarithm of B to the base A.

+

B must be greater than 0 and A must be greater than 1.

+

EXP(A)

+

All numeric types

+

Returns the value of e raised to the power of A.

+

CEIL(A)

+

CEILING(A)

+

All numeric types

+

Returns the smallest integer that is greater than or equal to A. For example, ceil(21.2) = 22.

+

FLOOR(A)

+

All numeric types

+

Returns the largest integer that is less than or equal to A. For example, floor(21.2) = 21.

+

SIN(A)

+

All numeric types

+

Returns the sine value of A.

+

COS(A)

+

All numeric types

+

Returns the cosine value of A.

+

TAN(A)

+

All numeric types

+

Returns the tangent value of A.

+

COT(A)

+

All numeric types

+

Returns the cotangent value of A.

+

ASIN(A)

+

All numeric types

+

Returns the arc sine value of A.

+

ACOS(A)

+

All numeric types

+

Returns the arc cosine value of A.

+

ATAN(A)

+

All numeric types

+

Returns the arc tangent value of A.

+

ATAN2(A, B)

+

All numeric types

+

Returns the arc tangent of a coordinate (A, B).

+

COSH(A)

+

All numeric types

+

Returns the hyperbolic cosine of A. Return value type is DOUBLE.

+

DEGREES(A)

+

All numeric types

+

Converts A from radians to degrees.

+

RADIANS(A)

+

All numeric types

+

Converts A from degrees to radians.

+

SIGN(A)

+

All numeric types

+

Returns the sign of A. 1 is returned if A is positive. –1 is returned if A is negative. Otherwise, 0 is returned.

+

ROUND(A, d)

+

All numeric types

+

Returns a number rounded to d decimal places for A. For example: round(21.263,2) = 21.26.

+

PI

+

All numeric types

+

Returns the value of pi.

+

E()

+

All numeric types

+

Returns the value of e.

+

RAND()

+

All numeric types

+

Returns a pseudorandom double value in the range [0.0, 1.0).

+

RAND(A)

+

All numeric types

+

Returns a pseudorandom double value in the range [0.0, 1.0) with an initial seed A. Two RAND functions will return identical sequences of numbers if they have the same initial seed.

+

RAND_INTEGER(A)

+

All numeric types

+

Returns a pseudorandom integer value in the range [0, A).

+

RAND_INTEGER(A, B)

+

All numeric types

+

Returns a pseudorandom integer value in the range [0, B) with an initial seed A.

+

UUID()

+

STRING

+

Returns a UUID string.

+

BIN(A)

+

STRING

+

Returns a string representation of integer A in binary format. Returns NULL if A is NULL.

+

HEX(A)

+

HEX(B)

+

STRING

+

Returns a string representation of an integer A value or a string B in hex format. Returns NULL if the A or B is NULL.

+

TRUNCATE(A, d)

+

All numeric types

+

Returns A truncated to d decimal places. If d is omitted, it defaults to 0. Returns NULL if A or d is NULL.

Example: truncate(42.345, 2) = 42.340

truncate(42.345) = 42.000

+

PI()

+

All numeric types

+

Returns the value of pi.

+
+
+

Precautions

+

Data of the string type is not allowed in arithmetic operations.

+
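A brief sketch combining several functions from Table 3; the table T is hypothetical, and the expected results are shown as comments.

SELECT
    MOD(10, 3),          -- 1
    POWER(2, 10),        -- 1024.0
    ROUND(21.263, 2),    -- 21.26
    CEIL(21.2),          -- 22
    SIGN(-5)             -- -1
FROM T;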
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0333.html b/docs/dli/sqlreference/dli_08_0333.html new file mode 100644 index 00000000..5a660120 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0333.html @@ -0,0 +1,439 @@ + + +

String Functions

+
+
Table 1 String functions

SQL Function

+

Return Type

+

Description

+

string1 || string2

+

STRING

+

Returns the concatenation of string1 and string2.

+

CHAR_LENGTH(string)

+

CHARACTER_LENGTH(string)

+

INT

+

Returns the number of characters in the string.

+

UPPER(string)

+

STRING

+

Returns the string in uppercase.

+

LOWER(string)

+

STRING

+

Returns the string in lowercase.

+

POSITION(string1 IN string2)

+

INT

+

Returns the position (start from 1) of the first occurrence of string1 in string2; returns 0 if string1 cannot be found in string2.

+

TRIM([ BOTH | LEADING | TRAILING ] string1 FROM string2)

+

STRING

+

Returns a string that removes leading and/or trailing characters string1 from string2.

+

LTRIM(string)

+

STRING

+

Returns a string that removes the left whitespaces from the specified string.

+

For example, LTRIM(' This is a test String.') returns "This is a test String.".

+

RTRIM(string)

+

STRING

+

Returns a string that removes the right whitespaces from the specified string.

+

For example, RTRIM('This is a test String. ') returns "This is a test String.".

+

REPEAT(string, integer)

+

STRING

+

Returns a string that repeats the base string integer times.

+

For example, REPEAT('This is a test String.', 2) returns "This is a test String.This is a test String.".

+

REGEXP_REPLACE(string1, string2, string3)

+

STRING

+

Returns a string from string1 with all the substrings that match a regular expression string2 consecutively being replaced with string3.

+

For example, REGEXP_REPLACE('foobar', 'oo|ar', '') returns "fb".

+

REGEXP_REPLACE('ab\ab', '\\', 'e') returns "abeab".

+

OVERLAY(string1 PLACING string2 FROM integer1 [ FOR integer2 ])

+

STRING

+

Returns a string that replaces integer2 characters of STRING1 with STRING2 from position integer1.

+

The default value of integer2 is the length of string2.

+

For example, OVERLAY('This is an old string' PLACING ' new' FROM 10 FOR 5) returns "This is a new string".

+

SUBSTRING(string FROM integer1 [ FOR integer2 ])

+

STRING

+

Returns a substring of the specified string starting from position integer1 with length integer2 (to the end by default). If integer2 is not configured, the substring from integer1 to the end is returned by default.

+

REPLACE(string1, string2, string3)

+

STRING

+

Returns a new string which replaces all the occurrences of string2 with string3 (non-overlapping) from string1.

+

For example, REPLACE('hello world', 'world', 'flink') returns "hello flink"; REPLACE('ababab', 'abab', 'z') returns "zab".

+

REPLACE('ab\\ab', '\\', 'e') returns "abeab".

+

REGEXP_EXTRACT(string1, string2[, integer])

+

STRING

+

Returns a string extracted from string1 using the regular expression string2 and the regex match group index integer.

+

Returns NULL, if the parameter is NULL or the regular expression is invalid.

+

For example, REGEXP_EXTRACT('foothebar', 'foo(.*?)(bar)', 2) returns "bar".

+

INITCAP(string)

+

STRING

+

Returns a new form of string with the first character of each word converted to uppercase and the remaining characters converted to lowercase.

+

CONCAT(string1, string2,...)

+

STRING

+

Returns a string that concatenates string1, string2, ….

+

For example, CONCAT('AA', 'BB', 'CC') returns "AABBCC".

+

CONCAT_WS(string1, string2, string3,...)

+

STRING

+

Returns a string that concatenates string2, string3, … with a separator string1. The separator is added between the strings to be concatenated. Returns NULL if string1 is NULL. If other arguments are NULL, this function automatically skips NULL arguments.

+

For example, CONCAT_WS('~', 'AA', NULL, 'BB', '', 'CC') returns "AA~BB~~CC".

+

LPAD(string1, integer, string2)

+

STRING

+

Returns a new string from string1 left-padded with string2 to a length of integer characters.

+

If any argument is NULL, NULL is returned.

+

If integer is negative, NULL is returned.

+

If the length of string1 is greater than integer, string1 is shortened to integer characters.

+

For example, LPAD('hi', 4, '??') returns "??hi".

+

LPAD('hi',1,'??') returns "h".

+

RPAD(string1, integer, string2)

+

STRING

+

Returns a new string from string1 right-padded with string2 to a length of integer characters.

+

If any argument is NULL, NULL is returned.

+

If integer is negative, NULL is returned.

+

If the length of string1 is greater than integer, string1 is shortened to integer characters.

+

For example, RPAD('hi',4,'??') returns "hi??".

+

RPAD('hi',1,'??') returns "h".

+

FROM_BASE64(string)

+

STRING

+

Returns the base64-decoded result from string.

+

Returns NULL if string is NULL.

+

For example, FROM_BASE64('aGVsbG8gd29ybGQ=') returns "hello world".

+

TO_BASE64(string)

+

STRING

+

Returns the base64-encoded result from string.

+

Returns NULL if string is NULL.

+

For example, TO_BASE64('hello world') returns "aGVsbG8gd29ybGQ=".

+

ASCII(string)

+

INT

+

Returns the numeric value of the first character of string.

+

Returns NULL if string is NULL.

+

For example, ascii('abc') returns 97.

+

ascii(CAST(NULL AS VARCHAR)) returns NULL.

+

CHR(integer)

+

STRING

+

Returns the ASCII character having the binary equivalent to integer.

+

If integer is larger than 255, we will get the modulus of integer divided by 255 first, and returns CHR of the modulus.

+

Returns NULL if integer is NULL.

+

chr(97) returns a.

+

chr(353) returns a.

+

DECODE(binary, string)

+

STRING

+

Decodes the first argument into a String using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').

+

If either argument is NULL, the result will also be NULL.

+

ENCODE(string1, string2)

+

STRING

+

Encodes the string1 into a BINARY using the provided string2 character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').

+

If either argument is NULL, the result will also be NULL.

+

INSTR(string1, string2)

+

INT

+

Returns the position of the first occurrence of string2 in string1.

+

Returns NULL if any argument is NULL.

+

LEFT(string, integer)

+

STRING

+

Returns the leftmost integer characters from the string.

+

Returns EMPTY String if integer is negative.

+

Returns NULL if any argument is NULL.

+

RIGHT(string, integer)

+

STRING

+

Returns the rightmost integer characters from the string.

+

Returns EMPTY String if integer is negative.

+

Returns NULL if any argument is NULL.

+

LOCATE(string1, string2[, integer])

+

INT

+

Returns the position of the first occurrence of string1 in string2 after position integer.

+

Returns 0 if not found.

+

The value of integer defaults to 0.

+

Returns NULL if any argument is NULL.

+

PARSE_URL(string1, string2[, string3])

+

STRING

+

Returns the specified part from the URL.

+

Valid values for string2 include 'HOST', 'PATH', 'QUERY', 'REF', 'PROTOCOL', 'AUTHORITY', 'FILE', and 'USERINFO'.

+

Returns NULL if any argument is NULL.

+

If string2 is QUERY, the key in QUERY can be specified as string3.

+

Example:

+

For example, parse_url('http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1', 'HOST') returns 'facebook.com'.

+

parse_url('http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1', 'QUERY', 'k1') returns 'v1'.

+

REGEXP(string1, string2)

+

BOOLEAN

+

Returns TRUE if any (possibly empty) substring of string1 matches the regular expression string2; otherwise, FALSE. string1 is the specified string, and string2 is the regular expression.

+

Returns NULL if any argument is NULL.

+

REVERSE(string)

+

STRING

+

Returns the reversed string.

+

Returns NULL if string is NULL.

+

SPLIT_INDEX(string1, string2, integer1)

+

STRING

+

Splits string1 by the delimiter string2, returns the integer1-th (zero-based) string of the split strings.

+

Returns NULL if integer is negative.

+

Returns NULL if any argument is NULL.

+

STR_TO_MAP(string1[, string2, string3])

+

MAP

+

Returns a map after splitting the string1 into key/value pairs using delimiters.

+

The default value of string2 is ','.

+

The default value of string3 is '='.

+

SUBSTR(string[, integer1[, integer2]])

+

STRING

+

Returns a substring of string starting from position integer1 with length integer2.

+

If integer2 is not specified, the string is truncated to the end.

+

JSON_VAL(STRING json_string, STRING json_path)

+

STRING

+

Returns the value of the specified json_path from the json_string. For details about how to use the functions, see JSON_VAL Function.

+
NOTE:

The following rules are listed in descending order of priority.

+
  1. The two arguments json_string and json_path cannot be NULL.
  2. The value of json_string must be a valid JSON string. Otherwise, the function returns NULL.
  3. If json_string is an empty string, the function returns an empty string.
  4. If json_path is an empty string or the path does not exist, the function returns NULL.
+
+
+
+
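A brief sketch of several functions from Table 1; the table T is hypothetical, and the expected results are shown as comments.

SELECT
    CONCAT_WS('~', 'AA', NULL, 'BB'),         -- "AA~BB"
    LPAD('hi', 4, '??'),                      -- "??hi"
    SPLIT_INDEX('a,b,c', ',', 1),             -- "b"
    REGEXP_REPLACE('foobar', 'oo|ar', '')     -- "fb"
FROM T;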

JSON_VAL Function

  • Syntax
+
STRING JSON_VAL(STRING json_string, STRING json_path)
+ +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

json_string

+

STRING

+

JSON object to be parsed

+

json_path

+

STRING

+

Path expression for parsing the JSON string. For the supported expressions, see Table 3.

+
+
+ +
Table 3 Expressions supported

Expression

+

Description

+

$

+

Root node in the path

+

[]

+

Access array elements

+

*

+

Array wildcard

+

.

+

Access child elements

+
+
+
  • Example
    1. Test input data.
      Test the data source kafka. The message content is as follows:
      "{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}}"
      +"{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}]"
      +
      +
    2. Use JSON_VAL in SQL statements.
      create table kafkaSource(
      +  message STRING
      +)
      +with (
      +  'connector.type' = 'kafka',
      +  'connector.version' = '0.11',
      +  'connector.topic' = 'topic-swq',
      +  'connector.properties.bootstrap.servers' = 'xxx.xxx.xxx.xxx:9092,yyy.yyy.yyy:9092,zzz.zzz.zzz.zzz:9092',
      +  'connector.startup-mode' = 'earliest-offset',
      +  'format.field-delimiter' = '|',
      +  'format.type' = 'csv'
      +);
      +
      +create table kafkaSink(
      +  message1 STRING,
      +  message2 STRING,
      +  message3 STRING,
      +  message4 STRING,
      +  message5 STRING,
      +  message6 STRING
      +)
      +with (
      +  'connector.type' = 'kafka',
      +  'connector.version' = '0.11',
      +  'connector.topic' = 'topic-swq-out',
      +  'connector.properties.bootstrap.servers' = 'xxx.xxx.xxx.xxx:9092,yyy.yyy.yyy:9092,zzz.zzz.zzz.zzz:9092',
      +  'format.type' = 'json'
      +);
      +
      +INSERT INTO kafkaSink
      +SELECT 
      +JSON_VAL(message,""),
      +JSON_VAL(message,"$.name"),
      +JSON_VAL(message,"$.grade.science"),
      +JSON_VAL(message,"$.grade.science[*]"),
      +JSON_VAL(message,"$.grade.science[1]"),
      +JSON_VAL(message,"$.grade.dddd")
      +FROM kafkaSource;
      +
    3. View output.
      {"message1":null,"message2":"swq","message3":"[80,85]","message4":"[80,85]","message5":"85","message6":null}
      +{"message1":null,"message2":null,"message3":null,"message4":null,"message5":null,"message6":null}
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0334.html b/docs/dli/sqlreference/dli_08_0334.html new file mode 100644 index 00000000..f4f0f130 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0334.html @@ -0,0 +1,1704 @@ + + +

Temporal Functions

+

Table 1 lists the temporal functions supported by Flink OpenSource SQL.

+

Function Description

+
Table 1 Temporal functions

Function

+

Return Type

+

Description

+

DATE string

+

DATE

+

Parse the date string (yyyy-MM-dd) to a SQL date.

+

TIME string

+

TIME

+

Parse the time string (HH:mm:ss[.fff]) to a SQL time.

+

TIMESTAMP string

+

TIMESTAMP

+

Convert the time string into a timestamp. The time string format is yyyy-MM-dd HH:mm:ss[.fff].

+

INTERVAL string range

+

INTERVAL

+

Parse an interval string in the following two forms:

+
  • yyyy-MM for SQL intervals of months. An interval range might be YEAR or YEAR TO MONTH.
  • dd hh:mm:ss.fff for SQL intervals of milliseconds. An interval range might be DAY, MINUTE, DAY TO HOUR, or DAY TO SECOND.
+

Example:

+

INTERVAL '10 00:00:00.004' DAY TO SECOND indicates that the interval is 10 days and 4 milliseconds.

+

INTERVAL '10' DAY: indicates that the interval is 10 days.

+

INTERVAL '2-10' YEAR TO MONTH indicates that the interval is two years and ten months.

+

CURRENT_DATE

+

DATE

+

Return the SQL date of UTC time zone.

+

CURRENT_TIME

+

TIME

+

Return the SQL time of UTC time zone.

+

CURRENT_TIMESTAMP

+

TIMESTAMP

+

Return the SQL timestamp of UTC time zone.

+

LOCALTIME

+

TIME

+

Return the SQL time of the local time zone.

+

LOCALTIMESTAMP

+

TIMESTAMP

+

Return the SQL timestamp of the local time zone.

+

EXTRACT(timeintervalunit FROM temporal)

+

BIGINT

+

Extract part of the time point or interval. Return the part in the int type.

+

For example, extract the date 2006-06-05 and return 5.

+

EXTRACT(DAY FROM DATE '2006-06-05') returns 5.

+

YEAR(date)

+

BIGINT

+

Return the year from a SQL date.

+

For example, YEAR(DATE'1994-09-27') returns 1994.

+

QUARTER(date)

+

BIGINT

+

Return the quarter of a year from a SQL date.

+

MONTH(date)

+

BIGINT

+

+

Return the month of a year from a SQL date.

+

For example, MONTH(DATE '1994-09-27') returns 9.

+

WEEK(date)

+

BIGINT

+

Return the week of a year from a SQL date.

+

For example, WEEK(DATE'1994-09-27') returns 39.

+

DAYOFYEAR(date)

+

BIGINT

+

Return the day of a year from a SQL date.

+

For example, DAYOFYEAR(DATE '1994-09-27') is 270.

+

DAYOFMONTH(date)

+

BIGINT

+

Return the day of a month from a SQL date.

+

For example, DAYOFMONTH(DATE'1994-09-27') returns 27.

+

DAYOFWEEK(date)

+

BIGINT

+

Return the day of a week from a SQL date.

+

Sunday is set to 1.

+

For example, DAYOFWEEK(DATE'1994-09-27') returns 3.

+

HOUR(timestamp)

+

BIGINT

+

Return the hour of a day (an integer between 0 and 23) from a SQL timestamp.

+

For example, HOUR(TIMESTAMP '1994-09-27 13:14:15') returns 13.

+

MINUTE(timestamp)

+

BIGINT

+

Return the minute of an hour (an integer between 0 and 59) from a SQL timestamp.

+

For example, MINUTE(TIMESTAMP '1994-09-27 13:14:15') returns 14.

+

SECOND(timestamp)

+

BIGINT

+

Returns the second of a minute (an integer between 0 and 59) from a SQL timestamp.

+

For example, SECOND(TIMESTAMP '1994-09-27 13:14:15') returns 15.

+

FLOOR(timepoint TO timeintervalunit)

+

TIME

+

Round a time point down to the given unit.

+

For example, 12:44:00 is returned from FLOOR(TIME '12:44:31' TO MINUTE).

+

CEIL(timepoint TO timeintervalunit)

+

TIME

+

Round a time point up to the given unit.

+

For example, CEIL(TIME '12:44:31' TO MINUTE) returns 12:45:00.

+

(timepoint1, temporal1) OVERLAPS (timepoint2, temporal2)

+

BOOLEAN

+

Return TRUE if two time intervals overlap.

+

Example:

+

(TIME '2:55:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR) returns TRUE.

+

(TIME '9:00:00', TIME '10:00:00') OVERLAPS (TIME '10:15:00', INTERVAL '3' HOUR) returns FALSE.

+

DATE_FORMAT(timestamp, string)

+

STRING

+

Convert a timestamp to a value of string in the format specified by the date format string.

+
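
For example, DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yyyy-MM-dd') returns 1997-04-25.

+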

TIMESTAMPADD(timeintervalunit, interval, timepoint)

+

TIMESTAMP/DATE/TIME

+

Return the date and time obtained by adding interval (in units of timeintervalunit) to timepoint.

+

For example, TIMESTAMPADD(WEEK, 1, DATE '2003-01-02') returns 2003-01-09.

+

TIMESTAMPDIFF(timepointunit, timepoint1, timepoint2)

+

INT

+

Return the (signed) number of timepointunit between timepoint1 and timepoint2.

+

The unit for the interval is given by the first argument, which should be one of the following values: SECOND, MINUTE, HOUR, DAY, MONTH, or YEAR.

+

For example, TIMESTAMPDIFF(DAY, TIMESTAMP '2003-01-02 10:00:00', TIMESTAMP '2003-01-03 10:00:00') returns 1.

+

CONVERT_TZ(string1, string2, string3)

+

STRING

+

Convert a datetime string1 from time zone string2 to time zone string3.

+

For example, CONVERT_TZ('1970-01-01 00:00:00', 'UTC', 'America/Los_Angeles') returns '1969-12-31 16:00:00'.

+

FROM_UNIXTIME(numeric[, string])

+

STRING

+

Return a representation of the numeric argument as a value in string format.

+

The default string format is yyyy-MM-dd HH:mm:ss.

+

For example, FROM_UNIXTIME(44) returns 1970-01-01 08:00:44 in the UTC+08:00 time zone; the result depends on the local time zone.

+

UNIX_TIMESTAMP()

+

BIGINT

+

Get the current Unix timestamp in seconds.

+

UNIX_TIMESTAMP(string1[, string2])

+

BIGINT

+

Convert the date-time string string1 in format string2 to a Unix timestamp (in seconds), using the time zone specified in the table configuration.

+

The default format of string2 is yyyy-MM-dd HH:mm:ss.

+

TO_DATE(string1[, string2])

+

DATE

+

Convert a date string string1 with format string2 to a date.

+

The default format of string2 is yyyy-MM-dd.

+

TO_TIMESTAMP(string1[, string2])

+

TIMESTAMP

+

Convert date time string string1 with format string2 to a timestamp.

+

The default format of string2 is yyyy-MM-dd HH:mm:ss.

+
+
+
+

DATE

  • Function

    Returns a date parsed from string in the form of yyyy-MM-dd.

    +
  • Description
    DATE DATE string
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string

    +

    STRING

    +

    String in the SQL date format.

    +

    Note that the string must be in the yyyy-MM-dd format. Otherwise, an error will be reported.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DATE "2021-08-19" AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      2021-08-19

      +
      +
      +
    +
+
+

TIME

  • Function

    Returns a SQL time parsed from string in the form of HH:mm:ss[.fff].

    +
  • Description
    TIME TIME string
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string

    +

    STRING

    +

    Time

    +

    Note that the string must be in the format of HH:mm:ss[.fff]. Otherwise, an error will be reported.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIME "10:11:12" AS `result`,
      +        TIME "10:11:12.032" AS `result2`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + +

      result

      +

      result2

      +

      10:11:12

      +

      10:11:12.032

      +
      +
      +
    +
+
+

TIMESTAMP

  • Function

    Converts the time string into a timestamp. The time string format is yyyy-MM-dd HH:mm:ss[.fff]. The return value is of the TIMESTAMP(3) type.

    +
  • Description
    TIMESTAMP(3) TIMESTAMP string
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string

    +

    STRING

    +

    Time

    +

    Note that the string must be in the format of yyyy-MM-dd HH:mm:ss[.fff]. Otherwise, an error will be reported.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIMESTAMP "1997-04-25 13:14:15" AS `result`,
      +        TIMESTAMP "1997-04-25 13:14:15.032" AS `result2`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + +

      result

      +

      result2

      +

      1997-04-25 13:14:15

      +

      1997-04-25 13:14:15.032

      +
      +
      +
    +
+
+

INTERVAL

  • Function

    Parses an interval string.

    +
  • Description
    INTERVAL INTERVAL string range
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string

    +

    STRING

    +

    Timestamp string used together with the range parameter. The string is in either of the following two formats:

    +
    • yyyy-MM for SQL intervals of months. An interval range might be YEAR or YEAR TO MONTH.
    • dd hh:mm:ss.fff for SQL intervals of milliseconds. An interval range might be DAY, MINUTE, DAY TO HOUR, or DAY TO SECOND.
    +

    range

    +

    INTERVAL

    +

    Interval range. This parameter is used together with the string parameter.

    +

    Available values are as follows: YEAR, YEAR TO MONTH, DAY, MINUTE, DAY TO HOUR, and DAY TO SECOND.

    +
    +
    +
  • Example
    Test statement
    -- The interval is 10 days and 4 milliseconds.
    +INTERVAL '10 00:00:00.004' DAY TO SECOND
    +-- The interval is 10 days.
    +INTERVAL '10' DAY
    +-- The interval is 2 years and 10 months.
    +INTERVAL '2-10' YEAR TO MONTH
    +
    +
+
+

CURRENT_DATE

  • Function

    Returns the current SQL date (yyyy-MM-dd) in the UTC time zone. The return value is of the DATE type.

    +
  • Description
    DATE CURRENT_DATE
    +
  • Input parameters

    N/A

    +
  • Example
    • Test statement
      SELECT 
      +	CURRENT_DATE AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      2021-10-28

      +
      +
      +
    +
+
+

CURRENT_TIME

  • Function

    Returns the current SQL time (HH:mm:ss.fff) in the UTC time zone. The return value is of the TIME type.

    +
  • Description
    TIME CURRENT_TIME
    +
  • Input parameters

    N/A

    +
  • Example
    • Test statement
      SELECT 
      +	CURRENT_TIME AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      08:29:19.289

      +
      +
      +
    +
+
+

CURRENT_TIMESTAMP

  • Function

    Returns the current SQL timestamp in the UTC time zone. The return value is of the TIMESTAMP(3) type.

    +
  • Description
    TIMESTAMP(3) CURRENT_TIMESTAMP
    +
  • Input parameters

    N/A

    +
  • Example
    • Test statement
      SELECT 
      +	CURRENT_TIMESTAMP AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      2021-10-28 08:33:51.606

      +
      +
      +
    +
+
+

LOCALTIME

  • Function

    Returns the current SQL time in the local time zone. The return value is of the TIME type.

    +
  • Description
    TIME LOCALTIME
    +
  • Input parameters

    N/A

    +
  • Example
    • Test statement
      SELECT 
      +	LOCALTIME AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      16:39:37.706

      +
      +
      +
    +
+
+

LOCALTIMESTAMP

  • Function

    Returns the current SQL timestamp in the local time zone. The return value is of the TIMESTAMP(3) type.

    +
  • Description
    TIMESTAMP(3) LOCALTIMESTAMP
    +
  • Input parameters

    N/A

    +
  • Example
    • Test statement
      SELECT 
      +	LOCALTIMESTAMP AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      2021-10-28 16:43:17.625

      +
      +
      +
    +
+
+

EXTRACT

  • Function

    Returns a value extracted from the timeintervalunit part of temporal. The return value is of the BIGINT type.

    +
  • Description
    BIGINT EXTRACT(timeintervalunit FROM temporal)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit to be extracted from a time point or interval. The value can be YEAR, QUARTER, MONTH, WEEK, DAY, DOY, HOUR, MINUTE, or SECOND.

    +

    temporal

    +

    DATE/TIME/TIMESTAMP/INTERVAL

    +

    Time point or interval.

    +
    +
    +

    Do not specify a time unit that does not exist in the given time point or interval. Otherwise, the job fails to be submitted.

    +

    For example, an error message is displayed when the following statement is executed because YEAR cannot be extracted from TIME.

    +
    SELECT 
    +	EXTRACT(YEAR FROM TIME '12:44:31' ) AS `result`
    +FROM
    +	testtable;
    +
    +
  • Example
    • Test statement
      SELECT 
      +	EXTRACT(YEAR FROM DATE '1997-04-25' ) AS `result`,
      +        EXTRACT(MINUTE FROM TIME '12:44:31') AS `result2`,
      +        EXTRACT(SECOND FROM TIMESTAMP '1997-04-25 13:14:15') AS `result3`,
      +        EXTRACT(YEAR FROM INTERVAL '2-10' YEAR TO MONTH) AS `result4`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      result4

      +

      1997

      +

      44

      +

      15

      +

      2

      +
      +
      +
    +
+
+

YEAR

  • Function

    Returns the year from a SQL date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT YEAR(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	YEAR(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      1997

      +
      +
      +
    +
+
+

QUARTER

  • Function

    Returns the quarter of a year (an integer between 1 and 4) from a SQL date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT QUARTER(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	QUARTER(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      Result

      +

      2

      +
      +
      +
    +
+
+

MONTH

  • Function

    Returns the month of a year (an integer between 1 and 12) from a SQL date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT MONTH(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	MONTH(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      4

      +
      +
      +
    +
+
+

WEEK

  • Function

    Returns the week of a year from a SQL date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT WEEK(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	WEEK(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      17

      +
      +
      +
    +
+
+

DAYOFYEAR

  • Function

    Returns the day of a year (an integer between 1 and 366) from a SQL date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT DAYOFYEAR(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DAYOFYEAR(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      115

      +
      +
      +
    +
+
+

DAYOFMONTH

  • Function

    Returns the day of a month (an integer between 1 and 31) from a SQL date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT DAYOFMONTH(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DAYOFMONTH(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      Result

      +

      25

      +
      +
      +
    +
+
+

DAYOFWEEK

  • Function

    Returns the day of a week (an integer between 1 and 7) from a SQL date. The return value is of the BIGINT type.

    +

    Note that the start day of a week is Sunday.

    +
    +
  • Description
    BIGINT DAYOFWEEK(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DAYOFWEEK(DATE '1997-04-25') AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      6

      +
      +
      +
    +
+
+

HOUR

  • Function

    Returns the hour of a day (an integer between 0 and 23) from a SQL timestamp. The return value is of the BIGINT type.

    +
  • Description
    BIGINT HOUR(timestamp)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timestamp

    +

    TIMESTAMP

    +

    SQL timestamp

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	HOUR(TIMESTAMP '1997-04-25 10:11:12') AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      10

      +
      +
      +
    +
+
+

MINUTE

  • Function

    Returns the minute of an hour (an integer between 0 and 59) from a SQL timestamp. The return value is of the BIGINT type.

    +
  • Description
    BIGINT MINUTE(timestamp)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timestamp

    +

    TIMESTAMP

    +

    SQL timestamp

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	MINUTE(TIMESTAMP '1997-04-25 10:11:12') AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      11

      +
      +
      +
    +
+
+

SECOND

  • Function

    Returns the second of a minute (an integer between 0 and 59) from a SQL timestamp. The return value is of the BIGINT type.

    +
  • Description
    BIGINT SECOND(timestamp)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timestamp

    +

    TIMESTAMP

    +

    SQL timestamp

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	SECOND(TIMESTAMP '1997-04-25 10:11:12') AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      12

      +
      +
      +
    +
+
+

FLOOR

  • Function

    Returns a value that rounds timepoint down to the time unit timeintervalunit.

    +
  • Description
    TIME/TIMESTAMP(3) FLOOR(timepoint TO timeintervalunit)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timepoint

    +

    TIMESTAMP/TIME

    +

    SQL time or SQL timestamp

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit. The value can be YEAR, QUARTER, MONTH, WEEK, DAY, DOY, HOUR, MINUTE, or SECOND.

    +
    +
    +
  • Example
    • Test statement. For details about the syntax of the user-defined result table, see User-defined Result Table.
      create table PrintSink (
      +  message TIME,
      +  message2 TIME,
      +  message3 TIMESTAMP(3)
      +)
      +with (
      +  'connector.type' = 'user-defined',
      +  'connector.class-name' = 'com.swqtest.flink.sink.PrintSink' -- Replace the class with a user-defined class. For details, see the syntax description in User-defined Result Table.
      +);
      +
      +INSERT INTO 
      +	PrintSink
      +SELECT 
      +	FLOOR(TIME '13:14:15' TO MINUTE) AS `result`,
      +        FLOOR(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result2`,
      +        FLOOR(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result3`;
      +
      +
    • Test result
      The values of the fields in the PrintSink table are as follows: +
      + + + + + + + + + +

      Message

      +

      Message 2

      +

      Message 3

      +

      13:14

      +

      13:14

      +

      1997-04-25T13:14

      +
      +
      +
      +
    +
+
+

CEIL

  • Function

    Returns a value that rounds timepoint up to the time unit timeintervalunit.

    +
  • Description
    TIME/TIMESTAMP(3) CEIL(timepoint TO timeintervalunit)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timepoint

    +

    TIMESTAMP/TIME

    +

    SQL time or SQL timestamp

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit. The value can be YEAR, QUARTER, MONTH, WEEK, DAY, DOY, HOUR, MINUTE, or SECOND.

    +
    +
    +
  • Example
    • Test statement. For details about the syntax of the user-defined result table, see User-defined Result Table.
      create table PrintSink (
      +  message TIME,
      +  message2 TIME,
      +  message3 TIMESTAMP(3)
      +)
      +with (
      +  'connector.type' = 'user-defined',
      +  'connector.class-name' = 'com.swqtest.flink.sink.PrintSink' -- Replace the class with a user-defined class. For details, see the syntax description in User-defined Result Table.
      +);
      +
      +INSERT INTO 
      +	PrintSink
      +SELECT 
      +	CEIL(TIME '13:14:15' TO MINUTE) AS `result`,
      +        CEIL(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result2`,
      +        CEIL(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result3`;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      13:15

      +

      13:15

      +

      1997-04-25T13:15

      +
      +
      +
    +
+
+

OVERLAPS

  • Function

    Returns TRUE if two time intervals overlap; returns FALSE otherwise.

    +
  • Description
    BOOLEAN (timepoint1, temporal1) OVERLAPS (timepoint2, temporal2)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timepoint1/timepoint2

    +

    DATE/TIME/TIMESTAMP

    +

    Time point

    +

    temporal1/temporal2

    +

    DATE/TIME/TIMESTAMP/INTERVAL

    +

    Time point or interval

    +
    +
    +
    • (timepoint, temporal) is a closed interval.
    • The temporal can be of the DATE, TIME, TIMESTAMP, or INTERVAL type.
      • When the temporal is DATE, TIME, or TIMESTAMP, (timepoint, temporal) indicates an interval between timepoint and temporal. The temporal can be earlier than the value of timepoint, for example, (DATE '1997-04-25', DATE '1997-04-23').
      • When the temporal is INTERVAL, (timepoint, temporal) indicates an interval between timepoint and timepoint + temporal.
      +
    • Ensure that (timepoint1, temporal1) and (timepoint2, temporal2) are intervals of the same data type.
    +
    +
  • Example
    • Test statement
      SELECT 
      +	(TIME '2:55:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR) AS `result`,
      +        (TIME '2:30:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR) AS `result2`,
      +	(TIME '2:30:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:31:00', INTERVAL '2' HOUR) AS `result3`,
      +	(TIME '9:00:00', TIME '10:00:00') OVERLAPS (TIME '10:00:00', INTERVAL '3' HOUR) AS `result4`,
      +	(TIMESTAMP '1997-04-25 12:00:00', TIMESTAMP '1997-04-25 12:20:00') OVERLAPS (TIMESTAMP '1997-04-25 13:00:00', INTERVAL '2' HOUR) AS `result5`,
      +	(DATE '1997-04-23', INTERVAL '2' DAY) OVERLAPS (DATE '1997-04-25', INTERVAL '2' DAY) AS `result6`,
      +	(DATE '1997-04-25', DATE '1997-04-23') OVERLAPS (DATE '1997-04-25', INTERVAL '2' DAY) AS `result7`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      result4

      +

      result5

      +

      result6

      +

      result7

      +

      true

      +

      true

      +

      false

      +

      true

      +

      false

      +

      true

      +

      true

      +
      +
      +
    +
+
+

DATE_FORMAT

  • Function

    Converts a timestamp to a value of string in the format specified by the date format string.

    +
  • Description
    STRING DATE_FORMAT(timestamp, dateformat)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timestamp

    +

    TIMESTAMP/STRING

    +

    Time point

    +

    dateformat

    +

    STRING

    +

    String in the date format

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yyyy-MM-dd HH:mm:ss') AS `result`,
      +        DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yyyy-MM-dd') AS `result2`,
      +	DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yy/MM/dd HH:mm') AS `result3`,
      +        DATE_FORMAT('1997-04-25 10:11:12', 'yyyy-MM-dd') AS `result4`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      result4

      +

      1997-04-25 10:11:12

      +

      1997-04-25

      +

      97/04/25 10:11

      +

      1997-04-25

      +
      +
      +
    +
+
+

TIMESTAMPADD

  • Function

    Returns the date and time obtained by adding interval (in units of timeintervalunit) to timepoint.

    +

    The return type of TIMESTAMPADD is the same as that of timepoint. An exception is that if the input timepoint is of the TIMESTAMP type, the return value can also be inserted into a table field of the DATE type.

    +
    +
  • Description
    TIMESTAMP(3)/DATE/TIME TIMESTAMPADD(timeintervalunit, interval, timepoint)
    +
  • Input parameters +
    + + + + + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit

    +

    interval

    +

    INT

    +

    Interval

    +

    timepoint

    +

    TIMESTAMP/DATE/TIME

    +

    Time point

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIMESTAMPADD(WEEK, 1, DATE '1997-04-25') AS `result`,
      +        TIMESTAMPADD(QUARTER, 1, TIMESTAMP '1997-04-25 10:11:12') AS `result2`,
      +	TIMESTAMPADD(SECOND, 2, TIME '10:11:12') AS `result3`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      1997-05-02

      +
      • If this field is inserted into a table field of the TIMESTAMP type, 1997-07-25T10:11:12 is returned.
      +
      • If this field is inserted into a table field of the DATE type, 1997-07-25 is returned.
      +

      10:11:14

      +
      +
      +
    +
+
+

TIMESTAMPDIFF

  • Function

    Returns the (signed) number of timepointunit between timepoint1 and timepoint2. The unit for the interval is given by the first argument.

    +
  • Description
    INT TIMESTAMPDIFF(timepointunit, timepoint1, timepoint2)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    timepointunit

    +

    TIMEUNIT

    +

    Time unit. The value can be SECOND, MINUTE, HOUR, DAY, MONTH, or YEAR.

    +

    timepoint1/timepoint2

    +

    TIMESTAMP/DATE

    +

    Time point

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIMESTAMPDIFF(DAY, TIMESTAMP '1997-04-25 10:00:00', TIMESTAMP '1997-04-28 10:00:00') AS `result`,
      +        TIMESTAMPDIFF(DAY, DATE '1997-04-25', DATE '1997-04-28') AS `result2`,
      +	TIMESTAMPDIFF(DAY, TIMESTAMP '1997-04-27 10:00:20', TIMESTAMP '1997-04-25 10:00:00') AS `result3`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      3

      +

      3

      +

      -2

      +
      +
      +
    +
+
+

CONVERT_TZ

  • Function

    Converts a datetime string1 (with default ISO timestamp format 'yyyy-MM-dd HH:mm:ss') from time zone string2 to time zone string3.

    +
  • Description
    STRING CONVERT_TZ(string1, string2, string3)
    +
  • Input parameters +
    + + + + + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string1

    +

    STRING

    +

    SQL timestamp. If the value does not meet the format requirements, NULL is returned.

    +

    string2

    +

    STRING

    +

    Time zone before conversion. The time zone can be an abbreviation such as PST, a full name such as America/Los_Angeles, or a custom ID such as GMT-08:00.

    +

    string3

    +

    STRING

    +

    Time zone after conversion. The time zone can be an abbreviation such as PST, a full name such as America/Los_Angeles, or a custom ID such as GMT-08:00.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	CONVERT_TZ('1970-01-01 00:00:00', 'UTC', 'America/Los_Angeles') AS `result`,
      +        CONVERT_TZ('1997-04-25 10:00:00', 'UTC', 'GMT-08:00') AS `result2`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + +

      result

      +

      result2

      +

      1969-12-31 16:00:00

      +

      1997-04-25 02:00:00

      +
      +
      +
    +
+
+

FROM_UNIXTIME

  • Function

    Returns a representation of the numeric argument as a value in string format.

    +
  • Description
    STRING FROM_UNIXTIME(numeric[, string])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    numeric

    +

    BIGINT

    +

    An internal timestamp representing the number of seconds since 1970-01-01 00:00:00 UTC. The value can be generated by the UNIX_TIMESTAMP() function.

    +

    string

    +

    STRING

    +

    Time format. If this parameter is not specified, the default format is yyyy-MM-dd HH:mm:ss.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	FROM_UNIXTIME(44) AS `result`,
      +        FROM_UNIXTIME(44, 'yyyy:MM:dd') AS `result2`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + +

      result

      +

      result2

      +

      1970-01-01 08:00:44

      +

      1970:01:01

      +
      +
      +
    +
+
+

UNIX_TIMESTAMP

  • Function

    Gets the current Unix timestamp in seconds. The return value is of the BIGINT type.

    +
  • Description
    BIGINT UNIX_TIMESTAMP()
    +
  • Input parameters

    N/A

    +
  • Example
    • Test statement
      SELECT 
      +	UNIX_TIMESTAMP() AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      1635401982

      +
      +
      +
    +
+
+

UNIX_TIMESTAMP(string1[, string2])

  • Function

    Converts date time string1 in format string2 to Unix timestamp (in seconds). The return value is of the BIGINT type.

    +
  • Description
    BIGINT UNIX_TIMESTAMP(string1[, string2])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string1

    +

    STRING

    +

    SQL timestamp string. An error is reported if the value does not comply with the string2 format.

    +

    string2

    +

    STRING

    +

    Time format. If this parameter is not specified, the default format is yyyy-MM-dd HH:mm:ss.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	UNIX_TIMESTAMP('1997-04-25', 'yyyy-MM-dd') AS `result`,
      +        UNIX_TIMESTAMP('1997-04-25 00:00:10', 'yyyy-MM-dd HH:mm:ss') AS `result2`,
      +        UNIX_TIMESTAMP('1997-04-25 00:00:00') AS `result3`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      861897600

      +

      861897610

      +

      861897600

      +
      +
      +
    +
+
+

TO_DATE

  • Function

    Converts a date string1 with format string2 to a date.

    +
  • Description
    DATE TO_DATE(string1[, string2])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string1

    +

    STRING

    +

    SQL timestamp string. If the value is not in the required format, an error is reported.

    +

    string2

    +

    STRING

    +

    Format. If this parameter is not specified, the default time format is yyyy-MM-dd.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TO_DATE('1997-04-25') AS `result`,
      +        TO_DATE('1997:04:25', 'yyyy:MM:dd') AS `result2`,
      +        TO_DATE('1997-04-25 00:00:00', 'yyyy-MM-dd HH:mm:ss') AS `result3`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      1997-04-25

      +

      1997-04-25

      +

      1997-04-25

      +
      +
      +
    +
+
+

TO_TIMESTAMP

  • Function

    Converts date time string1 with format string2 to a timestamp.

    +
  • Description
    TIMESTAMP TO_TIMESTAMP(string1[, string2])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Type

    +

    Description

    +

    string1

    +

    STRING

    +

    SQL timestamp string. If the value is not in the required format, NULL is returned.

    +

    string2

    +

    STRING

    +

    Date format. If this parameter is not specified, the default format is yyyy-MM-dd HH:mm:ss.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TO_TIMESTAMP('1997-04-25', 'yyyy-MM-dd') AS `result`,
      +        TO_TIMESTAMP('1997-04-25 00:00:00') AS `result2`,
      +        TO_TIMESTAMP('1997-04-25 00:00:00', 'yyyy-MM-dd HH:mm:ss') AS `result3`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      1997-04-25 00:00

      +

      1997-04-25 00:00

      +

      1997-04-25 00:00

      +
      +
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0335.html b/docs/dli/sqlreference/dli_08_0335.html new file mode 100644 index 00000000..460824ca --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0335.html @@ -0,0 +1,77 @@ + + +

Conditional Functions

+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Conditional functions

Function

+

Description

+

CASE value

+

WHEN value1_1 [, value1_2 ]* THEN result1

+

[ WHEN value2_1 [, value2_2 ]* THEN result2 ]*

+

[ ELSE resultZ ]

+

END

+

Returns resultX when the value is contained in (valueX_1, valueX_2, …).

+

Only the first matched value is returned.

+

When no value matches, returns resultZ if it is provided and returns NULL otherwise.

+

CASE

+

WHEN condition1 THEN result1

+

[ WHEN condition2 THEN result2 ]*

+

[ ELSE resultZ ]

+

END

+

Returns resultX when the first conditionX is met.

+

Only the first matched value is returned.

+

When no condition is met, returns resultZ if it is provided and returns NULL otherwise.

+

NULLIF(value1, value2)

+

Returns NULL if value1 is equal to value2; returns value1 otherwise.

+

For example, NULLIF(5, 5) returns NULL.

+

NULLIF(5, 0) returns 5.

+

COALESCE(value1, value2 [, value3 ]* )

+

Returns the first value (from left to right) that is not NULL from value1, value2, ….

+

For example, COALESCE(NULL, 5) returns 5.

+

IF(condition, true_value, false_value)

+

Returns true_value if condition is met; returns false_value otherwise.

+

For example, IF(5 > 3, 5, 3) returns 5.

+

IS_ALPHA(string)

+

Returns TRUE if all characters in the string are letters, otherwise FALSE.

+

IS_DECIMAL(string)

+

Returns TRUE if string can be parsed to a valid numeric, otherwise FALSE.

+

IS_DIGIT(string)

+

Returns TRUE if all characters in the string are digits, otherwise FALSE.

+
+
+
+
+
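
A brief usage sketch combining several of these functions (a hypothetical example; the testtable source table and its INT column num are assumptions, matching the sample tables used elsewhere in this reference):

SELECT 
+	CASE num WHEN 1, 2 THEN 'small' ELSE 'other' END AS `result`,
+	COALESCE(NULL, num) AS `result2`,
+	NULLIF(num, 0) AS `result3`,
+	IF(num > 3, num, 3) AS `result4`,
+	IS_DIGIT('123') AS `result5`
+FROM
+	testtable;
+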
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0336.html b/docs/dli/sqlreference/dli_08_0336.html new file mode 100644 index 00000000..be6edca2 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0336.html @@ -0,0 +1,131 @@ + + +

Type Conversion Function

+

Syntax

CAST(value AS type)
+
+

Syntax Description

This function is used to explicitly convert a value of one data type to another.

+
+

Precautions

If the input is NULL, NULL is returned.

+
+

Example

The following example converts the amount value to an integer.

+
insert into temp select cast(amount as INT) from source_stream;
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Examples of type conversion functions

Example

+

Description

+

Example

+

cast(v1 as string)

+

Converts v1 to a string. The value of v1 can be of the numeric type or of the timestamp, date, or time type.

+

Table T1:

+
| content (INT)           |
+| -------------           |
+| 5                       |
+

Statement:

+
SELECT
+  cast(content as varchar)
+FROM
+  T1;
+

Result:

+
"5"
+

cast (v1 as int)

+

Converts v1 to the int type. The value of v1 can be of the numeric or string type.

+

Table T1:

+
| content  (STRING)           |
+| -------------               |
+| "5"                         |
+

Statement:

+
SELECT
+  cast(content as int)
+FROM
+  T1;
+

Result:

+
5
+

cast(v1 as timestamp)

+

Converts v1 to the timestamp type. The value of v1 can be of the string, date, or time type.

+

Table T1:

+
| content  (STRING)          |
+| -------------              |
+| "2018-01-01 00:00:01"     |
+

Statement:

+
SELECT
+  cast(content as timestamp)
+FROM
+  T1;
+

Result:

+
1514736001000
+

cast(v1 as date)

+

Converts v1 to the date type. The value of v1 can be of the string or timestamp type.

+

Table T1:

+
| content  (TIMESTAMP)     |
+| -------------            |
+| 1514736001000            |
+

Statement:

+
SELECT
+  cast(content as date)
+FROM
+  T1;
+

Result:

+
"2018-01-01"
+
+
+

Flink jobs do not support the conversion of bigint to timestamp using CAST. You can convert it using to_timestamp.

+
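
For example, a sketch of this workaround, assuming ts_col is a hypothetical BIGINT column holding seconds since the epoch (FROM_UNIXTIME produces a yyyy-MM-dd HH:mm:ss string, which to_timestamp then parses):

insert into temp select to_timestamp(from_unixtime(ts_col)) from source_stream;
+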
+
+

Detailed Sample Code

/** source **/
+CREATE
+TABLE car_infos (cast_int_to_string int, cast_String_to_int string,
+case_string_to_timestamp string, case_timestamp_to_date timestamp(3)) WITH (
+  'connector.type' = 'dis',
+  'connector.region' = 'xxxxx',
+  'connector.channel' = 'dis-input',
+  'format.type' = 'json'
+);
+/** sink **/
+CREATE
+TABLE cars_infos_out (cast_int_to_string string, cast_String_to_int
+int, case_string_to_timestamp timestamp(3), case_timestamp_to_date date) WITH (
+  'connector.type' = 'dis',
+  'connector.region' = 'xxxxx',
+  'connector.channel' = 'dis-output',
+  'format.type' = 'json'
+);
+/** Statistics on static car information**/
+INSERT
+INTO
+  cars_infos_out
+SELECT
+  cast(cast_int_to_string as string),
+  cast(cast_String_to_int as int),
+  cast(case_string_to_timestamp as timestamp),
+  cast(case_timestamp_to_date as date)
+FROM
+  car_infos;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0337.html b/docs/dli/sqlreference/dli_08_0337.html new file mode 100644 index 00000000..8cfe23ac --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0337.html @@ -0,0 +1,48 @@ + + +

Collection Functions

+

Description

+
+ + + + + + + + + + + + + + + + + + + +
Table 1 Collection functions

Function

+

Description

+

CARDINALITY(array)

+

Returns the number of elements in array.

+

array '[' integer ']'

+

Returns the element at position INT in array. The index starts from 1.

+

ELEMENT(array)

+

Returns the sole element of array (whose cardinality should be one).

+

Returns NULL if array is empty.

+

Throws an exception if array has more than one element.

+

CARDINALITY(map)

+

Returns the number of entries in map.

+

map '[' key ']'

+

Returns the value specified by key value in map.

+
+
+
+
+
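
A brief usage sketch of these functions (assuming the testtable source table used elsewhere in this reference):

SELECT 
+	CARDINALITY(ARRAY[1, 2, 3]) AS `result`,        -- returns 3
+	ARRAY[1, 2, 3][2] AS `result2`,                 -- returns 2
+	ELEMENT(ARRAY[5]) AS `result3`,                 -- returns 5
+	MAP['k1', 'v1', 'k2', 'v2']['k2'] AS `result4`  -- returns v2
+FROM
+	testtable;
+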
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0338.html b/docs/dli/sqlreference/dli_08_0338.html new file mode 100644 index 00000000..1bbcd2ab --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0338.html @@ -0,0 +1,38 @@ + + +

Value Construction Functions

+

Description

+
+ + + + + + + + + + + + + +
Table 1 Value construction functions

Function

+

Description

+

ROW(value1, [, value2]*)

+

(value1, [, value2]*)

+

Returns a row created from a list of values (value1, value2,…).

+

ARRAY '[' value1 [, value2 ]* ']'

+

Returns an array created from a list of values (value1, value2, …).

+

MAP '[' key1, value1 [, key2, value2]* ']'

+

Returns a map created from a list of key-value pairs ((value1, value2), (value3, value4), …).

+

The key-value pair is (key1, value1),(key2, value2).

+
+
+
+
+
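
A brief usage sketch (assuming the testtable source table used elsewhere in this reference):

SELECT 
+	ROW(1, 'abc') AS `result`,                -- a row with two fields
+	ARRAY[1, 2, 3] AS `result2`,              -- an array of three elements
+	MAP['k1', 'v1', 'k2', 'v2'] AS `result3`  -- a map with two entries
+FROM
+	testtable;
+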
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0339.html b/docs/dli/sqlreference/dli_08_0339.html new file mode 100644 index 00000000..e0b05dd8 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0339.html @@ -0,0 +1,31 @@ + + +

Value Access Functions

+

Description

+
+ + + + + + + + + + +
Table 1 Value access functions

Function

+

Description

+

tableName.compositeType.field

+

Returns the value of a field from a Flink composite type (e.g., Tuple, POJO) by name.

+

tableName.compositeType.*

+

Returns a flat representation of a Flink composite type (e.g., Tuple, POJO) that converts each of its direct subtypes into a separate field.

+
+
+
+
+
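
For example, a sketch of composite-type access, assuming a hypothetical table car_infos with a composite column info of type ROW<speed DOUBLE, model STRING>:

SELECT 
+	car_infos.info.speed,  -- access a single field by name
+	car_infos.info.*       -- flatten all fields of info into separate columns
+FROM
+	car_infos;
+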
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0340.html b/docs/dli/sqlreference/dli_08_0340.html new file mode 100644 index 00000000..f401dc8e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0340.html @@ -0,0 +1,64 @@ + + +

Hash Functions

+

Description

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Hash functions

Function

+

Description

+

MD5(string)

+

Returns the MD5 hash as a string that contains 32 hexadecimal digits.

+

Returns NULL if string is NULL.

+

SHA1(string)

+

Returns the SHA-1 hash as a string that contains 40 hexadecimal digits.

+

Returns NULL if string is NULL.

+

SHA224(string)

+

Returns the SHA-224 hash as a string that contains 56 hexadecimal digits.

+

Returns NULL if string is NULL.

+

SHA256(string)

+

Returns the SHA-256 hash as a string that contains 64 hexadecimal digits.

+

Returns NULL if string is NULL.

+

SHA384(string)

+

Returns the SHA-384 hash as a string that contains 96 hexadecimal digits.

+

Returns NULL if string is NULL.

+

SHA512(string)

+

Returns the SHA-512 hash as a string that contains 128 hexadecimal digits.

+

Returns NULL if string is NULL.

+

SHA2(string, hashLength)

+

Returns the hash using the SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384, or SHA-512).

+

The first argument string is the string to be hashed and the second argument hashLength is the bit length of the result (224, 256, 384, or 512).

+

Returns NULL if string or hashLength is NULL.

+
+
+
+
+
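
A brief usage sketch (assuming the testtable source table used elsewhere in this reference):

SELECT 
+	MD5('abc') AS `result`,        -- returns 900150983cd24fb0d6963f7d28e17f72
+	SHA256('abc') AS `result2`,
+	SHA2('abc', 256) AS `result3`  -- equivalent to SHA256('abc')
+FROM
+	testtable;
+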
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0341.html b/docs/dli/sqlreference/dli_08_0341.html new file mode 100644 index 00000000..56a2b889 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0341.html @@ -0,0 +1,124 @@ + + +

Aggregate Function

+

An aggregate function performs a calculation operation on a set of input values and returns a value. For example, the COUNT function counts the number of rows retrieved by an SQL statement. Table 1 lists aggregate functions.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Aggregate functions

Function

+

Return Data Type

+

Description

+

COUNT([ ALL ] expression | DISTINCT expression1 [, expression2]*)

+

BIGINT

+

Returns the number of input rows for which the expression is not NULL. Use DISTINCT for one unique instance of each value.

+

COUNT(*)

+

COUNT(1)

+

BIGINT

+

Returns the number of input rows.

+

AVG([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the average (arithmetic mean) of expression across all input rows.

+

Use DISTINCT for one unique instance of each value.

+

SUM([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the sum of expression across all input rows.

+

Use DISTINCT for one unique instance of each value.

+

MAX([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the maximum value of expression across all input rows.

+

MIN([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the minimum value of expression across all input rows.

+

STDDEV_POP([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the population standard deviation of expression across all input rows.

+

STDDEV_SAMP([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the sample standard deviation of expression across all input rows.

+

VAR_POP([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the population variance (square of the population standard deviation) of expression across all input rows.

+

VAR_SAMP([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the sample variance (square of the sample standard deviation) of expression across all input rows.

+

COLLECT([ ALL | DISTINCT ] expression)

+

MULTISET

+

Returns a multiset of expression across all input rows.

+

VARIANCE([ ALL | DISTINCT ] expression)

+

DOUBLE

+

Returns the sample variance (square of the sample standard deviation) of expression across all input rows.

+

FIRST_VALUE(expression)

+

Actual type

+

Returns the first value in an ordered set of values.

+

LAST_VALUE(expression)

+

Actual type

+

Returns the last value in an ordered set of values.

+
+
+
+
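
For example, a sketch of several aggregate functions in a grouped query (the table student_scores and its columns class_id and score are hypothetical):

SELECT 
+	COUNT(*) AS `cnt`,
+	AVG(score) AS `avg_score`,
+	SUM(DISTINCT score) AS `distinct_sum`,
+	MAX(score) AS `max_score`
+FROM
+	student_scores
+GROUP BY
+	class_id;
+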
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0342.html b/docs/dli/sqlreference/dli_08_0342.html new file mode 100644 index 00000000..27fc475b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0342.html @@ -0,0 +1,17 @@ + + +

Table-Valued Functions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0343.html b/docs/dli/sqlreference/dli_08_0343.html new file mode 100644 index 00000000..3ad617fb --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0343.html @@ -0,0 +1,76 @@ + + +

Deleting Partitions by Specifying Filter Criteria (Only OBS Tables Supported)

+

Function

This statement is used to delete one or more partitions based on specified conditions.

+
+

Precautions

  • This statement is used for OBS table operations only.
  • The table in which partitions are to be deleted must exist. Otherwise, an error is reported.
  • The to-be-deleted partition must exist. Otherwise, an error is reported. To avoid this error, add IF EXISTS in this statement.
+
+

Syntax

1
+2
+3
ALTER TABLE [db_name.]table_name
+  DROP [IF EXISTS]
+  PARTITIONS partition_filtercondition;
+
+ +
+
+

Keyword

  • DROP: deletes specified partitions.
  • IF EXISTS: No error is reported if the partitions to be deleted do not exist.
  • PARTITIONS: specifies the partitions meeting the conditions.
+
+

Parameters

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+

This statement is used for OBS table operations.

+

partition_filtercondition

+

Condition used to search for partitions to be deleted. The format is as follows:

+
  • Partition column name Operator Value to compare

    Example: start_date < '201911'

    +
  • <partition_filtercondition1> AND|OR <partition_filtercondition2>

    Example: start_date < '201911' OR start_date >= '202006'

    +
  • (<partition_filtercondition1>) [,partitions (<partition_filtercondition2>), ...]

    Example: (start_date <> '202007'), partitions(start_date < '201912')

    +
+
+
+
+

Example

You can run the following statements to delete partitions of the student table using different conditions:

+
1
+2
+3
+4
+5
+6
+7
+8
alter table student drop partitions(start_date < '201911');
+alter table student drop partitions(start_date >= '202007');
+alter table student drop partitions(start_date BETWEEN '202001' AND '202007');
+alter table student drop partitions(start_date < '201912' OR start_date >= '202006');
+alter table student drop partitions(start_date > '201912' AND start_date <= '202004');
+alter table student drop partitions(start_date != '202007');
+alter table student drop partitions(start_date <> '202007');
+alter table student drop partitions(start_date <> '202007'), partitions(start_date < '201912');
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0344.html b/docs/dli/sqlreference/dli_08_0344.html new file mode 100644 index 00000000..20d67279 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0344.html @@ -0,0 +1,221 @@ + + +

ClickHouse Result Table

+

Function

DLI exports Flink job data to ClickHouse result tables.

+

ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and provides good query performance. The aggregation analysis and query performance based on large and wide tables is excellent, which is one order of magnitude faster than other analytical databases.

+
+

Prerequisites

  • Ensure your jobs run on an exclusive queue (non-shared queue) of DLI.
  • You have established an enhanced datasource connection to ClickHouse and set the port in the security group rule of the ClickHouse cluster as needed.

    For details about how to set up an enhanced datasource connection, see "Enhanced Datasource Connection" in the Data Lake Insight User Guide.

    +
+
+

Precautions

  • When you create a ClickHouse cluster for MRS, set the cluster version to MRS 3.1.0 and do not enable Kerberos authentication.
  • Do not define a primary key in Flink SQL statements. Do not use any syntax that generates primary keys, such as insert into clickhouseSink select id, count(*) from sourceName group by id.
  • Flink supports the following data types: string, tinyint, smallint, int, long, float, double, date, timestamp, decimal, and array.

    The array supports only the int, bigint, string, float, and double data types.

    +
+
+

Syntax

1
+2
+3
+4
+5
+6
+7
+8
+9
create table clickhouseSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'clickhouse',
+  'connector.url' = '',
+  'connector.table' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Result table type. Set this parameter to clickhouse.

+

connector.url

+

Yes

+

ClickHouse URL.

+

Parameter format: jdbc:clickhouse://ClickHouseBalancer instance IP address:HTTP port number for ClickHouseBalancer instances/Database name

+
  • IP address of a ClickHouseBalancer instance:

    Log in to the MRS management console, click a cluster name, and choose Components > ClickHouse > Instance to obtain the service IP address of the ClickHouseBalancer instance.

    +
  • HTTP port of a ClickHouseBalancer instance:

    Log in to the MRS management console, click the target cluster name. On the displayed page, choose Components > ClickHouse. In the Service Configuration tab, choose ClickHouseBalancer from the All Roles dropdown list and search for lb_http_port to configure the parameter. The default value is 21425.

    +
  • The database name is the name of the database created for the ClickHouse cluster.
+

connector.table

+

Yes

+

Name of the ClickHouse table to be created

+

connector.driver

+

No

+

Driver required for connecting to the database

+
  • If this parameter is not specified during table creation, the driver automatically extracts the value from the ClickHouse URL.
  • If this parameter is specified during table creation, the value must be ru.yandex.clickhouse.ClickHouseDriver.
+

connector.username

+

No

+

Account for connecting to the ClickHouse database

+

connector.password

+

No

+

Password for accessing the ClickHouse database

+

connector.write.flush.max-rows

+

No

+

Maximum number of rows to be updated when data is written. The default value is 5000.

+

connector.write.flush.interval

+

No

+

Interval for data update. The unit can be ms (milli, millisecond), s (sec, second), or min (minute).

+

connector.write.max-retries

+

No

+

Maximum number of retries when data fails to be written. The default value is 3.

+
+
+
+

Example

Read data from a DIS table and insert the data into the test table of ClickHouse database flinktest.

+
  1. Create a DIS source table disSource.
     1
    + 2
    + 3
    + 4
    + 5
    + 6
    + 7
    + 8
    + 9
    +10
    +11
    +12
    +13
    +14
    +15
    +16
    +17
    +18
    +19
    +20
    +21
    create table disSource(
    +  attr0 string,
    +  attr1 TINYINT,
    +  attr2 smallint,
    +  attr3 int,
    +  attr4 bigint,
    +  attr5 float,
    +  attr6 double,
    +  attr7 String,
    +  attr8 string,
    +  attr9 timestamp(3),
    +  attr10 timestamp(3),
    +  attr11 date,
    +  attr12 decimal(38, 18),
    +  attr13 decimal(38, 18)
    +) with (
    +  "connector.type" = "dis",
    +  "connector.region" = "cn-xxxx-x",
    +  "connector.channel" = "xxxx",
    +  "format.type" = 'csv'
    +);
    +
    + +
    +
  2. Create ClickHouse result table clickhouse and insert the data from the disSource table to the result table.
    create table clickhouse(
    +  attr0 string,
    +  attr1 TINYINT,
    +  attr2 smallint,
    +  attr3 int,
    +  attr4 bigint,
    +  attr5 float,
    +  attr6 double,
    +  attr7 String,
    +  attr8 string,
    +  attr9 timestamp(3),
    +  attr10 timestamp(3),
    +  attr11 date,
    +  attr12 decimal(38, 18),
    +  attr13 decimal(38, 18),
    +  attr14 array < int >,
    +  attr15 array < bigint >,
    +  attr16 array < float >,
    +  attr17 array < double >,
    +  attr18 array < varchar >,
    +  attr19 array < String >
    +) with (
    +  'connector.type' = 'clickhouse',
    +  'connector.url' = 'jdbc:clickhouse://xx.xx.xx.xx:xx/flinktest',
    +  'connector.table' = 'test'
    +);
    +
    +insert into
    +  clickhouse
    +select
    +  attr0,
    +  attr1,
    +  attr2,
    +  attr3,
    +  attr4,
    +  attr5,
    +  attr6,
    +  attr7,
    +  attr8,
    +  attr9,
    +  attr10,
    +  attr11,
    +  attr12,
    +  attr13,
    +  array [attr3, attr3+1],
    +  array [cast(attr4 as bigint), cast(attr4+1 as bigint)],
    +  array [cast(attr12 as float), cast(attr12+1 as float)],
    +  array [cast(attr13 as double), cast(attr13+1 as double)],
    +  array ['TEST1', 'TEST2'],
    +  array [attr7, attr7]
    +from
    +  disSource;
    +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0345.html b/docs/dli/sqlreference/dli_08_0345.html new file mode 100644 index 00000000..555f18dd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0345.html @@ -0,0 +1,132 @@ + + +

Print Result Table

+

Function

The print connector exports your data output to the error file or the out file of TaskManager. It is mainly used for code debugging and output viewing.

+
+

Syntax

1
+2
+3
+4
+5
+6
+7
create table printSink (
+  attr_name attr_type (',' attr_name attr_type) * (',' PRIMARY KEY (attr_name,...) NOT ENFORCED)
+) with (
+  'connector' = 'print',
+  'print-identifier' = '',
+  'standard-error' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector

+

Yes

+

The value is fixed to print.

+

print-identifier

+

No

+

Message that identifies print and is prefixed to the output of the value.

+

standard-error

+

No

+

The value can be only true or false. The default value is false.

+
  • If the value is true, data is output to the error file of the TaskManager.
  • If the value is false, data is output to the out file of the TaskManager.
+
+
+
+

Example

Read data from Kafka and export the data to the out file of TaskManager. You can view the output in the exported file.

+
create table kafkaSource(
+  attr0 string,
+  attr1 boolean,
+  attr3 decimal(38, 18),
+  attr4 TINYINT,
+  attr5 smallint,
+  attr6 int,
+  attr7 bigint,
+  attr8 float,
+  attr9 double,
+  attr10 date,
+  attr11 time,
+  attr12 timestamp(3)
+) with (
+  'connector.type' = 'kafka',
+  'connector.version' = '0.11',
+  'connector.topic' = 'test_json',
+  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
+  'connector.properties.group.id' = 'test_print',
+  'connector.startup-mode' = 'latest-offset',
+  'format.type' = 'csv'
+);
+
+create table printTable(
+  attr0 string,
+  attr1 boolean,
+  attr3 decimal(38,18),
+  attr4 TINYINT,
+  attr5 smallint,
+  attr6 int,
+  attr7 bigint,
+  attr8 float,
+  attr9 double,
+  attr10 date,
+  attr11 time,
+  attr12 timestamp(3),
+  attr13 array<string>,
+  attr14 row<attr15 float, attr16 timestamp(3)>,
+  attr17 map<int, bigint>
+) with (
+  "connector" = "print"
+);
+
+insert into
+  printTable
+select
+  attr0,
+  attr1,
+  attr3,
+  attr4,
+  attr5,
+  attr6,
+  attr7,
+  attr8,
+  attr9,
+  attr10,
+  attr11,
+  attr12,
+  array [cast(attr0 as string), cast(attr0 as string)],
+  row(
+    cast(attr8 as float),
+    cast(attr12 as timestamp(3))
+  ),
+  map [cast(attr6 as int), cast(attr7 as bigint)]
+from
+  kafkaSource;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0346.html b/docs/dli/sqlreference/dli_08_0346.html new file mode 100644 index 00000000..fb18acc1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0346.html @@ -0,0 +1,161 @@ + + +

File System Result Table

+

Function

You can create a file system result table to export data to a file system such as HDFS or OBS. After the data is generated, a non-DLI table can be created directly according to the generated directory. The table can be processed through DLI SQL, and the output data directory can be stored in partition tables. It is applicable to scenarios such as data dumping, big data analysis, data backup, and active, deep, or cold archiving.

+
+

Syntax

1
+2
+3
+4
+5
+6
+7
create table filesystemSink (
+  attr_name attr_type (',' attr_name attr_type) *
+) with (
+  'connector.type' = 'filesystem',
+  'connector.file-path' = '',
+  'format.type' = ''
+);
+
+ +
+
+

Important Notes

  • If the data output directory in the table creation syntax is OBS, the directory must be a parallel file system and cannot be an OBS bucket.
  • When using a file system table, you must enable checkpointing to ensure job consistency.
  • When format.type is parquet, the supported data types are string, boolean, tinyint, smallint, int, bigint, float, double, map<string, string>, timestamp(3), and time.
  • To avoid data loss or data being overwritten, enable automatic restart upon job exceptions and enable Restore Job from Checkpoint.
  • Set the checkpoint interval after weighing output file timeliness, file size, and recovery time; for example, 10 minutes.
  • When using HDFS, you need to bind the data source and enter the host information.
  • When using HDFS, you need to configure information about the node where the active NameNode locates.
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

The value is fixed to filesystem.

+

connector.file-path

+

Yes

+

Data output directory. The format is schema://file.path.

+
NOTE:
Currently, Schema supports only OBS and HDFS.
  • If schema is set to obs, data is stored to OBS. Note that OBS directory must be a parallel file system and must not be an OBS bucket.

    For example, obs://bucketName/fileName indicates that data is exported to the fileName directory in the bucketName bucket.

    +
  • If schema is set to hdfs, data is exported to HDFS.

    Example: hdfs://node-master1sYAx:9820/user/car_infos, where node-master1sYAx:9820 is the name of the node where the NameNode is located.

    +
+
+
+

format.type

+

Yes

+

Output data encoding format. Only parquet and csv are supported.

+
  • When schema is set to obs, the encoding format of the output data can only be parquet.
  • When schema is set to hdfs, the output data can be encoded in Parquet or CSV format.
+

format.field-delimiter

+

No

+

Delimiter used to separate attributes.

+

This parameter needs to be configured if the CSV encoding format is adopted. It can be user-defined, for example, a comma (,).

+

connector.ak

+

No

+

Access key for accessing OBS

+

This parameter is mandatory when data is written to OBS.

+

connector.sk

+

No

+

Secret key for accessing OBS

+

This parameter is mandatory when data is written to OBS.

+

connector.partitioned-by

+

No

+

Partitioning field. Use commas (,) to separate multiple fields.

+
+
+
+

Example

Read data from Kafka and write the data in Parquet format to the fileName directory in the bucketName bucket.

+
create table kafkaSource(
+  attr0 string,
+  attr1 boolean,
+  attr2 TINYINT,
+  attr3 smallint,
+  attr4 int,
+  attr5 bigint,
+  attr6 float,
+  attr7 double,
+  attr8 timestamp(3),
+  attr9 time
+) with (
+  'connector.type' = 'kafka',
+  'connector.version' = '0.11',
+  'connector.topic' = 'test_json',
+  'connector.properties.bootstrap.servers' = 'xx.xx.xx.xx:9092',
+  'connector.properties.group.id' = 'test_filesystem',
+  'connector.startup-mode' = 'latest-offset',
+  'format.type' = 'csv'
+);
+
+create table filesystemSink(
+  attr0 string,
+  attr1 boolean,
+  attr2 TINYINT,
+  attr3 smallint,
+  attr4 int,
+  attr5 bigint,
+  attr6 float,
+  attr7 double,
+  attr8 map < string,  string >,
+  attr9 timestamp(3),
+  attr10 time
+) with (
+  "connector.type" = "filesystem",
+  "connector.file-path" = "obs://bucketName/fileName",
+  "format.type" = "parquet",
+  "connector.ak" = "xxxx",
+  "connector.sk" = "xxxxxx"
+);
+
+insert into
+  filesystemSink
+select
+  attr0,
+  attr1,
+  attr2,
+  attr3,
+  attr4,
+  attr5,
+  attr6,
+  attr7,
+  map [attr0,attr0],
+  attr8,
+  attr9
+from
+  kafkaSource;
+
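For comparison, the following is a minimal sketch that writes part of the same Kafka data to HDFS in CSV format. The NameNode address and directory are assumptions; replace them with your own, and note that format.field-delimiter takes effect only for CSV.

create table filesystemHdfsSink(
+  attr0 string,
+  attr1 boolean
+) with (
+  'connector.type' = 'filesystem',
+  -- Assumed NameNode address and directory; replace them with your own.
+  'connector.file-path' = 'hdfs://node-master1sYAx:9820/user/car_infos',
+  -- CSV output is supported only when the schema is hdfs.
+  'format.type' = 'csv',
+  'format.field-delimiter' = ','
+);
+
+insert into filesystemHdfsSink select attr0, attr1 from kafkaSource;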
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0347.html b/docs/dli/sqlreference/dli_08_0347.html new file mode 100644 index 00000000..e50d0166 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0347.html @@ -0,0 +1,98 @@ + + +

User-defined Result Table

+

Function

Write your Java code to insert the processed data into a specified database supported by your cloud service.

+
+

Prerequisites

Implement the custom sink class:

+

The custom sink class must inherit from the Flink open-source class RichSinkFunction, with the generic type Tuple2<Boolean, Row>.

+
For example, define the MySink class by public class MySink extends RichSinkFunction<Tuple2<Boolean, Row>>{}, and implement the open, invoke, and close functions. A code example is as follows:
public class MySink extends RichSinkFunction<Tuple2<Boolean, Row>> {
+    // Initialize the object.
+    @Override
+    public void open(Configuration parameters) throws Exception {}
+
+    @Override
+    // Implement the data processing logic.
+    /* The in parameter contains two values. The first value is of the Boolean type. The value true indicates the insert or update operation, and the value false indicates the delete operation. If the interconnected sink does not support the delete operation, the deletion will not be executed. The second value indicates the data to be operated.*/
+    public void invoke(Tuple2<Boolean, Row> in, Context context) throws Exception {}
+
+    @Override
+    public void close() throws Exception {}
+}
+
+

Content of the dependent pom configuration file is as follows:

+
<dependency>
+    <groupId>org.apache.flink</groupId> 
+    <artifactId>flink-streaming-java_2.11</artifactId> 
+    <version>${flink.version}</version> 
+    <scope>provided</scope> 
+</dependency> 
+
+<dependency> 
+    <groupId>org.apache.flink</groupId> 
+    <artifactId>flink-core</artifactId> 
+    <version>${flink.version}</version> 
+    <scope>provided</scope> 
+</dependency>
+

Compile and package the implemented class into a JAR file, and upload it using the UDF Jar parameter on the editing page of your Flink OpenSource SQL job.

+
+

Syntax

create table userDefinedSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'user-defined',
+  'connector.class-name' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. The value can only be user-defined, indicating a custom sink.

+

connector.class-name

+

Yes

+

Fully qualified class name of the sink class. For details about the implementation of the sink class, see Prerequisites.

+

connector.class-parameter

+

No

+

Parameter of the constructor of the sink class. Only one parameter of the string type is supported.

+
+
+
+

Precautions

connector.class-name must be a fully qualified class name.

+
+

Example

create table userDefinedSink (
+  attr1 int,
+  attr2 int
+)
+with (
+  'connector.type' = 'user-defined',
+  'connector.class-name' = 'xx.xx.MySink'
+);
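If the sink class defines a constructor that takes a single string, connector.class-parameter passes that value. A minimal sketch, assuming a hypothetical constructor MySink(String config); the parameter value shown is illustrative:

create table userDefinedSink (
+  attr1 int,
+  attr2 int
+)
+with (
+  'connector.type' = 'user-defined',
+  'connector.class-name' = 'xx.xx.MySink',
+  -- Passed to the assumed single-string constructor MySink(String).
+  'connector.class-parameter' = 'filter=attr1>0'
+);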
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0348.html b/docs/dli/sqlreference/dli_08_0348.html new file mode 100644 index 00000000..f813c8b9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0348.html @@ -0,0 +1,125 @@ + + +

OpenTSDB Result Table

+

Function

OpenTSDB is a distributed, scalable time series database based on HBase. It is designed to collect monitoring information from large-scale clusters and query data in seconds, addressing the difficulty of storing and querying massive amounts of monitoring data in common databases. OpenTSDB can be used for system monitoring and measurement as well as collection and monitoring of IoT data, financial data, and scientific experimental results.

+

DLI uses enhanced datasource connections to write the output of Flink jobs to OpenTSDB.

+
+

Prerequisites

  • The OpenTSDB service has been enabled.
  • An enhanced datasource connection has been created for DLI to connect to OpenTSDB, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Syntax

create table tsdbSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'opentsdb',
+  'connector.region' = '',
+  'connector.tsdb-metrics' = '',
+  'connector.tsdb-timestamps' = '',
+  'connector.tsdb-values' = '',
+  'connector.tsdb-tags' = '',
+  'connector.tsdb-link-address' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Connector type. Set this parameter to opentsdb.

+

connector.region

+

Yes

+

Region where OpenTSDB is located

+

connector.tsdb-metrics

+

Yes

+

Metrics of data points, which can be specified through parameter configurations.

+

The number of metrics must be 1 or the same as the number of values in connector.tsdb-values.

+

Use semicolons (;) to separate multiple metrics.

+

connector.tsdb-timestamps

+

Yes

+

Timestamps of data points. Only dynamic columns are supported.

+

The data type can be int, bigint, or string, but only numeric values are supported.

+

The number of timestamps must be 1 or the same as the number of values in connector.tsdb-values.

+

Use semicolons (;) to separate multiple timestamps.

+

connector.tsdb-values

+

Yes

+

Values of data points. You can specify dynamic columns or constant values.

+

Separate multiple values with semicolons (;).

+

connector.tsdb-tags

+

Yes

+

Tags of data points. Each tag group contains at least one and at most eight tags in key:value format, separated by commas (,). The tags can be specified through parameter configurations.

+

The number of tag groups must be 1 or the same as the number of values in connector.tsdb-values.

+

Separate multiple tag groups with semicolons (;).

+

connector.batch-insert-data-num

+

No

+

Number of data records to be written in batches at a time. The value must be a positive integer. The default value is 8.

+

connector.tsdb-link-address

+

Yes

+

OpenTSDB address of the cluster to which the data is to be inserted.

+
+
+
+

Precautions

  • If your OpenTSDB runs in an MRS cluster, ensure that:
    1. The IP address and port number of OpenTSDB must be obtained from tsd.network.bind and tsd.network.port in the OpenTSDB service configuration.
    2. If tsd.https.enabled is set to true, the value format of connector.tsdb-link-address in the SQL statement is https://ip:port. If tsd.https.enabled is set to false, the value of connector.tsdb-link-address can be in the format of http://ip:port or ip:port.
    3. When establishing an enhanced datasource connection, you need to add the mapping between MRS cluster hosts and IP addresses in /etc/hosts to the Host Information parameter.
    +
+
  • If a configuration item can be specified through parameter configurations, one or more columns in the record can be used as part of the configuration item. For example, if the configuration item is set to car_${car_brand} and the value of car_brand in a record is BMW, the value of this configuration item is car_BMW in the record.
  • If a parameter supports dynamic columns, the format must be ${columnName}, where columnName indicates a field name.
+
+

Example

create table sink1(
+  attr1 bigint,
+  attr2 int,
+  attr3 int
+) with (
+  'connector.type' = 'opentsdb',
+  'connector.region' = '',
+  'connector.tsdb-metrics' = '',
+  'connector.tsdb-timestamps' = '${attr1}',
+  'connector.tsdb-values' = '${attr2};10',
+  'connector.tsdb-tags' = 'key1:value1,key2:value2;key3:value3',
+  'connector.tsdb-link-address' = ''
+);
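To make the dynamic-column syntax concrete, the following sketch fills in assumed values: attr1 supplies the timestamp, attr2 and the constant 10 supply two values, so two metrics and two tag groups are declared. The region, metric names, and link address are placeholders for your environment.

create table sink1(
+  attr1 bigint,
+  attr2 int,
+  attr3 string
+) with (
+  'connector.type' = 'opentsdb',
+  'connector.region' = 'xxx',
+  -- Two metrics, matching the two values below.
+  'connector.tsdb-metrics' = 'cpu_usage;mem_usage',
+  -- Dynamic column attr1 supplies the timestamp.
+  'connector.tsdb-timestamps' = '${attr1}',
+  -- One dynamic value and one constant value.
+  'connector.tsdb-values' = '${attr2};10',
+  -- Two tag groups, one per value; attr3 is resolved per record.
+  'connector.tsdb-tags' = 'host:${attr3};host:${attr3}',
+  'connector.tsdb-link-address' = 'http://ip:port'
+);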
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0349.html b/docs/dli/sqlreference/dli_08_0349.html new file mode 100644 index 00000000..1a567ee5 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0349.html @@ -0,0 +1,27 @@ + + + +

Backing Up and Restoring Data of Multiple Versions

+ +

+
+ +
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0350.html b/docs/dli/sqlreference/dli_08_0350.html new file mode 100644 index 00000000..8a47933b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0350.html @@ -0,0 +1,58 @@ + + +

Setting the Retention Period for Multiversion Backup Data

+

Function

After multiversion is enabled, backup data is retained for seven days by default. You can change the retention period by setting system parameter dli.multi.version.retention.days. Multiversion data out of the retention period will be automatically deleted when the insert overwrite or truncate statement is executed. You can also set table attribute dli.multi.version.retention.days to adjust the retention period when adding a column or modifying a partitioned table. For details about the syntax for enabling or disabling the multiversion function, see Enabling or Disabling Multiversion Backup.

+

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Syntax

ALTER TABLE [db_name.]table_name 
+SET TBLPROPERTIES ("dli.multi.version.retention.days"="days");
+
+ +
+
+

Keyword

  • TBLPROPERTIES: This keyword is used to add a key/value property to a table.
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

table_name

+

Table name

+

days

+

Number of days the multiversion backup data is retained. The value ranges from 1 to 7. The default value is 7.

+
+
+
+

Precautions

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Example

Set the retention period of multiversion backup data to 5 days.
ALTER TABLE test_table 
+SET TBLPROPERTIES ("dli.multi.version.retention.days"="5");
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0351.html b/docs/dli/sqlreference/dli_08_0351.html new file mode 100644 index 00000000..91ba2088 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0351.html @@ -0,0 +1,63 @@ + + +

Viewing Multiversion Backup Data

+

Function

After the multiversion function is enabled, you can run the SHOW HISTORY command to view the backup data of a table. For details about the syntax for enabling or disabling the multiversion function, see Enabling or Disabling Multiversion Backup.

+

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Syntax

  • View the backup data of a non-partitioned table.
    SHOW HISTORY FOR TABLE [db_name.]table_name;
    +
  • View the backup data of a specified partition.
    SHOW HISTORY FOR TABLE [db_name.]table_name PARTITION (column = value, ...);
    +
+
+

Keyword

  • SHOW HISTORY FOR TABLE: Used to view backup data
  • PARTITION: Used to specify the partition column
+
+

Parameter

+
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

table_name

+

Table name

+

column

+

Partition column name

+

value

+

Value corresponding to the partition column name

+
+
+
+

Precautions

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Example

  • View multiversion backup data of the test_table table.
    SHOW HISTORY FOR TABLE test_table;
    +
    + +
    +
+
  • View multiversion backup data of the dt partition in the test_table partitioned table.
    SHOW HISTORY FOR TABLE test_table PARTITION (dt='2021-07-27');
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0352.html b/docs/dli/sqlreference/dli_08_0352.html new file mode 100644 index 00000000..ba3c2183 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0352.html @@ -0,0 +1,68 @@ + + +

Restoring Multiversion Backup Data

+

Function

After the multiversion function is enabled, you can run the RESTORE TABLE statement to restore a table or partition of a specified version. For details about the syntax for enabling or disabling the multiversion function, see Enabling or Disabling Multiversion Backup.

+

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Syntax

  • Restore the non-partitioned table data to the backup data of a specified version.
    RESTORE TABLE [db_name.]table_name TO VERSION 'version_id';
    +
  • Restore the data of a single partition in a partitioned table to the backup data of a specified version.
    RESTORE TABLE [db_name.]table_name PARTITION (column = value, ...) TO VERSION 'version_id';
    +
+
+

Keyword

  • RESTORE TABLE: Used to restore backup data
  • PARTITION: Used to specify the partition column
  • TO VERSION: Used to specify the version number. You can run the SHOW HISTORY command to obtain it. For details, see Viewing Multiversion Backup Data.
+
+

Parameter

+
+ + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

table_name

+

Table name

+

column

+

Partition column name

+

value

+

Value corresponding to the partition column name

+

version_id

+

Target version of the backup data to be restored. You can run the SHOW HISTORY command to obtain the version number. For details, see Viewing Multiversion Backup Data.

+
+
+
+

Precautions

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Example

  • Restore the data in non-partitioned table test_table to version 20210930.
    RESTORE TABLE test_table TO VERSION '20210930';
    +
    + +
    +
+
  • Restore the data of partition dt in partitioned table test_table to version 20210930.
    RESTORE TABLE test_table PARTITION (dt='2021-07-27') TO VERSION '20210930';
    +
    + +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0353.html b/docs/dli/sqlreference/dli_08_0353.html new file mode 100644 index 00000000..af3372ff --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0353.html @@ -0,0 +1,59 @@ + + +

Configuring the Trash Bin for Expired Multiversion Data

+

Function

After the multiversion function is enabled, expired backup data will be directly deleted by the system when the insert overwrite or truncate statement is executed. You can configure the trash bin of the OBS parallel file system to accelerate the deletion of expired backup data. To enable the trash bin, add dli.multi.version.trash.dir to the table properties. For details about the syntax for enabling or disabling the multiversion function, see Enabling or Disabling Multiversion Backup.

+

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Syntax

ALTER TABLE [db_name.]table_name 
+SET TBLPROPERTIES ("dli.multi.version.trash.dir"="OBS bucket for expired multiversion backup data");
+
+ +
+
+

Keyword

  • TBLPROPERTIES: This keyword is used to add a key/value property to a table.
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

table_name

+

Table name

+

OBS bucket for expired multiversion backup data

+

A directory in the bucket where the current OBS table is located. You can change the directory path as needed. For example, if the current OBS table directory is obs://bucketName/filePath and a Trash directory has been created in the OBS table directory, you can set the trash bin directory to obs://bucketName/filePath/Trash.

+
+
+
+

Precautions

  • Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.
  • To automatically empty the trash bin, you need to configure a lifecycle rule for the bucket of the OBS parallel file system. The procedure is as follows:
    1. On the OBS console, choose Parallel File System in the left navigation pane. Click the name of the target file system. The Overview page is displayed.
    2. In the left navigation pane, choose Basic Configurations > Lifecycle Rules to create a lifecycle rule.
    +
+
+

Example

Configure the trash bin to accelerate the deletion of expired backup data. The data is dumped to the /.Trash directory in OBS.
ALTER TABLE test_table 
+SET TBLPROPERTIES ("dli.multi.version.trash.dir"="/.Trash");
+
+ +
+
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0354.html b/docs/dli/sqlreference/dli_08_0354.html new file mode 100644 index 00000000..8010d210 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0354.html @@ -0,0 +1,66 @@ + + +

Enabling or Disabling Multiversion Backup

+

Function

DLI manages multiple versions of backup data for restoration. After the multiversion function is enabled, the system automatically backs up table data when you delete or modify it using insert overwrite or truncate, and retains the data for a certain period. You can quickly restore data within the retention period. For details about the syntax related to the multiversion function, see Backing Up and Restoring Data of Multiple Versions.

+

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Syntax

  • Enable the multiversion function.
    ALTER TABLE [db_name.]table_name 
    +SET TBLPROPERTIES ("dli.multi.version.enable"="true");
    +
  • Disable the multiversion function.
    ALTER TABLE [db_name.]table_name 
    +UNSET TBLPROPERTIES ("dli.multi.version.enable");
    +
    + +
    +
    After multiversion is enabled, data of different versions is automatically stored in the OBS storage directory when insert overwrite or truncate is executed. After multiversion is disabled, run the following statement to restore the multiversion backup data directory:
    RESTORE TABLE [db_name.]table_name TO initial layout;
    +
    +
+
+

Keyword

  • SET TBLPROPERTIES: Used to set table properties and enable multiversion.
  • UNSET TBLPROPERTIES: Used to unset table properties and disable multiversion.
+
+

Parameter

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_).

+

table_name

+

Table name

+
+
+
+

Precautions

Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.

+
+

Example

  • Modify the test_table table to enable multiversion.
    ALTER TABLE test_table 
    +SET TBLPROPERTIES ("dli.multi.version.enable"="true");
    +
    + +
    +
  • Modify the test_table table to disable multiversion.
    ALTER TABLE test_table 
    +UNSET TBLPROPERTIES ("dli.multi.version.enable");
    +
    + +
    +
    Restore the multiversion backup data directory.
    RESTORE TABLE test_table TO initial layout;
    +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0355.html b/docs/dli/sqlreference/dli_08_0355.html new file mode 100644 index 00000000..24caa70f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0355.html @@ -0,0 +1,49 @@ + + +

Deleting Multiversion Backup Data

+

Function

The retention period of multiversion backup data takes effect each time the insert overwrite or truncate statement is executed. If neither statement is executed for the table, multiversion backup data out of the retention period will not be automatically deleted. You can run the SQL commands described in this section to manually delete multiversion backup data.

+
+

Syntax

Delete multiversion backup data out of the retention period.
clear history for table [db_name.]table_name older_than 'timestamp';
+
+
+

Keyword

  • clear history for table: Used to delete multiversion backup data
  • older_than: Used to specify the time range for deleting multiversion backup data
+
+

Parameter

+
+ + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name, which consists of letters, digits, and underscores (_). The value cannot contain only digits or start with a digit or underscore (_).

+

table_name

+

Table name

+

timestamp

+

Multiversion backup data generated before the timestamp will be deleted. Timestamp format: yyyy-MM-dd HH:mm:ss

+
+
+
+

Precautions

  • Currently, the multiversion function supports only OBS tables created using the Hive syntax. For details about the syntax for creating a table, see Creating an OBS Table Using the Hive Syntax.
  • This statement does not delete the backup data of the current version.
+
+

Example

Delete the multiversion backup data generated before 2021-09-25 23:59:59 in the dliTable table. A timestamp is recorded each time backup data is generated.
clear history for table dliTable older_than '2021-09-25 23:59:59';
+
+

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0356.html b/docs/dli/sqlreference/dli_08_0356.html new file mode 100644 index 00000000..cb2d6a9d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0356.html @@ -0,0 +1,141 @@ + + +

string_split

+

The string_split function splits a target string into substrings based on the specified separator and returns a substring list.

+

Description

string_split(target, separator)
+ +
+ + + + + + + + + + + + + +
Table 1 string_split parameters

Parameter

+

Type

+

Description

+

target

+

STRING

+

Target string to be processed

+
NOTE:
  • If target is NULL, an empty row is returned.
  • If target contains two or more consecutive separators, an empty substring is returned.
  • If target does not contain a specified separator, the original string passed to target is returned.
+
+

separator

+

VARCHAR

+

Delimiter. Currently, only single-character delimiters are supported.

+
+
+
+

Example

  1. Prepare test input data. +
    + + + + + + + + + + + + + +
    Table 2 Source table disSource

    target (STRING)

    +

    separator (VARCHAR)

    +

    test-flink

    +

    -

    +

    flink

    +

    -

    +

    one-two-ww-three

    +

    -

    +
    +
    +
  2. Write test SQL statements.
    create table disSource(
    +  target STRING,
+  separator VARCHAR
    +) with (
    +  "connector.type" = "dis",
    +  "connector.region" = "xxx",
    +  "connector.channel" = "ygj-dis-in",
    +  "format.type" = 'csv'
    +);
    +
    +create table disSink(
    +  target STRING,
    +  item STRING
    +) with (
    +  'connector.type' = 'dis',
    +  'connector.region' = 'xxx',
    +  'connector.channel' = 'ygj-dis-out',
    +  'format.type' = 'csv'
    +);
    +
    +insert into
    +  disSink
    +select
    +  target,
    +  item
    +from
    +  disSource,
    +lateral table(string_split(target, separator)) as T(item);
    +
  3. Check test results. +
    + + + + + + + + + + + + + + + + + + + + + + + + + +
    Table 3 disSink result table

    target (STRING)

    +

    item (STRING)

    +

    test-flink

    +

    test

    +

    test-flink

    +

    flink

    +

    flink

    +

    flink

    +

    one-two-ww-three

    +

    one

    +

    one-two-ww-three

    +

    two

    +

    one-two-ww-three

    +

    ww

    +

    one-two-ww-three

    +

    three

    +
    +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0357.html b/docs/dli/sqlreference/dli_08_0357.html new file mode 100644 index 00000000..97390b3c --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0357.html @@ -0,0 +1,34 @@ + + +

split_cursor

+

The split_cursor function can convert one row of records into multiple rows or convert one column of records into multiple columns. Table-valued functions can only be used in JOIN LATERAL TABLE.

+ +
+ + + + + + + + + +
Table 1 split_cursor function

Function

+

Return Type

+

Description

+

split_cursor(value, delimiter)

+

cursor

+

Separates the "value" string into multiple rows of strings by using the delimiter.

+
+
+

Example

Input one record ("student1", "student2, student3") and output two records ("student1", "student2") and ("student1", "student3").

+
create table s1(attr1 string, attr2 string) with (......);
+create table s2(attr1 string, b1 string) with (......);
+insert into s2 select attr1, b1 from s1 left join lateral table(split_cursor(attr2, ',')) as T(b1) on true;
+
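A fuller sketch of the same pattern, assuming DIS channels for the source and sink as in the other examples in this reference (the region and channel names are placeholders):

create table s1(
+  attr1 string,
+  attr2 string
+) with (
+  'connector.type' = 'dis',
+  'connector.region' = 'xxx',
+  'connector.channel' = 'dis-input',
+  'format.type' = 'csv'
+);
+
+create table s2(
+  attr1 string,
+  b1 string
+) with (
+  'connector.type' = 'dis',
+  'connector.region' = 'xxx',
+  'connector.channel' = 'dis-output',
+  'format.type' = 'csv'
+);
+
+insert into s2
+select attr1, b1
+from s1 left join lateral table(split_cursor(attr2, ',')) as T(b1) on true;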
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0358.html b/docs/dli/sqlreference/dli_08_0358.html new file mode 100644 index 00000000..5b9b82fe --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0358.html @@ -0,0 +1,83 @@ + + +

userDefined Source Table

+

Function

You can call APIs to obtain data from the cloud ecosystem or an open source ecosystem and use the obtained data as input of Flink jobs.

+
+

Prerequisites

The custom source class must inherit from the RichParallelSourceFunction class and specify the data type as Row.

+

For example, declare the custom class MySource with public class MySource extends RichParallelSourceFunction<Row>{}, and implement the open, run, close, and cancel functions. Package the class into a JAR file and upload it using the UDF Jar parameter on the SQL editing page.

+
Content of the dependent pom configuration file is as follows:
<dependency>
+    <groupId>org.apache.flink</groupId> 
+    <artifactId>flink-streaming-java_2.11</artifactId> 
+    <version>${flink.version}</version> 
+    <scope>provided</scope> 
+</dependency> 
+
+<dependency> 
+    <groupId>org.apache.flink</groupId> 
+    <artifactId>flink-core</artifactId> 
+    <version>${flink.version}</version> 
+    <scope>provided</scope> 
+</dependency>
+
+
+

Syntax

create table userDefinedSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'user-defined',
+  'connector.class-name' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Description

+

connector.type

+

Yes

+

Source type. The value can only be user-defined, indicating a custom source.

+

connector.class-name

+

Yes

+

Fully qualified class name of the source class

+

connector.class-parameter

+

No

+

Parameter of the constructor of the source class. Only one parameter of the string type is supported.

+
+
+
+

Precautions

connector.class-name must be a fully qualified class name.

+
+

Example

create table userDefinedSource (
+  attr1 int,
+  attr2 int
+)
+with (
+  'connector.type' = 'user-defined',
+  'connector.class-name' = 'xx.xx.MySource'
+);
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0359.html b/docs/dli/sqlreference/dli_08_0359.html new file mode 100644 index 00000000..c6e88d1b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0359.html @@ -0,0 +1,50 @@ + + +

Updating Table Metadata with REFRESH TABLE

+

Function

Spark caches Parquet metadata to improve performance. If you update a Parquet table, the cached metadata is not updated. Spark SQL cannot find the newly inserted data and an error similar to the following is reported:
DLI.0002: FileNotFoundException: getFileStatus on  error message
+
+

You can use REFRESH TABLE to solve this problem. REFRESH TABLE reorganizes the files of a partition and reuses the original table metadata to detect the increase or decrease of table fields. This statement is mainly used when the table data is modified but the table metadata is not.

+
+

Syntax

REFRESH TABLE [db_name.]table_name;
+
+ +
+
+

Keyword

None

+
+

Parameter

+
+ + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Description

+

db_name

+

Database name that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_).

+

table_name

+

Table name of a database that contains letters, digits, and underscores (_). It cannot contain only digits or start with an underscore (_). The matching rule is ^(?!_)(?![0-9]+$)[A-Za-z0-9_$]*$. If special characters are required, use single quotation marks ('') to enclose them.

+
+
+
+

Precautions

None

+
+

Example

Update metadata of the test table.

+
REFRESH TABLE test;
+
+ +
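A typical sequence, assuming the files of a Parquet table named test were changed outside Spark SQL:

SELECT count(*) FROM test;   -- may fail or miss new files because of cached metadata
+REFRESH TABLE test;          -- reload the cached metadata for the table
+SELECT count(*) FROM test;   -- reads the updated files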
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0370.html b/docs/dli/sqlreference/dli_08_0370.html new file mode 100644 index 00000000..ee72b1ad --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0370.html @@ -0,0 +1,20 @@ + + +

Flink Opensource SQL 1.12 Syntax Reference

+

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0371.html b/docs/dli/sqlreference/dli_08_0371.html new file mode 100644 index 00000000..d3f6a1cb --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0371.html @@ -0,0 +1,17 @@ + + +

Constraints and Definitions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0372.html b/docs/dli/sqlreference/dli_08_0372.html new file mode 100644 index 00000000..14418a45 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0372.html @@ -0,0 +1,11 @@ + + +

Supported Data Types

+

STRING, BOOLEAN, BYTES, DECIMAL, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE, INTERVAL, ARRAY, MULTISET, MAP, ROW

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0373.html b/docs/dli/sqlreference/dli_08_0373.html new file mode 100644 index 00000000..24430126 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0373.html @@ -0,0 +1,17 @@ + + +

Syntax

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0374.html b/docs/dli/sqlreference/dli_08_0374.html new file mode 100644 index 00000000..a70ea328 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0374.html @@ -0,0 +1,19 @@ + + +

Data Definition Language (DDL)

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0375.html b/docs/dli/sqlreference/dli_08_0375.html new file mode 100644 index 00000000..caa06afd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0375.html @@ -0,0 +1,75 @@ + + +

CREATE TABLE

+

Syntax

CREATE TABLE table_name
+  (
+    { <column_definition> | <computed_column_definition> }[ , ...n]
+    [ <watermark_definition> ]
+    [ <table_constraint> ][ , ...n]
+  )
+  [COMMENT table_comment]
+  [PARTITIONED BY (partition_column_name1, partition_column_name2, ...)]
+  WITH (key1=val1, key2=val2, ...)
+
+<column_definition>:
+  column_name column_type [ <column_constraint> ] [COMMENT column_comment]
+
+<column_constraint>:
+  [CONSTRAINT constraint_name] PRIMARY KEY NOT ENFORCED
+
+<table_constraint>:
+  [CONSTRAINT constraint_name] PRIMARY KEY (column_name, ...) NOT ENFORCED
+
+<computed_column_definition>:
+  column_name AS computed_column_expression [COMMENT column_comment]
+
+<watermark_definition>:
+  WATERMARK FOR rowtime_column_name AS watermark_strategy_expression
+
+<source_table>:
+  [catalog_name.][db_name.]table_name
+
+

Function

Create a table with a specified name.

+
+

Description

COMPUTED COLUMN

+

A computed column is a virtual column generated using column_name AS computed_column_expression. A computed column evaluates an expression that can reference other columns declared in the same table. The column itself is not physically stored within the table. A computed column could be defined using cost AS price * quantity. This expression can contain any combination of physical columns, constants, functions, or variables, but cannot contain any subquery.

+

In Flink, a computed column is used to define the time attribute in CREATE TABLE statements. A processing time attribute can be defined easily via proc AS PROCTIME() using the system's PROCTIME() function. The event time column may be obtained from an existing field. In this case, you can use the computed column to obtain event time. For example, if the original field is not of the TIMESTAMP(3) type or is nested in a JSON string, you can use computed columns.

+

Note:

+
  • An expression that defines a computed column in a source table is calculated after data is read from the data source. The column can be used in the SELECT statement.
  • A computed column cannot be the target of an INSERT statement. In an INSERT statement, the schema of the SELECT statement must be the same as that of the target table that does not have a computed column.
+
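A minimal sketch combining both uses described above, a derived expression and a processing time attribute (the table and its fields are illustrative):

CREATE TABLE Orders (
+    price DOUBLE,
+    quantity DOUBLE,
+    cost AS price * quantity,  -- evaluated after data is read from the source
+    proc AS PROCTIME()         -- processing time attribute
+) WITH ( . . . );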
+

WATERMARK

+

The WATERMARK clause defines the event time attribute of a table and takes the form WATERMARK FOR rowtime_column_name AS watermark_strategy_expression.

+

rowtime_column_name defines an existing column that is marked as the event time attribute of the table. The column must be of the TIMESTAMP(3) type and must be the top-level column in the schema. It can also be a computed column.

+

watermark_strategy_expression defines the watermark generation strategy. It allows arbitrary non-query expressions, including computed columns, to calculate the watermark. The expression return type must be TIMESTAMP(3), which represents the timestamp since the Epoch. The returned watermark will be emitted only if it is non-null and its value is greater than the previously emitted local watermark (to preserve the contract of ascending watermarks). The watermark generation expression is evaluated by the framework for every record. The framework will periodically emit the largest generated watermark. If the current watermark is still identical to the previous one, or is null, or the value of the returned watermark is smaller than that of the last emitted one, then no new watermark will be emitted. A watermark is emitted in an interval defined by pipeline.auto-watermark-interval. If the watermark interval is 0 ms, a watermark will be emitted per record if it is not null and greater than the last emitted one.

+

When using event time semantics, tables must contain an event time attribute and watermark strategy.

+

Flink provides several commonly used watermark strategies.

+
  • Strictly ascending timestamps: WATERMARK FOR rowtime_column AS rowtime_column

    Emits a watermark of the maximum observed timestamp so far. Rows that have a timestamp bigger than the maximum timestamp are not late.

    +
  • Ascending timestamps: WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '0.001' SECOND

    Emits a watermark of the maximum observed timestamp so far minus 1. Rows that have a timestamp bigger than or equal to the maximum timestamp are not late.

    +
  • Bounded out-of-order timestamps: WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL 'string' timeUnit

    Emits a watermark, which is the maximum observed timestamp minus the specified delay, for example, WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '5' SECOND is a 5-second delayed watermark strategy.

    +
    CREATE TABLE Orders (
    +    user BIGINT,
    +    product STRING,
    +    order_time TIMESTAMP(3),
    +    WATERMARK FOR order_time AS order_time - INTERVAL '5' SECOND
    +) WITH ( . . . );
    +
+

PRIMARY KEY

+

The primary key constraint is a hint for Flink to leverage for optimizations. It indicates that a column or a set of columns of a table or a view is unique and does not contain null values. None of the columns in a primary key can be nullable. The primary key therefore uniquely identifies a row in a table.

+

The primary key constraint can be declared either along with a column definition (a column constraint) or as a single line (a table constraint). In both cases, it should be declared only once. If multiple primary key constraints are defined at the same time, an exception is thrown.

+

Validity Check

+

The SQL standard specifies that a constraint can either be ENFORCED or NOT ENFORCED. This controls whether constraint checks are performed on the incoming/outgoing data. Flink does not own the data, so the only mode it supports is NOT ENFORCED. It is up to the user to ensure that the query enforces key integrity.

+

Flink assumes the primary key is correct and that the columns' nullability is aligned with the columns in the primary key. Connectors should ensure these are aligned.

+

Note: In a CREATE TABLE statement, creating a primary key constraint alters the columns' nullability; that is, a column with a primary key constraint is not nullable.

+
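The two equivalent declaration forms, as a column constraint and as a table constraint (only one of them may appear in a given table):

CREATE TABLE MyTable (
+    id BIGINT PRIMARY KEY NOT ENFORCED,  -- column constraint
+    name STRING
+) WITH ( . . . );
+
+CREATE TABLE MyTable (
+    id BIGINT,
+    name STRING,
+    PRIMARY KEY (id) NOT ENFORCED        -- table constraint
+) WITH ( . . . );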

PARTITIONED BY

+

Partition the created table by the specified columns. A directory is created for each partition if this table is used as a file system sink.

+
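For example, the following sketch partitions a table by dt, so a file system sink writes each distinct dt value into its own directory (the column names are illustrative):

CREATE TABLE fs_sink (
+    user_id STRING,
+    amount INT,
+    dt STRING
+) PARTITIONED BY (dt) WITH ( . . . );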

WITH OPTIONS

+

Table properties used to create a table source/sink. The properties are usually used to find and create the underlying connector.

+

The key and value of the expression key1=val1 should both be string literals.

+

Note: The table registered with the CREATE TABLE statement can be used as both a table source and a table sink. Whether it is used as a source or a sink is determined only when it is referenced in a DML statement.

+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0376.html b/docs/dli/sqlreference/dli_08_0376.html new file mode 100644 index 00000000..d0a8d161 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0376.html @@ -0,0 +1,22 @@ + + +

CREATE VIEW

+

Syntax

CREATE VIEW [IF NOT EXISTS] view_name
+  [{columnName [, columnName ]* }] [COMMENT view_comment]
+  AS query_expression
+
+

Function

Create a view that can nest multiple layers of queries to simplify the development process.

+
+

Description

IF NOT EXISTS

+

If the view already exists, nothing happens.

+
+

Example

Create a view named viewName.

+
create view viewName as select * from dataSource;
+
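Views can themselves be referenced by other views or queries. A sketch that nests a second layer (the amount column is assumed for illustration):

create view viewName2 as select * from viewName where amount > 0;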
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0377.html b/docs/dli/sqlreference/dli_08_0377.html new file mode 100644 index 00000000..24ae788f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0377.html @@ -0,0 +1,25 @@ + + +

CREATE FUNCTION

+

Syntax

CREATE FUNCTION
+  [IF NOT EXISTS] function_name
+  AS identifier [LANGUAGE JAVA|SCALA]
+
+

Function

Create a user-defined function.

+

For details about how to create a user-defined function, see User-Defined Functions (UDFs).

+
+

Description

IF NOT EXISTS

+

If the function already exists, nothing happens.

+

LANGUAGE JAVA|SCALA

+

The language tag is used to instruct the Flink runtime how to execute the function. Currently, only the JAVA and SCALA language tags are supported. The default language for a function is JAVA.

+
+

Example

Create a function named STRINGBACK.

+
create function STRINGBACK as 'com.dli.StringBack';
+
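Once registered, the function is invoked like a built-in function. A sketch assuming a table myTable with a string column attr:

select STRINGBACK(attr) from myTable;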
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0378.html b/docs/dli/sqlreference/dli_08_0378.html new file mode 100644 index 00000000..23d2b454 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0378.html @@ -0,0 +1,138 @@ + + +

Data Manipulation Language (DML)

+

DML Statements

Syntax

+
+
INSERT INTO table_name [PARTITION part_spec] query
+
+part_spec:  (part_col_name1=val1 [, part_col_name2=val2, ...])
+
+query:
+  values
+  | {
+      select
+      | selectWithoutFrom
+      | query UNION [ ALL ] query
+      | query EXCEPT query
+      | query INTERSECT query
+    }
+    [ ORDER BY orderItem [, orderItem ]* ]
+    [ LIMIT { count | ALL } ]
+    [ OFFSET start { ROW | ROWS } ]
+    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY]
+
+orderItem:
+  expression [ ASC | DESC ]
+
+select:
+  SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+  [ WINDOW windowName AS windowSpec [, windowName AS windowSpec ]* ]
+
+selectWithoutFrom:
+  SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+
+projectItem:
+  expression [ [ AS ] columnAlias ]
+  | tableAlias . *
+
+tableExpression:
+  tableReference [, tableReference ]*
+  | tableExpression [ NATURAL ] [ LEFT | RIGHT | FULL ] JOIN tableExpression [ joinCondition ]
+
+joinCondition:
+  ON booleanExpression
+  | USING '(' column [, column ]* ')'
+
+tableReference:
+  tablePrimary
+  [ matchRecognize ]
+  [ [ AS ] alias [ '(' columnAlias [, columnAlias ]* ')' ] ]
+
+tablePrimary:
+  [ TABLE ] [ [ catalogName . ] schemaName . ] tableName
+  | LATERAL TABLE '(' functionName '(' expression [, expression ]* ')' ')'
+  | UNNEST '(' expression ')'
+
+values:
+  VALUES expression [, expression ]*
+
+groupItem:
+  expression
+  | '(' ')'
+  | '(' expression [, expression ]* ')'
+  | CUBE '(' expression [, expression ]* ')'
+  | ROLLUP '(' expression [, expression ]* ')'
+  | GROUPING SETS '(' groupItem [, groupItem ]* ')'
+
+windowRef:
+    windowName
+  | windowSpec
+
+windowSpec:
+    [ windowName ]
+    '('
+    [ ORDER BY orderItem [, orderItem ]* ]
+    [ PARTITION BY expression [, expression ]* ]
+    [
+        RANGE numericOrIntervalExpression {PRECEDING}
+      | ROWS numericExpression {PRECEDING}
+    ]
+    ')'
+
+matchRecognize:
+      MATCH_RECOGNIZE '('
+      [ PARTITION BY expression [, expression ]* ]
+      [ ORDER BY orderItem [, orderItem ]* ]
+      [ MEASURES measureColumn [, measureColumn ]* ]
+      [ ONE ROW PER MATCH ]
+      [ AFTER MATCH
+            ( SKIP TO NEXT ROW
+            | SKIP PAST LAST ROW
+            | SKIP TO FIRST variable
+            | SKIP TO LAST variable
+            | SKIP TO variable )
+      ]
+      PATTERN '(' pattern ')'
+      [ WITHIN intervalLiteral ]
+      DEFINE variable AS condition [, variable AS condition ]*
+      ')'
+
+measureColumn:
+      expression AS alias
+
+pattern:
+      patternTerm [ '|' patternTerm ]*
+
+patternTerm:
+      patternFactor [ patternFactor ]*
+
+patternFactor:
+      variable [ patternQuantifier ]
+
+patternQuantifier:
+      '*'
+  |   '*?'
+  |   '+'
+  |   '+?'
+  |   '?'
+  |   '??'
+  |   '{' { [ minRepeat ], [ maxRepeat ] } '}' ['?']
+  |   '{' repeat '}'
+

Precautions

+

Flink SQL uses a lexical policy for identifiers (table, attribute, and function names) similar to Java:

+
  • The case of identifiers is preserved whether or not they are quoted.
  • Identifiers are matched case-sensitively.
  • Unlike Java, back-ticks allow identifiers to contain non-alphanumeric characters (for example, SELECT a AS `my field` FROM t).
+

String literals must be enclosed in single quotes (for example, SELECT 'Hello World'). Duplicate a single quote for escaping (for example, SELECT 'It''s me.'). Unicode characters are supported in string literals. If explicit Unicode points are required, use the following syntax:

+
  • Use the backslash (\) as an escaping character (default): SELECT U&'\263A'
  • Use a custom escaping character: SELECT U&'#263A' UESCAPE '#'
+
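A short sketch pulling these rules together (table t and its column a are assumed):

SELECT
+  a AS `my field`,         -- back-ticks allow non-alphanumeric identifiers
+  'It''s me.' AS quoted,   -- duplicate a single quote to escape it
+  U&'\263A' AS smiley      -- explicit Unicode point with the default escape character
+FROM t;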
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0379.html b/docs/dli/sqlreference/dli_08_0379.html new file mode 100644 index 00000000..1b11eed9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0379.html @@ -0,0 +1,124 @@ + + +

Overview

+

This section describes the Flink open source SQL 1.12 syntax supported by DLI. For details about the parameters and examples, see the syntax description.

+

Creating Tables

+ +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0380.html b/docs/dli/sqlreference/dli_08_0380.html new file mode 100644 index 00000000..216823b9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0380.html @@ -0,0 +1,21 @@ + + +

DDL Syntax

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0381.html b/docs/dli/sqlreference/dli_08_0381.html new file mode 100644 index 00000000..bde8db08 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0381.html @@ -0,0 +1,31 @@ + + +

Creating Source Tables

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0382.html b/docs/dli/sqlreference/dli_08_0382.html new file mode 100644 index 00000000..dce74a79 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0382.html @@ -0,0 +1,163 @@ + + +

DataGen Source Table

+

Function

DataGen is used to generate random data for debugging and testing.

+
+

Prerequisites

None

+
+

Precautions

  • When you create a DataGen table, the table field type cannot be Array, Map, or Row. You can use COMPUTED COLUMN in CREATE TABLE to achieve similar functionality.
  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
+
+

Syntax

create table dataGenSource(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (',' WATERMARK FOR rowtime_column_name AS watermark-strategy_expression)
+)
+with (
+  'connector' = 'datagen'
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to datagen.

+

rows-per-second

+

No

+

10000

+

Long

+

Number of rows generated per second, which is used to control the emit rate.

+

fields.#.kind

+

No

+

random

+

String

+

Generator of the # field. The # field must be an actual field in the DataGen table; replace # with the corresponding field name. The # in the other parameters has the same meaning.

+

The value can be sequence or random.

+
  • random is the default generator. You can use the fields.#.max and fields.#.min parameters to specify the maximum and minimum values that are randomly generated.

    If the specified field type is char, varchar, or string, you can also use the fields.#.length field to specify the length. A random generator is an unbounded generator.

    +
  • sequence is a bounded sequence generator. You can use fields.#.start and fields.#.end to specify the start and end values of the sequence. When the sequence number reaches the end value, the reading ends.
+

fields.#.min

+

No

+

Minimum value of the field type specified by #

+

Field type specified by #

+

This parameter is valid only when fields.#.kind is set to random.

+

Minimum value of the random generator. It applies only to numeric field types specified by #.

+

fields.#.max

+

No

+

Maximum value of the field type specified by #

+

Field type specified by #

+

This parameter is valid only when fields.#.kind is set to random.

+

Maximum value of the random generator. It applies only to numeric field types specified by #.

+

fields.#.length

+

No

+

100

+

Integer

+

This parameter is valid only when fields.#.kind is set to random.

+

Length of the characters generated by the random generator. It applies only to char, varchar, and string types specified by #.

+

fields.#.start

+

No

+

None

+

Field type specified by #

+

This parameter is valid only when fields.#.kind is set to sequence.

+

Start value of a sequence generator.

+

fields.#.end

+

No

+

None

+

Field type specified by #

+

This parameter is valid only when fields.#.kind is set to sequence.

+

End value of a sequence generator.

+
+
+
+

Example

Create a Flink OpenSource SQL job. Run the following script to generate random data through the DataGen table and output the data to the Print result table.

+

When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs.

+
create table dataGenSource(
+  user_id string,
+  amount int
+) with (
+  'connector' = 'datagen',
+  'rows-per-second' = '1', --Generates one row per second.
+  'fields.user_id.kind' = 'random', --Specifies a random generator for the user_id field.
+  'fields.user_id.length' = '3' --Limits the length of user_id to 3.
+);
+
+create table printSink(
+  user_id string,
+  amount int
+) with (
+  'connector' = 'print'
+);
+
+insert into printSink select * from dataGenSource;
+

After the job is submitted, the job status changes to Running. You can perform the following operations to view the output result:

+
  • Method 1:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Locate the row that contains the target Flink job, and choose More > FlinkUI in the Operation column.
    3. On the Flink UI, choose Task Managers, click the task name, and select Stdout to view job logs.
    +
  • Method 2: If you select Save Job Log on the Running Parameters tab before submitting the job, perform the following operations:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +
+
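A variant of the example above using a bounded sequence generator: the source stops producing once id reaches the end value (the field names are illustrative).

create table dataGenSeqSource(
+  id bigint,
+  amount int
+) with (
+  'connector' = 'datagen',
+  'fields.id.kind' = 'sequence',
+  'fields.id.start' = '1',
+  'fields.id.end' = '1000',        -- bounded: reading ends at 1000
+  'fields.amount.kind' = 'random',
+  'fields.amount.min' = '1',
+  'fields.amount.max' = '100'
+);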
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0383.html b/docs/dli/sqlreference/dli_08_0383.html new file mode 100644 index 00000000..81824fb6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0383.html @@ -0,0 +1,286 @@ + + +

GaussDB(DWS) Source Table

+

Function

DLI reads data of Flink jobs from GaussDB(DWS). The GaussDB(DWS) database kernel is compliant with PostgreSQL, which can store data of more complex types and deliver spatial information services, multi-version concurrency control (MVCC), and high concurrency. It applies to location-based applications, finance and insurance, and e-commerce.

+

GaussDB(DWS) is an online data processing database based on the cloud infrastructure and platform and helps you mine and analyze massive sets of data.

+
+

Prerequisites

  • You have created a GaussDB(DWS) cluster.

    For details about how to create a GaussDB(DWS) cluster, see Creating a Cluster in the Data Warehouse Service Management Guide.

    +
  • You have created a GaussDB(DWS) database table.
  • An enhanced datasource connection has been created for DLI to connect to GaussDB(DWS) clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.

+
+

Syntax

create table dwsSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+)
+with (
+  'connector' = 'gaussdb',
+  'url' = '',
+  'table-name' = '',
+  'username' = '',
+  'password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to gaussdb.

+

url

+

Yes

+

None

+

String

+

JDBC connection address. Set the IP address in this parameter to the internal IP address of GaussDB(DWS).

+

If you use the gsjdbc4 driver, set the value in jdbc:postgresql://${ip}:${port}/${dbName} format.

+

If you use the gsjdbc200 driver, set the value in jdbc:gaussdb://${ip}:${port}/${dbName} format.

+

table-name

+

Yes

+

None

+

String

+

Name of the GaussDB(DWS) table to be operated. If the GaussDB(DWS) table is in a schema, refer to the description of GaussDB(DWS) table in a schema.

+

driver

+

No

+

org.postgresql.Driver

+

String

+

JDBC connection driver. The default value is org.postgresql.Driver.

+

username

+

No

+

None

+

String

+

Username for GaussDB(DWS) database authentication. This parameter must be configured together with password.

+

password

+

No

+

None

+

String

+

Password for GaussDB(DWS) database authentication. This parameter must be configured together with username.

+

scan.partition.column

+

No

+

None

+

String

+

Name of the column used to partition the input.

+

Note: This parameter must be used together with scan.partition.lower-bound, scan.partition.upper-bound, and scan.partition.num.

+

scan.partition.lower-bound

+

No

+

None

+

Integer

+

Lower bound of values to be fetched for the first partition.

+

This parameter must be used together with scan.partition.column, scan.partition.upper-bound, and scan.partition.num.

+

scan.partition.upper-bound

+

No

+

None

+

Integer

+

Upper bound of values to be fetched for the last partition.

+

This parameter must be used together with scan.partition.column, scan.partition.lower-bound, and scan.partition.num.

+

scan.partition.num

+

No

+

None

+

Integer

+

Number of partitions to be created.

+

This parameter must be used together with scan.partition.column, scan.partition.lower-bound, and scan.partition.upper-bound.

+

scan.fetch-size

+

No

+

0

+

Integer

+

Number of rows fetched from the database each time. The default value is 0, indicating that the number of rows is not limited.

+
+
+
+
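For large tables, the scan.partition options split the read into parallel range queries over one column. A sketch assuming an integer column id whose values range from 1 to 1000, read in 10 partitions (connection values are placeholders, as in the example below):

create table dwsSource (
+  id int,
+  user_name string
+) with (
+  'connector' = 'gaussdb',
+  'url' = 'jdbc:postgresql://DWSIP:DWSPort/DWSdbName',
+  'table-name' = 'dws_order',
+  'username' = 'DWSUserName',
+  'password' = 'DWSPassword',
+  'scan.partition.column' = 'id',        -- must be set together with the three options below
+  'scan.partition.lower-bound' = '1',
+  'scan.partition.upper-bound' = '1000',
+  'scan.partition.num' = '10'
+);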

Example

In this example, data is read from the GaussDB(DWS) data source and written to the Print result table. The procedure is as follows:

+
  1. Create a table named dws_order in GaussDB(DWS).
    create table public.dws_order(
    +  order_id VARCHAR,
    +  order_channel VARCHAR,
    +  order_time VARCHAR,
    +  pay_amount FLOAT8,
    +  real_pay FLOAT8,
    +  pay_time VARCHAR,
    +  user_id VARCHAR,
    +  user_name VARCHAR,
    +  area_id VARCHAR);
    +
    Insert data into the dws_order table.
    insert into public.dws_order
    +  (order_id,
    +  order_channel,
    +  order_time,
    +  pay_amount,
    +  real_pay,
    +  pay_time,
    +  user_id,
    +  user_name,
    +  area_id) values
    +  ('202103241000000001', 'webShop', '2021-03-24 10:00:00', '100.00', '100.00', '2021-03-24 10:02:03', '0001', 'Alice', '330106'),
    +  ('202103251202020001', 'miniAppShop', '2021-03-25 12:02:02', '60.00', '60.00', '2021-03-25 12:03:00', '0002', 'Bob', '330110');
    +
    +
  2. Create an enhanced datasource connection in the VPC and subnet where GaussDB(DWS) is located, and bind the connection to the required Flink elastic resource pool.
  3. Set GaussDB(DWS) security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the GaussDB(DWS) address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses the GaussDB(DWS) data source and the Print result table.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE dwsSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'gaussdb',
    +  'url' = 'jdbc:postgresql://DWSIP:DWSPort/DWSdbName',
    +  'table-name' = 'dws_order',
    +  'driver' = 'org.postgresql.Driver',
    +  'username' = 'DWSUserName',
    +  'password' = 'DWSPassword'
    +);
    +
    +CREATE TABLE printSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'print'
    +);
    +
    +insert into printSink select * from dwsSource;
    +
    +
  5. Perform the following operations to view the data result in the taskmanager.out file:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    ++I(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +
+
+

FAQ

  • Q: What should I do if the job execution fails and the log contains the following error information?
    java.io.IOException: unable to open JDBC writer
    +...
    +Caused by: org.postgresql.util.PSQLException: The connection attempt failed.
    +...
    +Caused by: java.net.SocketTimeoutException: connect timed out
    +
    A: The datasource connection is not bound or the binding fails. +
    +
+
+
  • Q: How can I configure a GaussDB(DWS) table that is in a schema?

    A: The following provides an example of configuring the dws_order table in the dbuser2 schema:

    +
    CREATE TABLE dwsSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'gaussdb',
    +  'url' = 'jdbc:postgresql://DWSIP:DWSPort/DWSdbName',
    +  'table-name' = 'dbuser2\".\"dws_order',
    +  'driver' = 'org.postgresql.Driver',
    +  'username' = 'DWSUserName',
    +  'password' = 'DWSPassword'
    +);
    +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0384.html b/docs/dli/sqlreference/dli_08_0384.html new file mode 100644 index 00000000..b5f6ead9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0384.html @@ -0,0 +1,267 @@ + + +

HBase Source Table

+

Function

Create a source stream to obtain data from HBase as input for jobs. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scalability. It applies to the storage of massive amounts of data and to distributed computing. You can use HBase to build a storage system capable of storing TB- or even PB-level data. With HBase, you can filter and analyze data with ease, get responses in milliseconds, and rapidly mine the value of your data. DLI can read data from HBase for filtering, analysis, and data dumping.

+
+

Prerequisites

  • An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
  • If MRS HBase is used, the IP addresses of all hosts in the MRS cluster have been added to the host information of the enhanced datasource connection.

    +

    For details, see section "Modifying the Host Information" in the Data Lake Insight User Guide.

    +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • Do not enable Kerberos authentication for the created HBase cluster.
  • The column families in the created HBase source table must be declared as the ROW type, the field names must map to the column family names, and the nested field names must map to the column qualifier names.

    There is no need to declare all the families and qualifiers in the schema; you can declare only those used in the query. Apart from the ROW type fields, the single remaining atomic field (for example, STRING or BIGINT) is recognized as the HBase rowkey. The rowkey field can have an arbitrary name, but must be quoted using backticks if it is a reserved keyword. A minimal declaration sketch follows this list.

    +
+
+
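For reference, here is a minimal declaration sketch under assumed names (a hypothetical HBase table user_info with a single column family info holding the qualifiers name and age; the ZooKeeper address is a placeholder):

create table hbaseSource (
  rowkey string,                  -- the single atomic field is recognized as the HBase rowkey
  info Row(name string, age int), -- column family "info"; nested fields map to qualifiers
  primary key (rowkey) not enforced
) with (
  'connector' = 'hbase-2.2',
  'table-name' = 'user_info',
  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort'
);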

Syntax

create table hbaseSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'hbase-2.2',
+  'table-name' = '',
+  'zookeeper.quorum' = ''
+);
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to hbase-2.2.

+

table-name

+

Yes

+

None

+

String

+

Name of the HBase table to connect to.

+

zookeeper.quorum

+

Yes

+

None

+

String

+

HBase ZooKeeper quorum, in the format of "ZookeeperAddress:ZookeeperPort".

+

The following uses an MRS HBase cluster as an example to describe how to obtain the IP address and port number of ZooKeeper used by this parameter:

+
  • On MRS Manager, choose Cluster and click the name of the desired cluster. Choose Services > ZooKeeper > Instance, and obtain the IP address of the ZooKeeper instance.
  • On MRS Manager, choose Cluster and click the name of the desired cluster. Choose Services > ZooKeeper > Configurations > All Configurations, search for the clientPort parameter, and obtain its value, that is, the ZooKeeper port number.
+

zookeeper.znode.parent

+

No

+

/hbase

+

String

+

Root directory in ZooKeeper. The default value is /hbase.

+

null-string-literal

+

No

+

None

+

String

+

Representation for null values for string fields.

+

HBase source encodes/decodes empty bytes as null values for all types except the string type.

+
+
+
+

Data Type Mapping

HBase stores all data as byte arrays. The data needs to be serialized and deserialized during read and write operations.

+

When serializing and deserializing, the Flink HBase connector uses the utility class org.apache.hadoop.hbase.util.Bytes provided by HBase (Hadoop) to convert Flink data types to and from byte arrays.

+

The Flink HBase connector encodes null values to empty bytes, and decodes empty bytes to null values for all data types except the string type. For the string type, the null literal is determined by the null-string-literal option (see the sketch after the mapping table below).

+ +
Table 2 Data type mapping

Flink SQL Type

+

HBase Conversion

+

CHAR/VARCHAR/STRING

+

byte[] toBytes(String s)

+

String toString(byte[] b)

+

BOOLEAN

+

byte[] toBytes(boolean b)

+

boolean toBoolean(byte[] b)

+

BINARY/VARBINARY

+

Returns byte[] as is.

+

DECIMAL

+

byte[] toBytes(BigDecimal v)

+

BigDecimal toBigDecimal(byte[] b)

+

TINYINT

+

new byte[] { val }

+

bytes[0] // returns first and only byte from bytes

+

SMALLINT

+

byte[] toBytes(short val)

+

short toShort(byte[] bytes)

+

INT

+

byte[] toBytes(int val)

+

int toInt(byte[] bytes)

+

BIGINT

+

byte[] toBytes(long val)

+

long toLong(byte[] bytes)

+

FLOAT

+

byte[] toBytes(float val)

+

float toFloat(byte[] bytes)

+

DOUBLE

+

byte[] toBytes(double val)

+

double toDouble(byte[] bytes)

+

DATE

+

Stores the number of days since epoch as an int value.

+

TIME

+

Stores the number of milliseconds of the day as an int value.

+

TIMESTAMP

+

Stores the milliseconds since epoch as a long value.

+

ARRAY

+

Not supported

+

MAP/MULTISET

+

Not supported

+

ROW

+

Not supported

+
+
+
+
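As a sketch of the null-string-literal behavior described above (the table and ZooKeeper values are placeholders, and the literal N/A is an assumed convention for this illustration, not a default):

create table hbaseSource (
  order_id string,
  detail Row(order_channel string),
  primary key (order_id) not enforced
) with (
  'connector' = 'hbase-2.2',
  'table-name' = 'order',
  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort',
  'null-string-literal' = 'N/A'  -- string cells holding "N/A" are decoded as NULL
);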

Example

In this example, data is read from the HBase data source and written to the Print result table. The procedure is as follows (the HBase versions used in this example are 1.3.1, 2.1.1, and 2.2.3):

+
  1. Create an enhanced datasource connection in the VPC and subnet where HBase is located, and bind the connection to the required Flink queue.
  2. Set HBase cluster security groups and add inbound rules to allow access from the Flink job queue. Test the connectivity using the HBase address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Use the HBase shell to create an HBase table named order that has only one column family named detail. The creation statement is as follows:
    create 'order', {NAME => 'detail'}
    +
  4. Run the following command in the HBase shell to insert a data record:
    put 'order', '202103241000000001', 'detail:order_channel','webShop'
    +put 'order', '202103241000000001', 'detail:order_time','2021-03-24 10:00:00'
    +put 'order', '202103241000000001', 'detail:pay_amount','100.00'
    +put 'order', '202103241000000001', 'detail:real_pay','100.00'
    +put 'order', '202103241000000001', 'detail:pay_time','2021-03-24 10:02:03'
    +put 'order', '202103241000000001', 'detail:user_id','0001'
    +put 'order', '202103241000000001', 'detail:user_name','Alice'
    +put 'order', '202103241000000001', 'detail:area_id','330106'
    +
  5. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses the HBase data source and the Print result table.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    create table hbaseSource (
    +  order_id string,-- Indicates the unique rowkey.
    +  detail Row( -- Indicates the column family.
    +    order_channel string,
    +    order_time string,
    +    pay_amount string,
    +    real_pay string,
    +    pay_time string,
    +    user_id string,
    +    user_name string,
    +    area_id string),
    +  primary key (order_id) not enforced
    +) with (
    +  'connector' = 'hbase-2.2',
    +   'table-name' = 'order',
    +   'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort'
    +) ;
    +
    +create table printSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount string,
    +  real_pay string,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) with (
    + 'connector' = 'print'
    +);
    +
    +insert into printSink select order_id, detail.order_channel,detail.order_time,detail.pay_amount,detail.real_pay,
    +detail.pay_time,detail.user_id,detail.user_name,detail.area_id from hbaseSource;
    +
    +
  6. Perform the following operations to view the data result in the taskmanager.out file:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(202103241000000001,webShop,2021-03-24 10:00:00,100.00,100.00,2021-03-24 10:02:03,0001,Alice,330106)
    +
+
+

FAQ

  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    java.lang.IllegalArgumentException: offset (0) + length (8) exceed the capacity of the array: 6
    +

    A: If data in the HBase table is imported in other modes, the data is represented in the string format. Therefore, this error is reported when other data formats are used. Change the type of the non-string fields in the HBase source table created by Flink to the string format.

    +
+
+
  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    org.apache.zookeeper.ClientCnxn$SessionTimeoutException: Client session timed out, have not heard from server in 90069ms for connection id 0x0
    +

    A: The datasource connection is not bound, the binding fails, or the security group of the HBase cluster is not configured to allow access from the network segment of the DLI queue. Configure the datasource connection or configure the security group of the HBase cluster to allow access from the DLI queue.

    +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0385.html b/docs/dli/sqlreference/dli_08_0385.html new file mode 100644 index 00000000..43827ded --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0385.html @@ -0,0 +1,403 @@ + + +

JDBC Source Table

+

Function

The JDBC connector is Flink's built-in connector for reading data from a database.

+
+

Prerequisites

  • An enhanced datasource connection has been created between DLI and the database instances, so that you can configure security group rules as required.
+
+

Precautions

When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.

+
+

Syntax

create table jdbcSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+) with (
+  'connector' = 'jdbc',
+  'url' = '',
+  'table-name' = '',
+  'username' = '',
+  'password' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to jdbc.

+

url

+

Yes

+

None

+

String

+

Database URL.

+

table-name

+

Yes

+

None

+

String

+

Name of the table in the database to read data from.

+

driver

+

No

+

None

+

String

+

Driver required for connecting to the database. If you do not set this parameter, it will be automatically derived from the URL.

+

username

+

No

+

None

+

String

+

Database authentication username. This parameter must be configured in pair with password.

+

password

+

No

+

None

+

String

+

Database authentication password. This parameter must be configured in pair with username.

+

scan.partition.column

+

No

+

None

+

String

+

Name of the column used to partition the input. For details, see Partitioned Scan.

+

scan.partition.num

+

No

+

None

+

Integer

+

Number of partitions to be created. For details, see Partitioned Scan.

+

scan.partition.lower-bound

+

No

+

None

+

Integer

+

Lower bound of values to be fetched for the first partition. For details, see Partitioned Scan.

+

scan.partition.upper-bound

+

No

+

None

+

Integer

+

Upper bound of values to be fetched for the last partition. For details, see Partitioned Scan.

+

scan.fetch-size

+

No

+

0

+

Integer

+

Number of rows fetched from the database each time. If this parameter is set to 0, the SQL hint is ignored.

+

scan.auto-commit

+

No

+

true

+

Boolean

+

Whether each statement is committed in a transaction automatically.

+
+
+
+

Partitioned Scan

To accelerate data reading across parallel Source task instances, Flink provides the partitioned scan feature for JDBC tables. The following parameters describe how to partition the table when it is read in parallel by multiple tasks; a minimal sketch follows the notes below.

+
  • scan.partition.column: name of the column used to partition the input. The data type of the column must be number, date, or timestamp.
  • scan.partition.num: number of partitions.
  • scan.partition.lower-bound: minimum value of the first partition.
  • scan.partition.upper-bound: maximum value of the last partition.
+
  • When a table is created, the preceding partitioned scan parameters must all be specified if any of them is specified.
  • The scan.partition.lower-bound and scan.partition.upper-bound parameters are used to decide the partition stride instead of filtering rows in the table. All rows in the table are partitioned and returned.
+
+
+
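A minimal sketch of a partitioned scan under assumed names (a hypothetical MySQL table orders in database flink that has a numeric id column; the address and credentials are placeholders). All four scan.partition.* options are specified together, as required:

CREATE TABLE jdbcSource (
  id int,
  order_id string
) WITH (
  'connector' = 'jdbc',
  'url' = 'jdbc:mysql://MySQLAddress:MySQLPort/flink',
  'table-name' = 'orders',
  'username' = 'MySQLUsername',
  'password' = 'MySQLPassword',
  'scan.partition.column' = 'id',      -- numeric, date, or timestamp column
  'scan.partition.num' = '4',          -- read with four partitions
  'scan.partition.lower-bound' = '1',  -- decides the partition stride, does not filter rows
  'scan.partition.upper-bound' = '1000'
);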

Data Type Mapping

+
Table 2 Data type mapping

MySQL Type

+

PostgreSQL Type

+

Flink SQL Type

+

TINYINT

+

-

+

TINYINT

+

SMALLINT

+

TINYINT UNSIGNED

+

SMALLINT

+

INT2

+

SMALLSERIAL

+

SERIAL2

+

SMALLINT

+

INT

+

MEDIUMINT

+

SMALLINT UNSIGNED

+

INTEGER

+

SERIAL

+

INT

+

BIGINT

+

INT UNSIGNED

+

BIGINT

+

BIGSERIAL

+

BIGINT

+

BIGINT UNSIGNED

+

-

+

DECIMAL(20, 0)

+

BIGINT

+

BIGINT

+

BIGINT

+

FLOAT

+

REAL

+

FLOAT4

+

FLOAT

+

DOUBLE

+

DOUBLE PRECISION

+

FLOAT8

+

DOUBLE PRECISION

+

DOUBLE

+

NUMERIC(p, s)

+

DECIMAL(p, s)

+

NUMERIC(p, s)

+

DECIMAL(p, s)

+

DECIMAL(p, s)

+

BOOLEAN

+

TINYINT(1)

+

BOOLEAN

+

BOOLEAN

+

DATE

+

DATE

+

DATE

+

TIME [(p)]

+

TIME [(p)] [WITHOUT TIMEZONE]

+

TIME [(p)] [WITHOUT TIMEZONE]

+

DATETIME [(p)]

+

TIMESTAMP [(p)] [WITHOUT TIMEZONE]

+

TIMESTAMP [(p)] [WITHOUT TIMEZONE]

+

CHAR(n)

+

VARCHAR(n)

+

TEXT

+

CHAR(n)

+

CHARACTER(n)

+

VARCHAR(n)

+

CHARACTER VARYING(n)

+

TEXT

+

STRING

+

BINARY

+

VARBINARY

+

BLOB

+

BYTEA

+

BYTES

+

-

+

ARRAY

+

ARRAY

+
+
+
+

Example

This example uses JDBC as the data source and Print as the sink to read data from the RDS MySQL database and write the data to the Print result table.

+
  1. Create an enhanced datasource connection in the VPC and subnet where RDS MySQL is located, and bind the connection to the required Flink elastic resource pool.
  2. Set RDS MySQL security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the RDS address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Log in to the RDS MySQL database, create table orders in the Flink database, and insert data.

    Create table orders in the Flink database.

    +
    CREATE TABLE `flink`.`orders` (
    +	`order_id` VARCHAR(32) NOT NULL,
    +	`order_channel` VARCHAR(32) NULL,
    +	`order_time` VARCHAR(32) NULL,
    +	`pay_amount` DOUBLE UNSIGNED NOT NULL,
    +	`real_pay` DOUBLE UNSIGNED NULL,
    +	`pay_time` VARCHAR(32) NULL,
    +	`user_id` VARCHAR(32) NULL,
    +	`user_name` VARCHAR(32) NULL,
    +	`area_id` VARCHAR(32) NULL,
    +	PRIMARY KEY (`order_id`)
    +)	ENGINE = InnoDB
    +	DEFAULT CHARACTER SET = utf8mb4
    +	COLLATE = utf8mb4_general_ci;
    +
    Insert data into the table.
    insert into orders(
    +  order_id,
    +  order_channel,
    +  order_time,
    +  pay_amount,
    +  real_pay,
    +  pay_time,
    +  user_id,
    +  user_name,
    +  area_id) values
    +  ('202103241000000001', 'webShop', '2021-03-24 10:00:00', '100.00', '100.00', '2021-03-24 10:02:03', '0001', 'Alice', '330106'),  
    +  ('202103251202020001', 'miniAppShop', '2021-03-25 12:02:02', '60.00', '60.00', '2021-03-25 12:03:00', '0002', 'Bob', '330110');
    +
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE jdbcSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'jdbc',
    +  'url' = 'jdbc:mysql://MySQLAddress:MySQLPort/flink',--flink is the database name created in RDS MySQL.
    +  'table-name' = 'orders',
    +  'username' = 'MySQLUsername',
    +  'password' = 'MySQLPassword'
    +);
    +
    +CREATE TABLE printSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'print'
    +);
    +
    +insert into printSink select * from jdbcSource;
    +
    +
  5. Perform the following operations to view the data result in the taskmanager.out file:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    ++I(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +
+
+

FAQ

None

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0386.html b/docs/dli/sqlreference/dli_08_0386.html new file mode 100644 index 00000000..987720dd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0386.html @@ -0,0 +1,696 @@ + + +

Kafka Source Table

+

Function

Create a source stream to obtain data from Kafka as input data for jobs.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+
+

Prerequisites

  • You have created a Kafka cluster.
  • An enhanced datasource connection has been created for DLI to connect to Kafka clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • For details about how to use data types when creating tables, see Format.
  • SASL_SSL cannot be enabled for the interconnected Kafka cluster.
+
+

Syntax

create table kafkaSource(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+  (',' WATERMARK FOR rowtime_column_name AS watermark-strategy_expression)
+)
+with (
+  'connector' = 'kafka',
+  'topic' = '',
+  'properties.bootstrap.servers' = '',
+  'properties.group.id' = '',
+  'scan.startup.mode' = '',
+  'format' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to kafka.

+

topic

+

Yes

+

None

+

String

+

Topic name of the Kafka record.

+

Note:

+
  • Only one of topic and topic-pattern can be specified.
  • If there are multiple topics, separate them with semicolons (;), for example, topic-1;topic-2.
+

topic-pattern

+

No

+

None

+

String

+

Regular expression for a pattern of topic names to read from.

+

Only one of topic and topic-pattern can be specified.

+

For example:

+

'topic.*'

+

'(topic-c|topic-d)'

+

'(topic-a|topic-b|topic-\\d*)'

+

'(topic-a|topic-b|topic-[0-9]*)'

+

properties.bootstrap.servers

+

Yes

+

None

+

String

+

Comma-separated list of Kafka brokers.

+

properties.group.id

+

Yes

+

None

+

String

+

ID of the consumer group for the Kafka source.

+

properties.*

+

No

+

None

+

String

+

This parameter can be used to set and pass through arbitrary Kafka configurations.

+

Note:

+
  • The suffix to properties. must match the configuration key in Apache Kafka.

    For example, you can disable automatic topic creation via 'properties.allow.auto.create.topics' = 'false'.

    +
  • Some configurations are not supported, for example, 'key.deserializer' and 'value.deserializer'.
+

format

+

Yes

+

None

+

String

+

Format used to deserialize and serialize the value part of Kafka messages. Note: Either this parameter or the value.format parameter is required.

+

Refer to Format for more details and format parameters.

+

key.format

+

No

+

None

+

String

+

Format used to deserialize and serialize the key part of Kafka messages.

+

Note:

+
  • If a key format is defined, the key.fields parameter is required as well. Otherwise, the Kafka records will have an empty key.
  • Refer to Format for more details and format parameters.
+

key.fields

+

No

+

[]

+

List<String>

+

Defines the columns in the table as the list of keys. This parameter must be configured in pair with key.format.

+

This parameter is left empty by default. Therefore, no key is defined.

+

The format is like field1;field2.

+

key.fields-prefix

+

No

+

None

+

String

+

Defines a custom prefix for all fields of the key format to avoid name clashes with fields of the value format.

+

value.format

+

Yes

+

None

+

String

+

Format used to deserialize and serialize the value part of Kafka messages.

+

Note:

+
  • Either this parameter or the format parameter is required. If two parameters are configured, a conflict occurs.
  • Refer to Format for more details and format parameters.
+

value.fields-include

+

No

+

ALL

+

Enum

+

Possible values: [ALL, EXCEPT_KEY]

+

Whether to include the key fields when parsing the message body.

+

Possible values are:

+
  • ALL (default): All defined fields are included in the value of Kafka messages.
  • EXCEPT_KEY: All the fields except those defined by key.fields are included in the value of Kafka messages.
+

scan.startup.mode

+

No

+

group-offsets

+

String

+

Start position for Kafka to read data.

+

Possible values are:

+
  • earliest-offset: Data is read from the earliest Kafka offset.
  • latest-offset: Data is read from the latest Kafka offset.
  • group-offsets (default): Data is read based on the consumer group.
  • timestamp: Data is read from a user-supplied timestamp. When setting this option, you also need to specify scan.startup.timestamp-millis in WITH.
  • specific-offsets: Data is read from user-supplied specific offsets for each partition. When setting this option, you also need to specify scan.startup.specific-offsets in WITH.
+

scan.startup.specific-offsets

+

No

+

None

+

String

+

This parameter takes effect only when scan.startup.mode is set to specific-offsets. It specifies the offsets for each partition, for example, partition:0,offset:42;partition:1,offset:300.

+

scan.startup.timestamp-millis

+

No

+

None

+

Long

+

Startup timestamp. This parameter takes effect when scan.startup.mode is set to timestamp.

+

scan.topic-partition-discovery.interval

+

No

+

None

+

Duration

+

Interval for a consumer to periodically discover dynamically created Kafka topics and partitions.

+
+
+
+
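To show how several of the preceding options combine, here is a minimal sketch under assumed values (the topic pattern, broker addresses, group ID, timestamp, and the key field user_key are placeholders): it subscribes to all topics matching a pattern, starts reading from a user-supplied timestamp, and decodes both the key and value parts as JSON.

create table kafkaSource (
  user_key string,   -- populated from the message key (see key.fields)
  order_id string,
  pay_amount double
) with (
  'connector' = 'kafka',
  'topic-pattern' = 'order-topic-[0-9]*',            -- used instead of 'topic'
  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
  'properties.group.id' = 'GroupId',
  'properties.allow.auto.create.topics' = 'false',   -- arbitrary Kafka option passed via properties.*
  'scan.startup.mode' = 'timestamp',
  'scan.startup.timestamp-millis' = '1640592000000', -- required when the mode is timestamp
  'key.format' = 'json',
  'key.fields' = 'user_key',                         -- required once key.format is set
  'value.format' = 'json',
  'value.fields-include' = 'EXCEPT_KEY'              -- key fields are not read from the value
);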

Metadata Column

You can define metadata columns in the source table to obtain the metadata of Kafka messages. For example, if multiple topics are defined in the WITH parameter and the metadata column is defined in the Kafka source table, the data read by Flink is labeled with the topic from which the data is read.

+ +
Table 2 Metadata column

Key

+

Data Type

+

R/W

+

Description

+

topic

+

STRING NOT NULL

+

R

+

Topic name of the Kafka record.

+

partition

+

INT NOT NULL

+

R

+

Partition ID of the Kafka record.

+

headers

+

MAP<STRING, BYTES> NOT NULL

+

R/W

+

Headers of Kafka messages.

+

leader-epoch

+

INT NULL

+

R

+

Leader epoch of the Kafka record.

+

For details, see example 1.

+

offset

+

BIGINT NOT NULL

+

R

+

Offset of the Kafka record.

+

timestamp

+

TIMESTAMP(3) WITH LOCAL TIME ZONE NOT NULL

+

R/W

+

Timestamp of the Kafka record.

+

timestamp-type

+

STRING NOT NULL

+

R

+

Timestamp type of the Kafka record. The options are as follows:

+
  • NoTimestampType: No timestamp is defined in the message.
  • CreateTime: time when the message is generated.
  • LogAppendTime: time when the message is added to the Kafka broker.

    For details, see example 1.

    +
+
+
+
+

Example (SASL_SSL Disabled for the Kafka Cluster)

  • Example 1: Read data from the Kafka metadata column and write it to the Print sink.
    1. Create an enhanced datasource connection in the VPC and subnet where Kafka is located, and bind the connection to the required Flink elastic resource pool.
    2. Set Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
    3. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
      When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
      CREATE TABLE orders (
      +  `topic` String metadata,
      +  `partition` int metadata,
      +  `headers` MAP<STRING, BYTES> metadata,
      +  `leaderEpoch` INT metadata from 'leader-epoch',
      +  `offset` bigint metadata,
      +  `timestamp` TIMESTAMP(3) metadata,
      +  `timestampType` string metadata from 'timestamp-type',
      +  `message` string
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = 'KafkaTopic',
      +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
      +  'properties.group.id' = 'GroupId',
      +  'scan.startup.mode' = 'latest-offset',
      +  "format" = "csv",
      +  "csv.field-delimiter" = "\u0001",
      +  "csv.quote-character" = "''"
      +);
      +
      +CREATE TABLE printSink (
      +  `topic` String,
      +  `partition` int,
      +  `headers` MAP<STRING, BYTES>,
      +  `leaderEpoch` INT,
      +  `offset` bigint,
      +  `timestamp` TIMESTAMP(3),
      +  `timestampType` string,
      +  `message` string -- Indicates that data written by users is read from Kafka.
      +) WITH (
      +  'connector' = 'print'
      +);
      +
      +insert into printSink select * from orders;
      +
      +

      If you need to read the value of each field instead of the entire message, use the following statements:

      +
      CREATE TABLE orders (
      +  `topic` String metadata,
      +  `partition` int metadata,
      +  `headers` MAP<STRING, BYTES> metadata,
      +  `leaderEpoch` INT metadata from 'leader-epoch',
      +  `offset` bigint metadata,
      +  `timestamp` TIMESTAMP(3) metadata,
      +  `timestampType` string metadata from 'timestamp-type',
      +  order_id string,
      +  order_channel string,
      +  order_time string, 
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = '<yourTopic>',
      +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
      +  'properties.group.id' = 'GroupId',
      +  'scan.startup.mode' = 'latest-offset',
      +  "format" = "json"
      +);
      +
      +CREATE TABLE printSink (
      +  `topic` String,
      +  `partition` int,
      +  `headers` MAP<STRING, BYTES>,
      +  `leaderEpoch` INT,
      +  `offset` bigint,
      +  `timestamp` TIMESTAMP(3),
      +  `timestampType` string,
      +  order_id string,
      +  order_channel string,
      +  order_time string, 
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'print'
      +);
      +
      +insert into printSink select * from orders;
      +
    4. Send the following data to the corresponding topics in Kafka:
      {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
      +
      +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
      +
      +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
      +
    5. Perform the following operations to view the output:
      1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
      2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
      3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
      +

      The data result is as follows:

      +
      +I(fz-source-json,0,{},0,243,2021-12-27T09:23:32.253,CreateTime,{"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"})
      ++I(fz-source-json,0,{},0,244,2021-12-27T09:23:39.655,CreateTime,{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"})
      ++I(fz-source-json,0,{},0,245,2021-12-27T09:23:48.405,CreateTime,{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"})
      +
    +

    +
  • Example 2: Use the Kafka source table and Print result table to read JSON data from Kafka and output it to the log file.
    1. Create an enhanced datasource connection in the VPC and subnet where Kafka is located, and bind the connection to the required Flink elastic resource pool.
    2. Set Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
    3. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
      When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
      CREATE TABLE orders (
      +  order_id string,
      +  order_channel string,
      +  order_time timestamp(3),
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = '<yourTopic>',
      +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
      +  'properties.group.id' = 'GroupId',
      +  'scan.startup.mode' = 'latest-offset',
      +  "format" = "json"
      +);
      +
      +CREATE TABLE printSink (
      +  order_id string,
      +  order_channel string,
      +  order_time timestamp(3),
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'print'
      +);
      +
      +insert into printSink select * from orders;
      +
      +
    4. Send the following test data to the corresponding topics in Kafka:
      {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"} 
      +
      +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
      +
      +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
      +
    5. Perform the following operations to view the output:
      1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
      2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
      3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
      +

      The data result is as follows:

      +
      +I(202103241000000001,webShop,2021-03-24T10:00,100.0,100.0,2021-03-2410:02:03,0001,Alice,330106)
      ++I(202103241606060001,appShop,2021-03-24T16:06:06,200.0,180.0,2021-03-2416:10:06,0001,Alice,330106)
      ++I(202103251202020001,miniAppShop,2021-03-25T12:02:02,60.0,60.0,2021-03-2512:03:00,0002,Bob,330110)
      +
    +
+
+

Example (SASL_SSL Enabled for the Kafka Cluster)

  • Example 1: Enable SASL_SSL authentication for the DMS cluster.

    Create a Kafka cluster for DMS, enable SASL_SSL, download the SSL certificate, and upload the downloaded certificate client.jks to an OBS bucket.

    +
    CREATE TABLE ordersSource (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:9093,xx:9093,xx:9093',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.ssl.truststore.location' = 'obs://xx/xx.jks',  -- Location where the user uploads the certificate to
    +  'properties.sasl.mechanism' = 'PLAIN',  --  Value format: SASL_PLAINTEXT
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.sasl.jaas.config' = 'org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xx\" password=\"xx\";', -- Account and password set when the Kafka cluster is created
    +  "format" = "json"
    +);
    + 
    +CREATE TABLE ordersSink (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:9093,xx:9093,xx:9093',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.ssl.truststore.location' = 'obs://xx/xx.jks',
    +  'properties.sasl.mechanism' = 'PLAIN',
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.sasl.jaas.config' = 'org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xx\" password=\"xx\";',
    +  "format" = "json"
    +);
    + 
    +insert into ordersSink select * from ordersSource;
    +
  • Example 2: Enable Kafka SASL_SSL authentication for the MRS cluster.
    • Enable Kerberos authentication for the MRS cluster.
    • Click the Components tab and click Kafka. On the displayed page, click the Service Configuration tab, locate the security.protocol parameter, and set it to SASL_SSL.
    • Log in to the FusionInsight Manager of the MRS cluster and download the user credential. Choose System > Permission > User. Locate the row that contains the target user, choose More > Download Authentication Credential.

      Obtain the truststore.jks file using the authentication credential and store the credential and truststore.jks file in OBS.

      +
    • If "Message stream modified (41)" is displayed, the JDK version may be incorrect. Change the JDK version in the sample code to a version earlier than 8u_242 or delete the renew_lifetime = 0m configuration item from the krb5.conf configuration file.
    • Set the port to the sasl_ssl.port configured in the Kafka service configuration.
    • In the following statements, set security.protocol to SASL_SSL.
    +
    CREATE TABLE ordersSource (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21009,xx:21009',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx', --Username
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.ssl.truststore.location' = 'obs://xx/truststore.jks',
    +  'properties.ssl.truststore.password' = 'xx',  -- Password set for generating truststore.jks
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  "format" = "json"
    +);
    + 
    +CREATE TABLE ordersSink (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21009,xx:21009',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx',
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.ssl.truststore.location' = 'obs://xx/truststore.jks',
    +  'properties.ssl.truststore.password' = 'xx',
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  "format" = "json"
    +);
    + 
    +insert into ordersSink select * from ordersSource;
    +
  • Example 3: Enable Kerberos SASL_PLAINTEXT authentication for the MRS cluster
    • Enable Kerberos authentication for the MRS cluster.
    • Click the Components tab and click Kafka. On the displayed page, click the Service Configuration tab, locate the security.protocol parameter, and set it to SASL_PLAINTEXT.
    • Log in to the FusionInsight Manager of the MRS cluster and download the user credential. Choose System > Permission > User. Locate the row that contains the target user, choose More > Download Authentication Credential. Upload the credential to OBS.
    • If error message "Message stream modified (41)" is displayed, the JDK version may be incorrect. Change the JDK version in the sample code to a version earlier than 8u_242 or delete the renew_lifetime = 0m configuration item from the krb5.conf configuration file.
    • Set the port to the sasl.port configured in the Kafka service configuration.
    • In the following statements, set security.protocol to SASL_PLAINTEXT.
    +
    CREATE TABLE ordersSource (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21007,xx:21007',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx',
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.security.protocol' = 'SASL_PLAINTEXT',
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  "format" = "json"
    +);
    + 
    +CREATE TABLE ordersSink (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21007,xx:21007',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx',
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.security.protocol' = 'SASL_PLAINTEXT',
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  "format" = "json"
    +);
    + 
    +insert into ordersSink select * from ordersSource;
    +
  • Example 4: Use SSL for the MRS cluster
    • Do not enable Kerberos authentication for the MRS cluster.
    • Log in to the FusionInsight Manager of the MRS cluster and download the user credential. Choose System > Permission > User. Locate the row that contains the target user, choose More > Download Authentication Credential.

      Obtain the truststore.jks file using the authentication credential and store the credential and truststore.jks file in OBS.

      +
    • Set the port to the ssl.port configured in the Kafka service configuration.
    • In the following statements, set security.protocol to SSL.
    • Set ssl.mode.enable to true.
      CREATE TABLE ordersSource (
      +  order_id string,
      +  order_channel string,
      +  order_time timestamp(3),
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = 'xx',
      +  'properties.bootstrap.servers' = 'xx:9093,xx:9093,xx:9093',
      +  'properties.group.id' = 'GroupId',
      +  'scan.startup.mode' = 'latest-offset',
      +  'properties.connector.auth.open' = 'true',
      +  'properties.ssl.truststore.location' = 'obs://xx/truststore.jks',
      +  'properties.ssl.truststore.password' = 'xx',  -- Password set for generating truststore.jks
      +  'properties.security.protocol' = 'SSL',
      +  "format" = "json"
      +);
      + 
      +CREATE TABLE ordersSink (
      +  order_id string,
      +  order_channel string,
      +  order_time timestamp(3),
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'print'
      +);
      + 
      +insert into ordersSink select * from ordersSource;
      +
    +
+
+

FAQ

  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    org.apache.kafka.common.errors.TimeoutException: Timeout expired while fetching topic metadata
    +

    A: The datasource connection is not bound, the binding fails, or the security group of the Kafka cluster is not configured to allow access from the network segment of the DLI queue. Configure the datasource connection or configure the security group of the Kafka cluster to allow access from the DLI queue.

    +
  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    Caused by: java.lang.RuntimeException: RealLine:45;Table 'default_catalog.default_database.printSink' declares persistable metadata columns, but the underlying DynamicTableSink doesn't implement the SupportsWritingMetadata interface. If the column should not be persisted, it can be declared with the VIRTUAL keyword.
    +

    A: Metadata columns are defined in the sink table, but the Print connector does not support writing metadata to the sink table. Remove the metadata columns from the sink table, or declare them with the VIRTUAL keyword.

    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0387.html b/docs/dli/sqlreference/dli_08_0387.html new file mode 100644 index 00000000..4cbf58f6 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0387.html @@ -0,0 +1,236 @@ + + +

MySQL CDC Source Table

+

Function

The MySQL CDC source table, that is, the MySQL streaming source table, first reads all historical data in the database and then smoothly switches to reading the Binlog, ensuring data integrity.

+
+

Prerequisites

  • MySQL CDC requires MySQL 5.7 or 8.0.x.
  • An enhanced datasource connection has been created for DLI to connect to the MySQL database, so that you can configure security group rules as required. +
  • Binlog is enabled for MySQL, and binlog_row_image is set to FULL.
  • A MySQL user has been created and granted the SELECT, SHOW DATABASES, REPLICATION SLAVE, and REPLICATION CLIENT permissions.
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • Each client that synchronizes database data has a unique ID, that is, the server ID. You are advised to configure a unique server ID for each MySQL CDC job in the same database.
    Main reasons are as follows:
    • The MySQL server maintains the network connection and Binlog location based on the ID. Therefore, if a large number of clients with the same server ID connect to the MySQL server, the CPU usage of the MySQL server may increase sharply, affecting the stability of online services.
    • If multiple jobs share the same server ID, Binlog locations will be disordered, making data read inaccurate. Therefore, you are advised to configure different server IDs for each MySQL CDC job.
    +
    +
+
  • Watermarks cannot be defined for MySQL CDC source tables. For details about window aggregation, see FAQ.
  • If you connect to a sink source that supports upsert, such as GaussDB(DWS) and MySQL, you need to define the primary key in the statement for creating the sink table. For details, see the printSink table creation statement in Example.
+
+

Syntax

create table mySqlCdcSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'mysql-cdc',   
+  'hostname' = 'mysqlHostname',
+  'username' = 'mysqlUsername',
+  'password' = 'mysqlPassword',
+  'database-name' = 'mysqlDatabaseName',
+  'table-name' = 'mysqlTableName'
+);
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to mysql-cdc.

+

hostname

+

Yes

+

None

+

String

+

IP address or hostname of the MySQL database.

+

username

+

Yes

+

None

+

String

+

Username of the MySQL database.

+

password

+

Yes

+

None

+

String

+

Password of the MySQL database.

+

database-name

+

Yes

+

None

+

String

+

Name of the database to connect.

+

The database name supports regular expressions to read data from multiple databases. For example, flink(.)* indicates all database names starting with flink.

+

table-name

+

Yes

+

None

+

String

+

Name of the table to read data from.

+

The table name supports regular expressions to read data from multiple tables. For example, cdc_order(.)* indicates all table names starting with cdc_order.

+

port

+

No

+

3306

+

Integer

+

Port number of the MySQL database.

+

server-id

+

No

+

A random value from 5400 to 6400

+

String

+

A numeric ID of the database client, which must be globally unique in the MySQL cluster. You are advised to set a unique ID for each job in the same database.

+

By default, a random value ranging from 5400 to 6400 is generated.

+

scan.startup.mode

+

No

+

initial

+

String

+

Startup mode for consuming data.

+
  • initial (default): On the first startup, the connector scans all historical data and then reads the latest Binlog data.
  • latest-offset: On the first startup, the connector reads data directly from the end of the Binlog (the latest Binlog) instead of scanning all historical data; that is, it reads only the latest changes made after the connector starts.
+

server-time-zone

+

No

+

None

+

String

+

Time zone of the session used by the database.

+
+
+
+
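The following minimal sketch combines several of the preceding options (the host, credentials, names, and time zone are placeholders): regular expressions match multiple databases and tables, an explicit server-id keeps the job unique in the MySQL cluster, and latest-offset skips the historical snapshot.

create table mysqlCdcSource (
  order_id string,
  pay_amount double,
  primary key (order_id) not enforced
) with (
  'connector' = 'mysql-cdc',
  'hostname' = 'mysqlHostname',
  'username' = 'mysqlUsername',
  'password' = 'mysqlPassword',
  'database-name' = 'flink(.)*',          -- regular expression: databases starting with flink
  'table-name' = 'cdc_order(.)*',         -- regular expression: tables starting with cdc_order
  'server-id' = '5401',                   -- keep unique per CDC job in the same database
  'scan.startup.mode' = 'latest-offset',  -- read only changes made after startup
  'server-time-zone' = 'UTC'              -- session time zone used by the database (assumed)
);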

Example

In this example, MySQL-CDC is used to read data from RDS for MySQL in real time and write the data to the Print result table. The procedure is as follows (MySQL 5.7.32 is used in this example):

+
  1. Create an enhanced datasource connection in the VPC and subnet where MySQL is located, and bind the connection to the required Flink elastic resource pool.
  2. Set MySQL security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the MySQL address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Create a table named cdc_order in database flink of the MySQL database.
    CREATE TABLE `flink`.`cdc_order` (
    +	`order_id` VARCHAR(32) NOT NULL,
    +	`order_channel` VARCHAR(32) NULL,
    +	`order_time` VARCHAR(32) NULL,
    +	`pay_amount` DOUBLE  NULL,
    +	`real_pay` DOUBLE  NULL,
    +	`pay_time` VARCHAR(32) NULL,
    +	`user_id` VARCHAR(32) NULL,
    +	`user_name` VARCHAR(32) NULL,
    +	`area_id` VARCHAR(32) NULL,
    +	PRIMARY KEY (`order_id`)
    +)	ENGINE = InnoDB
    +	DEFAULT CHARACTER SET = utf8mb4
    +	COLLATE = utf8mb4_general_ci;
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    create table mysqlCdcSource(
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id STRING
    +) with (
    +  'connector' = 'mysql-cdc',
    +  'hostname' = 'mysqlHostname',
    +  'username' = 'mysqlUsername',
    +  'password' = 'mysqlPassword',
    +  'database-name' = 'mysqlDatabaseName',
    +  'table-name' = 'mysqlTableName'
    +);
    +
    +create table printSink(
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id STRING,
    +  primary key(order_id) not enforced
    +) with (
    +  'connector' = 'print'
    +);
    +
    +insert into printSink select * from mysqlCdcSource;
    +
    +
  5. Insert test data in MySQL.
    insert into cdc_order values
    +('202103241000000001','webShop','2021-03-24 10:00:00','100.00','100.00','2021-03-24 10:02:03','0001','Alice','330106'),
    +('202103241606060001','appShop','2021-03-24 16:06:06','200.00','180.00','2021-03-24 16:10:06','0001','Alice','330106');
    +
    +delete from cdc_order  where order_channel = 'webShop';
    +
    +insert into cdc_order values('202103251202020001','miniAppShop','2021-03-25 12:02:02','60.00','60.00','2021-03-25 12:03:00','0002','Bob','330110');
    +
  6. Perform the following operations to view the data result in the taskmanager.out file:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    ++I(202103241606060001,appShop,2021-03-24 16:06:06,200.0,180.0,2021-03-24 16:10:06,0001,Alice,330106)
    +-D(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    ++I(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +
+
+

FAQ

Q: How do I perform window aggregation if the MySQL CDC source table does not support definition of watermarks?

+

A: You can use the non-window aggregation method. That is, convert the time field into a window value, and then use GROUP BY to perform aggregation based on the window value.

+

For example, you can use the following script to collect statistics on the number of orders per minute (order_time indicates the order time, in the string format):

+
insert into printSink select DATE_FORMAT(order_time, 'yyyy-MM-dd HH:mm'), count(*) from mysqlCdcSource group by DATE_FORMAT(order_time, 'yyyy-MM-dd HH:mm');
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0388.html b/docs/dli/sqlreference/dli_08_0388.html
new file mode 100644
index 00000000..2908c89b
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0388.html
@@ -0,0 +1,250 @@

Postgres CDC Source Table

+

Function

The Postgres CDC source table, that is, Postgres streaming source table, is used to read the full snapshot data and changed data of the PostgreSQL database in sequence. The exactly-once processing semantics is used to ensure data accuracy even if a failure occurs.

+
+

Prerequisites

  • The PostgreSQL version must be 9.6, 10, 11, or 12.
  • An enhanced datasource connection with the database has been established, so that you can configure security group rules as required. +
+
+

Precautions

  • When you create a Flink OpenSource SQL job, set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • The PostgreSQL version cannot be earlier than PostgreSQL 11.
  • If operations such as UPDATE will be performed on the Postgres table, run the following statement in PostgreSQL first. Note: Replace test.cdc_order with the actual database and table name.
    ALTER TABLE test.cdc_order REPLICA IDENTITY FULL
    +
  • Before creating the PostgreSQL CDC source table, check whether the current PostgreSQL contains the default plug-in. You can run the following statement in PostgreSQL to query the current plug-ins:
    SELECT name FROM pg_available_extensions;
    +

    If the default plug-in decoderbufs is not available, set the decoding.plugin.name parameter to an existing plug-in in PostgreSQL when creating the PostgreSQL CDC source table (see the sketch after this list).

    +
+
+
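As a sketch of the plug-in precaution above: if decoderbufs is unavailable and, for example, the wal2json plug-in exists in your instance, the WITH clause can name it explicitly. All connection values are placeholders, and the plug-in choice is an assumption to be replaced with one actually returned by the query above:

create table postgresCdcSource (
+  order_id string,
+  primary key (order_id) not enforced
+)
+with (
+  'connector' = 'postgres-cdc',
+  'hostname' = 'PostgresHostname',
+  'username' = 'PostgresUsername',
+  'password' = 'PostgresPassword',
+  'database-name' = 'PostgresDatabaseName',
+  'schema-name' = 'PostgresSchemaName',
+  'table-name' = 'PostgresTableName',
+  'decoding.plugin.name' = 'wal2json'   -- assumed: use a plug-in that exists in your instance
+);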

Syntax

create table postgresCdcSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'postgres-cdc',   
+  'hostname' = 'PostgresHostname',
+  'username' = 'PostgresUsername',
+  'password' = 'PostgresPassword',
+  'database-name' = 'PostgresDatabaseName',
+  'schema-name' = 'PostgresSchemaName',
+  'table-name' = 'PostgresTableName'
+);
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to postgres-cdc.

+

hostname

+

Yes

+

None

+

String

+

IP address or hostname of the Postgres database.

+

username

+

Yes

+

None

+

String

+

Username of the Postgres database.

+

password

+

Yes

+

None

+

String

+

Password of the Postgres database.

+

database-name

+

Yes

+

None

+

String

+

Database name.

+

schema-name

+

Yes

+

None

+

String

+

Postgres schema name.

+

The schema name supports regular expressions to read data from multiple schemas. For example, test(.)* indicates all schema names starting with test.

+

table-name

+

Yes

+

None

+

String

+

Postgres table name.

+

The table name supports regular expressions to read data from multiple tables. For example, cdc_order(.)* indicates all table names starting with cdc_order.

+

port

+

No

+

5432

+

Integer

+

Port number of the Postgres database.

+

decoding.plugin.name

+

No

+

decoderbufs

+

String

+

Determined based on the plug-in that is installed in the PostgreSQL database. The value can be:

+
  • decoderbufs (default)
  • wal2json
  • wal2json_rds
  • wal2json_streaming
  • wal2json_rds_streaming
  • pgoutput
+

debezium.*

+

No

+

None

+

String

+

Fine-grained control over the behavior of Debezium clients, for example, 'debezium.snapshot.mode' = 'never'. For details, see Connector configuration properties.

+

You are advised to set a unique debezium.slot.name for each table to avoid the following error: "PSQLException: ERROR: replication slot "debezium" is active for PID 974" (see the sketch after this table).

+
+
+
+
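The following is a minimal sketch of passing debezium.* options through the WITH clause. The slot name is a placeholder and should be unique for each table and job; the snapshot.mode line simply reuses the example from the description above:

create table postgresCdcSource (
+  order_id string,
+  primary key (order_id) not enforced
+)
+with (
+  'connector' = 'postgres-cdc',
+  'hostname' = 'PostgresHostname',
+  'username' = 'PostgresUsername',
+  'password' = 'PostgresPassword',
+  'database-name' = 'flink',
+  'schema-name' = 'test',
+  'table-name' = 'cdc_order',
+  'debezium.slot.name' = 'flink_cdc_order_slot',  -- assumed name; set a unique slot per job
+  'debezium.snapshot.mode' = 'never'
+);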

Example

In this example, Postgres-CDC is used to read data from RDS for PostgreSQL in real time and write the data to the Print result table. The procedure is as follows (PostgreSQL 11.11 is used in this example):

+
  1. Create an enhanced datasource connection in the VPC and subnet where PostgreSQL is located, and bind the connection to the required Flink elastic resource pool.
  2. Set PostgreSQL security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the PostgreSQL address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. In PostgreSQL, create database flink and schema test.
  4. Create table cdc_order in the schema test of database flink in PostgreSQL.
    create table test.cdc_order(
    +  order_id VARCHAR,
    +  order_channel VARCHAR,
    +  order_time VARCHAR,
    +  pay_amount FLOAT8,
    +  real_pay FLOAT8,
    +  pay_time VARCHAR,
    +  user_id VARCHAR,
    +  user_name VARCHAR,
    +  area_id VARCHAR,
    +  primary key(order_id)
    +);
    +
  5. Run the following SQL statement in PostgreSQL. If you do not run this statement, an error will be reported when the Flink job is executed. For details, see the error message in FAQ.
    ALTER TABLE test.cdc_order REPLICA IDENTITY FULL
    +
  6. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    create table postgresCdcSource(
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id STRING,
    +  primary key (order_id) not enforced
    +) with (
    +  'connector' = 'postgres-cdc',
    +  'hostname' = 'PostgresHostname',
    +  'username' = 'PostgresUsername',
    +  'password' = 'PostgresPassword',
    +  'database-name' = 'flink',
    +  'schema-name' = 'test',
    +  'table-name' = 'cdc_order'
    +);
    +
    +create table printSink(
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id STRING,
    +  primary key(order_id) not enforced
    +) with (
    +  'connector' = 'print'
    +);
    +
    +insert into printSink select * from postgresCdcSource;
    +
    +
  7. Run the following command in PostgreSQL:
    insert into test.cdc_order
    +  (order_id,
    +  order_channel,
    +  order_time,
    +  pay_amount,
    +  real_pay,
    +  pay_time,
    +  user_id,
    +  user_name,
    +  area_id) values
    +  ('202103241000000001', 'webShop', '2021-03-24 10:00:00', '100.00', '100.00', '2021-03-24 10:02:03', '0001', 'Alice', '330106'),
    +  ('202103251202020001', 'miniAppShop', '2021-03-25 12:02:02', '60.00', '60.00', '2021-03-25 12:03:00', '0002', 'Bob', '330110');
    +
    +update test.cdc_order set order_channel = 'webShop' where order_id = '202103251202020001';
    +
    +delete from test.cdc_order where order_id = '202103241000000001';
    +
  8. Perform the following operations to view the data result in the taskmanager.out file:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    ++I(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +-U(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    ++U(202103251202020001,webShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +-D(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    +
+
+

FAQ

  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    org.postgresql.util.PSQLException: ERROR: logical decoding requires wal_level >= logical
    +
  • A: Change the value of wal_level to logical and restart the PostgreSQL database (a sketch follows).

    After modifying the PostgreSQL parameter, restart the RDS PostgreSQL instance for the modification to take effect.

    +
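For the wal_level change above, the following sketch shows the statements for a self-managed PostgreSQL instance; on RDS for PostgreSQL, change the wal_level parameter on the console instead:

SHOW wal_level;
+-- If the result is not 'logical', change it (the change takes effect only after a restart):
+ALTER SYSTEM SET wal_level = logical;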
+
+
  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    java.lang.IllegalStateException: The "before" field of UPDATE/DELETE message is null, please check the Postgres table has been set REPLICA IDENTITY to FULL level. You can update the setting by running the command in Postgres 'ALTER TABLE test.cdc_order REPLICA IDENTITY FULL'. 
    +

    A: If a similar error is reported in the run log, run the ALTER TABLE test.cdc_order REPLICA IDENTITY FULL statement in PostgreSQL.

    +
+
+
diff --git a/docs/dli/sqlreference/dli_08_0389.html b/docs/dli/sqlreference/dli_08_0389.html
new file mode 100644
index 00000000..1edf4d40
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0389.html
@@ -0,0 +1,415 @@

Redis Source Table

+

Function

Create a source stream to obtain data from Redis as input for jobs.

+
+

Prerequisites

An enhanced datasource connection has been created for DLI to connect to the Redis database, so that you can configure security group rules as required.

+ +
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • To obtain the key values, you can set the primary key in Flink. The primary key maps to the Redis key.
  • The primary key cannot be a composite key; it can contain only one field.
  • Constraints on schema-syntax:
    • If schema-syntax is map or array, there can be only one non-primary key field, and it must be of the map or array type, respectively.
    • If schema-syntax is fields-scores, the number of non-primary keys must be an even number, and the second key of every two keys except the primary key must be of the double type. The double value is the score of the previous key. The following is an example:
      CREATE TABLE redisSource (
      +  redisKey string,
      +  order_id string,
      +  score1 double,
      +  order_channel string,
      +  score2 double,
      +  order_time string,
      +  score3 double,
      +  pay_amount double,
      +  score4 double,
      +  real_pay double,
      +  score5 double,
      +  pay_time string,
      +  score6 double,
      +  user_id string,
      +  score7 double,
      +  user_name string,
      +  score8 double,
      +  area_id string,
      +  score9 double,
      +  primary key (redisKey) not enforced
      +) WITH (
      +  'connector' = 'redis',
      +  'host' = 'RedisIP',
      +  'password' = 'RedisPassword',
      +  'data-type' = 'sorted-set',
      +  'deploy-mode' = 'master-replica',
      +  'schema-syntax' = 'fields-scores'
      +);
      +
    +
  • Restrictions on data-type:
    • If data-type is set, all non-primary key fields defined in Flink must be of the same type.
    • If data-type is sorted-set and schema-syntax is fields or array, only sorted-set values can be read from Redis, and the score value cannot be read.
    • If data-type is string, only one non-primary key field is allowed (see the sketch after this list).
    • If data-type is sorted-set and schema-syntax is map, only one non-primary key field is allowed besides the primary key field.

      This non-primary key field must be of the map type. The map value of the field must be of the double type, indicating the score. The map key of the field indicates the value in the Redis set.

      +
    • If data-type is sorted-set and schema-syntax is array-scores, only two non-primary keys are allowed and must be of the array type.
      The first field holds the values in the Redis sorted set. The second field is of the array<double> type and holds the scores corresponding to those values. The following is an example:
      CREATE TABLE redisSink (
      +  order_id string,
      +  arrayField Array<String>,
      +  arrayScore array<double>,
      +  primary key (order_id) not enforced
      +) WITH (
      +  'connector' = 'redis',
      +  'host' = 'RedisIP',
      +  'password' = 'RedisPassword',
      +  'data-type' = 'sorted-set',
      +  "default-score" = '3',
      +  'deploy-mode' = 'master-replica',
      +  'schema-syntax' = 'array-scores'
      +);
      +
      +
    +
+
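As a sketch of the string constraint above: one primary key field maps to the Redis key, and exactly one non-primary key field holds the string value (connection values are placeholders):

CREATE TABLE redisStringSource (
+  redisKey string,
+  str_value string,                     -- the single non-primary key field
+  primary key (redisKey) not enforced   -- maps to the Redis key
+) WITH (
+  'connector' = 'redis',
+  'host' = 'RedisIP',
+  'password' = 'RedisPassword',
+  'data-type' = 'string',
+  'deploy-mode' = 'master-replica'
+);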
+

Syntax

create table redisSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+  ,PRIMARY KEY (attr_name, ...) NOT ENFORCED
+)
+with (
+  'connector' = 'redis',
+  'host' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to redis.

+

host

+

Yes

+

None

+

String

+

Redis connector address.

+

port

+

No

+

6379

+

Integer

+

Redis connector port.

+

password

+

No

+

None

+

String

+

Redis authentication password.

+

namespace

+

No

+

None

+

String

+

Redis key namespace.

+

delimiter

+

No

+

:

+

String

+

Delimiter between the Redis key and namespace.

+

data-type

+

No

+

hash

+

String

+

Redis data type. Available values are as follows:

+
  • hash
  • list
  • set
  • sorted-set
  • string
+

For details about the constraints, see Constraints on data-type.

+

schema-syntax

+

No

+

fields

+

String

+

Redis schema semantics. Available values are as follows (for details, see Precautions and FAQ):

+
  • fields: applicable to all data types
  • fields-scores: applicable to sorted-set data
  • array: applicable to list, set, and sorted-set data
  • array-scores: applicable to sorted-set data
  • map: applicable to hash and sorted-set data
+

For details about the constraints, see Constraints on schema-syntax.

+

deploy-mode

+

No

+

standalone

+

String

+

Deployment mode of the Redis cluster. The value can be standalone, master-replica, or cluster. The default value is standalone.

+

retry-count

+

No

+

5

+

Integer

+

Number of attempts to connect to the Redis cluster.

+

connection-timeout-millis

+

No

+

10000

+

Integer

+

Maximum timeout, in milliseconds, for connecting to the Redis cluster.

+

commands-timeout-millis

+

No

+

2000

+

Integer

+

Maximum time, in milliseconds, to wait for a command response.

+

rebalancing-timeout-millis

+

No

+

15000

+

Integer

+

Sleep time, in milliseconds, before retrying when the Redis cluster fails.

+

scan-keys-count

+

No

+

1000

+

Integer

+

Number of data records read in each scan.

+

default-score

+

No

+

0

+

Double

+

Default score when data-type is sorted-set.

+

deserialize-error-policy

+

No

+

fail-job

+

Enum

+

Policy of how to process a data parsing failure. Available values are as follows:

+
  • fail-job: Fail the job.
  • skip-row: Skip the current data.
  • null-field: Set the current data to null.
+

skip-null-values

+

No

+

true

+

Boolean

+

Whether null values will be skipped.

+
+
+
+

Example

In this example, data is read from the DCS Redis data source and written to the Print result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where Redis is located, and bind the connection to the required Flink elastic resource pool.
  2. Set Redis security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Redis address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Run the following commands on the Redis client to insert data into different keys and store the data in hash format:
    HMSET redisSource order_id 202103241000000001 order_channel webShop order_time "2021-03-24 10:00:00" pay_amount 100.00 real_pay 100.00 pay_time "2021-03-24 10:02:03" user_id 0001 user_name Alice area_id 330106
    +
    +HMSET redisSource1 order_id 202103241606060001 order_channel appShop order_time "2021-03-24 16:06:06" pay_amount 200.00 real_pay 180.00 pay_time "2021-03-24 16:10:06" user_id 0001 user_name Alice area_id 330106
    +
    +HMSET redisSource2 order_id 202103251202020001 order_channel miniAppShop order_time "2021-03-25 12:02:02" pay_amount 60.00 real_pay 60.00 pay_time "2021-03-25 12:03:00" user_id 0002 user_name Bob area_id 330110
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script to read data in hash format from Redis.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE redisSource (
    +  redisKey string,
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  primary key (redisKey) not enforced  --Obtains the key value from Redis.
    +) WITH (
    +  'connector' = 'redis',
    +  'host' = 'RedisIP',
    +  'password' = 'RedisPassword',
    +  'data-type' = 'hash',
    +  'deploy-mode' = 'master-replica'
    +);
    +
    +CREATE TABLE printSink (
    +  redisKey string,
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'print'
    +);
    +
    +insert into printSink select * from redisSource;
    +
    +
  5. Perform the following operations to view the data result in the taskmanager.out file:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(redisSource1,202103241606060001,appShop,2021-03-24 16:06:06,200.0,180.0,2021-03-24 16:10:06,0001,Alice,330106)
    ++I(redisSource,202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
    ++I(redisSource2,202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +
+
+

FAQ

  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    Caused by: org.apache.flink.client.program.ProgramInvocationException: The main method caused an error: RealLine:36;Usage of 'set' data-type and 'fields' schema syntax in source Redis connector with multiple non-key column types. As 'set' in Redis is not sorted, it's not possible to map 'set's values to table schema with different types.
    +

    A: This error is reported because data-type is set while the non-primary key fields defined in Flink have different data types. If data-type is set, all non-primary key fields defined in Flink must be of the same type.

    +
  • Q: If data-type is hash, what are the differences between schema-syntax set to fields and that to map?

    A: When schema-syntax is set to fields, the hash value in the Redis key is assigned to the field with the same name in Flink. When schema-syntax is set to map, the hash key and hash value of each hash in Redis are put into a map, which represents the value of the corresponding Flink field. Specifically, this map contains all hash keys and hash values of a key in Redis.

    +
    • For fields:
      1. Insert the following data into Redis:
        HMSET redisSource order_id 202103241000000001 order_channel webShop order_time "2021-03-24 10:00:00" pay_amount 100.00 real_pay 100.00 pay_time "2021-03-24 10:02:03" user_id 0001 user_name Alice area_id 330106
        +
      2. When schema-syntax is set to fields, use the following job script:
        CREATE TABLE redisSource (
        +  redisKey string,
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string,
        +  primary key (redisKey) not enforced
        +) WITH (
        +  'connector' = 'redis',
        +  'host' = 'RedisIP',
        +  'password' = 'RedisPassword',
        +  'data-type' = 'hash',
        +  'deploy-mode' = 'master-replica'
        +);
        +
        +CREATE TABLE printSink (
        +  redisKey string,
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string
        +) WITH (
        +  'connector' = 'print'
        +);
        +
        +insert into printSink select * from redisSource;
        +
      3. The job execution result is as follows:
        +I(redisSource,202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
        +
      +
    • For map:
      1. Insert the following data into Redis:
        HMSET redisSource order_id 202103241000000001 order_channel webShop order_time "2021-03-24 10:00:00" pay_amount 100.00 real_pay 100.00 pay_time "2021-03-24 10:02:03" user_id 0001 user_name Alice area_id 330106
        +
      2. When schema-syntax is set to map, use the following job script:
        CREATE TABLE redisSource (
        +  redisKey string,
        +  order_result map<string, string>,
        +  primary key (redisKey) not enforced
        +) WITH (
        +  'connector' = 'redis',
        +  'host' = 'RedisIP',
        +  'password' = 'RedisPassword',
        +  'data-type' = 'hash',
        +  'deploy-mode' = 'master-replica',
        +  'schema-syntax' = 'map'
        +);
        +
        +CREATE TABLE printSink (
        +  redisKey string,
        +  order_result map<string, string>
        +) WITH (
        +  'connector' = 'print'
        +);
        +
        +insert into printSink select * from redisSource;
        +
      3. The job execution result is as follows:
        +I(redisSource,{user_id=0001, user_name=Alice, pay_amount=100.00, real_pay=100.00, order_time=2021-03-24 10:00:00, area_id=330106, order_id=202103241000000001, order_channel=webShop, pay_time=2021-03-24 10:02:03})
        +
      +
    +
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0390.html b/docs/dli/sqlreference/dli_08_0390.html
new file mode 100644
index 00000000..7886b6b4
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0390.html
@@ -0,0 +1,213 @@

Upsert Kafka Source Table

+

Function

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+

As a source, the upsert-kafka connector produces a changelog stream, where each data record represents an update or delete event. More precisely, the value in a data record is interpreted as an UPDATE of the last value for the same key, if any (if a corresponding key does not exist yet, the UPDATE will be considered an INSERT). Using the table analogy, a data record in a changelog stream is interpreted as an UPSERT, also known as INSERT/UPDATE, because any existing row with the same key is overwritten. Also, null values are interpreted in a special way: A record with a null value represents a DELETE.

+
+

Prerequisites

An enhanced datasource connection has been created for DLI to connect to Kafka clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • The upsert-kafka connector always works in upsert fashion and requires the primary key to be defined in the DDL. Assuming that records with the same key are ordered within the same partition, the primary key semantics on the changelog source mean that the materialized changelog is unique on the primary key. The primary key definition also controls which fields end up in Kafka's key.
  • Because the connector is working in upsert mode, the last record on the same key will take effect when reading back as a source.
  • For details about how to use data types, see section Format.
+
+

Syntax

create table kafkaSource(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'upsert-kafka',
+  'topic' = '',
+  'properties.bootstrap.servers' = '',
+  'key.format' = '',
+  'value.format' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to upsert-kafka.

+

topic

+

Yes

+

None

+

String

+

Kafka topic name.

+

properties.bootstrap.servers

+

Yes

+

None

+

String

+

Comma-separated list of Kafka brokers.

+

key.format

+

Yes

+

None

+

String

+

Format used to deserialize and serialize the key part of Kafka messages. The key fields are specified by the PRIMARY KEY syntax. The following formats are supported:

+
  • csv
  • json
  • avro
+

Refer to Format for more details and format parameters.

+

key.fields-prefix

+

No

+

None

+

String

+

Defines a custom prefix for all fields of the key format to avoid name clashes with fields of the value format.

+

By default, the prefix is empty. If a custom prefix is defined, both the table schema and key.fields work with prefixed names. When constructing the data type of the key format, the prefix is removed and the non-prefixed names are used within the key format. Note that this option requires value.fields-include to be set to EXCEPT_KEY (see the sketch after this table).

+

value.format

+

Yes

+

None

+

String

+

Format used to deserialize and serialize the value part of Kafka messages. The following formats are supported:

+
  • csv
  • json
  • avro
+

Refer to Format for more details and format parameters.

+

value.fields-include

+

Yes

+

ALL

+

String

+

Controls which fields should appear in the value part. Possible values are:

+
  • ALL: All fields in the schema, including the primary key field, are included in the value part.
  • EXCEPT_KEY: All the fields of the table schema are included, except the primary key field.
+

properties.*

+

No

+

None

+

String

+

Use this option to set and pass arbitrary Kafka configurations.

+

The suffix after properties. must match a parameter defined in the Kafka configuration documentation. Flink removes the properties. prefix and passes the transformed key and value to the underlying KafkaClient.

+

For example, you can disable automatic topic creation via 'properties.allow.auto.create.topics' = 'false'.

+

However, some configurations cannot be set this way because Flink overrides them, for example, 'key.deserializer' and 'value.deserializer'.

+
+
+
+
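The following sketch illustrates key.fields-prefix together with the required value.fields-include = EXCEPT_KEY. The k_ prefix and connection values are assumptions chosen for illustration:

CREATE TABLE upsertKafkaSource (
+  k_order_id string,    -- physical column: prefix 'k_' + key-format field 'order_id'
+  order_channel string,
+  pay_amount double,
+  PRIMARY KEY (k_order_id) NOT ENFORCED
+) WITH (
+  'connector' = 'upsert-kafka',
+  'topic' = 'KafkaTopic',
+  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
+  'key.format' = 'json',
+  'key.fields-prefix' = 'k_',               -- assumed prefix to avoid name clashes
+  'value.format' = 'json',
+  'value.fields-include' = 'EXCEPT_KEY'     -- required when key.fields-prefix is set
+);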

Example

In this example, data is read from the Kafka data source and written to the Print result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where Kafka is located, and bind the connection to the required Flink elastic resource pool.
  2. Set Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE upsertKafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,  
    +  area_id string,
    +  PRIMARY KEY (order_id) NOT ENFORCED
    +) WITH (
    +  'connector' = 'upsert-kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' =  'KafkaAddress1:KafkaPort,KafkAddress2:KafkaPort',
    +  'key.format' = 'csv',
    +  'value.format' = 'json'
    +);
    +
    +CREATE TABLE printSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,  
    +  area_id string,
    +  PRIMARY KEY (order_id) NOT ENFORCED
    +) WITH (
    +  'connector' = 'print'
    +);
    +
    +INSERT INTO printSink
    +SELECT * FROM upsertKafkaSource;
    +
    +
  4. Insert the following data to the specified topics in Kafka. (Note: Specify the key when inserting data to Kafka.)
    {"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
    +{"order_id":"202103251505050001", "order_channel":"qqShop", "order_time":"2021-03-25 15:05:05", "pay_amount":"500.00", "real_pay":"400.00", "pay_time":"2021-03-25 15:10:00", "user_id":"0003", "user_name":"Cindy", "area_id":"330108"}
    +
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
  5. Perform the following operations to view the output:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +

    The data result is as follows:

    +
    +I(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    ++I(202103251505050001,qqShop,2021-03-25 15:05:05,500.0,400.0,2021-03-25 15:10:00,0003,Cindy,330108)
    +-U(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    ++U(202103251202020001,miniAppShop,2021-03-25 12:02:02,60.0,60.0,2021-03-25 12:03:00,0002,Bob,330110)
    +
+
+

FAQ

None

+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0391.html b/docs/dli/sqlreference/dli_08_0391.html
new file mode 100644
index 00000000..6a57a488
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0391.html
@@ -0,0 +1,33 @@

Creating Result Tables

+
+
diff --git a/docs/dli/sqlreference/dli_08_0392.html b/docs/dli/sqlreference/dli_08_0392.html
new file mode 100644
index 00000000..8f307c6b
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0392.html
@@ -0,0 +1,69 @@

BlackHole Result Table

+

Function

The BlackHole connector allows for swallowing all input records. It is designed for high-performance testing and UDF output. It is not a substantive sink. The BlackHole result table is a built-in connector.

+

For example, if an error is reported when you register a result table of another type, but you are not sure whether the cause is a system fault or an invalid WITH parameter setting, change the value of connector to blackhole and click Run. If no error is reported, the system is working properly and you should check the WITH parameter settings of the original result table; a sketch follows.

+
+
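A minimal sketch of that substitution: keep the schema of the failing result table and swap only the connector (the commented-out original is a placeholder, not any specific connector's full option set):

-- Original result table that fails to register, for example:
+-- create table mySink (user_id string) with ('connector' = 'jdbc', ...);
+
+-- Swap the connector to blackhole to isolate the fault:
+create table mySink (
+  user_id string
+) with (
+  'connector' = 'blackhole'
+);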

Prerequisites

None

+
+

Precautions

When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.

+
+

Syntax

create table blackhole_table (
+ attr_name attr_type (',' attr_name attr_type) *
+) with (
+ 'connector' = 'blackhole'
+)
+
+

Parameters

+
Table 1

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to blackhole.

+
+
+
+

Example

The DataGen source table generates data, and the BlackHole result table receives the data.

+
create table datagenSource (
+ user_id string,
+ user_name string,
+ user_age int
+) with (
+ 'connector' = 'datagen',
+ 'rows-per-second'='1'
+);
+create table blackholeSink (
+ user_id string,
+ user_name string,
+ user_age int
+) with (
+ 'connector' = 'blackhole'
+);
+insert into blackholeSink select * from datagenSource;
+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0393.html b/docs/dli/sqlreference/dli_08_0393.html
new file mode 100644
index 00000000..3abdf736
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0393.html
@@ -0,0 +1,223 @@

ClickHouse Result Table

+

Function

DLI can output Flink job data to the ClickHouse database. ClickHouse is a column-based database oriented to online analysis and processing. It supports SQL query and provides good query performance. The aggregation analysis and query performance based on large and wide tables is excellent, which is one order of magnitude faster than other analytical databases.

+
+

Prerequisites

  • Your jobs are running on a dedicated queue (non-shared queue) of DLI.
  • You have established an enhanced datasource connection to ClickHouse and set the port in the security group rule of the ClickHouse cluster as needed.
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • When you create a ClickHouse cluster for MRS, set the cluster version to MRS 3.1.0 or later and do not enable Kerberos authentication.
  • The ClickHouse result table does not support table data deletion.
  • Flink supports the following data types: string, tinyint, smallint, int, long, float, double, date, timestamp, decimal, and array.

    The array supports only the int, bigint, string, float, and double data types.

    +
+
+

Syntax

create table clickhouseSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector.type' = 'clickhouse',
+  'connector.url' = '',
+  'connector.table' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector.type

+

Yes

+

None

+

String

+

Result table type. Set this parameter to clickhouse.

+

connector.url

+

Yes

+

None

+

String

+

ClickHouse URL.

+

Parameter format: jdbc:clickhouse://ClickHouseBalancer instance IP address:HTTP port number for ClickHouseBalancer instances/Database name

+
  • IP address of a ClickHouseBalancer instance:

    Log in to the MRS console and choose Clusters > Active Clusters in the navigation pane. Click a cluster name, and choose Components > ClickHouse > Instances to obtain the business IP address of the ClickHouseBalancer instance.

    +
  • HTTP port of a ClickHouseBalancer instance:

    Log in to the MRS console and choose Clusters > Active Clusters in the navigation pane. Click a cluster name, and choose Components > ClickHouse > Service Configuration. On the Service Configuration page, select ClickHouseBalancer from the All Roles drop-down list, search for lb_http_port, and obtain the parameter value. The default value is 21425.

    +
  • The database name is the name of the database created for the ClickHouse cluster.
+

connector.table

+

Yes

+

None

+

String

+

Name of the ClickHouse table to be created.

+

connector.driver

+

No

+

ru.yandex.clickhouse.ClickHouseDriver

+

String

+

Driver required for connecting to the database.

+
  • If this parameter is not specified during table creation, the driver automatically extracts the value from the ClickHouse URL.
  • If this parameter is specified during table creation, the value must be ru.yandex.clickhouse.ClickHouseDriver.
+

connector.username

+

No

+

None

+

String

+

Username for connecting to the ClickHouse database.

+

connector.password

+

No

+

None

+

String

+

Password for connecting to the ClickHouse database.

+

connector.write.flush.max-rows

+

No

+

5000

+

Integer

+

Maximum number of rows written in a single flush. The default value is 5000.

+

connector.write.flush.interval

+

No

+

0

+

Duration

+

Interval at which buffered data is flushed. The supported units are ms, milli, and millisecond; s, sec, and second; min and minute.

+

The value 0 disables periodic flushing. (See the sketch after this table.)

+

connector.write.max-retries

+

No

+

3

+

Integer

+

Maximum number of retries for writing data to the result table. The default value is 3.

+
+
+
+
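A sketch combining the write-tuning parameters above; the URL, table, and flush values are illustrative placeholders rather than recommended settings:

create table clickhouseSink (
+  order_id string,
+  pay_amount double
+)
+with (
+  'connector.type' = 'clickhouse',
+  'connector.url' = 'jdbc:clickhouse://ClickhouseAddress:ClickhousePort/flink',
+  'connector.table' = 'order',
+  'connector.write.flush.max-rows' = '1000',   -- assumed value: flush after 1000 buffered rows
+  'connector.write.flush.interval' = '1s',     -- assumed value: also flush every second
+  'connector.write.max-retries' = '3'
+);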

Example

In this example, data is from Kafka and inserted to table order in ClickHouse database flink. The procedure is as follows (the ClickHouse version is 21.3.4.25 in MRS):

+
  1. Create an enhanced datasource connection in the VPC and subnet where the ClickHouse and Kafka clusters are located, and bind the connection to the required Flink queue.
  2. Set ClickHouse and Kafka cluster security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the ClickHouse address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Use the ClickHouse client to connect to the ClickHouse server and run the following command to query other environment parameters such as the cluster ID:
    select cluster,shard_num,replica_num,host_name from system.clusters;
    +
    The following information is displayed:
    ┌─cluster─────────┬─shard_num─┬─replica_num─┐
    +│ default_cluster │         1 │           1 │
    +│ default_cluster │         1 │           2 │
    +└─────────────────┴───────────┴─────────────┘
    +
    +
  4. Run the following command to create database flink on a node of the ClickHouse cluster based on the obtained cluster ID, for example, default_cluster:
    CREATE DATABASE flink ON CLUSTER default_cluster;
    +
  5. Run the following command to create the ReplicatedMergeTree table named order on the node of cluster default_cluster and on database flink:
    CREATE TABLE flink.order ON CLUSTER default_cluster (
    +  order_id String,
    +  order_channel String,
    +  order_time String,
    +  pay_amount Float64,
    +  real_pay Float64,
    +  pay_time String,
    +  user_id String,
    +  user_name String,
    +  area_id String
    +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/flink/order', '{replica}')
    +ORDER BY order_id;
    +
  6. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses the Kafka data source and the ClickHouse result table.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +create table clickhouseSink(
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) with (
    +  'connector.type' = 'clickhouse',
    +  'connector.url' = 'jdbc:clickhouse://ClickhouseAddress:ClickhousePort/flink',
    +  'connector.table' = 'order',
    +  'connector.write.flush.max-rows' = '1'
    +);
    +
    +insert into clickhouseSink select * from orders;
    +
    +
  7. Connect to the Kafka cluster and insert the following test data into Kafka:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
  8. Use the ClickHouse client to connect to the ClickHouse and run the following command to query the data written to table order in database flink:
    select * from flink.order;
    +
    The query result is as follows:
    202103241000000001 webShop 2021-03-24 10:00:00 100 100 2021-03-24 10:02:03 0001 Alice 330106
    +
    +202103241606060001 appShop 2021-03-24 16:06:06 200 180 2021-03-24 16:10:06 0001 Alice 330106 
    +
    +202103251202020001 miniAppShop 2021-03-25 12:02:02 60 60 2021-03-25 12:03:00 0002 Bob 330110 
    +
    +
+
+

FAQ

None

+
+
+
diff --git a/docs/dli/sqlreference/dli_08_0394.html b/docs/dli/sqlreference/dli_08_0394.html
new file mode 100644
index 00000000..436a4144
--- /dev/null
+++ b/docs/dli/sqlreference/dli_08_0394.html
@@ -0,0 +1,309 @@

GaussDB(DWS) Result Table

+

Function

DLI outputs Flink job data to GaussDB(DWS). The GaussDB(DWS) database kernel is compliant with PostgreSQL, which can store data of more complex types and deliver spatial information services, multi-version concurrency control (MVCC), and high concurrency. It applies to location-based applications, finance and insurance, and e-commerce.

+

GaussDB(DWS) is an online data processing database based on the cloud infrastructure and platform and helps you mine and analyze massive sets of data.

+
+

Prerequisites

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • You have created a GaussDB(DWS) cluster. For details about how to create a GaussDB(DWS) cluster, see Creating a Cluster in the Data Warehouse Service Management Guide.
  • You have created a GaussDB(DWS) database table.
  • An enhanced datasource connection has been created for DLI to connect to GaussDB(DWS) clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

  • To use the upsert mode, you must define the primary key for both the GaussDB(DWS) result table and the GaussDB(DWS) table connected to the result table.
  • If tables with the same name exist in different GaussDB(DWS) schemas, you need to specify the schemas in the Flink open source SQL statements.
  • Before submitting a Flink job, you are advised to select Save Job Log and set the OBS bucket for saving job logs. This helps you view logs and locate faults when the job fails to be submitted or runs abnormally.
  • If you use the gsjdbc4 driver for connection, set driver to org.postgresql.Driver. You can omit this parameter because the gsjdbc4 driver is the default one.
    For example, run the following statements to use the gsjdbc4 driver to write data to GaussDB(DWS) in upsert mode:
    create table dwsSink(
    +  car_id STRING,
    +  car_owner STRING,
    +  car_brand STRING,
    +  car_speed INT
    +) with (
    +  'connector' = 'gaussdb',
    +  'url' = 'jdbc:postgresql://DwsAddress:DwsPort/DwsDatabase',
    +  'table-name' = 'car_info',
    +  'username' = 'DwsUserName',
+  'password' = 'DwsPassword',
    +  'write.mode' = 'upsert'
    +);
    +
    + +
    +
    +
+
+

Syntax

create table dwsSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'gaussdb',
+  'url' = '',
+  'table-name' = '',
+  'driver' = '',
+  'username' = '',
+  'password' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to gaussdb.

+

url

+

Yes

+

None

+

String

+

JDBC connection address.

+

If you use the gsjdbc4 driver, set the value in jdbc:postgresql://${ip}:${port}/${dbName} format.

+

If you use the gsjdbc200 driver, set the value in jdbc:gaussdb://${ip}:${port}/${dbName} format.

+

table-name

+

Yes

+

None

+

String

+

Name of the table to be operated. If the GaussDB(DWS) table is in a schema, set this parameter in the schema\".\"table_name format (for example, 'ads_game_sdk_base\".\"test'). For details, see FAQ.

+

driver

+

No

+

org.postgresql.Driver

+

String

+

JDBC connection driver. The default value is org.postgresql.Driver.

+

username

+

No

+

None

+

String

+

Username for GaussDB(DWS) database authentication. This parameter must be configured in pair with password.

+

password

+

No

+

None

+

String

+

Password for GaussDB(DWS) database authentication. This parameter must be configured in pair with username.

+

write.mode

+

No

+

None

+

String

+

Data write mode. The value can be copy, insert, or upsert. The default value is upsert. (See the sketch after this table.)

+

This parameter must be configured depending on primary key.

+
  • If primary key is not configured, data can be appended in copy and insert modes.
  • If primary key is configured, all the three modes are available.
+

Note: GaussDB(DWS) does not support updating distribution columns. The primary key columns to be updated must cover all distribution columns defined in the GaussDB(DWS) table.

+

sink.buffer-flush.max-rows

+

No

+

100

+

Integer

+

Maximum rows allowed for data flush. If the data size exceeds the value, data flush is triggered. The default value is 100.

+

If this parameter is set to 0, this configuration is disabled, and data is flushed in real time.

+

sink.buffer-flush.interval

+

No

+

1s

+

Duration

+

Data flush period. Data flush is triggered periodically. The format is {length value}{time unit label}, for example, 123ms, 321s. The supported time units include d, h, min, s, and ms (default unit).

+

sink.max-retries

+

No

+

3

+

Integer

+

Maximum number of write retries.

+

write.escape-string-value

+

No

+

false

+

Boolean

+

Whether to escape values of the string type. This parameter is used only when write.mode is set to copy.

+
+
+
+
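A sketch of copy mode with the buffering parameters above. Connection values are placeholders, and the flush values are assumptions for illustration; copy appends data, so no primary key is required:

create table dwsSink (
+  car_id STRING,
+  car_speed INT
+) with (
+  'connector' = 'gaussdb',
+  'url' = 'jdbc:postgresql://DwsAddress:DwsPort/DwsDatabase',
+  'table-name' = 'car_info',
+  'username' = 'DwsUserName',
+  'password' = 'DwsPassword',
+  'write.mode' = 'copy',                    -- append-only write path
+  'sink.buffer-flush.max-rows' = '500',     -- assumed value: flush after 500 rows
+  'sink.buffer-flush.interval' = '1s',      -- assumed value: also flush every second
+  'write.escape-string-value' = 'true'      -- only meaningful in copy mode
+);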

Example

In this example, data is read from the Kafka data source and written to the GaussDB(DWS) result table in insert mode. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where GaussDB(DWS) and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Set GaussDB(DWS) and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the GaussDB(DWS) and Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Connect to the GaussDB(DWS) database and create a table named dws_order.
    create table public.dws_order(
    +  order_id VARCHAR,
    +  order_channel VARCHAR,
    +  order_time VARCHAR,
    +  pay_amount FLOAT8,
    +  real_pay FLOAT8,
    +  pay_time VARCHAR,
    +  user_id VARCHAR,
    +  user_name VARCHAR,
    +  area_id VARCHAR);
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses the Kafka data source and the GaussDB(DWS) result table.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +CREATE TABLE dwsSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'gaussdb',
    +  'url' = 'jdbc:postgresql://DWSAddress:DWSPort/DWSdbName',
    +  'table-name' = 'dws_order',
    +  'driver' = 'org.postgresql.Driver',
    +  'username' = 'DWSUserName',
    +  'password' = 'DWSPassword',
    +  'write.mode' = 'insert'
    +);
    +
    +insert into dwsSink select * from kafkaSource;
    +
    +
  5. Connect to the Kafka cluster and enter the following test data to Kafka:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
  6. Run the following SQL statement in GaussDB(DWS) to view the data result:
     select * from dws_order
    +
    The data result is as follows:
    202103241000000001	webShop	2021-03-24 10:00:00	100.0	100.0	2021-03-24 10:02:03	0001	Alice	330106
    +
    +
+
+

FAQ

  • Q: What should I do if the Flink job execution fails and the log contains the following error information?
    java.io.IOException: unable to open JDBC writer
    +...
    +Caused by: org.postgresql.util.PSQLException: The connection attempt failed.
    +...
    +Caused by: java.net.SocketTimeoutException: connect timed out
    +
    A: The enhanced datasource connection is not bound to the queue, or the binding has failed. +
    +
+
  • Q: How can I configure a GaussDB(DWS) table that is in a schema?
    A: When GaussDB(DWS) table test is in schema ads_game_sdk_base, refer to the 'table-name' parameter setting in the following example:
    CREATE TABLE ads_rpt_game_sdk_realtime_ada_reg_user_pay_mm (
    +  ddate DATE,
    +  dmin TIMESTAMP(3),
    +  game_appkey VARCHAR,
    +  channel_id VARCHAR,
    +  pay_user_num_1m bigint,
    +  pay_amt_1m bigint,
    +  PRIMARY KEY (ddate, dmin, game_appkey, channel_id) NOT ENFORCED
    +) WITH (
    +  'connector' = 'gaussdb',
    +  'url' = 'jdbc:postgresql://<yourDwsAddress>:<yourDwsPort>/dws_bigdata_db',
    +  'table-name' = 'ads_game_sdk_base\".\"test',
    +  'username' = '<yourUsername>',
    +  'password' = '<yourPassword>',
    +  'write.mode' = 'upsert'
    +);
    +
    +
+
+
  • Q: What can I do if a job is running properly but there is no data in GaussDB(DWS)?
    A: Check the following items:
    • Check whether the JobManager and TaskManager logs contain error information. To view logs, perform the following steps:
      1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
      2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
      3. Go to the folder of the date, find the folder whose name contains taskmanager or jobmanager, download the taskmanager.out or jobmanager.out file, and view result logs.
      +
    • Check whether the datasource connection is correctly bound and whether the security group rules allow access from the queue.
    • Check whether the GaussDB(DWS) table to which data is to be written exists in multiple schemas. If it does, specify the schema in the Flink job.
    +
    +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0395.html b/docs/dli/sqlreference/dli_08_0395.html new file mode 100644 index 00000000..d268aeec --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0395.html @@ -0,0 +1,380 @@ + + +

Elasticsearch Result Table

+

Function

DLI outputs Flink job output data to Elasticsearch of Cloud Search Service (CSS). Elasticsearch is a popular enterprise-class Lucene-powered search server that provides distributed multi-user capabilities. It delivers multiple functions, including full-text retrieval, structured search, analytics, aggregation, and highlighting. With Elasticsearch, you can achieve stable, reliable, real-time search. Elasticsearch applies to diversified scenarios, such as log analysis and site search.

+

CSS is a fully managed, distributed search service. It is fully compatible with open-source Elasticsearch and provides DLI with structured and unstructured data search, statistics, and report capabilities.

+
+

Prerequisites

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • You have created a cluster on CSS.

    If you need to access Elasticsearch using the cluster username and password, enable the security mode and disable HTTPS for the created CSS cluster.

    +
  • An enhanced datasource connection has been created for DLI to connect to CSS, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

  • Currently, only CSS 7.X and later versions are supported. Version 7.6.2 is recommended.
  • If the username and password are used, the security mode must be enabled and HTTPS must be disabled for the created CSS cluster.
  • ICMP must be enabled for the security group inbound rules of the CSS cluster.
  • For details about how to use data types, see section Format.
  • Before submitting a Flink job, you are advised to select Save Job Log and set the OBS bucket for saving job logs. This helps you view logs and locate faults when the job fails to be submitted or runs abnormally.
  • The Elasticsearch sink can work in either upsert mode or append mode, depending on whether a primary key is defined.
    • If a primary key is defined, the Elasticsearch sink works in upsert mode, which can consume queries containing UPDATE and DELETE messages.
    • If a primary key is not defined, the Elasticsearch sink works in append mode which can only consume queries containing INSERT messages.
    +

    In the Elasticsearch result table, the primary key is used to calculate the Elasticsearch document ID. The document ID is a string of up to 512 bytes. It cannot have spaces. The Elasticsearch result table generates a document ID string for every row by concatenating all primary key fields in the order defined in the DDL using a key delimiter specified by document-id.key-delimiter. Certain types are not allowed as a primary key field as they do not have a good string representation, for example, BYTES, ROW, ARRAY, and MAP. If no primary key is specified, Elasticsearch will generate a document ID automatically.

    +
  • The Elasticsearch result table supports both static index and dynamic index.
    • If you want to have a static index, the index option value should be a plain string, such as myusers. All records will be consistently written into the myusers index.
    • If you want to have a dynamic index, you can use {field_name} to reference a field value in the record to dynamically generate a target index. You can also use {field_name|date_format_string} to convert a field value of the TIMESTAMP, DATE, or TIME type into the format specified by date_format_string. date_format_string is compatible with Java's DateTimeFormatter. For example, if the option value is myusers-{log_ts|yyyy-MM-dd}, then a record with log_ts field value 2020-03-27 12:25:55 will be written into the myusers-2020-03-27 index. A combined sketch follows this list.
    +
+
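The index and document ID rules above are easier to see in a combined DDL. The following is a minimal sketch (the table name esOrderSink, the field list, and the host address are illustrative placeholders) that uses a composite primary key, a custom key delimiter, and a dynamic index:
create table esOrderSink (
+  order_id string,
+  area_id string,
+  log_ts TIMESTAMP(3),
+  pay_amount double,
+  PRIMARY KEY (order_id, area_id) NOT ENFORCED
+) with (
+  'connector' = 'elasticsearch-7',
+  'hosts' = 'http://x.x.x.x:9200',
+  'index' = 'orders-{log_ts|yyyy-MM-dd}',  -- dynamic index derived from log_ts
+  'document-id.key-delimiter' = '$'  -- document ID is order_id$area_id
+);
+
With this definition, the sink works in upsert mode, and a row whose order_id is 0001, area_id is 330106, and log_ts is 2021-03-24 10:00:00 is written to the orders-2021-03-24 index with document ID 0001$330106.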
+

Syntax

create table esSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'elasticsearch-7',
+  'hosts' = '',
+  'index' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to elasticsearch-7 to connect to a cluster of Elasticsearch 7.x or later.

+

hosts

+

Yes

+

None

+

String

+

Host name of the cluster where Elasticsearch is located. Use semicolons (;) to separate multiple host names. Ensure that the host name starts with http, for example, http://x.x.x.x:9200.

+

index

+

Yes

+

None

+

String

+

Elasticsearch index for every record. The index can be a static index (for example, 'myIndex') or a dynamic index (for example, 'index-{log_ts|yyyy-MM-dd}').

+

username

+

No

+

None

+

String

+

Username of the cluster where Elasticsearch is located. This parameter must be configured in pair with password.

+

If the username and password are used, the security mode must be enabled and HTTPS must be disabled for the created CSS cluster.

+

password

+

No

+

None

+

String

+

Password of the cluster where Elasticsearch is located. This parameter must be configured in pair with username.

+

document-id.key-delimiter

+

No

+

_

+

String

+

Delimiter of composite primary keys. The default value is _.

+

failure-handler

+

No

+

fail

+

String

+

Failure handling strategy in case a request to Elasticsearch fails. Valid strategies are:

+
  • fail: throws an exception if a request fails and thus causes a job failure.
  • ignore: ignores failures and drops the request.
  • retry-rejected: re-adds requests that have failed due to queue capacity saturation.
  • Custom class name: for failure handling with an ActionRequestFailureHandler subclass.
+

sink.flush-on-checkpoint

+

No

+

true

+

Boolean

+

Whether to flush on checkpoint.

+

If this parameter is set to false, the connector will not wait for all pending action requests to be acknowledged by Elasticsearch on checkpoints. Therefore, the connector does not provide any strong guarantees for at-least-once delivery of action requests.

+

sink.bulk-flush.max-actions

+

No

+

1000

+

Integer

+

Maximum number of buffered actions per bulk request. You can set this parameter to 0 to disable it.

+

sink.bulk-flush.max-size

+

No

+

2mb

+

MemorySize

+

Maximum size in memory of buffered actions per bulk request. It must be in MB granularity. You can set this parameter to 0 to disable it.

+

sink.bulk-flush.interval

+

No

+

1s

+

Duration

+

Interval for flushing buffered actions. You can set this parameter to 0 to disable it.

+

Note:

+

Both sink.bulk-flush.max-size and sink.bulk-flush.max-actions can be set to 0 with the flush interval set, allowing for complete asynchronous processing of buffered actions.

+

sink.bulk-flush.backoff.strategy

+

No

+

DISABLED

+

String

+

Specifies how to perform retries if any flush actions failed due to a temporary request error. Valid strategies are:

+
  • DISABLED: no retry performed, that is, fail after the first request error.
  • CONSTANT: wait for backoff delay between retries.
  • EXPONENTIAL: initially wait for backoff delay and increase exponentially between retries.
+

sink.bulk-flush.backoff.max-retries

+

No

+

8

+

Integer

+

Maximum number of backoff retries.

+

sink.bulk-flush.backoff.delay

+

No

+

50ms

+

Duration

+

Delay between each backoff attempt.

+

For CONSTANT backoff, this is simply the delay between each retry.

+

For EXPONENTIAL backoff, this is the initial base delay.

+

connection.max-retry-timeout

+

No

+

None

+

Duration

+

Maximum timeout between retries.

+

connection.path-prefix

+

No

+

None

+

String

+

Prefix string to be added to every REST communication, for example, '/v1'.

+

format

+

No

+

json

+

String

+

The Elasticsearch connector supports specifying a format. The format must produce a valid JSON document. By default, the built-in JSON format is used.

+

Refer to Format for more details and format parameters.

+
+
+
+

Example

In this example, data is read from the Kafka data source and written to the Elasticsearch result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where Elasticsearch and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Set Elasticsearch and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Elasticsearch and Kafka addresses. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Log in to Kibana of the Elasticsearch cluster, select Dev Tools, and enter and execute the following statement to create an index named orders:
    PUT /orders
    +{
    +  "settings": {
    +    "number_of_shards": 1
    +  },
    +	"mappings": {
    +	  "properties": {
    +	    "order_id": {
    +	      "type": "text"
    +	    },
    +	    "order_channel": {
    +	      "type": "text"
    +	    },
    +	    "order_time": {
    +	      "type": "text"
    +	    },
    +	    "pay_amount": {
    +	      "type": "double"
    +	    },
    +	    "real_pay": {
    +	      "type": "double"
    +	    },
    +	    "pay_time": {
    +	      "type": "text"
    +	    },
    +	    "user_id": {
    +	      "type": "text"
    +	    },
    +	    "user_name": {
    +	      "type": "text"
    +	    },
    +	    "area_id": {
    +	      "type": "text"
    +	    }
    +	  }
    +	}
    +}
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +CREATE TABLE elasticsearchSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'elasticsearch-7',
    +  'hosts' = 'ElasticsearchAddress:ElasticsearchPort',
    +  'index' = 'orders'
    +);
    +
    +insert into elasticsearchSink select * from kafkaSource;
    +
    +
  5. Connect to the Kafka cluster and insert the following test data into Kafka:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
  6. Enter the following statement in Kibana of the Elasticsearch cluster and view the result:
    GET orders/_search
    +
    {
    +  "took" : 1,
    +  "timed_out" : false,
    +  "_shards" : {
    +    "total" : 1,
    +    "successful" : 1,
    +    "skipped" : 0,
    +    "failed" : 0
    +  },
    +  "hits" : {
    +    "total" : {
    +      "value" : 2,
    +      "relation" : "eq"
    +    },
    +    "max_score" : 1.0,
    +    "hits" : [
    +      {
    +        "_index" : "orders",
    +        "_type" : "_doc",
    +        "_id" : "ae7wpH4B1dV9conjpXeB",
    +        "_score" : 1.0,
    +        "_source" : {
    +          "order_id" : "202103241000000001",
    +          "order_channel" : "webShop",
    +          "order_time" : "2021-03-24 10:00:00",
    +          "pay_amount" : 100.0,
    +          "real_pay" : 100.0,
    +          "pay_time" : "2021-03-24 10:02:03",
    +          "user_id" : "0001",
    +          "user_name" : "Alice",
    +          "area_id" : "330106"
    +        }
    +      },
    +      {
    +        "_index" : "orders",
    +        "_type" : "_doc",
    +        "_id" : "au7xpH4B1dV9conjn3er",
    +        "_score" : 1.0,
    +        "_source" : {
    +          "order_id" : "202103241606060001",
    +          "order_channel" : "appShop",
    +          "order_time" : "2021-03-24 16:06:06",
    +          "pay_amount" : 200.0,
    +          "real_pay" : 180.0,
    +          "pay_time" : "2021-03-24 16:10:06",
    +          "user_id" : "0001",
    +          "user_name" : "Alice",
    +          "area_id" : "330106"
    +        }
    +      }
    +    ]
    +  }
    +}
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0396.html b/docs/dli/sqlreference/dli_08_0396.html new file mode 100644 index 00000000..c80b31b7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0396.html @@ -0,0 +1,359 @@ + + +

HBase Result Table

+

Function

DLI outputs the job data to HBase. HBase is a column-oriented distributed cloud storage system that features enhanced reliability, excellent performance, and elastic scalability. It applies to the storage of massive amounts of data and distributed computing. You can use HBase to build a storage system capable of storing TB- or even PB-level data. With HBase, you can filter and analyze data with ease and get responses in milliseconds, rapidly mining data value. Structured and semi-structured key-value data can be stored, including messages, reports, recommendation data, risk control data, logs, and orders. With DLI, you can write massive volumes of data to HBase at a high speed and with low latency.

+
+

Prerequisites

  • An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
  • If MRS HBase is used, IP addresses of all hosts in the MRS cluster have been added to the host information of the enhanced datasource connection.


    +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • Do not enable Kerberos authentication for the HBase cluster.
  • The column families in the created HBase result table must be declared as the ROW type, with the field names mapping to the column family names and the nested field names mapping to the column qualifier names. There is no need to declare all the families and qualifiers in the schema; you can declare only what is used in the query. Except the ROW type fields, the single atomic type field (for example, STRING or BIGINT) is recognized as the HBase rowkey. The rowkey field can have an arbitrary name, but must be quoted using backticks if it is a reserved keyword. A minimal schema sketch follows this list.
+
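For instance, a minimal schema sketch (the rowkey column, the family name detail, and its qualifiers are illustrative):
create table hbaseSink (
+  order_id string,  -- single atomic field: recognized as the HBase rowkey
+  detail Row(  -- ROW field: maps to column family detail
+    order_time string,  -- nested field: maps to qualifier detail:order_time
+    pay_amount double)
+) with (
+  'connector' = 'hbase-2.2',
+  'table-name' = 'order',
+  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort'
+);
+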
+

Syntax

create table hbaseSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+) with (
+  'connector' = 'hbase-2.2',
+  'table-name' = '',
+  'zookeeper.quorum' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to hbase-2.2.

+

table-name

+

Yes

+

None

+

String

+

Name of the HBase table to connect to.

+

zookeeper.quorum

+

Yes

+

None

+

String

+

HBase ZooKeeper instance information, in the format of ZookeeperAddress:ZookeeperPort.

+

The following uses an MRS HBase cluster as an example to describe how to obtain the IP address and port number of ZooKeeper used by this parameter:

+
  • On MRS Manager, choose Cluster and click the name of the desired cluster. Choose Services > ZooKeeper > Instance, and obtain the IP address of the ZooKeeper instance.
  • On MRS Manager, choose Cluster and click the name of the desired cluster. Choose Services > ZooKeeper > Configurations > All Configurations, search for the clientPort parameter, and obtain its value, that is, the ZooKeeper port number.
+

zookeeper.znode.parent

+

No

+

/hbase

+

String

+

Root directory in ZooKeeper. The default value is /hbase.

+

null-string-literal

+

No

+

null

+

String

+

Representation for null values for string fields.

+

The HBase sink encodes null values as empty bytes and decodes empty bytes as null values for all types except the string type.

+

sink.buffer-flush.max-size

+

No

+

2mb

+

MemorySize

+

Maximum size in memory of buffered rows for each write request.

+

This can improve performance for writing data to the HBase database, but may increase the latency.

+

You can set this parameter to 0 to disable it.

+

sink.buffer-flush.max-rows

+

No

+

1000

+

Integer

+

Maximum number of rows to buffer for each write request.

+

This can improve performance for writing data to the HBase database, but may increase the latency.

+

You can set this parameter to 0 to disable it.

+

sink.buffer-flush.interval

+

No

+

1s

+

Duration

+

Interval for flushing any buffered rows.

+

This can improve performance for writing data to the HBase database, but may increase the latency.

+

You can set this parameter to 0 to disable it.

+

Note: Both sink.buffer-flush.max-size and sink.buffer-flush.max-rows can be set to 0 with the flush interval set allowing for complete asynchronous processing of buffered actions.

+

sink.parallelism

+

No

+

None

+

Integer

+

Defines the parallelism of the HBase sink operator.

+

By default, the parallelism is determined by the framework using the same parallelism of the upstream chained operator.

+
+
+
+

Data Type Mapping

HBase stores all data as byte arrays. The data needs to be serialized and deserialized during read and write operations.

+

When serializing and deserializing, the Flink HBase connector uses the utility class org.apache.hadoop.hbase.util.Bytes provided by HBase (Hadoop) to convert Flink data types to and from byte arrays.

+

The Flink HBase connector encodes null values to empty bytes and decodes empty bytes to null values for all data types except the string type. For the string type, the null literal is determined by the null-string-literal option.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Data type mapping

Flink SQL Type

+

HBase Conversion

+

CHAR / VARCHAR / STRING

+

byte[] toBytes(String s)

+

String toString(byte[] b)

+

BOOLEAN

+

byte[] toBytes(boolean b)

+

boolean toBoolean(byte[] b)

+

BINARY / VARBINARY

+

Returns byte[] as is.

+

DECIMAL

+

byte[] toBytes(BigDecimal v)

+

BigDecimal toBigDecimal(byte[] b)

+

TINYINT

+

new byte[] { val }

+

bytes[0] // returns first and only byte from bytes

+

SMALLINT

+

byte[] toBytes(short val)

+

short toShort(byte[] bytes)

+

INT

+

byte[] toBytes(int val)

+

int toInt(byte[] bytes)

+

BIGINT

+

byte[] toBytes(long val)

+

long toLong(byte[] bytes)

+

FLOAT

+

byte[] toBytes(float val)

+

float toFloat(byte[] bytes)

+

DOUBLE

+

byte[] toBytes(double val)

+

double toDouble(byte[] bytes)

+

DATE

+

Stores the number of days since epoch as an int value.

+

TIME

+

Stores the number of milliseconds of the day as an int value.

+

TIMESTAMP

+

Stores the milliseconds since epoch as a long value.

+

ARRAY

+

Not supported

+

MAP / MULTISET

+

Not supported

+

ROW

+

Not supported

+
+
+
+

Example

In this example, data is read from the Kafka data source and written to the HBase result table. The procedure is as follows (the HBase versions used in this example are 1.3.1 and 2.2.3):

+
  1. Create an enhanced datasource connection in the VPC and subnet where HBase and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Set HBase and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the HBase and Kafka addresses. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Use the HBase shell to create an HBase table named order with a single column family detail.
    create 'order', {NAME => 'detail'}
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses Kafka as the data source and HBase as the result table (the Rowkey is order_id and the column family name is detail).
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +create table hbaseSink(
    +  order_id string,
    +  detail Row(
    +    order_channel string,
    +    order_time string,
    +    pay_amount double,
    +    real_pay double,
    +    pay_time string,
    +    user_id string,
    +    user_name string,
    +    area_id string)
    +) with (
    +  'connector' = 'hbase-2.2',
    +  'table-name' = 'order',
    +  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort',
    +  'sink.buffer-flush.max-rows' = '1'
    +);
    +
    +insert into hbaseSink select order_id, Row(order_channel,order_time,pay_amount,real_pay,pay_time,user_id,user_name,area_id) from orders;
    +
    +
  5. Connect to the Kafka cluster and enter the following data to Kafka:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
  6. Run the following statement on the HBase shell to view the data result:
     scan 'order'
    +
    The data result is as follows:
    202103241000000001   column=detail:area_id, timestamp=2021-12-16T21:30:37.954, value=330106
    +
    +202103241000000001   column=detail:order_channel, timestamp=2021-12-16T21:30:37.954, value=webShop
    +
    +202103241000000001   column=detail:order_time, timestamp=2021-12-16T21:30:37.954, value=2021-03-24 10:00:00
    +
    +202103241000000001   column=detail:pay_amount, timestamp=2021-12-16T21:30:37.954, value=@Y\x00\x00\x00\x00\x00\x00
    +
    +202103241000000001   column=detail:pay_time, timestamp=2021-12-16T21:30:37.954, value=2021-03-24 10:02:03
    +
    +202103241000000001   column=detail:real_pay, timestamp=2021-12-16T21:30:37.954, value=@Y\x00\x00\x00\x00\x00\x00
    +
    +202103241000000001   column=detail:user_id, timestamp=2021-12-16T21:30:37.954, value=0001
    +
    +202103241000000001   column=detail:user_name, timestamp=2021-12-16T21:30:37.954, value=Alice
    +
    +202103241606060001   column=detail:area_id, timestamp=2021-12-16T21:30:44.842, value=330106
    +
    +202103241606060001   column=detail:order_channel, timestamp=2021-12-16T21:30:44.842, value=appShop
    +
    +202103241606060001   column=detail:order_time, timestamp=2021-12-16T21:30:44.842, value=2021-03-24 16:06:06
    +
    +202103241606060001   column=detail:pay_amount, timestamp=2021-12-16T21:30:44.842, value=@i\x00\x00\x00\x00\x00\x00
    +
    +202103241606060001   column=detail:pay_time, timestamp=2021-12-16T21:30:44.842, value=2021-03-24 16:10:06
    +
    +202103241606060001   column=detail:real_pay, timestamp=2021-12-16T21:30:44.842, value=@f\x80\x00\x00\x00\x00\x00
    +
    +202103241606060001   column=detail:user_id, timestamp=2021-12-16T21:30:44.842, value=0001
    +
    +202103241606060001   column=detail:user_name, timestamp=2021-12-16T21:30:44.842, value=Alice
    +
    +202103251202020001   column=detail:area_id, timestamp=2021-12-16T21:30:52.181, value=330110
    +
    +202103251202020001   column=detail:order_channel, timestamp=2021-12-16T21:30:52.181, value=miniAppShop
    +
    +202103251202020001   column=detail:order_time, timestamp=2021-12-16T21:30:52.181, value=2021-03-25 12:02:02
    +
    +202103251202020001   column=detail:pay_amount, timestamp=2021-12-16T21:30:52.181, value=@N\x00\x00\x00\x00\x00\x00
    +
    +202103251202020001   column=detail:pay_time, timestamp=2021-12-16T21:30:52.181, value=2021-03-25 12:03:00
    +
    +202103251202020001   column=detail:real_pay, timestamp=2021-12-16T21:30:52.181, value=@N\x00\x00\x00\x00\x00\x00
    +
    +202103251202020001   column=detail:user_id, timestamp=2021-12-16T21:30:52.181, value=0002
    +
    +202103251202020001   column=detail:user_name, timestamp=2021-12-16T21:30:52.181, value=Bob
    +
    +
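    Note that DOUBLE columns such as pay_amount and real_pay appear in the scan output as raw byte sequences because HBase stores all values as byte arrays (see Data Type Mapping). For example, @Y\x00\x00\x00\x00\x00\x00 is the big-endian IEEE-754 encoding 0x4059000000000000 produced by Bytes.toBytes(100.0).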
+
+

FAQ

Q: What should I do if the Flink job execution fails and the log contains the following error information?

+
org.apache.zookeeper.ClientCnxn$SessionTimeoutException: Client session timed out, have not heard from server in 90069ms for connection id 0x0
+

A: The datasource connection is not bound or the binding fails. Configure the datasource connection or configure the security group of the Kafka cluster to allow access from the DLI queue.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0397.html b/docs/dli/sqlreference/dli_08_0397.html new file mode 100644 index 00000000..593ef459 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0397.html @@ -0,0 +1,379 @@ + + +

JDBC Result Table

+

Function

DLI outputs the Flink job output data to RDS through the JDBC result table.

+
+

Prerequisites

An enhanced datasource connection with the RDS DB instances has been established, so that you can configure security group rules as required.

+ +
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • The connector operates in upsert mode if the primary key was defined; otherwise, the connector operates in append mode.
    • In upsert mode, Flink will insert a new row or update the existing row according to the primary key. Flink ensures idempotence in this way. To guarantee that the output result is as expected, you are advised to define a primary key for the table (see the sketch after this list).
    • In append mode, Flink will interpret all records as INSERT messages. The INSERT operation may fail if a primary key or unique constraint violation happens in the underlying database.
    +
+
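For instance, a minimal upsert sketch (the URL, table, and credentials are placeholders; driver is omitted here, so it is derived from the URL):
create table jdbcSink (
+  order_id string,
+  pay_amount double,
+  PRIMARY KEY (order_id) NOT ENFORCED  -- the primary key switches the sink to upsert mode
+)
+with (
+  'connector' = 'jdbc',
+  'url' = 'jdbc:mysql://MySQLAddress:MySQLPort/flink',
+  'table-name' = 'orders',
+  'username' = 'MySQLUsername',
+  'password' = 'MySQLPassword'
+);
+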
+

Syntax

create table jdbcSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'jdbc',
+  'url' = '',
+  'table-name' = '',
+  'driver' = '',
+  'username' = '',
+  'password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to jdbc.

+

url

+

Yes

+

None

+

String

+

Database URL.

+

table-name

+

Yes

+

None

+

String

+

Name of the database table to which data will be written.

+

driver

+

No

+

None

+

String

+

Driver required for connecting to the database. If you do not set this parameter, it will be automatically derived from the URL.

+

username

+

No

+

None

+

String

+

Database authentication username. This parameter must be configured in pair with password.

+

password

+

No

+

None

+

String

+

Database authentication password. This parameter must be configured in pair with username.

+

sink.buffer-flush.max-rows

+

No

+

100

+

Integer

+

Maximum size of buffered records before flush. You can set this parameter to 0 to disable it.

+

sink.buffer-flush.interval

+

No

+

1s

+

Duration

+

Flush interval, during which asynchronous threads will flush data. You can set this parameter to 0 to disable it.

+

Note that sink.buffer-flush.max-rows can be set to 0 with the flush interval set allowing for complete asynchronous processing of buffered actions.

+

sink.max-retries

+

No

+

3

+

Integer

+

Maximum number of retries if writing records to the database failed.

+
+
+
+

Data Type Mapping

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Data type mapping

MySQL Type

+

PostgreSQL Type

+

Flink SQL Type

+

TINYINT

+

-

+

TINYINT

+

SMALLINT

+

TINYINT UNSIGNED

+

SMALLINT

+

INT2

+

SMALLSERIAL

+

SERIAL2

+

SMALLINT

+

INT

+

MEDIUMINT

+

SMALLINT UNSIGNED

+

INTEGER

+

SERIAL

+

INT

+

BIGINT

+

INT UNSIGNED

+

BIGINT

+

BIGSERIAL

+

BIGINT

+

BIGINT UNSIGNED

+

-

+

DECIMAL(20, 0)

+

BIGINT

+

BIGINT

+

BIGINT

+

FLOAT

+

REAL

+

FLOAT4

+

FLOAT

+

DOUBLE

+

DOUBLE PRECISION

+

FLOAT8

+

DOUBLE PRECISION

+

DOUBLE

+

NUMERIC(p, s)

+

DECIMAL(p, s)

+

NUMERIC(p, s)

+

DECIMAL(p, s)

+

DECIMAL(p, s)

+

BOOLEAN

+

TINYINT(1)

+

BOOLEAN

+

BOOLEAN

+

DATE

+

DATE

+

DATE

+

TIME [(p)]

+

TIME [(p)] [WITHOUT TIMEZONE]

+

TIME [(p)] [WITHOUT TIMEZONE]

+

DATETIME [(p)]

+

TIMESTAMP [(p)] [WITHOUT TIMEZONE]

+

TIMESTAMP [(p)] [WITHOUT TIMEZONE]

+

CHAR(n)

+

VARCHAR(n)

+

TEXT

+

CHAR(n)

+

CHARACTER(n)

+

VARCHAR(n)

+

CHARACTER

+

VARYING(n)

+

TEXT

+

STRING

+

BINARY

+

VARBINARY

+

BLOB

+

BYTEA

+

BYTES

+

-

+

ARRAY

+

ARRAY

+
+
+
+

Example

In this example, Kafka is used to send data, and Kafka data is written to the MySQL database through the JDBC result table.

+
  1. Create an enhanced datasource connection in the VPC and subnet where MySQL and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Set MySQL and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the MySQL and Kafka addresses. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Log in to the MySQL database and create table orders in database flink.
    CREATE TABLE `flink`.`orders` (
    +	`order_id` VARCHAR(32) NOT NULL,
    +	`order_channel` VARCHAR(32) NULL,
    +	`order_time` VARCHAR(32) NULL,
    +	`pay_amount` DOUBLE UNSIGNED NOT NULL,
    +	`real_pay` DOUBLE UNSIGNED NULL,
    +	`pay_time` VARCHAR(32) NULL,
    +	`user_id` VARCHAR(32) NULL,
    +	`user_name` VARCHAR(32) NULL,
    +	`area_id` VARCHAR(32) NULL,
    +	PRIMARY KEY (`order_id`)
    +)	ENGINE = InnoDB
    +	DEFAULT CHARACTER SET = utf8mb4
    +	COLLATE = utf8mb4_general_ci;
    +
  4. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +CREATE TABLE jdbcSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'jdbc',
    +  'url' = 'jdbc:mysql://MySQLAddress:MySQLPort/flink', -- flink is the MySQL database where the orders table is located.
    +  'table-name' = 'orders',
    +  'username' = 'MySQLUsername',
    +  'password' = 'MySQLPassword',
    +  'sink.buffer-flush.max-rows' = '1'
    +);
    +
    +insert into jdbcSink select * from kafkaSource;
    +
    +
  5. Connect to the Kafka cluster and send the following test data to the Kafka topic:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"} 
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
  6. Run the SQL statement in the MySQL database to view data in the table:
    select * from orders;
    +
    The following is an example of the result (the data was copied out of the MySQL database, so its formatting may differ from how MySQL displays it):
    202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106
    +202103241606060001,appShop,2021-03-24 16:06:06,200.0,180.0,2021-03-24 16:10:06,0001,Alice,330106
    +
    +
+
+

FAQ

None

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0398.html b/docs/dli/sqlreference/dli_08_0398.html new file mode 100644 index 00000000..f774e418 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0398.html @@ -0,0 +1,495 @@ + + +

Kafka Result Table

+

Function

DLI outputs the Flink job output data to Kafka through the Kafka result table.

+

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is applicable to scenarios of handling massive messages.

+
+

Prerequisites

  • You have created a Kafka cluster.
  • An enhanced datasource connection has been created for DLI to connect to Kafka clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • For details about how to use data types, see section Format.
+
+

Syntax

create table kafkaSink(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'kafka',
+  'topic' = '',
+  'properties.bootstrap.servers' = '',
+  'format' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

string

+

Connector to be used. Set this parameter to kafka.

+

topic

+

Yes

+

None

+

string

+

Topic name of the Kafka result table.

+

properties.bootstrap.servers

+

Yes

+

None

+

string

+

Kafka broker address. The value is in the format of host:port,host:port,host:port. Multiple host:port pairs are separated with commas (,).

+

format

+

Yes

+

None

+

string

+

Format used by the Flink Kafka connector to serialize Kafka messages. Either this parameter or the value.format parameter is required.

+

The following formats are supported:

+
  • csv
  • json
  • avro
+

Refer to Format for more details and format parameters.

+

topic-pattern

+

No

+

None

+

String

+

Regular expression for matching the Kafka topic name.

+

Only one of topic and topic-pattern can be specified.

+

Example: 'topic.*'

+

'(topic-c|topic-d)'

+

'(topic-a|topic-b|topic-\\d*)'

+

'(topic-a|topic-b|topic-[0-9]*)'

+

properties.*

+

No

+

None

+

String

+

This parameter can be used to set and pass arbitrary Kafka configurations.

+

Note:

+
  • Suffix names must match the configuration key defined in Apache Kafka.

    For example, you can disable automatic topic creation via 'properties.allow.auto.create.topics' = 'false'.

    +
  • Some configurations are not supported, for example, 'key.deserializer' and 'value.deserializer'.
+

key.format

+

No

+

None

+

String

+

Format used to deserialize and serialize the key part of Kafka messages.

+

Note:

+
  • If a key format is defined, the key.fields parameter is required as well. Otherwise, the Kafka records will have an empty key.
  • Possible values are:

    csv

    +

    json

    +

    avro

    +

    debezium-json

    +

    canal-json

    +

    maxwell-json

    +

    avro-confluent

    +

    raw

    +

    Refer to Format for more details and format parameters.

    +
+

key.fields

+

+

No

+

+

[]

+

List<String>

+

Defines the columns in the table as the list of keys. This parameter must be configured in pair with key.format.

+

This parameter is left empty by default. Therefore, no key is defined.

+

The format is like field1;field2.

+

key.fields-prefix

+

No

+

None

+

String

+

Defines a custom prefix for all fields of the key format to avoid name clashes with fields of the value format.

+

value.format

+

Yes

+

None

+

String

+

Format used to deserialize and serialize the value part of Kafka messages.

+

Note:

+
  • Either this parameter or the format parameter is required. If both parameters are configured, a conflict occurs.
  • Refer to Format for more details and format parameters.
+

value.fields-include

+

No

+

ALL

+

Enum

+

Possible values: [ALL, EXCEPT_KEY]

+

Whether to contain the key field when parsing the message body.

+

Possible values are:

+
  • ALL (default): All defined fields are included in the value of Kafka messages.
  • EXCEPT_KEY: All the fields except those defined by key.fields are included in the value of Kafka messages.
+

sink.partitioner

+

No

+

None

+

string

+

Mapping from Flink's partitions into Kafka's partitions. Valid values are as follows:

+
  • fixed (default): Each Flink partition ends up in at most one Kafka partition.
  • round-robin: A Flink partition is distributed to Kafka partitions in a round-robin manner.
  • Custom FlinkKafkaPartitioner subclass: If fixed and round-robin do not meet your requirements, you can create subclass FlinkKafkaPartitioner to customize the partition mapping, for example, org.mycompany.MyPartitioner.
+

sink.semantic

+

No

+

at-least-once

+

String

+

Defines the delivery semantic for the Kafka sink.

+

Valid values are as follows:

+
  • at-least-once
  • exactly-once
  • none
+

sink.parallelism

+

No

+

None

+

Integer

+

Defines the parallelism of the Kafka sink operator.

+

By default, the parallelism is determined by the framework using the same parallelism of the upstream chained operator.

+
+
+
+
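The interplay of key.format, key.fields, and value.fields-include is easier to see in a DDL. The following is a minimal sketch (the topic, broker address, and field names are placeholders) that serializes order_id into the message key and keeps it out of the message value:
CREATE TABLE kafkaKeyedSink (
+  order_id string,
+  pay_amount double
+) WITH (
+  'connector' = 'kafka',
+  'topic' = 'KafkaSinkTopic',
+  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
+  'key.format' = 'json',
+  'key.fields' = 'order_id',  -- order_id is serialized into the message key
+  'value.format' = 'json',
+  'value.fields-include' = 'EXCEPT_KEY'  -- the message value carries only pay_amount
+);
+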

Example (SASL_SSL Disabled for the Kafka Cluster)

In this example, data is read from a Kafka topic and written to another using a Kafka result table.

+
  1. Create an enhanced datasource connection in the VPC and subnet where Kafka is located, and bind the connection to the required Flink elastic resource pool.
  2. Set Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +CREATE TABLE kafkaSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaSinkTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'format' = 'json'
    +);
    +
    +insert into kafkaSink select * from kafkaSource;
    +
    +
  4. Connect to the Kafka cluster and insert the following test data into the source topic in Kafka:
    {"order_id":"202103241000000001","order_channel":"webShop","order_time":"2021-03-24 10:00:00","pay_amount":100.0,"real_pay":100.0,"pay_time":"2021-03-24 10:02:03","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
    +{"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
  5. Connect to the Kafka cluster and read data from the sink topic of Kafka.
    {"order_id":"202103241000000001","order_channel":"webShop","order_time":"2021-03-24 10:00:00","pay_amount":100.0,"real_pay":100.0,"pay_time":"2021-03-24 10:02:03","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
    +{"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
+
+

Example (SASL_SSL Enabled for the Kafka Cluster)

  • Example 1: Enable SASL_SSL authentication for the DMS cluster.

    Create a Kafka cluster for DMS, enable SASL_SSL, download the SSL certificate, and upload the downloaded certificate client.jks to an OBS bucket.

    +
    CREATE TABLE ordersSource (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:9093,xx:9093,xx:9093',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.ssl.truststore.location' = 'obs://xx/xx.jks',  -- Location where the user uploads the certificate to
    +  'properties.sasl.mechanism' = 'PLAIN',  --  Value format: SASL_PLAINTEXT
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.sasl.jaas.config' = 'org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xx\" password=\"xx\";', -- Account and password set when the Kafka cluster is created
    +  'format' = 'json'
    +);
    + 
    +CREATE TABLE ordersSink (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:9093,xx:9093,xx:9093',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.ssl.truststore.location' = 'obs://xx/xx.jks',
    +  'properties.sasl.mechanism' = 'PLAIN',
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.sasl.jaas.config' = 'org.apache.kafka.common.security.plain.PlainLoginModule required username=\"xx\" password=\"xx\";',
    +  'format' = 'json'
    +);
    + 
    +insert into ordersSink select * from ordersSource;
    +
  • Example 2: Enable Kafka SASL_SSL authentication for the MRS cluster.
    • Enable Kerberos authentication for the MRS cluster.
    • Click the Components tab and click Kafka. In the displayed page, click the Service Configuration tab, locate the security.protocol, and set it to SASL_SSL.
    • Log in to the FusionInsight Manager of the MRS cluster and download the user credential. Choose System > Permission > User. Locate the row that contains the target user, choose More > Download Authentication Credential.

      Obtain the truststore.jks file using the authentication credential and store the credential and truststore.jks file in OBS.

      +
    • If "Message stream modified (41)" is displayed, the JDK version may be incorrect. Change the JDK version in the sample code to a version earlier than 8u_242 or delete the renew_lifetime = 0m configuration item from the krb5.conf configuration file.
    • Set the port to the sasl_ssl.port configured in the Kafka service configuration.
    • In the following statements, set security.protocol to SASL_SSL.
    +
    CREATE TABLE ordersSource (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21009,xx:21009',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx', --Username
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.ssl.truststore.location' = 'obs://xx/truststore.jks',
    +  'properties.ssl.truststore.password' = 'xx',  -- Password set for generating truststore.jks
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  'format' = 'json'
    +);
    + 
    +CREATE TABLE ordersSink (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21009,xx:21009',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx',
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.ssl.truststore.location' = 'obs://xx/truststore.jks',
    +  'properties.ssl.truststore.password' = 'xx',
    +  'properties.security.protocol' = 'SASL_SSL',
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  'format' = 'json'
    +);
    + 
    +insert into ordersSink select * from ordersSource;
    +
  • Example 3: Enable Kerberos SASL_PLAINTEXT authentication for the MRS cluster
    • Enable Kerberos authentication for the MRS cluster.
    • Click the Components tab and click Kafka. In the displayed page, click the Service Configuration tab, locate the security.protocol, and set it to SASL_PLAINTEXT.
    • Log in to the FusionInsight Manager of the MRS cluster and download the user credential. Choose System > Permission > User. Locate the row that contains the target user, choose More > Download Authentication Credential. Upload the credential to OBS.
    • If error message "Message stream modified (41)" is displayed, the JDK version may be incorrect. Change the JDK version in the sample code to a version earlier than 8u_242 or delete the renew_lifetime = 0m configuration item from the krb5.conf configuration file.
    • Set the port to the sasl.port configured in the Kafka service configuration.
    • In the following statements, set security.protocol to SASL_PLAINTEXT.
    +
    CREATE TABLE ordersSource (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21007,xx:21007',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx',
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.security.protocol' = 'SASL_PLAINTEXT',
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  'format' = 'json'
    +);
    + 
    +CREATE TABLE ordersSink (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'xx',
    +  'properties.bootstrap.servers' = 'xx:21007,xx:21007',
    +  'properties.sasl.kerberos.service.name' = 'kafka',
    +  'properties.connector.auth.open' = 'true',
    +  'properties.connector.kerberos.principal' = 'xx',
    +  'properties.connector.kerberos.krb5' = 'obs://xx/krb5.conf',
    +  'properties.connector.kerberos.keytab' = 'obs://xx/user.keytab',
    +  'properties.security.protocol' = 'SASL_PLAINTEXT',
    +  'properties.sasl.mechanism' = 'GSSAPI',
    +  'format' = 'json'
    +);
    + 
    +insert into ordersSink select * from ordersSource;
    +
  • Example 4: Use SSL for the MRS cluster
    • Do not enable Kerberos authentication for the MRS cluster.
    • Log in to the FusionInsight Manager of the MRS cluster and download the user credential. Choose System > Permission > User. Locate the row that contains the target user, choose More > Download Authentication Credential.

      Obtain the truststore.jks file using the authentication credential and store the credential and truststore.jks file in OBS.

      +
    • Set the port to the ssl.port configured in the Kafka service configuration.
    • In the following statements, set security.protocol to SSL.
    • Set ssl.mode.enable to true.
      CREATE TABLE ordersSource (
      +  order_id string,
      +  order_channel string,
      +  order_time timestamp(3),
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = 'xx',
      +  'properties.bootstrap.servers' = 'xx:9093,xx:9093,xx:9093',
      +  'properties.group.id' = 'GroupId',
      +  'scan.startup.mode' = 'latest-offset',
      +  'properties.connector.auth.open' = 'true',
      +  'properties.ssl.truststore.location' = 'obs://xx/truststore.jks',
      +  'properties.ssl.truststore.password' = 'xx',  -- Password set for generating truststore.jks
      +  'properties.security.protocol' = 'SSL',
      +  'format' = 'json'
      +);
      + 
      +CREATE TABLE ordersSink (
      +  order_id string,
      +  order_channel string,
      +  order_time timestamp(3),
      +  pay_amount double,
      +  real_pay double,
      +  pay_time string,
      +  user_id string,
      +  user_name string,
      +  area_id string
      +) WITH (
      +  'connector' = 'print'
      +);
      + 
      +insert into ordersSink select * from ordersSource;
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0399.html b/docs/dli/sqlreference/dli_08_0399.html new file mode 100644 index 00000000..37aa7d83 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0399.html @@ -0,0 +1,154 @@ + + +

Print Result Table

+

Function

The Print connector prints output data to the TaskManager's error file or out file, making it easier for you to view results during code debugging.

+
+

Prerequisites

None

+
+

Precautions

  • The Print result table supports the following output formats, which depend on whether print-identifier is specified in the WITH parameter when the table is created and on the parallelism of the job:
    • Identifier:Task ID> Output data: print-identifier is specified and parallelism > 1.
    • Identifier> Output data: print-identifier is specified and parallelism == 1.
    • Task ID> Output data: print-identifier is not specified and parallelism > 1.
    • Output data: print-identifier is not specified and parallelism == 1.
  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
+
+

Syntax

create table printSink (
+  attr_name attr_type 
+  (',' attr_name attr_type) * 
+  (',' PRIMARY KEY (attr_name,...) NOT ENFORCED)
+) with (
+  'connector' = 'print',
+  'print-identifier' = '',
+  'standard-error' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to print.

+

print-identifier

+

No

+

None

+

String

+

Identifier message that is prefixed to each output value.

+

standard-error

+

No

+

false

+

Boolean

+

The value can be only true or false. The default value is false.

+
  • If the value is true, data is output to the error file of the TaskManager.
  • If the value is false, data is output to the out file of the TaskManager.
+
+
+
+
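The following is a minimal sketch that combines the two optional parameters above; the table schema and the identifier value debug are illustrative assumptions:

create table printSink (
  user_id string,
  amount int
) with (
  'connector' = 'print',
  'print-identifier' = 'debug', -- Prefix each output line with "debug".
  'standard-error' = 'true' -- Write to the error file of the TaskManager instead of the out file.
);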

Example

Create a Flink OpenSource SQL job. Run the following script to generate random data through the DataGen table and output the data to the Print result table.

+

When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs.

+
create table dataGenSource(
+  user_id string,
+  amount int
+) with (
+  'connector' = 'datagen',
+  'rows-per-second' = '1', -- Generate one record per second.
+  'fields.user_id.kind' = 'random', -- Use a random generator for the user_id field.
+  'fields.user_id.length' = '3' -- Limit the length of user_id to 3.
+);
+
+create table printSink(
+  user_id string,
+  amount int
+) with (
+  'connector' = 'print'
+);
+
+insert into printSink select * from dataGenSource;
+

After the job is submitted, the job status changes to Running. Use either of the following methods to view the output:

+
  • Method 1:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Locate the row that contains the target Flink job, and choose More > FlinkUI in the Operation column.
    3. On the Flink UI, choose Task Managers, click the task name, and select Stdout to view job logs.
    +
  • Method 2: If you select Save Job Log on the Running Parameters tab before submitting the job, perform the following operations:
    1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
    2. Click the name of the corresponding Flink job, choose Run Log, click OBS Bucket, and locate the folder of the log you want to view according to the date.
    3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view result logs.
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0400.html b/docs/dli/sqlreference/dli_08_0400.html new file mode 100644 index 00000000..2bd55ec4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0400.html @@ -0,0 +1,564 @@ + + +

Redis Result Table

+

Function

DLI outputs the Flink job output data to Redis. Redis is a key-value storage system that supports multiple types of data structures. It can be used in scenarios such as caching, event publish/subscribe, and high-speed queuing. Redis supports direct read/write of strings, hashes, lists, queues, and sets. Redis works with in-memory datasets and provides persistence. For more information about Redis, visit https://redis.io/.

+
+

Prerequisites

  • An enhanced datasource connection with Redis has been established, so that you can configure security group rules as required. +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • If the Redis key field is not defined in the statement for creating the Redis result table, a generated UUID is used as the key.
  • To specify a key in Redis, you need to define a primary key in the Redis result table of Flink. The value of the primary key is the Redis key.
  • The primary key defined for the Redis result table cannot be a composite primary key; it can only be a single field.
  • Constraints on schema-syntax:
    • If schema-syntax is map or array, there can be only one non-primary key field, and it must be of the corresponding map or array type.
    • If schema-syntax is fields-scores, the number of non-primary key fields must be even, and, excluding the primary key, the second field of every two fields must be of the double type. The double value is the score of the preceding field. The following is an example:
      CREATE TABLE redisSink (
      +  order_id string,
      +  order_channel string,
      +  order_time double,
      +  pay_amount STRING,
      +  real_pay double,
      +  pay_time string,
      +  user_id double,
      +  user_name string,
      +  area_id double,
      +  primary key (order_id) not enforced
      +) WITH (
      +  'connector' = 'redis',
      +  'host' = 'RedisIP',
      +  'password' = 'RedisPassword',
      +  'data-type' = 'sorted-set',
      +  'deploy-mode' = 'master-replica',
      +  'schema-syntax' = 'fields-scores'
      +);
      +
    +
  • Restrictions on data-type:
    • If data-type is string, only one non-primary key field is allowed.
    • If data-type is sorted-set and schema-syntax is fields or array, default-score is used as the score.
    • If data-type is sorted-set and schema-syntax is map, there can be only one non-primary key in addition to the primary key and the non-primary key must be of the map type. The map values of the non-primary key must be of the double type, indicating the score. The keys in the map are the values in the Redis set.
    • If data-type is sorted-set and schema-syntax is array-scores, only two non-primary keys are allowed and must be of the array type.
      The first key indicates values in the Redis set. The second key is of the array<double> type, indicating index scores. The following is an example:
      CREATE TABLE redisSink (
      +  order_id string,
      +  arrayField Array<String>,
      +  arrayScore array<double>,
      +  primary key (order_id) not enforced
      +) WITH (
      +  'connector' = 'redis',
      +  'host' = 'RedisIP',
      +  'password' = 'RedisPassword',
      +  'data-type' = 'sorted-set',
      +  "default-score" = '3',
      +  'deploy-mode' = 'master-replica',
      +  'schema-syntax' = 'array-scores'
      +);
      +
      +
    +
+
+

Syntax

create table redisSink (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'redis',
+  'host' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector to be used. Set this parameter to redis.

+

host

+

Yes

+

None

+

String

+

Redis connector address.

+

port

+

No

+

6379

+

Integer

+

Redis connector port.

+

password

+

No

+

None

+

String

+

Redis authentication password.

+

namespace

+

No

+

None

+

String

+

Redis key namespace.

+

For example, if the value is set to "person" and the key is "jack", the key stored in Redis is person:jack.

+

delimiter

+

No

+

:

+

String

+

Delimiter between the Redis key and namespace.

+

data-type

+

No

+

hash

+

String

+

Redis data type. Available values are as follows:

+
  • hash
  • list
  • set
  • sorted-set
  • string
+

For details about the constraints, see Constraints on data-type.

+

schema-syntax

+

No

+

fields

+

String

+

Redis schema semantics. Available values are as follows:

+
  • fields: applicable to all data types. This value indicates that multiple fields can be set and the value of each field is read when data is written.
  • fields-scores: applicable to sorted-set data, indicating that each field is read as an independent score.
  • array: applicable to list, set, and sorted-set data.
  • array-scores: applicable to sorted-set data.
  • map: applicable to hash and sorted-set data.
+

For details about the constraints, see Constraints on schema-syntax.

+

deploy-mode

+

No

+

standalone

+

String

+

Deployment mode of the Redis cluster. The value can be standalone, master-replica, or cluster. The default value is standalone.

+

For details about the setting, see the instance type description of the Redis cluster.

+

retry-count

+

No

+

5

+

Integer

+

Number of attempts to connect to the Redis cluster.

+

connection-timeout-millis

+

No

+

10000

+

Integer

+

Maximum time allowed for connecting to the Redis cluster, in milliseconds.

+

commands-timeout-millis

+

No

+

2000

+

Integer

+

Maximum time to wait for a command response, in milliseconds.

+

rebalancing-timeout-millis

+

No

+

15000

+

Integer

+

Sleep time before retrying when the Redis cluster fails, in milliseconds.

+

default-score

+

No

+

0

+

Double

+

Default score when data-type is sorted-set.

+

ignore-retraction

+

No

+

false

+

Boolean

+

Whether to ignore Retract messages.

+

skip-null-values

+

No

+

true

+

Boolean

+

Whether null values will be skipped. If this parameter is false, null is written to Redis for null values.

+

key-ttl-mode

+

No

+

no-ttl

+

String

+

Whether the Redis sink TTL function will be enabled. The value can be no-ttl, expire-msec, expire-at-date or expire-at-timestamp.

+
  • no-ttl: No expiration time is set.
  • expire-msec: validity period of the key. The parameter is a long string, in milliseconds.
  • expire-at-date: Date and time when the key expires. The value is in UTC time format.
  • expire-at-timestamp: Timestamp when the key expires.
+

key-ttl

+

No

+

None

+

String

+

Supplementary parameter of key-ttl-mode. Available values are as follows:

+
  • If key-ttl-mode is no-ttl, this parameter does not need to be configured.
  • If key-ttl-mode is expire-msec, set this parameter to a string that can be parsed into the Long type. For example, 5000 indicates that the key will expire in 5000 ms.
  • If key-ttl-mode is expire-at-date, set this parameter to a date. For example, 2011-12-03T10:15:30 indicates that the expiration time is 2011-12-03 18:15:30 (UTC+8).
  • If key-ttl-mode is expire-at-timestamp, set this parameter to a timestamp, in milliseconds. For example, 1679385600000 indicates that the expiration time is 2023-03-21 16:00:00.
+
+
+
+
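As a hedged illustration of the TTL parameters above, the following sketch writes hash data whose keys expire 5000 ms after being written; the host and password values are placeholders:

create table redisSink (
  order_id string,
  user_name string,
  primary key (order_id) not enforced
) with (
  'connector' = 'redis',
  'host' = 'RedisIP',
  'password' = 'RedisPassword',
  'deploy-mode' = 'master-replica',
  'key-ttl-mode' = 'expire-msec', -- Keys expire a fixed number of milliseconds after being written.
  'key-ttl' = '5000' -- 5000 ms, interpreted according to key-ttl-mode.
);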

Example

In this example, data is read from the Kafka data source and written to the Redis result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where Redis is located, and bind the connection to the required Flink elastic resource pool.
  2. Set Redis security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Redis address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourTopic>',
    +  'properties.bootstrap.servers' = '<yourKafka>:<port>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +-- In the following redisSink table, data-type uses the default value hash, schema-syntax is fields, and order_id is defined as the primary key. Therefore, the value of this field is used as the Redis key.
    +CREATE TABLE redisSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  primary key (order_id) not enforced
    +) WITH (
    +  'connector' = 'redis',
    +  'host' = '<yourRedis>',
    +  'password' = '<yourPassword>',
    +  'deploy-mode' = 'master-replica',
    +  'schema-syntax' = 'fields'
    +);
    +
    +insert into redisSink select * from orders;
    +
    +
  4. Connect to the Kafka cluster and insert the following test data into Kafka:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
  5. Run the following commands in Redis and view the result:
    • Obtain the result whose key is 202103241606060001.

      Run the following command:

      +
      HGETALL 202103241606060001
      +
      Command output:
       1) "user_id"
      + 2) "0001"
      + 3) "user_name"
      + 4) "Alice"
      + 5) "pay_amount"
      + 6) "200.0"
      + 7) "real_pay"
      + 8) "180.0"
      + 9) "order_time"
      +10) "2021-03-24 16:06:06"
      +11) "area_id"
      +12) "330106"
      +13) "order_channel"
      +14) "appShop"
      +15) "pay_time"
      +16) "2021-03-24 16:10:06"
      +
      +
    • Obtain the result whose key is 202103241000000001.

      Run the following command:

      +
      HGETALL 202103241000000001
      +
      Command output:
       1) "user_id"
      + 2) "0001"
      + 3) "user_name"
      + 4) "Alice"
      + 5) "pay_amount"
      + 6) "100.0"
      + 7) "real_pay"
      + 8) "100.0"
      + 9) "order_time"
      +10) "2021-03-24 10:00:00"
      +11) "area_id"
      +12) "330106"
      +13) "order_channel"
      +14) "webShop"
      +15) "pay_time"
      +16) "2021-03-24 10:02:03"
      +
      +
    +
+
+

FAQ

  • Q: When data-type is set to set, why is there less data in the result than in the input?

    A: This is because the input data contains duplicate data. Deduplication is performed in the Redis set, and the number of records in the result decreases.

    +
  • Q: What should I do if Flink job logs contain the following error information?
    org.apache.flink.table.api.ValidationException: SQL validation failed. From line 1, column 40 to line 1, column 105: Parameters must be of the same type
    +

    A: The array type is used, but the types of the fields in the array differ. Ensure that all fields in the array written to Redis have the same type.

    +
  • Q: What should I do if Flink job logs contain the following error information?
    org.apache.flink.addons.redis.core.exception.RedisConnectorException: Wrong Redis schema for 'map' syntax: There should be a key (possibly) and 1 MAP non-key column.
    +

    A: When schema-syntax is map, the table creation statement in Flink can contain only one non-primary key column, and the column type must be map.

    +
  • Q: What should I do if Flink job logs contain the following error information?
    org.apache.flink.addons.redis.core.exception.RedisConnectorException: Wrong Redis schema for 'array' syntax: There should be a key (possibly) and 1 ARRAY non-key column.
    +

    A: When schema-syntax is array, the table creation statement in Flink can contain only one non-primary key column, and the column type must be array.

    +
  • Q: What is the function of schema-syntax since data-type has been set?

    A: schema-syntax is used to process special types, such as map and array.

    +
    • If it is set to fields, the value of each field is processed. If it is set to array or map, each element in the field is processed. For fields, the field value of the map or array type is directly used as a value in Redis.
    • For array or map, each element in the array or each value in the map is used as a Redis value. array-scores is used to process the sorted-set data type: two array fields are used, where the first holds the values in the set and the second holds the corresponding scores. fields-scores is also used to process the sorted-set data type, indicating that the score is derived from the defined fields: excluding the primary key, each odd-numbered field holds a value in the set, and the field that follows it holds that value's score, so that following field must be of the double type.
    +
  • Q: If data-type is hash, what are the differences between schema-syntax set to fields and that to map?

    A: When fields is used, the field name in Flink is used as the field of the Redis hash data type, and the value of that field is used as the value of the hash data type in Redis. When map is used, the key of the map field in Flink is used as the field of the Redis hash data type, and the corresponding map value is used as the value of the hash data type in Redis. The following is an example:

    +
    • For fields:
      1. The execution script of the Flink job is as follows:
        CREATE TABLE orders (
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string
        +) WITH (
        +  'connector' = 'kafka',
        +  'topic' = 'kafkaTopic',
        +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
        +  'properties.group.id' = 'GroupId',
        +  'scan.startup.mode' = 'latest-offset',
        +  'format' = 'json'
        +);
        +
        +CREATE TABLE redisSink (
        +  order_id string,
        +  maptest Map<string, String>,
        +  primary key (order_id) not enforced
        +) WITH (
        +  'connector' = 'redis',
        +  'host' = 'RedisIP',
        +  'password' = 'RedisPassword',
        +  'deploy-mode' = 'master-replica',
        +  'schema-syntax' = 'fields'
        +);
        +
        +insert into redisSink select order_id, Map[user_id, area_id] from orders;
        +
      2. Connect to the Kafka cluster and insert the following test data into the Kafka topic:
        {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
        +
      3. In the Redis, the result is as follows:
        1) "maptest"
        +2) "{0001=330106}"
        +
      +
    • For map:
      1. The execution script of the Flink job is as follows:
        CREATE TABLE orders (
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string
        +) WITH (
        +  'connector' = 'kafka',
        +  'topic' = 'kafkaTopic',
        +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
        +  'properties.group.id' = 'GroupId',
        +  'scan.startup.mode' = 'latest-offset',
        +  'format' = 'json'
        +);
        +
        +CREATE TABLE redisSink (
        +  order_id string,
        +  maptest Map<string, String>,
        +  primary key (order_id) not enforced
        +) WITH (
        +  'connector' = 'redis',
        +  'host' = 'RedisIP',
        +  'password' = 'RedisPassword',
        +  'deploy-mode' = 'master-replica',
        +  'schema-syntax' = 'map'
        +);
        +
        +insert into redisSink select order_id, Map[user_id, area_id] from orders;
        +
      2. Connect to the Kafka cluster and insert the following test data into the Kafka topic:
        {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
        +
      3. In the Redis, the result is as follows:
        1) "0001"
        +2) "330106"
        +
      +
    +
+
+
  • Q: If data-type is list, what are the differences between schema-syntax set to fields and that to array?

    A: Setting fields or array does not produce different results. The only difference lies in the Flink table creation statement: fields allows multiple fields of different types, whereas array requires a single field of the array type whose elements all share the same data type. Therefore, fields is more flexible.

    +
    • For fields:
      1. The execution script of the Flink job is as follows:
        CREATE TABLE orders (
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string
        +) WITH (
        +  'connector' = 'kafka',
        +  'topic' = 'kafkaTopic',
        +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
        +  'properties.group.id' = 'GroupId',
        +  'scan.startup.mode' = 'latest-offset',
        +  'format' = 'json'
        +);
        +
        +CREATE TABLE redisSink (
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string, 
        +  primary key (order_id) not enforced
        +) WITH (
        +  'connector' = 'redis',
        +  'host' = 'RedisIP',
        +  'password' = 'RedisPassword',
        +  'data-type' = 'list',
        +  'deploy-mode' = 'master-replica',
        +  'schema-syntax' = 'fields'
        +);
        +
        +insert into redisSink select * from orders;
        +
      2. Connect to the Kafka cluster and insert the following test data into the Kafka topic:
        {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
        +
      3. View the result.

        Run the following command in Redis:

        +
        LRANGE 202103241000000001 0 8
        +
        The command output is as follows:
        1) "webShop"
        +2) "2021-03-24 10:00:00"
        +3) "100.0"
        +4) "100.0"
        +5) "2021-03-24 10:02:03"
        +6) "0001"
        +7) "Alice"
        +8) "330106"
        +
        +
      +
    • For array:
      1. The execution script of the Flink job is as follows:
        CREATE TABLE orders (
        +  order_id string,
        +  order_channel string,
        +  order_time string,
        +  pay_amount double,
        +  real_pay double,
        +  pay_time string,
        +  user_id string,
        +  user_name string,
        +  area_id string
        +) WITH (
        +  'connector' = 'kafka',
        +  'topic' = 'kafkaTopic',
        +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
        +  'properties.group.id' = 'GroupId',
        +  'scan.startup.mode' = 'latest-offset',
        +  'format' = 'json'
        +);
        +
        +CREATE TABLE redisSink (
        +  order_id string,
        +  arraytest Array<String>,
        +  primary key (order_id) not enforced
        +) WITH (
        +  'connector' = 'redis',
        +  'host' = 'RedisIP',
        +  'password' = 'RedisPassword',
        +  'data-type' = 'list',
        +  'deploy-mode' = 'master-replica',
        +  'schema-syntax' = 'array'
        +);
        +
        +insert into redisSink select order_id, array[order_channel,order_time,pay_time,user_id,user_name,area_id] from orders;
        +
      2. Connect to the Kafka cluster and insert the following test data into the Kafka topic:
        {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
        +
      3. In Redis, view the result. (The result is different from that of fields because data of the double type is not added to the table creation statement of the sink in Flink. Therefore, two values are missing. This is not caused by the difference between fields and array.)
        1) "webShop"
        +2) "2021-03-24 10:00:00"
        +3) "2021-03-24 10:02:03"
        +4) "0001"
        +5) "Alice"
        +6) "330106"
        +
      +
    +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0401.html b/docs/dli/sqlreference/dli_08_0401.html new file mode 100644 index 00000000..fc57477b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0401.html @@ -0,0 +1,224 @@ + + +

Upsert Kafka Result Table

+

Function

Apache Kafka is a fast, scalable, and fault-tolerant distributed message publishing and subscription system. It delivers high throughput and built-in partitions and provides data replicas and fault tolerance. Apache Kafka is suited to scenarios that involve massive volumes of messages. DLI outputs the Flink job output data to Kafka in upsert mode.

+

The Upsert Kafka connector allows for reading data from and writing data into Kafka topics in the upsert fashion.

+

As a sink, the Upsert Kafka connector can consume a changelog stream. It writes INSERT/UPDATE_AFTER data as normal Kafka message values, and writes DELETE data as Kafka messages with null values (tombstones for the keys). Flink guarantees message ordering on the primary key by partitioning data on the values of the primary key columns, so UPDATE/DELETE messages with the same key fall into the same partition.

+
+

Prerequisites

  • You have created a Kafka cluster.
  • An enhanced datasource connection has been created for DLI to connect to Kafka clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

  • When creating a Flink OpenSource SQL job, you need to set Flink Version to 1.12 on the Running Parameters tab of the job editing page, select Save Job Log, and set the OBS bucket for saving job logs.
  • For details about how to use data types, see section Format.
  • The Upsert Kafka connector always works in upsert mode and requires a primary key to be defined in the DDL.
  • By default, an Upsert Kafka sink ingests data with at-least-once guarantees into a Kafka topic if the query is executed with checkpointing enabled. This means that Flink may write duplicate records with the same key into the Kafka topic. Because the connector works in upsert mode, such duplicates are interpreted as updates on the same key, so the writes are effectively idempotent.
+
+

Syntax

create table kafkaSink(
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (','PRIMARY KEY (attr_name, ...) NOT ENFORCED)
+)
+with (
+  'connector' = 'upsert-kafka',
+  'topic' = '',
+  'properties.bootstrap.servers' = '',
+  'key.format' = '',
+  'value.format' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Type

+

Description

+

connector

+

Yes

+

(none)

+

String

+

Connector to be used. Set this parameter to upsert-kafka.

+

topic

+

Yes

+

(none)

+

String

+

Kafka topic name.

+

properties.bootstrap.servers

+

Yes

+

(none)

+

String

+

Comma-separated list of Kafka brokers.

+

key.format

+

Yes

+

(none)

+

String

+

Format used to deserialize and serialize the key part of Kafka messages. The key fields are specified by the PRIMARY KEY syntax. The following formats are supported:

+
  • csv
  • json
  • avro
+

Refer to Format for more details and format parameters.

+

key.fields-prefix

+

No

+

(none)

+

String

+

Defines a custom prefix for all fields of the key format to avoid name clashes with fields of the value format.

+

By default, the prefix is empty. If a custom prefix is defined, both the table schema and key.fields work with prefixed names. When constructing the data type of the key format, the prefix is removed and the non-prefixed names are used within the key format. Note that this option requires value.fields-include to be set to EXCEPT_KEY.

+

value.format

+

Yes

+

(none)

+

String

+

Format used to deserialize and serialize the value part of Kafka messages. The following formats are supported:

+
  • csv
  • json
  • avro
+

Refer to Format for more details and format parameters.

+

value.fields-include

+

No

+

'ALL'

+

String

+

Controls which fields should appear in the value part. Options:

+
  • ALL: All fields in the schema, including the primary key field, are included in the value part.
  • EXCEPT_KEY: All the fields of the table schema are included, except the primary key field.
+

sink.parallelism

+

No

+

(none)

+

Integer

+

Defines the parallelism of the Upsert Kafka sink operator. By default, the parallelism is determined by the framework using the same parallelism of the upstream chained operator.

+

properties.*

+

No

+

(none)

+

String

+

Use this option to set and pass arbitrary Kafka configurations.

+

The suffix of this parameter must match a parameter defined in the Kafka Configuration documentation. Flink removes the 'properties.' key prefix and passes the transformed key and value to the underlying KafkaClient.

+

For example, you can disable automatic topic creation via 'properties.allow.auto.create.topics' = 'false'. However, some configurations cannot be set this way because Flink overrides them, for example, 'key.deserializer' and 'value.deserializer'.

+
+
+
+
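The following minimal sketch shows how key.fields-prefix interacts with value.fields-include as described above; the topic, broker address, and the key_ prefix are illustrative assumptions:

create table upsertKafkaSink (
  key_order_id string, -- Serialized into the message key as "order_id" once the "key_" prefix is stripped.
  pay_amount double,
  primary key (key_order_id) not enforced
) with (
  'connector' = 'upsert-kafka',
  'topic' = 'KafkaTopic',
  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
  'key.format' = 'json',
  'key.fields-prefix' = 'key_',
  'value.format' = 'json',
  'value.fields-include' = 'EXCEPT_KEY' -- Required when key.fields-prefix is set; key fields are excluded from the value.
);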

Example

In this example, Kafka source topic data is read from the Kafka source table and written to the Kafka sink topic through the Upsert Kafka result table.

+
  1. Create an enhanced datasource connection in the VPC and subnet where Kafka is located, and bind the connection to the required Flink elastic resource pool.
  2. Set Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  3. Create a Flink OpenSource SQL job. Enter the following job script and submit the job.
    When you create a job, set Flink Version to 1.12 on the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold as needed in the following script.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  "format" = "json"
    +);
    +CREATE TABLE UPSERTKAFKASINK (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  PRIMARY KEY (order_id) NOT ENFORCED
    +) WITH (
    +  'connector' = 'upsert-kafka',
    +  'topic' = 'KafkaTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'key.format' = 'json',
    +  'value.format' = 'json'
    +);
    +insert into UPSERTKAFKASINK
    +select * from orders;
    +
    +
  4. Connect to the Kafka cluster and send the following test data to the Kafka source topic:
    {"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
    +{"order_id":"202103251505050001", "order_channel":"qqShop", "order_time":"2021-03-25 15:05:05", "pay_amount":"500.00", "real_pay":"400.00", "pay_time":"2021-03-25 15:10:00", "user_id":"0003", "user_name":"Cindy", "area_id":"330108"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
  5. Connect to the Kafka cluster and read data from the Kafka sink topic. The result is as follows:
    {"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
    +{"order_id":"202103251505050001", "order_channel":"qqShop", "order_time":"2021-03-25 15:05:05", "pay_amount":"500.00", "real_pay":"400.00", "pay_time":"2021-03-25 15:10:00", "user_id":"0003", "user_name":"Cindy", "area_id":"330108"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
+
+

FAQ

None

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0402.html b/docs/dli/sqlreference/dli_08_0402.html new file mode 100644 index 00000000..d2015c5f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0402.html @@ -0,0 +1,21 @@ + + +

Creating Dimension Tables

+
+
+ diff --git a/docs/dli/sqlreference/dli_08_0403.html b/docs/dli/sqlreference/dli_08_0403.html new file mode 100644 index 00000000..d85cd575 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0403.html @@ -0,0 +1,358 @@ + + +

GaussDB(DWS) Dimension Table

+

Function

Create a GaussDB(DWS) table to connect to source streams for wide table generation.

+
+

Prerequisites

  • Ensure that you have created a GaussDB(DWS) cluster using your account.
  • A GaussDB(DWS) database table has been created.
  • An enhanced datasource connection has been created for DLI to connect to DWS clusters, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
+
+

Precautions

When you create a Flink OpenSource SQL job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs.

+
+

Syntax

create table dwsSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+with (
+  'connector' = 'gaussdb',
+  'url' = '',
+  'table-name' = '',
+  'username' = '',
+  'password' = ''
+);
+
+ +
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Types

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector type. Set this parameter to gaussdb.

+

url

+

Yes

+

None

+

String

+

JDBC connection address.

+

If you use the gsjdbc4 driver, set the value in jdbc:postgresql://${ip}:${port}/${dbName} format.

+

If you use the gsjdbc200 driver, set the value in jdbc:gaussdb://${ip}:${port}/${dbName} format.

+

table-name

+

Yes

+

None

+

String

+

Name of the table in the database from which data will be read

+

driver

+

No

+

None

+

String

+

JDBC connection driver. The default value is org.postgresql.Driver.

+

username

+

No

+

None

+

String

+

Database authentication user name. This parameter must be configured together with password.

+

password

+

No

+

None

+

String

+

Database authentication password. This parameter must be configured together with username.

+

scan.partition.column

+

No

+

None

+

String

+

Name of the column used to partition the input

+

This parameter must be set when scan.partition.lower-bound, scan.partition.upper-bound, and scan.partition.num are all configured, and must not be set when the other three parameters are not.

+

scan.partition.lower-bound

+

No

+

None

+

Integer

+

Lower bound of values to be fetched for the first partition

+

This parameter must be set when scan.partition.column, scan.partition.upper-bound, and scan.partition.num are all configured, and must not be set when the other three parameters are not.

+

scan.partition.upper-bound

+

No

+

None

+

Integer

+

Upper bound of values to be fetched for the last partition

+

This parameter must be set when scan.partition.column, scan.partition.lower-bound, and scan.partition.num are all configured, and must not be set when the other three parameters are not.

+

scan.partition.num

+

No

+

None

+

Integer

+

Number of partitions to be created

+

This parameter must be set when scan.partition.column, scan.partition.lower-bound, and scan.partition.upper-bound are all configured, and must not be set when the other three parameters are not.

+

scan.fetch-size

+

No

+

0

+

Integer

+

Number of rows fetched from the database each time. The default value 0 indicates that the number of rows is not limited.

+

scan.auto-commit

+

No

+

true

+

Boolean

+

Automatic commit flag.

+

It determines whether each statement is committed in a transaction automatically.

+

lookup.cache.max-rows

+

No

+

None

+

Integer

+

Maximum number of rows in the lookup cache. Rows that exceed the TTL are expired.

+

Lookup cache is disabled by default.

+

lookup.cache.ttl

+

No

+

+

None

+

Duration

+

Maximum time to live (TTL) for each row in the lookup cache. Rows that exceed the TTL are expired. The format is {length value}{time unit label}, for example, 123ms or 321s. The supported time units include d, h, min, s, and ms (default unit).

+

Lookup cache is disabled by default.

+

lookup.max-retries

+

No

+

3

+

Integer

+

Maximum number of retries if a database lookup fails.

+
+
+
+
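As a sketch of the partitioned scan parameters described above (connection values and the id column are placeholder assumptions), the following reads the dimension table in four parallel partitions split on id over the range 1 to 1000; note that the four scan.partition.* parameters must be configured together:

create table dwsSource (
  id int,
  name string
) with (
  'connector' = 'gaussdb',
  'url' = 'jdbc:postgresql://DwsAddress:DwsPort/DwsDbName',
  'table-name' = 'test_table',
  'username' = 'DwsUserName',
  'password' = 'DwsPassword',
  'scan.partition.column' = 'id', -- Partitioning column; set together with the three parameters below.
  'scan.partition.lower-bound' = '1',
  'scan.partition.upper-bound' = '1000',
  'scan.partition.num' = '4'
);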

Example

Read data from a Kafka source table, use a GaussDB(DWS) table as the dimension table, and write the wide table generated from the source and dimension tables to a Kafka result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where GaussDB(DWS) and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Set GaussDB(DWS) and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the GaussDB(DWS) and Kafka addresses. If the connections pass the test, the datasources are bound to the queue.
  3. Connect to the GaussDB(DWS) database instance, create a table as a dimension table, and name the table area_info. Example SQL statements are as follows:
    create table public.area_info(
    +  area_id VARCHAR,
    +  area_province_name VARCHAR,
    +  area_city_name VARCHAR,
    +  area_county_name VARCHAR,
    +  area_street_name VARCHAR,
    +  region_name VARCHAR);
    +
  4. Connect to the database and run the following statement to insert test data into the dimension table area_info:
      insert into area_info
    +  (area_id, area_province_name, area_city_name, area_county_name, area_street_name, region_name) 
    +  values
    +  ('330102', 'a1', 'b1', 'c1', 'd1', 'e1'),
    +  ('330106', 'a1', 'b1', 'c2', 'd2', 'e1'),
    +  ('330108', 'a1', 'b1', 'c3', 'd3', 'e1'),
    +  ('330110', 'a1', 'b1', 'c4', 'd4', 'e1');
    +
  5. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses Kafka as the data source and a GaussDB(DWS) table as the dimension table. Data is output to a Kafka result table.
    When you create a job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Set the values of the parameters in bold in the following script as needed.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  proctime as Proctime()
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaSourceTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'dws-order',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +-- Create an address dimension table
    +create table area_info (
    +    area_id string, 
    +    area_province_name string, 
    +    area_city_name string, 
    +    area_county_name string,
    +    area_street_name string, 
    +    region_name string 
    +) WITH (
    +  'connector' = 'gaussdb',
    +  'driver' = 'org.postgresql.Driver',
    +  'url' = 'jdbc:gaussdb://DwsAddress:DwsPort/DwsDbName',
    +  'table-name' = 'area_info',
    +  'username' = 'DwsUserName',
    +  'password' = 'DwsPassword',
    +  'lookup.cache.max-rows' = '10000',
    +  'lookup.cache.ttl' = '2h'
    +);
    +
    +-- Generate a wide table based on the address dimension table containing detailed order information.
    +create table order_detail(
    +    order_id string,
    +    order_channel string,
    +    order_time string,
    +    pay_amount double,
    +    real_pay double,
    +    pay_time string,
    +    user_id string,
    +    user_name string,
    +    area_id string,
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string,
    +    area_street_name string,
    +    region_name string
    +) with (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaSinkTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'format' = 'json'
    +);
    +
    +insert into order_detail
    +    select orders.order_id, orders.order_channel, orders.order_time, orders.pay_amount, orders.real_pay, orders.pay_time, orders.user_id, orders.user_name,
    +           area.area_id, area.area_province_name, area.area_city_name, area.area_county_name,
    +           area.area_street_name, area.region_name  from orders
    +    left join area_info for system_time as of orders.proctime as area on orders.area_id = area.area_id;
    +
    +
  6. Connect to the Kafka cluster and insert the following test data into the source topic in Kafka:
    {"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
    +{"order_id":"202103251505050001", "order_channel":"qqShop", "order_time":"2021-03-25 15:05:05", "pay_amount":"500.00", "real_pay":"400.00", "pay_time":"2021-03-25 15:10:00", "user_id":"0003", "user_name":"Cindy", "area_id":"330108"}
    +
  7. Connect to the Kafka cluster and read data from the sink topic of Kafka. The result is as follows:
    {"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106","area_province_name":"a1","area_city_name":"b1","area_county_name":"c2","area_street_name":"d2","region_name":"e1"}
    +
    +{"order_id":"202103251202020001","order_channel":"miniAppShop","order_time":"2021-03-25 12:02:02","pay_amount":60.0,"real_pay":60.0,"pay_time":"2021-03-25 12:03:00","user_id":"0002","user_name":"Bob","area_id":"330110","area_province_name":"a1","area_city_name":"b1","area_county_name":"c4","area_street_name":"d4","region_name":"e1"}
    +
    +{"order_id":"202103251505050001","order_channel":"qqShop","order_time":"2021-03-25 15:05:05","pay_amount":500.0,"real_pay":400.0,"pay_time":"2021-03-25 15:10:00","user_id":"0003","user_name":"Cindy","area_id":"330108","area_province_name":"a1","area_city_name":"b1","area_county_name":"c3","area_street_name":"d3","region_name":"e1"}
    +
+
+

FAQs

  • Q: What should I do if Flink job logs contain the following error information?
    java.io.IOException: unable to open JDBC writer
    +...
    +Caused by: org.postgresql.util.PSQLException: The connection attempt failed.
    +...
    +Caused by: java.net.SocketTimeoutException: connect timed out
    +
    A: The datasource connection is not bound or the binding fails. Configure the datasource connection or configure the security group of the GaussDB(DWS) cluster to allow access from the DLI queue.
    +
+
  • Q: How can I configure a GaussDB(DWS) table that is in a schema?
    A: The following example configures the area_info table in the dbuser2 schema.
    -- Create an address dimension table
    +create table area_info (
    +    area_id string, 
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string,
    +    area_street_name string, 
    +    region_name string 
    +) WITH (
    + 'connector' = 'gaussdb',
    +  'driver' = 'org.postgresql.Driver',
    +  'url' = 'jdbc:postgresql://DwsAddress:DwsPort/DwsDbname',
    +  'table-name' = 'dbuser2.area_info',
    +  'username' = 'DwsUserName',
    +  'password' = 'DwsPassword',
    +  'lookup.cache.max-rows' = '10000',
    +  'lookup.cache.ttl' = '2h'
    +);
    +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0404.html b/docs/dli/sqlreference/dli_08_0404.html new file mode 100644 index 00000000..6e676394 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0404.html @@ -0,0 +1,337 @@ + + +

HBase Dimension Table

+

Function

Create an HBase dimension table to connect to the source streams for wide table generation.

+
+

Prerequisites

  • An enhanced datasource connection has been created for DLI to connect to HBase, so that jobs can run on the dedicated queue of DLI and you can set the security group rules as required. +
  • If MRS HBase is used, IP addresses of all hosts in the MRS cluster have been added to the host information of the enhanced datasource connection.

    +

    For details, see section "Modifying the Host Information" in the Data Lake Insight User Guide.

    +
+
+

Precautions

  • When you create a Flink OpenSource SQL job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs.
  • Do not enable Kerberos authentication for the HBase cluster.
  • All column families in the HBase table must be declared as ROW type; the field name maps to the column family name, and the nested field names map to the column qualifier names. There is no need to declare all the families and qualifiers in the schema; you can declare only those used in the query. Apart from the ROW type fields, the single field of an atomic type (for example, STRING or BIGINT) is recognized as the HBase rowkey. The rowkey field can have an arbitrary name, but must be quoted using backticks if it is a reserved keyword. A minimal declaration sketch follows these precautions.
+
+
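The following minimal sketch illustrates the declaration rule above; the table name and family name mirror the example later in this section, and the schema itself is an illustrative assumption:

create table hbaseDim (
  rowkey string, -- The single atomic-type field is recognized as the HBase rowkey.
  detail row( -- Column family "detail" is declared as ROW type.
    area_city_name string, -- Nested field names map to column qualifier names.
    region_name string)
) with (
  'connector' = 'hbase-2.2',
  'table-name' = 'area_info',
  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort'
);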

Syntax

create table hbaseSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+ )
+with (
+  'connector' = 'hbase-2.2',
+  'table-name' = '',
+  'zookeeper.quorum' = ''
+);
+
+

Parameters

+
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector type. Set this parameter to hbase-2.2.

+

table-name

+

Yes

+

None

+

String

+

Name of the HBase table

+

zookeeper.quorum

+

Yes

+

None

+

String

+

HBase Zookeeper quorum. The format is ZookeeperAddress:ZookeeperPort.

+

The following describes how to obtain the ZooKeeper IP address and port number:

+
  • On the MRS Manager console, choose Cluster > Name of the desired cluster > Service > ZooKeeper > Instance. On the displayed page, obtain the IP address of the ZooKeeper instance.
  • On the MRS Manager console, choose Cluster > Name of the desired cluster > Service > ZooKeeper > Configuration, and click All Configurations. Search for the clientPort parameter, and obtain the ZooKeeper port number.
+

zookeeper.znode.parent

+

No

+

/hbase

+

String

+

Root directory in ZooKeeper for the HBase cluster.

+

lookup.async

+

No

+

false

+

Boolean

+

Whether async lookup is enabled.

+

lookup.cache.max-rows

+

No

+

-1

+

Long

+

Maximum number of rows in the lookup cache. Rows that exceed the TTL are expired.

+

Lookup cache is disabled by default.

+

lookup.cache.ttl

+

No

+

-1

+

Long

+

Maximum time to live (TTL) for each row in the lookup cache. Rows that exceed the TTL are expired. The format is {length value}{time unit label}, for example, 123ms or 321s. The supported time units include d, h, min, s, and ms (default unit).

+

Lookup cache is disabled by default.

+

lookup.max-retries

+

No

+

3

+

Integer

+

Maximum number of retries if a database lookup fails.

+
+
+
+

Data Type Mapping

HBase stores all data as byte arrays. The data needs to be serialized and deserialized during read and write operations.

+

When serializing and deserializing, the Flink HBase connector uses the utility class org.apache.hadoop.hbase.util.Bytes provided by HBase (Hadoop) to convert Flink data types to and from byte arrays.

+

The Flink HBase connector encodes null values as empty bytes and decodes empty bytes as null values for all data types except the string type. For the string type, the null literal is determined by the null-string-literal option.

+ +
Table 2 Data type mapping

Flink SQL Type

+

HBase Conversion

+

CHAR / VARCHAR / STRING

+

byte[] toBytes(String s)

+

String toString(byte[] b)

+

BOOLEAN

+

byte[] toBytes(boolean b)

+

boolean toBoolean(byte[] b)

+

BINARY / VARBINARY

+

Return byte[] as is.

+

DECIMAL

+

byte[] toBytes(BigDecimal v)

+

BigDecimal toBigDecimal(byte[] b)

+

TINYINT

+

new byte[] { val }

+

bytes[0] // returns first and only byte from bytes

+

SMALLINT

+

byte[] toBytes(short val)

+

short toShort(byte[] bytes)

+

INT

+

byte[] toBytes(int val)

+

int toInt(byte[] bytes)

+

BIGINT

+

byte[] toBytes(long val)

+

long toLong(byte[] bytes)

+

FLOAT

+

byte[] toBytes(float val)

+

float toFloat(byte[] bytes)

+

DOUBLE

+

byte[] toBytes(double val)

+

double toDouble(byte[] bytes)

+

DATE

+

Number of days since 1970-01-01 00:00:00 UTC. The value is an integer.

+

TIME

+

Number of milliseconds since 1970-01-01 00:00:00 UTC. The value is an integer.

+

TIMESTAMP

+

Number of milliseconds since 1970-01-01 00:00:00 UTC. The value is of the long type.

+

ARRAY

+

Not supported

+

MAP / MULTISET

+

Not supported

+

ROW

+

Not supported

+
+
+
+
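As a hedged sketch of the null handling described above, the open-source Flink HBase connector exposes a null-string-literal option for this purpose (its availability here is an assumption, as it is not listed in Table 1):

create table hbaseDim (
  rowkey string,
  detail row(region_name string)
) with (
  'connector' = 'hbase-2.2',
  'table-name' = 'area_info',
  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort',
  'null-string-literal' = 'n/a' -- String cells equal to "n/a" decode to null; null strings encode as "n/a".
);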

Example

In this example, data is read from a Kafka data source, an HBase table is used as a dimension table to generate a wide table, and the result is written to a Kafka result table. The procedure is as follows (the HBase versions in this example are 1.3.1 and 2.2.3):

+
  1. Create an enhanced datasource connection in the VPC and subnet where HBase and Kafka are located, and bind the connection to the required Flink elastic resource pool. Add MRS host information for the enhanced datasource connection.
  2. Set HBase and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the HBase and Kafka address. If the connection passes the test, it is bound to the queue.
  3. Create an HBase table named area_info using the HBase shell. The table has only one column family, detail. The creation statement is as follows:
    create 'area_info', {NAME => 'detail'}
    +
  4. Run the following statement in the HBase shell to insert dimension table data:
    put 'area_info', '330106', 'detail:area_province_name', 'a1'
    +put 'area_info', '330106', 'detail:area_city_name', 'b1'
    +put 'area_info', '330106', 'detail:area_county_name', 'c2'
    +put 'area_info', '330106', 'detail:area_street_name', 'd2'
    +put 'area_info', '330106', 'detail:region_name', 'e1'
    +
    +put 'area_info', '330110', 'detail:area_province_name', 'a1'
    +put 'area_info', '330110', 'detail:area_city_name', 'b1'
    +put 'area_info', '330110', 'detail:area_county_name', 'c4'
    +put 'area_info', '330110', 'detail:area_street_name', 'd4'
    +put 'area_info', '330110', 'detail:region_name', 'e1'
    +
  5. Create a Flink OpenSource SQL job. Enter the following job script and submit the job. The job script uses Kafka as the data source and an HBase table as the dimension table. Data is output to a Kafka result table.
    When you create a job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Set the values of the parameters in bold in the following script as needed.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  proctime as Proctime()
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaSourceTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +-- Create an address dimension table
    +create table area_info (
    +  area_id string,   
    +  detail row(
    +    area_province_name string, 
    +    area_city_name string, 
    +    area_county_name string, 
    +    area_street_name string, 
    +    region_name string) 
    +) WITH (
    +  'connector' = 'hbase-2.2',
    +  'table-name' = 'area_info',
    +  'zookeeper.quorum' = 'ZookeeperAddress:ZookeeperPort',
    +  'lookup.async' = 'true',
    +  'lookup.cache.max-rows' = '10000',
    +  'lookup.cache.ttl' = '2h'
    +);
    +
    +-- Generate a wide table based on the address dimension table containing detailed order information.
    +create table order_detail(
    +    order_id string,
    +    order_channel string,
    +    order_time string,
    +    pay_amount double,
    +    real_pay double,
    +    pay_time string,
    +    user_id string,
    +    user_name string,
    +    area_id string,
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string,
    +    area_street_name string,
    +    region_name string
    +) with (
    +  'connector' = 'kafka',
    +  'topic' = '<yourSinkTopic>',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'format' = 'json'
    +);
    +
    +insert into order_detail
    +    select orders.order_id, orders.order_channel, orders.order_time, orders.pay_amount, orders.real_pay, orders.pay_time, orders.user_id, orders.user_name,
    +           area.area_id, area.area_province_name, area.area_city_name, area.area_county_name,
    +           area.area_street_name, area.region_name  from orders
    +    left join area_info for system_time as of orders.proctime as area on orders.area_id = area.area_id;
    +
    +
  6. Connect to the Kafka cluster and insert the following test data into the source topic in Kafka:
    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
  7. Connect to the Kafka cluster and read data from the sink topic of Kafka. The result data is as follows:
    {"order_id":"202103241000000001","order_channel":"webShop","order_time":"2021-03-24 10:00:00","pay_amount":100.0,"real_pay":100.0,"pay_time":"2021-03-24 10:02:03","user_id":"0001","user_name":"Alice","area_id":"330106","area_province_name":"a1","area_city_name":"b1","area_county_name":"c2","area_street_name":"d2","region_name":"e1"}
    +
    +{"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106","area_province_name":"a1","area_city_name":"b1","area_county_name":"c2","area_street_name":"d2","region_name":"e1"}
    +
    +{"order_id":"202103251202020001","order_channel":"miniAppShop","order_time":"2021-03-25 12:02:02","pay_amount":60.0,"real_pay":60.0,"pay_time":"2021-03-25 12:03:00","user_id":"0002","user_name":"Bob","area_id":"330110","area_province_name":"a1","area_city_name":"b1","area_county_name":"c4","area_street_name":"d4","region_name":"e1"}
    +
+
+

FAQs

Q: What should I do if Flink job logs contain the following error information?

+
org.apache.zookeeper.ClientCnxn$SessionTimeoutException: Client session timed out, have not heard from server in 90069ms for connection id 0x0
+

A: The datasource connection is not bound or the binding fails. Configure the datasource connection or configure the security group of the HBase cluster to allow access from the DLI queue.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0405.html b/docs/dli/sqlreference/dli_08_0405.html new file mode 100644 index 00000000..0aab9b20 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0405.html @@ -0,0 +1,469 @@ + + +

JDBC Dimension Table

+

Create a JDBC dimension table to connect to the source stream.

+

Prerequisites

You have created a JDBC instance for your account.

+
+

Precautions

When you create a Flink OpenSource SQL job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs.

+
+

Syntax

CREATE TABLE table_id (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+)
+  WITH (
+  'connector' = 'jdbc',
+  'url' = '',
+  'table-name' = '',
+  'driver' = '',
+  'username' = '',
+  'password' = ''
+);
+
+ +
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter descriptions

Parameter

+

Mandatory

+

Description

+

connector

+

Yes

+

Data source type. The value is fixed to jdbc.

+

url

+

Yes

+

Database URL

+

table-name

+

Yes

+

Name of the table in the database from which data is read

+

driver

+

No

+

Driver required for connecting to the database. If you do not set this parameter, it is automatically derived from the URL.

+

username

+

No

+

Database authentication user name. This parameter must be configured in pair with password.

+

password

+

No

+

Database authentication password. This parameter must be configured in pair with username.

+

scan.partition.column

+

No

+

Name of the column used to partition the input

+

This parameter must be set together with scan.partition.lower-bound, scan.partition.upper-bound, and scan.partition.num; either all four partition parameters are configured or none of them is (see the sketch after this table).

+

scan.partition.lower-bound

+

No

+

Lower bound of values to be fetched for the first partition

+

This parameter must be set together with scan.partition.column, scan.partition.upper-bound, and scan.partition.num; either all four partition parameters are configured or none of them is.

+

scan.partition.upper-bound

+

No

+

Upper bound of values to be fetched for the last partition

+

This parameter must be set together with scan.partition.column, scan.partition.lower-bound, and scan.partition.num; either all four partition parameters are configured or none of them is.

+

scan.partition.num

+

No

+

Number of partitions to be created

+

This parameter must be set together with scan.partition.column, scan.partition.lower-bound, and scan.partition.upper-bound; either all four partition parameters are configured or none of them is.

+

scan.fetch-size

+

No

+

Number of rows fetched from the database each time. The default value is 0, indicating the hint is ignored.

+

lookup.cache.max-rows

+

No

+

Maximum number of rows cached for the dimension table. When the number of cached rows exceeds this value, the oldest rows are evicted. The value -1 disables caching.

+

lookup.cache.ttl

+

No

+

Maximum time to live (TTL) of each row in the lookup cache. Rows older than the TTL are expired. The format is {length value}{time unit label}, for example, 123ms or 321s. The supported time units are d, h, min, s, and ms (the default unit).

+

lookup.max-retries

+

No

+

Maximum number of attempts to obtain data from the dimension table. The default value is 3.

+
+
+
+
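The following is a minimal sketch of how the partitioned scan and lookup cache options fit together. The connection values, table name, and the numeric id column are placeholders for illustration; all four scan.partition options must be configured as a group:

  create table jdbcDim (
    id int,
    name string
  ) WITH (
    'connector' = 'jdbc',
    'url' = 'jdbc:mysql://JDBCAddress:JDBCPort/flink',
    'table-name' = 'dim_table',
    'username' = 'JDBCUserName',
    'password' = 'JDBCPassWord',
    -- Parallel scan: split the read into 4 partitions over id values 0 to 10000.
    'scan.partition.column' = 'id',
    'scan.partition.lower-bound' = '0',
    'scan.partition.upper-bound' = '10000',
    'scan.partition.num' = '4',
    -- Lookup cache: keep at most 10000 rows, each for at most 1 hour.
    'lookup.cache.max-rows' = '10000',
    'lookup.cache.ttl' = '1h'
  );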

Data Type Mapping

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Data type mapping

MySQL Type

+

PostgreSQL Type

+

Flink SQL Type

+

TINYINT

+

-

+

TINYINT

+

SMALLINT

+

TINYINT UNSIGNED

+

SMALLINT

+

INT2

+

SMALLSERIAL

+

SERIAL2

+

SMALLINT

+

INT

+

MEDIUMINT

+

SMALLINT UNSIGNED

+

INTEGER

+

SERIAL

+

INT

+

BIGINT

+

INT UNSIGNED

+

BIGINT

+

BIGSERIAL

+

BIGINT

+

BIGINT UNSIGNED

+

-

+

DECIMAL(20, 0)

+

BIGINT

+

BIGINT

+

BIGINT

+

FLOAT

+

REAL

+

FLOAT4

+

FLOAT

+

DOUBLE

+

DOUBLE PRECISION

+

FLOAT8

+

DOUBLE PRECISION

+

DOUBLE

+

NUMERIC(p, s)

+

DECIMAL(p, s)

+

NUMERIC(p, s)

+

DECIMAL(p, s)

+

DECIMAL(p, s)

+

BOOLEAN

+

TINYINT(1)

+

BOOLEAN

+

BOOLEAN

+

DATE

+

DATE

+

DATE

+

TIME [(p)]

+

TIME [(p)] [WITHOUT TIMEZONE]

+

TIME [(p)] [WITHOUT TIMEZONE]

+

DATETIME [(p)]

+

TIMESTAMP [(p)] [WITHOUT TIMEZONE]

+

TIMESTAMP [(p)] [WITHOUT TIMEZONE]

+

CHAR(n)

+

VARCHAR(n)

+

TEXT

+

CHAR(n)

+

CHARACTER(n)

+

VARCHAR(n)

+

CHARACTER VARYING(n)

+

TEXT

+

STRING

+

BINARY

+

VARBINARY

+

BLOB

+

BYTEA

+

BYTES

+

-

+

ARRAY

+

ARRAY

+
+
+
+
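As a quick illustration of the mapping table, assume a MySQL table with INT, NUMERIC(10, 2), and DATETIME(3) columns. A matching Flink DDL (connection values are placeholders) would declare the columns as follows:

  create table productDim (
    id int,                  -- MySQL INT maps to Flink INT
    price decimal(10, 2),    -- MySQL NUMERIC(10, 2) maps to Flink DECIMAL(10, 2)
    updated timestamp(3)     -- MySQL DATETIME(3) maps to Flink TIMESTAMP(3)
  ) WITH (
    'connector' = 'jdbc',
    'url' = 'jdbc:mysql://JDBCAddress:JDBCPort/flink',
    'table-name' = 'product'
  );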

Example

Read data from a Kafka source table, use a JDBC table as the dimension table, and write the wide table generated from the source and dimension tables to a Kafka result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where MySQL and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Configure the MySQL and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the MySQL and Kafka addresses. If the connection passes the test, the datasource is bound to the queue.
  3. Connect to the MySQL database instance, create a table in the flink database as a dimension table, and name the table area_info. Example SQL statements are as follows:
    CREATE TABLE `flink`.`area_info` (
    +	`area_id` VARCHAR(32) NOT NULL,
    +	`area_province_name` VARCHAR(32) NOT NULL,
    +	`area_city_name` VARCHAR(32) NOT NULL,
    +	`area_county_name` VARCHAR(32) NOT NULL,
    +	`area_street_name` VARCHAR(32) NOT NULL,
    +	`region_name` VARCHAR(32) NOT NULL,
    +	PRIMARY KEY (`area_id`)
    +)	ENGINE = InnoDB
    +	DEFAULT CHARACTER SET = utf8mb4
    +	COLLATE = utf8mb4_general_ci;
    +
  4. Connect to the MySQL database and run the following statement to insert test data into the JDBC dimension table area_info:
    insert into flink.area_info
    +  (area_id, area_province_name, area_city_name, area_county_name, area_street_name, region_name) 
    +  values
    +  ('330102', 'a1', 'b1', 'c1', 'd1', 'e1'),
    +  ('330106', 'a1', 'b1', 'c2', 'd2', 'e1'),
    +  ('330108', 'a1', 'b1', 'c3', 'd3', 'e1'),
    +  ('330110', 'a1', 'b1', 'c4', 'd4', 'e1');
    +
  5. Create a Flink OpenSource SQL job, enter the following job script, and submit the job. The job script uses Kafka as the data source and a JDBC table as the dimension table. Data is output to a Kafka result table.
    When you create a job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Set the values of the parameters in bold in the following script as needed.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  proctime as Proctime()
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaSourceTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'jdbc-order',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +-- Create an address dimension table
    +create table area_info (
    +    area_id string, 
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string, 
    +    area_street_name string, 
    +    region_name string 
    +) WITH (
    +  'connector' = 'jdbc',
    +  'url' = 'jdbc:mysql://JDBCAddress:JDBCPort/flink', -- flink is the MySQL database where the area_info table is located.
    +  'table-name' = 'area_info',
    +  'username' = 'JDBCUserName',
    +  'password' = 'JDBCPassWord'
    +);
    +
    +-- Generate a wide table based on the address dimension table containing detailed order information.
    +create table order_detail(
    +    order_id string,
    +    order_channel string,
    +    order_time string,
    +    pay_amount double,
    +    real_pay double,
    +    pay_time string,
    +    user_id string,
    +    user_name string,
    +    area_id string,
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string,
    +    area_street_name string,
    +    region_name string
    +) with (
    +  'connector' = 'kafka',
    +  'topic' = 'KafkaSinkTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'format' = 'json'
    +);
    +
    +insert into order_detail
    +    select orders.order_id, orders.order_channel, orders.order_time, orders.pay_amount, orders.real_pay, orders.pay_time, orders.user_id, orders.user_name,
    +           area.area_id, area.area_province_name, area.area_city_name, area.area_county_name,
    +           area.area_street_name, area.region_name  from orders 
    +           left join area_info for system_time as of orders.proctime as area on orders.area_id = area.area_id;
    +
    + +
    +
    +
  6. Connect to the Kafka cluster and insert the following test data into the source topic in Kafka:
    {"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
    +{"order_id":"202103251505050001", "order_channel":"qqShop", "order_time":"2021-03-25 15:05:05", "pay_amount":"500.00", "real_pay":"400.00", "pay_time":"2021-03-25 15:10:00", "user_id":"0003", "user_name":"Cindy", "area_id":"330108"}
    +
  7. Connect to the Kafka cluster and read data from the sink topic of Kafka. The result data is as follows:
    {"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106","area_province_name":"a1","area_city_name":"b1","area_county_name":"c2","area_street_name":"d2","region_name":"e1"}
    +
    +{"order_id":"202103251202020001","order_channel":"miniAppShop","order_time":"2021-03-25 12:02:02","pay_amount":60.0,"real_pay":60.0,"pay_time":"2021-03-25 12:03:00","user_id":"0002","user_name":"Bob","area_id":"330110","area_province_name":"a1","area_city_name":"b1","area_county_name":"c4","area_street_name":"d4","region_name":"e1"}
    +
    +{"order_id":"202103251505050001","order_channel":"qqShop","order_time":"2021-03-25 15:05:05","pay_amount":500.0,"real_pay":400.0,"pay_time":"2021-03-25 15:10:00","user_id":"0003","user_name":"Cindy","area_id":"330108","area_province_name":"a1","area_city_name":"b1","area_county_name":"c3","area_street_name":"d3","region_name":"e1"}
    +
+
+

FAQs

None

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0406.html b/docs/dli/sqlreference/dli_08_0406.html new file mode 100644 index 00000000..2c9597af --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0406.html @@ -0,0 +1,384 @@ + + +

Redis Dimension Table

+

Function

Create a Redis table to connect to source streams for wide table generation.

+
+

Prerequisites

  • An enhanced datasource connection with Redis has been established, so that you can configure security group rules as required. +
+
+

Precautions

  • When you create a Flink OpenSource SQL job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs.
  • To obtain the key values, you can set the primary key in Flink. The primary key maps to the Redis key.
  • The primary key cannot be a composite key; it can contain only one field.
  • Constraints on schema-syntax:
    • If schema-syntax is map or array, there can be only one non-primary key and it must be of the same map or array type.
    • If schema-syntax is fields-scores, the number of non-primary keys must be an even number, and the second key of every two keys except the primary key must be of the double type. The double value is the score of the previous key. The following is an example:
      CREATE TABLE redisSource (
      +  redisKey string,
      +  order_id string,
      +  score1 double,
      +  order_channel string,
      +  score2 double,
      +  order_time string,
      +  score3 double,
      +  pay_amount double,
      +  score4 double,
      +  real_pay double,
      +  score5 double,
      +  pay_time string,
      +  score6 double,
      +  user_id string,
      +  score7 double,
      +  user_name string,
      +  score8 double,
      +  area_id string,
      +  score9 double,
      +  primary key (redisKey) not enforced
      +) WITH (
      +  'connector' = 'redis',
      +  'host' = 'RedisIP',
      +  'password' = 'RedisPassword',
      +  'data-type' = 'sorted-set',
      +  'deploy-mode' = 'master-replica',
      +  'schema-syntax' = 'fields-scores'
      +);
      +
    +
  • Restrictions on data-type:
    • When data-type is set, the types of non-primary keys defined in Flink must be the same.
    • If data-type is sorted-set and schema-syntax is fields or array, only sorted set values can be read from Redis, and the score value cannot be read.
    • If data-type is string, only one non-primary key field is allowed.
    • If data-type is sorted-set and schema-syntax is map, there can be only one non-primary key in addition to the primary key, and it must be of the map type. The map values must be of the double type, indicating the scores; the map keys are the members of the Redis sorted set (see the sketch after this list).
    • If data-type is sorted-set and schema-syntax is array-scores, only two non-primary keys are allowed and must be of the array type.
      The first key indicates values in the Redis set. The second key is of the array<double> type, indicating index scores. The following is an example:
      CREATE TABLE redisSink (
      +  order_id string,
      +  arrayField Array<String>,
      +  arrayScore array<double>,
      +  primary key (order_id) not enforced
      +) WITH (
      +  'connector' = 'redis',
      +  'host' = 'RedisIP',
      +  'password' = 'RedisPassword',
      +  'data-type' = 'sorted-set',
      +  "default-score" = '3',
      +  'deploy-mode' = 'master-replica',
      +  'schema-syntax' = 'array-scores'
      +);
      +
      +
    +
+
+
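The following is a minimal sketch of the sorted-set plus map combination described above; the table and field names are illustrative. The single non-primary key is a map whose keys are the sorted-set members and whose double values are their scores:

  CREATE TABLE redisMapSource (
    redisKey string,
    memberScores map<string, double>,  -- keys: sorted-set members; values: their scores
    primary key (redisKey) not enforced
  ) WITH (
    'connector' = 'redis',
    'host' = 'RedisIP',
    'password' = 'RedisPassword',
    'data-type' = 'sorted-set',
    'deploy-mode' = 'master-replica',
    'schema-syntax' = 'map'
  );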

Syntax

create table redisSource (
+  attr_name attr_type 
+  (',' attr_name attr_type)* 
+  (',' watermark for rowtime_column_name as watermark-strategy_expression)
+  ,PRIMARY KEY (attr_name, ...) NOT ENFORCED
+)
+with (
+  'connector' = 'redis',
+  'host' = ''
+);
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Data Types

+

Description

+

connector

+

Yes

+

None

+

String

+

Connector type. Set this parameter to redis.

+

host

+

Yes

+

None

+

String

+

Redis connector address

+

port

+

No

+

6379

+

Integer

+

Redis connector port

+

password

+

No

+

None

+

String

+

Redis authentication password

+

namespace

+

No

+

None

+

String

+

Redis key namespace (see the sketch after this table)

+

delimiter

+

No

+

:

+

String

+

Delimiter between the Redis key and namespace

+

data-type

+

No

+

hash

+

String

+

Redis data type. Available values are as follows:

+
  • hash
  • list
  • set
  • sorted-set
  • string
+

For details about the constraints, see Constraints on data-type.

+

schema-syntax

+

No

+

fields

+

String

+

Redis schema semantics. Available values are as follows:

+
  • fields: applicable to all data types
  • fields-scores: applicable to sorted set data
  • array: applicable to list, set, and sorted set data
  • array-scores: applicable to sorted set data
  • map: applicable to hash and sorted set data
+

For details about the constraints, see Constraints on schema-syntax.

+

deploy-mode

+

No

+

standalone

+

String

+

Deployment mode of the Redis cluster. The value can be standalone, master-replica, or cluster. The default value is standalone.

+

retry-count

+

Yes

+

5

+

Integer

+

Size of each connection request queue. If the number of connection requests in a queue exceeds the queue size, command calling will cause RedisException. Setting this parameter to a small value causes exceptions to occur earlier during overload or disconnection; a larger value means more time to reach the boundary, but more requests may be queued and more heap space may be used.

+

connection-timeout-millis

+

No

+

10000

+

Integer

+

Maximum timeout for connecting to the Redis cluster

+

commands-timeout-millis

+

No

+

2000

+

Integer

+

Maximum time for waiting for a completion response

+

rebalancing-timeout-millis

+

No

+

15000

+

Integer

+

Sleep time when the Redis cluster fails

+

scan-keys-count

+

No

+

1000

+

Integer

+

Number of data records read in each scan

+

default-score

+

No

+

0

+

Double

+

Default score when data-type is sorted-set

+

deserialize-error-policy

+

No

+

fail-job

+

Enum

+

How to process a data parsing failure

+

Available values are as follows:

+
  • fail-job: Fail the job.
  • skip-row: Skip the current data.
  • null-field: Set the current data to null.
+

skip-null-values

+

No

+

true

+

Boolean

+

Whether null values will be skipped

+

lookup.async

+

No

+

false

+

Boolean

+

Whether asynchronous I/O will be used when this table is used as a dimension table

+
+
+
+
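The namespace and delimiter options prefix every lookup key. A minimal sketch, assuming the dimension rows were written under keys such as dim:330106 rather than bare IDs:

  create table area_info (
    area_id string,
    region_name string,
    primary key (area_id) not enforced  -- Redis key (without the namespace prefix)
  ) WITH (
    'connector' = 'redis',
    'host' = 'RedisIP',
    'password' = 'RedisPassword',
    'data-type' = 'hash',
    'namespace' = 'dim',   -- lookups read the key dim:330106 instead of 330106
    'delimiter' = ':'
  );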

Example

Read data from a Kafka source table, use a Redis table as the dimension table, and write the wide table generated from the source and dimension tables to a Kafka result table. The procedure is as follows:

+
  1. Create an enhanced datasource connection in the VPC and subnet where Redis and Kafka are located, and bind the connection to the required Flink elastic resource pool.
  2. Configure the Redis and Kafka security groups and add inbound rules to allow access from the Flink queue. Test the connectivity using the Redis address. If the connection passes the test, the datasource is bound to the queue.
  3. Run the following commands on the Redis client to send data to Redis:
    HMSET 330102 area_province_name a1 area_city_name b1 area_county_name c1 area_street_name d1 region_name e1
    +
    +HMSET 330106 area_province_name a1 area_city_name b1 area_county_name c2 area_street_name d2 region_name e1
    +
    +HMSET 330108 area_province_name a1 area_city_name b1 area_county_name c3 area_street_name d3 region_name e1
    +
    +HMSET 330110 area_province_name a1 area_city_name b1 area_county_name c4 area_street_name d4 region_name e1
    +
  4. Create a Flink OpenSource SQL job, enter the following job script, and submit the job. The job script uses Kafka as the data source and a Redis table as the dimension table. Data is output to a Kafka result table.
    When you create a job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Set the values of the parameters in bold in the following script as needed.
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time string,
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  proctime as Proctime()
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = 'kafkaSourceTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'properties.group.id' = 'GroupId',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +-- Create an address dimension table
    +create table area_info (
    +    area_id string, 
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string, 
    +    area_street_name string, 
    +    region_name string, 
    +    primary key (area_id) not enforced -- Redis key
    +) WITH (
    +  'connector' = 'redis',
    +  'host' = 'RedisIP',
    +  'password' = 'RedisPassword',
    +  'data-type' = 'hash',
    +  'deploy-mode' = 'master-replica'
    +);
    +
    +-- Generate a wide table based on the address dimension table containing detailed order information.
    +create table order_detail(
    +    order_id string,
    +    order_channel string,
    +    order_time string,
    +    pay_amount double,
    +    real_pay double,
    +    pay_time string,
    +    user_id string,
    +    user_name string,
    +    area_id string,
    +    area_province_name string,
    +    area_city_name string,
    +    area_county_name string,
    +    area_street_name string,
    +    region_name string
    +) with (
    +  'connector' = 'kafka',
    +  'topic' = 'kafkaSinkTopic',
    +  'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
    +  'format' = 'json'
    +);
    +
    +insert into order_detail
    +    select orders.order_id, orders.order_channel, orders.order_time, orders.pay_amount, orders.real_pay, orders.pay_time, orders.user_id, orders.user_name,
    +           area.area_id, area.area_province_name, area.area_city_name, area.area_county_name,
    +           area.area_street_name, area.region_name  from orders
    +    left join area_info for system_time as of orders.proctime as area on orders.area_id = area.area_id;
    +
    +
  5. Connect to the Kafka cluster and insert the following test data into the source topic in Kafka:
    {"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103251202020001", "order_channel":"miniAppShop", "order_time":"2021-03-25 12:02:02", "pay_amount":"60.00", "real_pay":"60.00", "pay_time":"2021-03-25 12:03:00", "user_id":"0002", "user_name":"Bob", "area_id":"330110"}
    +
    +{"order_id":"202103251505050001", "order_channel":"qqShop", "order_time":"2021-03-25 15:05:05", "pay_amount":"500.00", "real_pay":"400.00", "pay_time":"2021-03-25 15:10:00", "user_id":"0003", "user_name":"Cindy", "area_id":"330108"}
    +
  6. Connect to the Kafka cluster and read data from the sink topic of Kafka. The result data is as follows:
    {"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106","area_province_name":"a1","area_city_name":"b1","area_county_name":"c2","area_street_name":"d2","region_name":"e1"}
    +
    +{"order_id":"202103251202020001","order_channel":"miniAppShop","order_time":"2021-03-25 12:02:02","pay_amount":60.0,"real_pay":60.0,"pay_time":"2021-03-25 12:03:00","user_id":"0002","user_name":"Bob","area_id":"330110","area_province_name":"a1","area_city_name":"b1","area_county_name":"c4","area_street_name":"d4","region_name":"e1"}
    +
    +{"order_id":"202103251505050001","order_channel":"qqShop","order_time":"2021-03-25 15:05:05","pay_amount":500.0,"real_pay":400.0,"pay_time":"2021-03-25 15:10:00","user_id":"0003","user_name":"Cindy","area_id":"330108","area_province_name":"a1","area_city_name":"b1","area_county_name":"c3","area_street_name":"d3","region_name":"e1"}
    +
    +
+
+

FAQs

If Chinese characters are written to Redis from a Windows environment, an exception may occur during data writing.

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0407.html b/docs/dli/sqlreference/dli_08_0407.html new file mode 100644 index 00000000..60a4bcfd --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0407.html @@ -0,0 +1,29 @@ + + +

Format

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0408.html b/docs/dli/sqlreference/dli_08_0408.html new file mode 100644 index 00000000..ef162c9b --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0408.html @@ -0,0 +1,231 @@ + + +

Avro

+

Function

Flink supports reading and writing Avro data based on an Avro schema. The Avro schema is derived from the table schema.

+
+

Supported Connectors

  • Kafka
  • Upsert Kafka
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + +
Table 1 Parameter

Parameter

+

Mandatory

+

Default value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set the value to avro.

+

avro.codec

+

No

+

None

+

String

+

Avro compression codec, used by the filesystem connector only. The codec is disabled by default. Available values are deflate, snappy, bzip2, and xz.

+
+
+
+

Data Type Mapping

Currently, the Avro schema is derived from the table schema and cannot be explicitly defined. The following table lists the mappings between Flink and Avro types.

+

In addition to the following types, Flink supports reading/writing nullable types. Flink maps a nullable type to Avro union(something, null), where something is the Avro type converted from the Flink type.

+

You can refer to Apache Avro 1.11.0 Specification for more information about Avro types.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Data Type Mapping

Flink SQL Type

+

Avro Type

+

Avro Logical Type

+

CHAR / VARCHAR / STRING

+

string

+

-

+

BOOLEAN

+

boolean

+

-

+

BINARY / VARBINARY

+

bytes

+

-

+

DECIMAL

+

fixed

+

decimal

+

TINYINT

+

int

+

-

+

SMALLINT

+

int

+

-

+

INT

+

int

+

-

+

BIGINT

+

long

+

-

+

FLOAT

+

float

+

-

+

DOUBLE

+

double

+

-

+

DATE

+

int

+

date

+

TIME

+

int

+

time-millis

+

TIMESTAMP

+

long

+

timestamp-millis

+

ARRAY

+

array

+

-

+

MAP (keys must be of the string, char, or varchar type.)

+

map

+

-

+

MULTISET (elements must be of the string, char, or varchar type.)

+

map

+

-

+

ROW

+

record

+

-

+
+
+
+

Example

Read data from Kafka, deserialize it from the Avro format, and output it to the print connector.

+
  1. Create a datasource connection for access to the VPC and subnet where Kafka is located and bind the connection to the queue. Set a security group and inbound rule to allow access to the queue and test its connectivity using the Kafka IP address. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job and select Flink 1.12. Copy the following statement and submit the job:

    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourTopic>',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>,<yourKafkaAddress3>:<yourKafkaPort>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  "format" = "avro"
    +);
    +
    +CREATE TABLE printSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'print'
    +);
    +
    +insert into printSink select * from kafkaSource;
    +

  3. Insert the following data to Kafka using Avro data serialization:

    {"order_id":"202103241000000001","order_channel":"webShop","order_time":"2021-03-24 10:00:00","pay_amount":100.0,"real_pay":100.0,"pay_time":"2021-03-24 10:02:03","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
    +{"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +

  4. Perform the following operations to view the output:

    • Method 1: Locate the job and click More > FlinkUI. Choose Task Managers > Stdout.
    • Method 2: If you allow DLI to save job logs in OBS, view the output in the taskmanager.out file.
      +I(202103241000000001,webShop,2021-03-24 10:00:00,100.0,100.0,2021-03-24 10:02:03,0001,Alice,330106)
      ++I(202103241606060001,appShop,2021-03-24 16:06:06,200.0,180.0,2021-03-24 16:10:06,0001,Alice,330106)
      +
    +

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0409.html b/docs/dli/sqlreference/dli_08_0409.html new file mode 100644 index 00000000..64482521 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0409.html @@ -0,0 +1,182 @@ + + +

Canal

+

Function

Canal is a Change Data Capture (CDC) tool that can stream changes in real time from MySQL into other systems. Canal provides a unified format schema for changelogs and supports serializing messages using JSON and protobuf (the default format for Canal).

+

Flink can interpret Canal JSON messages as INSERT, UPDATE, and DELETE messages in the Flink SQL system. This is useful in many cases, such as:

+
  • Synchronizing incremental data from databases to other systems
  • Auditing logs
  • Real-time materialized view on databases
  • Temporal joins on the change history of a database table, etc.
+

Flink can also encode INSERT, UPDATE, and DELETE messages in Flink SQL as Canal JSON messages and emit them to storage such as Kafka. However, Flink currently cannot combine UPDATE_BEFORE and UPDATE_AFTER into a single UPDATE message, so it encodes UPDATE_BEFORE and UPDATE_AFTER as DELETE and INSERT Canal messages.

+
+
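A minimal sink sketch for the encoding direction described above; the topic and address values are placeholders. Any changelog-producing query, for example a grouped aggregation, can be written to such a table, and each update is emitted as a DELETE plus an INSERT Canal message:

  create table canalSink (
    user_name string,
    cnt bigint
  ) with (
    'connector' = 'kafka',
    'topic' = 'CanalSinkTopic',
    'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
    'format' = 'canal-json'
  );

  -- Assumed changelog-producing query over a hypothetical orders table:
  insert into canalSink select user_name, count(*) from orders group by user_name;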

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set this parameter to canal-json.

+

canal-json.ignore-parse-errors

+

No

+

false

+

Boolean

+

Whether fields and rows with parse errors will be skipped or failed. The default value is false, indicating that an error will be thrown. Fields are set to null in case of errors.

+

canal-json.timestamp-format.standard

+

No

+

'SQL'

+

String

+

Input and output timestamp formats. Currently supported values are SQL and ISO-8601:

+
  • SQL will parse input timestamp in "yyyy-MM-dd HH:mm:ss.s{precision}" format, for example 2020-12-30 12:13:14.123 and output timestamp in the same format.
  • ISO-8601 will parse input timestamp in "yyyy-MM-ddTHH:mm:ss.s{precision}" format, for example 2020-12-30T12:13:14.123 and output timestamp in the same format.
+

canal-json.map-null-key.mode

+

No

+

'FAIL'

+

String

+

Handling mode when serializing null keys for map data. Available values are as follows:

+
  • FAIL will throw exception when encountering map value with null key.
  • DROP will drop null key entries for map data.
  • LITERAL replaces the empty key value in the map with a string constant. The string literal is defined by canal-json.map-null-key.literal option.
+

canal-json.map-null-key.literal

+

No

+

'null'

+

String

+

String literal to replace null key when canal-json.map-null-key.mode is LITERAL.

+

canal-json.database.include

+

No

+

None

+

String

+

An optional regular expression to read changelog rows only for specific databases, matched against the database meta field in the Canal record.

+

canal-json.table.include

+

No

+

None

+

String

+

An optional regular expression to read changelog rows only for specific tables, matched against the table meta field in the Canal record (see the sketch after this table).

+
+
+
+
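A minimal source sketch using the two filter options above; the regular expressions are illustrative:

  create table kafkaSource (
    id bigint,
    name string
  ) with (
    'connector' = 'kafka',
    'topic' = 'CanalTopic',
    'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
    'properties.group.id' = 'GroupId',
    'scan.startup.mode' = 'latest-offset',
    'format' = 'canal-json',
    -- Keep only changelog rows from the inventory database and tables starting with prod.
    'canal-json.database.include' = 'inventory',
    'canal-json.table.include' = 'prod.*'
  );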

Supported Connectors

  • Kafka
+
+

Example

Use Kafka to send data and output the data to print.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka is located and bind the connection to the queue. Set a security group and inbound rule to allow access to the queue and test its connectivity using the Kafka IP address. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job and select Flink 1.12. Copy the following statement and submit the job:

    create table kafkaSource(
    +  id bigint,
    +  name string,
    +  description string,
    +  weight DECIMAL(10, 2)
    +  ) with (
    +    'connector' = 'kafka',
    +    'topic' = '<yourTopic>',
    +    'properties.group.id' = '<yourGroupId>',
    +    'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +    'scan.startup.mode' = 'latest-offset',
    +    'format' = 'canal-json'
    +);
    +create table printSink(
    +  id bigint,
    +  name string,
    +  description string,
    +  weight DECIMAL(10, 2)
    +   ) with (
    +     'connector' = 'print'
    +   );
    +insert into printSink select * from kafkaSource;
    +

  3. Insert the following data to the corresponding topic in Kafka:

    {
    +  "data": [
    +    {
    +      "id": "111",
    +      "name": "scooter",
    +      "description": "Big 2-wheel scooter",
    +      "weight": "5.18"
    +    }
    +  ],
    +  "database": "inventory",
    +  "es": 1589373560000,
    +  "id": 9,
    +  "isDdl": false,
    +  "mysqlType": {
    +    "id": "INTEGER",
    +    "name": "VARCHAR(255)",
    +    "description": "VARCHAR(512)",
    +    "weight": "FLOAT"
    +  },
    +  "old": [
    +    {
    +      "weight": "5.15"
    +    }
    +  ],
    +  "pkNames": [
    +    "id"
    +  ],
    +  "sql": "",
    +  "sqlType": {
    +    "id": 4,
    +    "name": 12,
    +    "description": 12,
    +    "weight": 7
    +  },
    +  "table": "products",
    +  "ts": 1589373560798,
    +  "type": "UPDATE"
    +}
    +

  4. View the output through either of the following methods:

    • Method 1: Locate the job and click More > FlinkUI. Choose Task Managers > Stdout.
    • Method 2: If you allow DLI to save job logs in OBS, view the output in the taskmanager.out file.
    +
    -U(111,scooter,Big 2-wheel scooter,5.15)
    ++U(111,scooter,Big 2-wheel scooter,5.18)
    +

+

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0410.html b/docs/dli/sqlreference/dli_08_0410.html new file mode 100644 index 00000000..e474a414 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0410.html @@ -0,0 +1,118 @@ + + +

Confluent Avro

+

Function

The Avro Schema Registry (avro-confluent) format allows you to read records that were serialized by the io.confluent.kafka.serializers.KafkaAvroSerializer and to write records that can in turn be read by the io.confluent.kafka.serializers.KafkaAvroDeserializer.

+

When reading (deserializing) a record with this format, the Avro writer schema is fetched from the configured Confluent Schema Registry based on the schema version ID encoded in the record, while the reader schema is inferred from the table schema.

+

When writing (serializing) a record with this format, the Avro schema is inferred from the table schema and used to retrieve the schema ID to be encoded with the data. The lookup is performed in the configured Confluent Schema Registry under the subject specified by avro-confluent.schema-registry.subject.

+
+

Supported Connectors

  • Kafka
  • Upsert Kafka
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set this parameter to avro-confluent.

+

avro-confluent.schema-registry.subject

+

No

+

None

+

String

+

The Confluent Schema Registry subject under which to register the schema used by this format during serialization.

+

By default, the kafka and upsert-kafka connectors use <topic_name>-value or <topic_name>-key as the subject name if this format is used as the value or key format (see the sketch after this table).

+

avro-confluent.schema-registry.url

+

Yes

+

None

+

String

+

URL of the Confluent Schema Registry to fetch/register schemas.

+
+
+
+
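For the upsert-kafka connector, the format can serve as both the key and the value format, with its options forwarded through the key. and value. prefixes. A minimal sketch, assuming a Schema Registry reachable at the placeholder address below; the registry then derives the <topic_name>-key and <topic_name>-value subjects by default:

  create table upsertKafkaSink (
    order_id string,
    pay_amount double,
    primary key (order_id) not enforced
  ) with (
    'connector' = 'upsert-kafka',
    'topic' = 'OrderTopic',
    'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
    'key.format' = 'avro-confluent',
    'key.avro-confluent.schema-registry.url' = 'http://SchemaRegistryIp:8081',
    'value.format' = 'avro-confluent',
    'value.avro-confluent.schema-registry.url' = 'http://SchemaRegistryIp:8081'
  );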

Example

Read JSON data from the source topic in Kafka and write the data in Confluent Avro format to the sink topic.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka and ECS are located and bind the connection to the queue. Set a security group and inbound rule to allow access to the queue and test its connectivity using the Kafka and ECS IP addresses. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Purchase an ECS cluster, download Confluent 5.5.2 (https://packages.confluent.io/archive/5.5/) and jdk1.8.0_232, and upload them to the ECS cluster. Run the following command to decompress the packages (assume that the decompression directories are confluent-5.5.2 and jdk1.8.0_232):

    tar zxvf confluent-5.5.2-2.11.tar.gz
    +tar zxvf jdk1.8.0_232.tar.gz
    +

  3. Run the following commands to install jdk1.8.0_232 in the current ECS cluster. You can run the pwd command in the jdk1.8.0_232 folder to view the value of yourJdkPath.

    export JAVA_HOME=<yourJdkPath>
    +export PATH=$JAVA_HOME/bin:$PATH
    +export CLASSPATH=.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
    +

  4. Go to the confluent-5.5.2/etc/schema-registry/ directory and modify the following configuration items in the schema-registry.properties file:

    listeners=http://<yourEcsIp>:8081  
    +kafkastore.bootstrap.servers=<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>
    +

  5. Switch to the confluent-5.5.2 directory and run the following command to start Confluent:

    bin/schema-registry-start etc/schema-registry/schema-registry.properties
    +

  6. Create a Flink opensource SQL job, select the Flink 1.12 version, and allow DLI to save job logs in OBS. Add the following statement to the job and submit it:

    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,  
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>',
    +  'topic' = '<yourSourceTopic>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +CREATE TABLE kafkaSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,  
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>',
    +  'topic' = '<yourSinkTopic>',
    +  'format' = 'avro-confluent',
    +  'avro-confluent.schema-registry.url' = 'http://<yourEcsIp>:8081',
    +  'avro-confluent.schema-registry.subject' = '<yourSubject>'
    +);
    +insert into kafkaSink select * from kafkaSource;
    +

  7. Insert the following data into Kafka:

    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241606060001", "order_channel":"appShop", "order_time":"2021-03-24 16:06:06", "pay_amount":"200.00", "real_pay":"180.00", "pay_time":"2021-03-24 16:10:06", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +

  8. Read the data of the sink Kafka topic. You will find that the data has been written and that the schema has been saved to the _schemas topic of Kafka.
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0411.html b/docs/dli/sqlreference/dli_08_0411.html new file mode 100644 index 00000000..cd93d189 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0411.html @@ -0,0 +1,180 @@ + + +

CSV

+

Function

The CSV format allows you to read and write CSV data based on a CSV schema. Currently, the CSV schema is derived from the table schema.

+
+

Supported Connectors

  • Kafka
  • Upsert Kafka
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set the value to csv.

+

csv.field-delimiter

+

No

+

,

+

String

+

Field delimiter character, which must be a single character. You can use a backslash to specify special characters, for example, \t represents the tab character. You can also use unicode to specify them in plain SQL, for example, 'csv.field-delimiter' = '\u0001' represents the 0x01 character (see the sketch after this table).

+

csv.disable-quote-character

+

No

+

false

+

Boolean

+

Whether to disable the quote character for enclosing field values. If you set this parameter to true, csv.quote-character cannot be set.

+

csv.quote-character

+

No

+

''

+

String

+

Quote character for enclosing field values.

+

csv.allow-comments

+

No

+

false

+

Boolean

+

Whether to ignore comment lines that start with '#'. If you set this parameter to true, also ignore parse errors to allow empty rows.

+

csv.ignore-parse-errors

+

No

+

false

+

Boolean

+

Whether fields and rows with parse errors will be skipped or failed. The default value is false, indicating that an error will be thrown. Fields are set to null in case of errors.

+

csv.array-element-delimiter

+

No

+

;

+

String

+

Array element delimiter string for separating array and row element values.

+

csv.escape-character

+

No

+

None

+

String

+

Escape character for escaping values.

+

csv.null-literal

+

No

+

None

+

String

+

Null literal string that is interpreted as a null value.

+
+
+
+
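A minimal sketch combining several of the options above; the delimiter and quote choices are illustrative:

  create table csvSource (
    order_id string,
    user_name string
  ) with (
    'connector' = 'kafka',
    'topic' = 'CsvTopic',
    'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
    'properties.group.id' = 'GroupId',
    'scan.startup.mode' = 'latest-offset',
    'format' = 'csv',
    'csv.field-delimiter' = ';',     -- fields separated by semicolons instead of commas
    'csv.quote-character' = '''',    -- a single quote encloses field values
    'csv.ignore-parse-errors' = 'true'
  );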

Example

Use Kafka to send data and output the data to print.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka is located and bind the connection to the queue. Set a security group and inbound rule to allow access to the queue and test its connectivity using the Kafka IP address. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job. Copy the following statement and submit the job:

    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourSourceTopic>',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  "format" = "csv"
    +);
    +
    +CREATE TABLE kafkaSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourSinkTopic>',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +  "format" = "csv"
    +);
    +
    +insert into kafkaSink select * from kafkaSource;
    +
    +

  3. Insert the following data into the source Kafka topic:

    202103251505050001,qqShop,2021-03-25 15:05:05,500.00,400.00,2021-03-25 15:10:00,0003,Cindy,330108
    +
    +202103241606060001,appShop,2021-03-24 16:06:06,200.00,180.00,2021-03-24 16:10:06,0001,Alice,330106
    +

  4. Read data from the sink Kafka topic. The result is as follows:

    202103251505050001,qqShop,"2021-03-25 15:05:05",500.0,400.0,"2021-03-25 15:10:00",0003,Cindy,330108
    +
    +202103241606060001,appShop,"2021-03-24 16:06:06",200.0,180.0,"2021-03-24 16:10:06",0001,Alice,330106
    +

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0412.html b/docs/dli/sqlreference/dli_08_0412.html new file mode 100644 index 00000000..3d101b34 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0412.html @@ -0,0 +1,159 @@ + + +

Debezium

+

Function

Debezium is a Change Data Capture (CDC) tool that can stream changes in real time from other databases into Kafka. Debezium provides a unified format schema for changelogs and supports serializing messages using JSON.

+

Flink can interpret Debezium JSON and Avro messages as INSERT/UPDATE/DELETE messages in the Flink SQL system. This is useful in many cases, such as:

+
  • Synchronizing incremental data from databases to other systems
  • Auditing logs
  • Real-time materialized view on databases
  • Temporal joins on the change history of a database table, etc.
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set this parameter to debezium-json.

+

debezium-json.schema-include

+

No

+

false

+

Boolean

+

Whether the Debezium JSON messages contain the schema. When setting up Debezium Kafka Connect, enable the Kafka configuration value.converter.schemas.enable to include the schema in the message (see the sketch after this table).

+

debezium-json.ignore-parse-errors

+

No

+

false

+

Boolean

+

Whether fields and rows with parse errors will be skipped or failed. The default value is false, indicating that an error will be thrown. Fields are set to null in case of errors.

+

debezium-json.timestamp-format.standard

+

No

+

'SQL'

+

String

+

Input and output timestamp formats. Currently supported values are SQL and ISO-8601.

+
  • SQL will parse input timestamp in "yyyy-MM-dd HH:mm:ss.s{precision}" format, for example 2020-12-30 12:13:14.123 and output timestamp in the same format.
  • ISO-8601 will parse input timestamp in "yyyy-MM-ddTHH:mm:ss.s{precision}" format, for example 2020-12-30T12:13:14.123 and output timestamp in the same format.
+

debezium-json.map-null-key.mode

+

No

+

'FAIL'

+

String

+

Handling mode when serializing null keys for map data. Available values are as follows:

+
  • FAIL will throw exception when encountering map value with null key.
  • DROP will drop null key entries for map data.
  • LITERAL replaces the empty key value in the map with a string constant. The string literal is defined by debezium-json.map-null-key.literal option.
+

debezium-json.map-null-key.literal

+

No

+

'null'

+

String

+

String literal to replace null key when debezium-json.map-null-key.mode is LITERAL.

+
+
+
+
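A minimal source sketch for messages that carry the schema envelope, that is, Debezium Kafka Connect was started with value.converter.schemas.enable=true; connection values are placeholders:

  create table kafkaSource (
    id bigint,
    name string,
    weight decimal(10, 2)
  ) with (
    'connector' = 'kafka',
    'topic' = 'DebeziumTopic',
    'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
    'properties.group.id' = 'GroupId',
    'scan.startup.mode' = 'latest-offset',
    'format' = 'debezium-json',
    -- Each message wraps the payload in a schema envelope.
    'debezium-json.schema-include' = 'true'
  );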

Supported Connectors

  • Kafka
+
+

Example

Use Kafka to send data and output the data to print.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka is located and bind the connection to the queue. Set a security group and inbound rule to allow access to the queue and test its connectivity using the Kafka IP address. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job. Copy the following statement and submit the job:

    create table kafkaSource(
    +  id BIGINT,
    +  name STRING,
    +  description STRING,
    +  weight DECIMAL(10, 2)
    +  ) with (
    +    'connector' = 'kafka',
    +    'topic' = '<yourTopic>',
    +    'properties.group.id' = '<yourGroupId>',
    +    'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +    'scan.startup.mode' = 'latest-offset',
    +    'format' = 'debezium-json'
    +);
    +create table printSink(
    +  id BIGINT,
    +  name STRING,
    +  description STRING,
    +  weight DECIMAL(10, 2)
    +   ) with (
    +     'connector' = 'print'
    +   );
    +insert into printSink select * from kafkaSource;
    +

  3. Insert the following data to the corresponding topic in Kafka:

    {
    +  "before": {
    +    "id": 111,
    +    "name": "scooter",
    +    "description": "Big 2-wheel scooter",
    +    "weight": 5.18
    +  },
    +  "after": {
    +    "id": 111,
    +    "name": "scooter",
    +    "description": "Big 2-wheel scooter",
    +    "weight": 5.15
    +  },
    +  "source": {
    +    "version": "0.9.5.Final",
    +	"connector": "mysql",
    +	"name": "fullfillment",
    +	"server_id" :1,
    +	"ts_sec": 1629607909,
    +	"gtid": "mysql-bin.000001",
    +	"pos": 2238,"row": 0,
    +	"snapshot": false,
    +	"thread": 7,
    +	"db": "inventory",
    +	"table": "test",
    +	"query": null},
    +  "op": "u",
    +  "ts_ms": 1589362330904,
    +  "transaction": null
    +}
    +

  4. View the output through either of the following methods:

    • Method 1: Locate the job and click More > FlinkUI. Choose Task Managers > Stdout.
    • Method 2: If you allow DLI to save job logs in OBS, view the output in the taskmanager.out file.
    +
    -U(111,scooter,Big 2-wheel scooter,5.18)
    ++U(111,scooter,Big 2-wheel scooter,5.15)
    +

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0413.html b/docs/dli/sqlreference/dli_08_0413.html new file mode 100644 index 00000000..fce41d8f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0413.html @@ -0,0 +1,146 @@ + + +

JSON

+

Function

The JSON format allows you to read and write JSON data based on a JSON schema. Currently, the JSON schema is derived from the table schema.

+
+

Supported Connectors

  • Kafka
  • Upsert Kafka
  • Elasticsearch
+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set this parameter to json.

+

json.fail-on-missing-field

+

No

+

false

+

Boolean

+

Whether to fail the job when fields are missing. The default value is false, indicating that missing fields are set to null instead of causing a failure.

+

json.ignore-parse-errors

+

No

+

false

+

Boolean

+

Whether fields and rows with parse errors will be skipped or failed. The default value is false, indicating that an error will be thrown. Fields are set to null in case of errors.

+

json.timestamp-format.standard

+

No

+

'SQL'

+

String

+

Input and output timestamp format for TIMESTAMP and TIMESTAMP WITH LOCAL TIME ZONE.

+

Currently supported values are SQL and ISO-8601:

+
  • SQL will parse the input TIMESTAMP values in "yyyy-MM-dd HH:mm:ss.s{precision}" format, for example, 2020-12-30 12:13:14.123, parse TIMESTAMP WITH LOCAL TIME ZONE values in "yyyy-MM-dd HH:mm:ss.s{precision}'Z'" format, for example, 2020-12-30 12:13:14.123Z and output timestamp in the same format.
  • ISO-8601 will parse the input TIMESTAMP values in "yyyy-MM-ddTHH:mm:ss.s{precision}" format, for example, 2020-12-30T12:13:14.123 parse TIMESTAMP WITH LOCAL TIME ZONE values in "yyyy-MM-ddTHH:mm:ss.s{precision}'Z'" format, for example, 2020-12-30T12:13:14.123Z and output timestamp in the same format.
+

json.map-null-key.mode

+

No

+

'FAIL'

+

String

+

Handling mode when serializing null keys for map data (see the sketch after this table). Available values are as follows:

+
  • FAIL will throw exception when encountering map value with null key.
  • DROP will drop null key entries for map data.
  • LITERAL replaces the empty key value in the map with a string constant. The string literal is defined by json.map-null-key.literal option.
+

json.map-null-key.literal

+

No

+

'null'

+

String

+

String literal to replace null key when json.map-null-key.mode is LITERAL.

+
+
+
+
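A minimal sketch combining the timestamp and map-null-key options above; the option values are illustrative:

  create table jsonSource (
    event_time timestamp(3),
    tags map<string, string>
  ) with (
    'connector' = 'kafka',
    'topic' = 'JsonTopic',
    'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort',
    'properties.group.id' = 'GroupId',
    'scan.startup.mode' = 'latest-offset',
    'format' = 'json',
    'json.timestamp-format.standard' = 'ISO-8601',  -- expects e.g. 2020-12-30T12:13:14.123
    'json.map-null-key.mode' = 'LITERAL',
    'json.map-null-key.literal' = 'none'            -- null map keys are written as "none"
  );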

Example

In this example, data is read from a topic and written to another using a Kafka sink.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka is located and bind the connection to the queue. Set an inbound rule for the security group to allow access to the queue and test the connectivity using the Kafka address. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job, select Flink 1.12, and allow DLI to save job logs in OBS. Use the following statement in the job and submit it:

    CREATE TABLE kafkaSource (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourSourceTopic>',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  "format" = "json"
    +);
    +
    +CREATE TABLE kafkaSink (
    +  order_id string,
    +  order_channel string,
    +  order_time string, 
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourSinkTopic>',
    +  'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +  "format" = "json"
    +);
    +
    +insert into kafkaSink select * from kafkaSource;
    +

  3. Insert the following data into the source Kafka topic:

    {"order_id":"202103241000000001","order_channel":"webShop","order_time":"2021-03-24 10:00:00","pay_amount":100.0,"real_pay":100.0,"pay_time":"2021-03-24 10:02:03","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
    +{"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +

  4. Read data from the sink topic. The result is as follows:

    {"order_id":"202103241000000001","order_channel":"webShop","order_time":"2021-03-24 10:00:00","pay_amount":100.0,"real_pay":100.0,"pay_time":"2021-03-24 10:02:03","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +
    +{"order_id":"202103241606060001","order_channel":"appShop","order_time":"2021-03-24 16:06:06","pay_amount":200.0,"real_pay":180.0,"pay_time":"2021-03-24 16:10:06","user_id":"0001","user_name":"Alice","area_id":"330106"}
    +

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0414.html b/docs/dli/sqlreference/dli_08_0414.html new file mode 100644 index 00000000..2ba09be4 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0414.html @@ -0,0 +1,143 @@ + + +

Maxwell

+

Function

Flink supports interpreting Maxwell JSON messages as INSERT/UPDATE/DELETE messages into the Flink SQL system. This feature is useful in many cases,

+
+

such as:

+
  • Synchronizing incremental data from databases to other systems
  • Auditing logs
  • Real-time materialized views on databases
  • Temporal joins on the changing history of a database table, and so on
+

Flink also supports encoding the INSERT/UPDATE/DELETE messages in Flink SQL as Maxwell JSON messages and emitting them to external systems like Kafka. However, currently Flink cannot combine UPDATE_BEFORE and UPDATE_AFTER into a single UPDATE message. Therefore, Flink encodes UPDATE_BEFORE and UPDATE_AFTER as DELETE and INSERT Maxwell messages.

+
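For example, changelog records can be emitted in this encoding through a Kafka sink table. A minimal sketch (the topic and broker address are placeholders):

create table kafkaMaxwellSink(
+  id bigint,
+  name string,
+  weight DECIMAL(10, 2)
+) with (
+  'connector' = 'kafka',
+  'topic' = '<yourSinkTopic>',
+  'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
+  'format' = 'maxwell-json'
+);
+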

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set this parameter to maxwell-json.

+

maxwell-json.ignore-parse-errors

+

No

+

false

+

Boolean

+

Whether fields and rows with parse errors will be skipped instead of failing. The default value is false, indicating that an error will be thrown. Fields with parse errors are set to null.

+

maxwell-json.timestamp-format.standard

+

No

+

'SQL'

+

String

+

Input and output timestamp formats. Currently supported values are SQL and ISO-8601:

+

SQL will parse input timestamps in "yyyy-MM-dd HH:mm:ss.s{precision}" format, for example, 2020-12-30 12:13:14.123, and output timestamps in the same format.

+

ISO-8601 will parse input timestamps in "yyyy-MM-ddTHH:mm:ss.s{precision}" format, for example, 2020-12-30T12:13:14.123, and output timestamps in the same format.

+

maxwell-json.map-null-key.mode

+

No

+

'FAIL'

+

String

+

Handling mode when serializing null keys for map data. Currently supported values are 'FAIL', 'DROP' and 'LITERAL':

+

FAIL will throw an exception when encountering a map with a null key.

+

DROP will drop null key entries for map data.

+

LITERAL will replace the null key with a string literal. The string literal is defined by the maxwell-json.map-null-key.literal option.

+

maxwell-json.map-null-key.literal

+

No

+

'null'

+

String

+

String literal to replace null key when maxwell-json.map-null-key.mode is LITERAL.

+
+
+
+

Supported Connectors

  • Kafka
+
+

Example

Use Kafka to send data and output the data to print.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka is located and bind the connection to the queue. Set a security group and inbound rule to allow access of the queue and test the connectivity of the queue using the Kafka IP address. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job and select Flink 1.12. Copy the following statement and submit the job:

    create table kafkaSource(
    +  id bigint,
    +  name string,
    +  description string,
    +  weight DECIMAL(10, 2)  
    +  ) with (
    +    'connector' = 'kafka',
    +    'topic' = '<yourTopic>',
    +    'properties.group.id' = '<yourGroupId>',
    +    'properties.bootstrap.servers' = '<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>',
    +    'scan.startup.mode' = 'latest-offset',
    +    'format' = 'maxwell-json'
    +);
    +create table printSink(
    +  id bigint,
    +  name string,
    +  description string,
    +  weight DECIMAL(10, 2)
    +   ) with (
    +     'connector' = 'print'
    +   );
    +insert into printSink select * from kafkaSource;
    +

  3. Insert the following data to the corresponding topic in Kafka:

    {
    +   "database":"test",
    +   "table":"e",
    +   "type":"insert",
    +   "ts":1477053217,
    +   "xid":23396,
    +   "commit":true,
    +   "position":"master.000006:800911",
    +   "server_id":23042,
    +   "thread_id":108,
    +   "primary_key": [1, "2016-10-21 05:33:37.523000"],
    +   "primary_key_columns": ["id", "c"],
    +   "data":{
    +     "id":111,
    +     "name":"scooter",
    +     "description":"Big 2-wheel scooter",
    +     "weight":5.15
    +   },
    +   "old":{
    +     "weight":5.18
    +   }
    +}
    +

  4. View the output through either of the following methods:

    • Method 1: Locate the job and click More > FlinkUI. Choose Task Managers > Stdout.
    • Method 2: If you allow DLI to save job logs in OBS, view the output in the taskmanager.out file.
    +
    +I(111,scooter,Big 2-wheel scooter,5.15)
    +

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0415.html b/docs/dli/sqlreference/dli_08_0415.html new file mode 100644 index 00000000..ca2877f7 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0415.html @@ -0,0 +1,89 @@ + + +

Raw

+

Function

The raw format allows you to read and write raw (byte based) values as a single column.

+

Note: This format encodes null values as null of the byte[] type. This can be a limitation when the format is used with upsert-kafka, because upsert-kafka treats null values as tombstone messages (DELETE on the key). Therefore, avoid using the upsert-kafka connector together with the raw format as value.format if the field can have a null value.

+

The raw format is built-in; no additional dependencies are required.

+
+

Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1

Parameter

+

Mandatory

+

Default Value

+

Type

+

Description

+

format

+

Yes

+

None

+

String

+

Format to be used. Set this parameter to raw.

+

raw.charset

+

No

+

UTF-8

+

String

+

Charset to encode the text string.

+

raw.endianness

+

No

+

big-endian

+

String

+

Endianness to encode the bytes of a numeric value. Valid values are big-endian and little-endian. See endianness for more details.

+
+
+
+
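As a sketch, the optional parameters are set alongside the format in the table definition. The following assumes a Kafka topic whose messages each carry a single binary-encoded BIGINT (all connection values are placeholders):

create table rawSource(
+  num bigint
+) with (
+  'connector' = 'kafka',
+  'topic' = '<yourTopic>',
+  'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
+  'format' = 'raw',
+  'raw.endianness' = 'big-endian'
+);
+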

Supported Connectors

  • Kafka
  • Upsert Kafka
+
+

Example

Use Kafka to send data and output the data to print.

+
  1. Create a datasource connection for the communication with the VPC and subnet where Kafka is located and bind the connection to the queue. Set a security group and inbound rule to allow access of the queue and test the connectivity of the queue using the Kafka IP address. For example, locate a general-purpose queue where the job runs and choose More > Test Address Connectivity in the Operation column. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Create a Flink OpenSource SQL job and select Flink 1.12. Copy the following statement and submit the job:

    create table kafkaSource(
    +  log string
    +  ) with (
    +    'connector' = 'kafka',
    +    'topic' = '<yourTopic>',
    +    'properties.group.id' = '<yourGroupId>',
    +    'properties.bootstrap.servers' = '<yourKafkaAddress>:<yourKafkaPort>',
    +    'scan.startup.mode' = 'latest-offset',
    +    'format' = 'raw'
    +);
    +create table printSink(
    +  log string
    +   ) with (
    +     'connector' = 'print'
    +   );
    +insert into printSink select * from kafkaSource;
    +

  3. Insert the following data to the corresponding topic in Kafka:

    47.29.201.179 - - [28/Feb/2019:13:17:10 +0000] "GET /?p=1 HTTP/2.0" 200 5316 "https://domain.com/?p=1" "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36" "2.75"
    +

  4. View the output through either of the following methods:

    • Method 1: Locate the job and click More > FlinkUI. Choose Task Managers > Stdout.
    • Method 2: If you allow DLI to save job logs in OBS, view the output in the taskmanager.out file.
    +
    +I(47.29.201.179 - - [28/Feb/2019:13:17:10 +0000] "GET /?p=1 HTTP/2.0" 200 5316 "https://domain.com/?p=1"
    +"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36" "2.75")
    +

+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0416.html b/docs/dli/sqlreference/dli_08_0416.html new file mode 100644 index 00000000..833ccd20 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0416.html @@ -0,0 +1,27 @@ + + +

DML Syntax

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0417.html b/docs/dli/sqlreference/dli_08_0417.html new file mode 100644 index 00000000..c635a3be --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0417.html @@ -0,0 +1,197 @@ + + +

SELECT

+

SELECT

Syntax

+
1
+2
+3
+4
+5
+6
SELECT [ ALL | DISTINCT ]
+  { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

SELECT is used to select data from a table.

+

ALL indicates that all results are returned.

+

DISTINCT indicates that the duplicated results are removed.

+

Precautions

+
  • The to-be-queried table must exist. Otherwise, an error is reported.
  • WHERE is used to specify the search condition, which can contain arithmetic, relational, or logical operators.
  • GROUP BY is used to specify the grouping fields, which can be one or more fields.
+

Example

+

Select the orders that contain more than three units.

+
1
insert into temp SELECT  * FROM Orders WHERE units > 3; 
+
+ +
+

Insert a group of constant data.

+
1
insert into temp select 'Lily', 'male', 'student', 17;
+
+ +
+
+

WHERE

Syntax

+
1
+2
+3
SELECT   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+
+ +
+

Description

+

The WHERE clause is used to filter query results by the specified conditions.

+

Precautions

+
  • The to-be-queried table must exist.
  • WHERE filters the records that do not meet the requirements.
+

Example

+

Search for orders that contain more than 3 and fewer than 10 units.

+
1
+2
insert into temp SELECT  * FROM Orders
+  WHERE units > 3 and units < 10; 
+
+ +
+
+

HAVING

Function

+

This clause is used to search for the query results that meet the search condition.

+

Syntax

+
1
+2
+3
+4
+5
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

Generally, HAVING and GROUP BY are used together. You can use GROUP BY for grouping and then use HAVING for filtering. Arithmetic operations and aggregate functions are supported in the HAVING clause.

+

Precautions

+

If the filtering condition depends on the results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for the search.

+

Example

+

Group the student table according to the name field and search for the records in which the maximum score is higher than 95 in the group.

+
1
+2
+3
insert into temp SELECT name, max(score) FROM student
+  GROUP BY name
+  HAVING max(score) >95;
+
+ +
+
+

Column-Based GROUP BY

Function

+

This clause is used to group a table based on columns.

+

Syntax

+
1
+2
+3
+4
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+
+ +
+

Description

+

Column-based GROUP BY can be categorized into single-column GROUP BY and multi-column GROUP BY.

+
  • Single-column GROUP BY indicates that the GROUP BY clause contains only one column.
  • Multi-column GROUP BY indicates that the GROUP BY clause contains multiple columns. The table will be grouped according to all fields in the GROUP BY clause. The records whose fields are the same are grouped into one group.
+

Precautions

+

GROUP BY generates updating results in stream processing.

+

Example

+

Group the student table according to the score and name fields and return the grouping results.

+
1
+2
insert into temp SELECT name,score, max(score) FROM student 
+  GROUP BY name,score;
+
+ +
+
+

Expression-Based GROUP BY

Function

+

This clause is used to group streams according to expressions.

+

Syntax

+
1
+2
+3
+4
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+
+ +
+

Description

+

groupItem can have one or more fields. The fields can be wrapped in string functions, but cannot be wrapped in aggregate functions.

+

Precautions

+

None

+

Example

+

Use the substring function to obtain a substring from the name field, group the student table by that substring, and return each substring and its number of records.

+
1
+2
insert into temp SELECT substring(name,6),count(name) FROM student
+  GROUP BY substring(name,6);
+
+ +
+
+

Grouping sets, Rollup, Cube

Function

+
  • GROUP BY GROUPING SETS generates a result set equivalent to that generated by multiple simple GROUP BY UNION ALL statements. Using GROUPING SETS is more efficient.
  • The ROLLUP and CUBE generate multiple groups based on certain rules and then collect statistics by group.
  • The result set generated by CUBE contains all the combinations of values in the selected columns.
  • The result set generated by ROLLUP contains the combinations of a certain layer structure in the selected columns.
+
Syntax
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY groupingItem]
+
+

Description

+

Values of groupingItem can be Grouping sets(columnName [, columnName]*), Rollup(columnName [, columnName]*), and Cube(columnName [, columnName]*).

+

Precautions

+

None

+

Example

+

Return the sums of amount grouped by user and by product, respectively.

+
INSERT INTO temp SELECT SUM(amount)
+FROM Orders
+GROUP BY GROUPING SETS ((user), (product));
+
+
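The same statement shape applies to ROLLUP and CUBE. A hedged sketch that additionally aggregates over the hierarchy of user and product:

INSERT INTO temp SELECT SUM(amount)
+FROM Orders
+GROUP BY ROLLUP (user, product);
+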

GROUP BY Using HAVING

Function

+

This clause filters a table after grouping it using the HAVING clause.

+

Syntax

+
1
+2
+3
+4
+5
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  [ WHERE booleanExpression ]
+  [ GROUP BY { groupItem [, groupItem ]* } ]
+  [ HAVING booleanExpression ]
+
+ +
+

Description

+

Generally, HAVING and GROUP BY are used together: GROUP BY for grouping and then HAVING for filtering.

+

Precautions

+
  • If the filtering condition depends on the results of GROUP BY, the HAVING clause, rather than the WHERE clause, must be used for the search: GROUP BY for grouping and then HAVING for filtering.
  • Fields used in HAVING, except for those used for aggregate functions, must exist in GROUP BY.
  • The arithmetic operation and aggregate function are supported by the HAVING clause.
+

Example

+

Group the transactions by num, use the HAVING clause to filter for the records in which the maximum value of price multiplied by amount is higher than 5000, and return the filtered results.

+
1
+2
+3
+4
insert into temp SELECT num, max(price*amount) FROM transactions
+  WHERE time > '2016-06-01'
+  GROUP BY num
+  HAVING max(price*amount)>5000;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0418.html b/docs/dli/sqlreference/dli_08_0418.html new file mode 100644 index 00000000..c49aa952 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0418.html @@ -0,0 +1,56 @@ + + +

Set Operations

+

Union/Union ALL/Intersect/Except

Syntax

+
1
query { UNION [ ALL ] | INTERSECT | EXCEPT } query
+
+ +
+

Description

+
  • UNION is used to return the union set of multiple query results.
  • INTERSECT is used to return the intersection of multiple query results.
  • EXCEPT is used to return the difference set of multiple query results.
+

Precautions

+
  • Set operations combine the results of multiple queries into one result set. The quantity of columns returned by each SELECT statement must be the same, and the column types must be the same. Column names can be different.
  • By default, UNION takes only distinct records while UNION ALL does not remove duplicates from the result.
+

Example

+

Output the distinct records found in either the Orders1 or Orders2 table.

+
1
+2
insert into temp SELECT  * FROM Orders1
+  UNION SELECT  * FROM Orders2;
+
+ +
+
+
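INTERSECT and EXCEPT follow the same pattern. As a sketch on the same tables, output the records found in both tables, and then the records found only in Orders1:

insert into temp SELECT  * FROM Orders1
+  INTERSECT SELECT  * FROM Orders2;
+
+insert into temp SELECT  * FROM Orders1
+  EXCEPT SELECT  * FROM Orders2;
+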

IN

Syntax

+
1
+2
+3
SELECT [ ALL | DISTINCT ]   { * | projectItem [, projectItem ]* }
+  FROM tableExpression
+  WHERE column_name IN (value (, value)* ) | query
+
+ +
+

Description

+

The IN operator allows multiple values to be specified in the WHERE clause. It returns true if the expression exists in the given table subquery.

+

Precautions

+

The subquery table must consist of a single column, and the data type of the column must be the same as that of the expression.

+

Example

+

Return the user and amount information for orders whose products appear in NewProducts.

+
1
+2
+3
+4
+5
insert into temp SELECT user, amount
+FROM Orders
+WHERE product IN (
+    SELECT product FROM NewProducts
+);
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0419.html b/docs/dli/sqlreference/dli_08_0419.html new file mode 100644 index 00000000..9b2041dc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0419.html @@ -0,0 +1,419 @@ + + +

Window

+

GROUP WINDOW

Description

+

Group Window is defined in GROUP BY. One record is generated from each group. Group Window involves the following functions:

+
  • Array functions +
    + + + + + + + + + + + + + +
    Table 1 Array functions

    Grouping Window Function

    +

    Description

    +

    TUMBLE(time_attr, interval)

    +

    Defines a tumbling time window. A tumbling time window assigns rows to non-overlapping, continuous windows with a fixed duration (interval). For example, a tumbling window of 5 minutes groups rows in 5 minutes intervals. Tumbling windows can be defined on event-time (stream + batch) or processing-time (stream).

    +

    HOP(time_attr, interval, interval)

    +

    Defines a hopping time window (called sliding window in the Table API). A hopping time window has a fixed duration (second interval parameter) and hops by a specified hop interval (first interval parameter). If the hop interval is smaller than the window size, hopping windows are overlapping. Thus, rows can be assigned to multiple windows. For example, a hopping window of 15 minutes size and 5 minute hop interval assigns each row to 3 different windows of 15 minute size, which are evaluated in an interval of 5 minutes. Hopping windows can be defined on event-time (stream + batch) or processing-time (stream).

    +

    SESSION(time_attr, interval)

    +

Defines a session time window. Session time windows do not have a fixed duration but their bounds are defined by a time interval of inactivity, that is, a session window is closed if no event appears for a defined gap period. For example, a session window with a 30-minute gap starts when a row is observed after 30 minutes of inactivity (otherwise the row would be added to an existing window) and is closed if no row is added within 30 minutes. Session windows can work on event-time (stream + batch) or processing-time (stream).

    +
    +
    +

    Notes:

    +

    In streaming mode, the time_attr argument of the group window function must refer to a valid time attribute that specifies the processing time or event time of rows.

    +
    • event-time: The type is timestamp(3).
    • processing-time: No need to specify the type.
    +

    In batch mode, the time_attr argument of the group window function must be an attribute of type timestamp.

    +
    +
  • Window auxiliary functions
    The start and end timestamps of group windows as well as time attributes can be selected with the following auxiliary functions. +
    + + + + + + + + + + + + + + + + +
    Table 2 Window auxiliary functions

    Auxiliary Function

    +

    Description

    +

    TUMBLE_START(time_attr, interval)

    +

    HOP_START(time_attr, interval, interval)

    +

    SESSION_START(time_attr, interval)

    +

    Returns the timestamp of the inclusive lower bound of the corresponding tumbling, hopping, or session window.

    +

    TUMBLE_END(time_attr, interval)

    +

    HOP_END(time_attr, interval, interval)

    +

    SESSION_END(time_attr, interval)

    +

    Returns the timestamp of the exclusive upper bound of the corresponding tumbling, hopping, or session window.

    +

    Note: The exclusive upper bound timestamp cannot be used as a rowtime attribute in subsequent time-based operations, such as interval joins and group window or over window aggregations.

    +

    TUMBLE_ROWTIME(time_attr, interval)

    +

    HOP_ROWTIME(time_attr, interval, interval)

    +

    SESSION_ROWTIME(time_attr, interval)

    +

    Returns the timestamp of the inclusive upper bound of the corresponding tumbling, hopping, or session window. The resulting attribute is a rowtime attribute that can be used in subsequent time-based operations such as interval joins and group window or over window aggregations.

    +

    TUMBLE_PROCTIME(time_attr, interval)

    +

    HOP_PROCTIME(time_attr, interval, interval)

    +

    SESSION_PROCTIME(time_attr, interval)

    +

    Returns a proctime attribute that can be used in subsequent time-based operations such as interval joins and group window or over window aggregations.

    +
    +
    +
    +

Note: Auxiliary functions must be called with exactly the same arguments as the group window function in the GROUP BY clause.

    +
+

Example

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
// Calculate the SUM every day (event time).
+insert into temp SELECT name,
+    TUMBLE_START(ts, INTERVAL '1' DAY) as wStart,
+    SUM(amount)
+    FROM Orders
+    GROUP BY TUMBLE(ts, INTERVAL '1' DAY), name;
+
+// Calculate the SUM every day (processing time). 
+insert into temp SELECT name, 
+    SUM(amount) 
+    FROM Orders 
+    GROUP BY TUMBLE(proctime, INTERVAL '1' DAY), name;
+
+// Calculate the SUM over the recent 24 hours every hour (event time).
+insert into temp SELECT product, 
+    SUM(amount) 
+    FROM Orders 
+    GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '1' DAY), product;
+
+// Calculate the SUM of each session and an inactive interval every 12 hours (event time).
+insert into temp SELECT name, 
+    SESSION_START(ts, INTERVAL '12' HOUR) AS sStart,
+    SESSION_END(ts, INTERVAL '12' HOUR) AS sEnd,
+    SUM(amount)
+    FROM Orders
+    GROUP BY SESSION(ts, INTERVAL '12' HOUR), name;
+
+ +
+
+

TUMBLE WINDOW Extension

Function

+
The extension functions of the DLI tumbling window are as follows:
  • A tumbling window is triggered periodically to reduce latency.

Before the tumbling window ends, the window can be triggered periodically based on the configured frequency, and the computation result from the window start to the current time is output. This does not affect the final output, and the latest result can be viewed in each period before the window ends.

    +
  • Data accuracy is improved.

You can set a latency for the end of the window. The window output is updated according to the configured latency each time a piece of late data arrives.

    +
+
+

Precautions

+
  • If you use the INSERT statement to write results to a sink, the sink must support the upsert mode. Ensure that the result table supports upsert operations and that a primary key is defined.
  • Latency settings only take effect for event time and not for proctime.
  • Auxiliary functions must be called with the same parameters as the grouping window functions in the GROUP BY clause.
  • If event time is used, watermark must be used. The code is as follows (order_time is identified as the event time column and watermark is set to 3 seconds):
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  watermark for order_time as order_time - INTERVAL '3' SECOND
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourTopic>',
    +  'properties.bootstrap.servers' = '<yourKafka>:<port>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
  • If the proctime is used, you need to use the computed column. The code is as follows (proc is the processing time column):
    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  proc as proctime()
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourTopic>',
    +  'properties.bootstrap.servers' = '<yourKafka>:<port>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
+

Syntax

+
TUMBLE(time_attr, window_interval, period_interval, lateness_interval)
+

Example

+
The current time attribute column is testtime, the window interval is 10 seconds, and the latency is 10 seconds.
TUMBLE(testtime, INTERVAL '10' SECOND, INTERVAL '10' SECOND, INTERVAL '10' SECOND)
+
+
+

Description

+ +
+ + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Description

+

Format

+

time_attr

+

Event time or processing time attribute column

+
  • event-time: The type is timestamp(3).
  • processing-time: No need to specify the type.
+

-

+

window_interval

+

Duration of the window

+
  • Format 1: INTERVAL '10' SECOND

    The window interval is 10 seconds. You can change the value as needed.

    +
  • Format 2: INTERVAL '10' MINUTE

    The window interval is 10 minutes. You can change the value as needed.

    +
  • Format 3: INTERVAL '10' DAY

    The window interval is 10 days. You can change the value as needed.

    +
+

period_interval

+

Frequency of periodic triggering within the window range. That is, before the window ends, the output result is updated at an interval specified by period_interval from the time when the window starts. If this parameter is not set, the periodic triggering policy is not used by default.

+

lateness_interval

+

Time to postpone the end of the window. The system continues to collect the data that reaches the window within lateness_interval after the window ends, and the output is updated for each record that arrives within lateness_interval.

+
NOTE:

If the time window is for processing time, lateness_interval does not take effect.

+
+
+
+
Values of period_interval and lateness_interval cannot be negative numbers.
  • If period_interval is set to 0, periodic triggering is disabled for the window.
  • If lateness_interval is set to 0, the latency after the window ends is disabled.
  • If neither of the two parameters is set, both periodic triggering and latency are disabled, and only the regular tumbling window functions are available.
  • If only the latency function needs to be used, set period_interval to INTERVAL '0' SECOND (see the sketch below).
+
+
+
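For example, a hedged sketch of a latency-only tumbling window (1-minute window, periodic triggering disabled, 10-second latency; the time attribute column name is illustrative):

TUMBLE(order_time, INTERVAL '1' MINUTE, INTERVAL '0' SECOND, INTERVAL '10' SECOND)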

Auxiliary Functions

+ +
+ + + + + + + + + + +
Table 4 Auxiliary function

Auxiliary Function

+

Description

+

TUMBLE_START(time_attr, window_interval, period_interval, lateness_interval)

+

Returns the timestamp of the inclusive lower bound of the corresponding tumbling window.

+

TUMBLE_END(time_attr, window_interval, period_interval, lateness_interval)

+

Returns the timestamp of the exclusive upper bound of the corresponding tumbling window.

+
+
+

Example

+

1. In this example, Kafka is used as the data source table containing the order information, and JDBC is used as the data result table for collecting statistics on the number of orders settled by a user within 30 seconds. The user ID and window start time are used as the primary key, and the result statistics are collected to JDBC in real time.

+
  1. Create a datasource connection for the communication with the VPC and subnet where MySQL and Kafka are located and bind the connection to the queue. Set an inbound rule for the security group to allow access of the queue, and test the connectivity of the queue using the MySQL and Kafka addresses. If the connection is successful, the datasource is bound to the queue. Otherwise, the binding fails.
  2. Run the following statement to create the order_count table in the MySQL Flink database:

    CREATE TABLE `flink`.`order_count` (
    +	`user_id` VARCHAR(32) NOT NULL,
    +	`window_start` TIMESTAMP NOT NULL,
    +	`window_end` TIMESTAMP NULL,
    +	`total_num` BIGINT UNSIGNED NULL,
    +	PRIMARY KEY (`user_id`, `window_start`)
    +)	ENGINE = InnoDB
    +	DEFAULT CHARACTER SET = utf8mb4
    +	COLLATE = utf8mb4_general_ci;
    +

  3. Create a Flink OpenSource SQL job and submit the job. In this example, the window size is 30 seconds, the triggering period is 10 seconds, and the latency is 5 seconds. That is, if the result is updated before the window ends, the intermediate result will be output every 10 seconds. After the watermark is reached and the window ends, the data whose event time is within 5 seconds of the watermark will still be processed and counted in the current window. If the event time exceeds 5 seconds of the watermark, the data will be discarded.

    CREATE TABLE orders (
    +  order_id string,
    +  order_channel string,
    +  order_time timestamp(3),
    +  pay_amount double,
    +  real_pay double,
    +  pay_time string,
    +  user_id string,
    +  user_name string,
    +  area_id string,
    +  watermark for order_time as order_time - INTERVAL '3' SECOND
    +) WITH (
    +  'connector' = 'kafka',
    +  'topic' = '<yourTopic>',
    +  'properties.bootstrap.servers' = '<yourKafka>:<port>',
    +  'properties.group.id' = '<yourGroupId>',
    +  'scan.startup.mode' = 'latest-offset',
    +  'format' = 'json'
    +);
    +
    +CREATE TABLE jdbcSink (
    +  user_id string,
    +  window_start timestamp(3),
    +  window_end timestamp(3),
    +  total_num BIGINT,
    +  primary key (user_id, window_start) not enforced
    +) WITH (
    +  'connector' = 'jdbc',
    +  'url' = 'jdbc:mysql://<yourMySQL>:3306/flink',
    +  'table-name' = 'order_count',
    +  'username' = '<yourUserName>',
    +  'password' = '<yourPassword>',
    +  'sink.buffer-flush.max-rows' = '1'
    +);
    +
    +insert into jdbcSink select 
    +    user_id,
    +    TUMBLE_START(order_time, INTERVAL '30' SECOND, INTERVAL '10' SECOND, INTERVAL '5' SECOND),
    +    TUMBLE_END(order_time, INTERVAL '30' SECOND, INTERVAL '10' SECOND, INTERVAL '5' SECOND),
    +    COUNT(*) from orders
    +    GROUP BY user_id, TUMBLE(order_time, INTERVAL '30' SECOND, INTERVAL '10' SECOND, INTERVAL '5' SECOND);
    +

  4. Insert data to Kafka. Assume that orders are settled at different times and that the order data at 10:00:13 arrives late.

    {"order_id":"202103241000000001", "order_channel":"webShop", "order_time":"2021-03-24 10:00:00", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241000000002", "order_channel":"webShop", "order_time":"2021-03-24 10:00:20", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241000000003", "order_channel":"webShop", "order_time":"2021-03-24 10:00:33", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +
    +{"order_id":"202103241000000004", "order_channel":"webShop", "order_time":"2021-03-24 10:00:13", "pay_amount":"100.00", "real_pay":"100.00", "pay_time":"2021-03-24 10:02:03", "user_id":"0001", "user_name":"Alice", "area_id":"330106"}
    +

  5. Run the following statement in the MySQL database to view the output result. Because upsert writes with the same primary key overwrite the earlier periodic outputs, only the final result is displayed:

    select * from order_count
    +
    user_id      window_start         window_end        total_num
    +0001      2021-03-24 10:00:00  2021-03-24 10:00:30    3
    +0001      2021-03-24 10:00:30  2021-03-24 10:01:00    1
    +

+

OVER WINDOW

The difference between Over Window and Group Window is that Over Window generates one output record for each input row.

+

Syntax

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
SELECT agg1(attr1) OVER (
+  [PARTITION BY partition_name]
+  ORDER BY proctime|rowtime 
+  ROWS  
+  BETWEEN (UNBOUNDED|rowCOUNT) PRECEDING AND CURRENT ROW) FROM TABLENAME
+
+SELECT agg1(attr1) OVER (
+  [PARTITION BY partition_name]
+  ORDER BY proctime|rowtime 
+  RANGE  
+  BETWEEN (UNBOUNDED|timeInterval) PRECEDING AND CURRENT ROW) FROM TABLENAME
+
+ +
+

Description

+ +
+ + + + + + + + + + + + + + + + +
Table 5 Parameter description

Parameter

+

Description

+

PARTITION BY

+

Indicates the partitioning key of the group. Each group performs calculations separately.

+

ORDER BY

+

Indicates the processing time or event time as the timestamp for data.

+

ROWS

+

Indicates the count window.

+

RANGE

+

Indicates the time window.

+
+
+

Precautions

+
  • All aggregates must be defined in the same window, that is, in the same partition, sort, and range.
  • Currently, only windows from PRECEDING (unbounded or bounded) to CURRENT ROW are supported. The range described by FOLLOWING is not supported.
  • ORDER BY must be specified for a single time attribute.
+

Example

+
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
// Calculate the count and total amount from the start of the job to now (in proctime).
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY proctime RANGE UNBOUNDED preceding) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY proctime RANGE UNBOUNDED preceding) as cnt2
+    FROM Orders;
+  
+// Calculate the count and total amount over the current row and the four preceding rows (in proctime).
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY proctime ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY proctime ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) as cnt2
+    FROM Orders;
+
+// Calculate the count and total amount over the last 60 seconds (in event time). Events are processed based on event time, which is the timeattr field in Orders.
+insert into temp SELECT name,
+    count(amount) OVER (PARTITION BY name ORDER BY timeattr RANGE BETWEEN INTERVAL '60' SECOND PRECEDING AND CURRENT ROW) as cnt1,
+    sum(amount) OVER (PARTITION BY name ORDER BY timeattr RANGE BETWEEN INTERVAL '60' SECOND PRECEDING AND CURRENT ROW) as cnt2
+    FROM Orders;
+
+ +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0420.html b/docs/dli/sqlreference/dli_08_0420.html new file mode 100644 index 00000000..46adac89 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0420.html @@ -0,0 +1,102 @@ + + +

JOIN

+

Equi-join

Syntax

+
1
+2
FROM tableExpression INNER | LEFT | RIGHT | FULL JOIN tableExpression
+  ON value11 = value21 [ AND value12 = value22]
+
+ +
+
+

Precautions

+
  • Currently, only equi-joins are supported, that is, joins with at least one conjunctive condition with an equality predicate. Arbitrary cross or theta joins are not supported.
  • Tables are joined in the order in which they are specified in the FROM clause. Make sure to specify tables in an order that does not yield a cross join (Cartesian product), which are not supported and would cause a query to fail.
  • For streaming queries, the required state to compute the query result might grow infinitely depending on the number of distinct input rows. Provide a query configuration with a valid retention interval to prevent excessive state size.
+

Example

+
SELECT *
+FROM Orders INNER JOIN Product ON Orders.productId = Product.id;
+
+SELECT *
+FROM Orders LEFT JOIN Product ON Orders.productId = Product.id;
+
+SELECT *
+FROM Orders RIGHT JOIN Product ON Orders.productId = Product.id;
+
+SELECT *
+FROM Orders FULL OUTER JOIN Product ON Orders.productId = Product.id;
+

Time-windowed Join

Function

+

Each piece of data in a stream is joined with data within a relative time range in the other stream.

+

Syntax

+
from t1 JOIN t2 ON t1.key = t2.key AND TIMEBOUND_EXPRESSION
+

Description

+

TIMEBOUND_EXPRESSION can be in either of the following formats:

+
  • L.time between LowerBound(R.time) and UpperBound(R.time)
  • R.time between LowerBound(L.time) and UpperBound(L.time)
  • Comparison expression with the time attributes (L.time/R.time)
+

Precautions

+

A time-windowed join requires at least one equi-join predicate and a join condition that bounds the time of both streams.

+

For example, use two range predicates (<, <=, >=, or >), a BETWEEN predicate, or an equal predicate that compares the same type of time attributes (such as processing time and event time) in two input tables.

+

For example, the following predicates are valid window join conditions:

+
  • ltime = rtime
  • ltime >= rtime AND ltime < rtime + INTERVAL '10' MINUTE
  • ltime BETWEEN rtime - INTERVAL '10' SECOND AND rtime + INTERVAL '5' SECOND
+
+

Example

+

Join all orders with their corresponding shipments if the order was shipped within four hours after it was received.

+
SELECT *
+FROM Orders o, Shipments s
+WHERE o.id = s.orderId AND
+      o.ordertime BETWEEN s.shiptime - INTERVAL '4' HOUR AND s.shiptime;
+

Expanding arrays into a relation

Precautions

+

This clause is used to return a new row for each element in the given array. Unnesting WITH ORDINALITY is not yet supported.

+

Example

+
SELECT users, tag
+FROM Orders CROSS JOIN UNNEST(tags) AS t (tag);
+
+

User-Defined Table Functions

Function

+

This clause is used to join a table with the results of a table function. Each row of the left (outer) table is joined with all rows produced by the corresponding call of the table function.

+

Precautions

+

A left outer join against a lateral table requires a TRUE literal in the ON clause.

+

Example

+

A row of the left (outer) table is dropped if its table function call returns an empty result.

+
SELECT users, tag
+FROM Orders, LATERAL TABLE(unnest_udtf(tags)) t AS tag;
+

If a table function call returns an empty result, the corresponding outer row is preserved and the result is padded with null values.

+
SELECT users, tag
+FROM Orders LEFT JOIN LATERAL TABLE(unnest_udtf(tags)) t AS tag ON TRUE;
+
+

Join Temporal Table Function

Function

+

This clause is used to join a table with the results of a temporal table function.

Precautions

+

Currently only inner join and left outer join with temporal tables are supported.

+

Example

+

Assuming Rates is a temporal table function, the join can be expressed in SQL as follows:

+
SELECT
+  o_amount, r_rate
+FROM
+  Orders,
+  LATERAL TABLE (Rates(o_proctime))
+WHERE
+  r_currency = o_currency;
+
+

Join Temporal Tables

Function

+

This clause is used to join a temporal table.

+

Syntax

+
SELECT column-names
+FROM table1  [AS <alias1>]
+[LEFT] JOIN table2 FOR SYSTEM_TIME AS OF table1.proctime [AS <alias2>]
+ON table1.column-name1 = table2.key-name1
+

Description

+
  • table1.proctime indicates the processing time attribute (computed column) of table1.
  • FOR SYSTEM_TIME AS OF table1.proctime indicates that when records in the left table are joined with the dimension table on the right, only the snapshot of the dimension table at the current processing time is used for matching.
+

Precautions

+

Only inner and left joins are supported for temporal tables with processing time attributes.

+

Example

+

LatestRates is a dimension table (such as an HBase table) that is materialized with the latest rate.

+
SELECT
+  o.amount, o.currency, r.rate, o.amount * r.rate
+FROM
+  Orders AS o
+  JOIN LatestRates FOR SYSTEM_TIME AS OF o.proctime AS r
+  ON r.currency = o.currency;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0421.html b/docs/dli/sqlreference/dli_08_0421.html new file mode 100644 index 00000000..929411c1 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0421.html @@ -0,0 +1,30 @@ + + +

OrderBy & Limit

+

OrderBy

Function

+

This clause is used to sort data in ascending order on a time attribute.

+

Precautions

+

Currently, only sorting by time attribute is supported.

+

Example

+

Sort data in ascending order on the time attribute.

+
SELECT *
+FROM Orders
+ORDER BY orderTime;
+
+

Limit

Function

+

This clause is used to constrain the number of rows returned.

+

Precautions

+

This clause is used in conjunction with ORDER BY to ensure that the results are deterministic.

+

Example

+
SELECT *
+FROM Orders
+ORDER BY orderTime
+LIMIT 3;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0422.html b/docs/dli/sqlreference/dli_08_0422.html new file mode 100644 index 00000000..425bf10d --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0422.html @@ -0,0 +1,32 @@ + + +

Top-N

+

Function

Top-N queries ask for the N smallest or largest values ordered by columns. Both the smallest and largest value sets are considered Top-N queries. Top-N queries are useful when you need to display only the N bottom-most or top-most records from a batch/streaming table on a condition.

+
+

Syntax

SELECT [column_list]
+FROM (
+   SELECT [column_list],
+     ROW_NUMBER() OVER ([PARTITION BY col1[, col2...]]
+       ORDER BY col1 [asc|desc][, col2 [asc|desc]...]) AS rownum
+   FROM table_name)
+WHERE rownum <= N [AND conditions]
+
+

Description

  • ROW_NUMBER(): Assigns a unique, sequential number to each row within the current partition, starting from one. Currently, only ROW_NUMBER is supported as the over window function. RANK() and DENSE_RANK() will be supported in the future.
  • PARTITION BY col1[, col2...]: Specifies the partition columns. Each partition will have a Top-N result.
  • ORDER BY col1 [asc|desc][, col2 [asc|desc]...]: Specifies the ordering columns. The ordering directions can be different on different columns.
  • WHERE rownum <= N: The rownum <= N condition is required for Flink to recognize this query as a Top-N query. N represents the number of smallest or largest records to retain.
  • [AND conditions]: Other conditions can be freely added in the WHERE clause, but they can only be combined with rownum <= N using the AND conjunction.
+
+

Precautions

  • The Top-N query is result-updating.
  • Flink SQL will sort the input data stream according to the order key, so if the top N records change, the changed records will be sent as retraction/update records downstream.
  • If the top N records need to be stored in external storage, the result table should have the same unique key as the Top-N query.
+
+

Example

The following example obtains the top five products per category that have the maximum sales in real time.

+
SELECT * 
+  FROM ( 
+     SELECT *,
+         ROW_NUMBER() OVER (PARTITION BY category ORDER BY sales DESC) as row_num
+     FROM ShopSales)
+  WHERE row_num <= 5;
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0423.html b/docs/dli/sqlreference/dli_08_0423.html new file mode 100644 index 00000000..5ae5f1cc --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0423.html @@ -0,0 +1,32 @@ + + +

Deduplication

+

Function

Deduplication removes rows that are duplicates over a set of columns, keeping only the first or the last one.

+
+

Syntax

SELECT [column_list]
+FROM (
+   SELECT [column_list],
+     ROW_NUMBER() OVER ([PARTITION BY col1[, col2...]]
+       ORDER BY time_attr [asc|desc]) AS rownum
+   FROM table_name)
+WHERE rownum = 1
+
+

Description

  • ROW_NUMBER(): Assigns a unique, sequential number to each row, starting with one.
  • PARTITION BY col1[, col2...]: Specifies the partition columns, i.e. the deduplicate key.
  • ORDER BY time_attr [asc|desc]: Specifies the ordering column, which must be a time attribute. Currently, Flink supports proctime only. Ordering by ASC means keeping the first row; ordering by DESC means keeping the last row.
  • WHERE rownum = 1: The rownum = 1 is required for Flink to recognize this query is deduplication.
+
+

Precautions

None

+
+

Example

The following example shows how to remove duplicate rows on order_id. proctime is a processing-time attribute.

+
SELECT order_id, user, product, number
+  FROM (
+     SELECT *,
+         ROW_NUMBER() OVER (PARTITION BY order_id ORDER BY proctime ASC) as row_num
+     FROM Orders)
+  WHERE row_num = 1;
+
+
+
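Conversely, to keep the last row as described above, order by proctime in descending order. A sketch on the same table:

SELECT order_id, user, product, number
+  FROM (
+     SELECT *,
+         ROW_NUMBER() OVER (PARTITION BY order_id ORDER BY proctime DESC) as row_num
+     FROM Orders)
+  WHERE row_num = 1;
+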
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0424.html b/docs/dli/sqlreference/dli_08_0424.html new file mode 100644 index 00000000..1aed98e9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0424.html @@ -0,0 +1,17 @@ + + +

Functions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0425.html b/docs/dli/sqlreference/dli_08_0425.html new file mode 100644 index 00000000..d1ab4d7e --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0425.html @@ -0,0 +1,181 @@ + + +

User-Defined Functions (UDFs)

+

Overview

DLI supports the following three types of user-defined functions (UDFs):

+
+
  • Regular UDF: takes in one or more input parameters and returns a single result.
  • User-defined table-generating function (UDTF): takes in one or more input parameters and returns multiple rows or columns.
  • User-defined aggregate function (UDAF): aggregates multiple records into one value.
+
  • UDFs can only be used in dedicated queues.
  • Currently, Python is not supported for programming UDFs, UDTFs, and UDAFs.
+
+

POM Dependency

<dependency>
+        <groupId>org.apache.flink</groupId>
+        <artifactId>flink-table-common</artifactId>
+        <version>1.10.0</version>
+        <scope>provided</scope>
+</dependency>
+
+

Using UDFs

  1. Encapsulate the implemented UDFs into a JAR package and upload the package to OBS.
  2. In the navigation pane of the DLI management console, choose Data Management > Package Management. On the displayed page, click Create and use the JAR package uploaded to OBS to create a package.
  3. In the left navigation pane, choose Job Management and click Flink Jobs. Locate the row where the target job resides and click Edit in the Operation column to switch to the page where you can edit the job.
  4. Click the Running Parameters tab of your job, select the UDF JAR and click Save.
  5. Add a CREATE FUNCTION statement to the SQL statements to use the functions, as shown below:
+
+
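For example, a sketch that assumes the sample UdfScalarFunction class shown below:

CREATE FUNCTION udf_test AS 'com.company.udf.UdfScalarFunction';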

UDF

A regular UDF must inherit the ScalarFunction class and implement the eval method. The open and close methods are optional.

+
+

Example code

+
import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.ScalarFunction;
+public class UdfScalarFunction extends ScalarFunction {
+  private int factor = 12;
+  public UdfScalarFunction() {
+    this.factor = 12;
+  }
+  /**
+   * (optional) Initialization
+   * @param context
+   */
+  @Override
+  public void open(FunctionContext context) {}
+  /**
+   * Custom logic
+   * @param s
+   * @return
+   */
+   public int eval(String s) {
+     return s.hashCode() * factor;
+   }
+   /**
+    * Optional
+    */
+   @Override
+   public void close() {}
+}
+

Example

+
1
+2
CREATE FUNCTION udf_test AS 'com.company.udf.UdfScalarFunction';
+INSERT INTO sink_stream select udf_test(attr) FROM source_stream;
+
+ +
+

UDTF

A UDTF must inherit the TableFunction class and implement the eval method. The open and close methods are optional. If the UDTF needs to return multiple columns, declare the return value as Tuple or Row. If Row is used, override the getResultType method to declare the returned field types.

+
+

Example code

+
import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.table.functions.FunctionContext;
+import org.apache.flink.table.functions.TableFunction;
+import org.apache.flink.types.Row;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+public class UdfTableFunction extends TableFunction<Row> {
+  private Logger log = LoggerFactory.getLogger(TableFunction.class);
+  /**
+   * (optional) Initialization
+   * @param context
+   */
+  @Override
+  public void open(FunctionContext context) {}
+  public void eval(String str, String split) {
+    for (String s : str.split(split)) {
+      Row row = new Row(2);
+      row.setField(0, s);
+      row.setField(1, s.length());
+      collect(row);
+    }
+  }
+  /**
+   * Declare the type returned by the function
+   * @return
+   */
+  @Override
+  public TypeInformation<Row> getResultType() {
+  return Types.ROW(Types.STRING, Types.INT);
+  }
+  /**
+    * Optional
+   */
+  @Override
+  public void close() {}
+ }
+

Example

+

The UDTF supports CROSS JOIN and LEFT JOIN. When the UDTF is used, the LATERAL and TABLE keywords must be included.

+
  • CROSS JOIN: does not output the data of a row in the left table if the UDTF does not output a result for that row.
  • LEFT JOIN: outputs the data of a row in the left table even if the UDTF does not output a result for that row, padding the UDTF-related fields with null.
+
1
+2
+3
+4
+5
+6
+7
CREATE FUNCTION udtf_test AS 'com.company.udf.TableFunction';
+// CROSS JOIN
+INSERT INTO sink_stream select subValue, length FROM source_stream, LATERAL
+TABLE(udtf_test(attr, ',')) as T(subValue, length);
+// LEFT JOIN
+INSERT INTO sink_stream select subValue, length FROM source_stream LEFT JOIN LATERAL
+TABLE(udtf_test(attr, ',')) as T(subValue, length) ON TRUE;
+
+ +
+

UDAF

A UDAF must inherit the AggregateFunction class. You need to create an accumulator for storing the intermediate computing results, for example, WeightedAvgAccum in the following example code.

+
+

Example code

+
public class WeightedAvgAccum {
+public long sum = 0;
+public int count = 0;
+}
+

+
import org.apache.flink.table.functions.AggregateFunction;
+import java.util.Iterator;
+/**
+* The first type variable is the type returned by the aggregation function, and the second type variable is of the Accumulator type.
+* Weighted Average user-defined aggregate function.
+*/
+public class UdfAggFunction extends AggregateFunction<Long, WeightedAvgAccum> {
+// Initialize the accumulator.
+  @Override
+  public WeightedAvgAccum createAccumulator() {
+    return new WeightedAvgAccum();
+  }
+// Return the intermediate computing value stored in the accumulator.
+  @Override
+  public Long getValue(WeightedAvgAccum acc) {
+    if (acc.count == 0) {
+       return null;
+    } else {
+      return acc.sum / acc.count;
+ }
+}
+// Update the intermediate computing value according to the input.
+public void accumulate(WeightedAvgAccum acc, long iValue) {
+acc.sum += iValue;
+acc.count += 1;
+}
+// Perform the retraction operation, which is opposite to the accumulate operation.
+public void retract(WeightedAvgAccum acc, long iValue) {
+acc.sum -= iValue;
+acc.count -= 1;
+}
+// Combine multiple accumulator values.
+public void merge(WeightedAvgAccum acc, Iterable<WeightedAvgAccum> it) {
+Iterator<WeightedAvgAccum> iter = it.iterator();
+while (iter.hasNext()) {
+WeightedAvgAccum a = iter.next();
+acc.count += a.count;
+acc.sum += a.sum;
+}
+}
+// Reset the intermediate computing value.
+public void resetAccumulator(WeightedAvgAccum acc) {
+acc.count = 0;
+acc.sum = 0L;
+}
+}
+

Example

+
1
+2
CREATE FUNCTION udaf_test AS 'com.company.udf.UdfAggFunction';
+INSERT INTO sink_stream SELECT udaf_test(attr2) FROM source_stream GROUP BY attr1;
+
+ +
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0426.html b/docs/dli/sqlreference/dli_08_0426.html new file mode 100644 index 00000000..3d792d45 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0426.html @@ -0,0 +1,35 @@ + + +

Built-In Functions

+
+
+ + + +
+ diff --git a/docs/dli/sqlreference/dli_08_0427.html b/docs/dli/sqlreference/dli_08_0427.html new file mode 100644 index 00000000..817e2506 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0427.html @@ -0,0 +1,573 @@ + + +

Mathematical Operation Functions

+

Relational Operators

All data types can be compared by using relational operators and the result is returned as a BOOLEAN value.

+

Relational operators are binary operators. The two compared data types must be of the same type or support implicit conversion.

+

Table 1 lists all relational operators supported by Flink SQL.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Relational Operators

Operator

+

Returned Data Type

+

Description

+

A = B

+

BOOLEAN

+

If A is equal to B, then TRUE is returned. Otherwise, FALSE is returned. This operator is used for value assignment.

+

A <> B

+

BOOLEAN

+

If A is not equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned. This operator follows the standard SQL syntax.

+

A < B

+

BOOLEAN

+

If A is less than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A <= B

+

BOOLEAN

+

If A is less than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A > B

+

BOOLEAN

+

If A is greater than B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A >= B

+

BOOLEAN

+

If A is greater than or equal to B, then TRUE is returned. Otherwise, FALSE is returned. If A or B is NULL, then NULL is returned.

+

A IS NULL

+

BOOLEAN

+

If A is NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS NOT NULL

+

BOOLEAN

+

If A is not NULL, then TRUE is returned. Otherwise, FALSE is returned.

+

A IS DISTINCT FROM B

+

BOOLEAN

+

If A is not equal to B, TRUE is returned. NULL values are treated as identical to each other.

+

A IS NOT DISTINCT FROM B

+

BOOLEAN

+

If A is equal to B, TRUE is returned. NULL values are treated as identical to each other.

+

A BETWEEN [ASYMMETRIC | SYMMETRIC] B AND C

+

BOOLEAN

+

If A is greater than or equal to B but less than or equal to C, TRUE is returned.

+
  • ASYMMETRIC: indicates that the order of B and C matters.

    For example, "A BETWEEN ASYMMETRIC B AND C" is equivalent to "A BETWEEN B AND C".

    +
  • SYMMETRIC: indicates that B and C are not location-related.

    For example, "A BETWEEN SYMMETRIC B AND C" is equivalent to "A BETWEEN B AND C) OR (A BETWEEN C AND B".

    +
+

A NOT BETWEEN [ASYMMETRIC | SYMMETRIC] B AND C

+

BOOLEAN

+

If A is less than B or greater than C, TRUE is returned.

+
  • ASYMMETRIC: indicates that the order of B and C matters: B is the lower bound and C is the upper bound.

    For example, "A NOT BETWEEN ASYMMETRIC B AND C" is equivalent to "A NOT BETWEEN B AND C".

    +
  • SYMMETRIC: indicates that the order of B and C does not matter.

    For example, "A NOT BETWEEN SYMMETRIC B AND C" is equivalent to "(A NOT BETWEEN B AND C) OR (A NOT BETWEEN C AND B)".

    +
+

A LIKE B [ ESCAPE C ]

+

BOOLEAN

+

If A matches pattern B, TRUE is returned. The escape character C can be defined as required.

+

A NOT LIKE B [ ESCAPE C ]

+

BOOLEAN

+

If A does not match pattern B, TRUE is returned. The escape character C can be defined as required.

+

A SIMILAR TO B [ ESCAPE C ]

+

BOOLEAN

+

If A matches regular expression B, TRUE is returned. The escape character C can be defined as required.

+

A NOT SIMILAR TO B [ ESCAPE C ]

+

BOOLEAN

+

If A does not match regular expression B, TRUE is returned. The escape character C can be defined as required.

+

value IN (value [, value]* )

+

BOOLEAN

+

If the value is equal to any value in the list, TRUE is returned.

+

value NOT IN (value [, value]* )

+

BOOLEAN

+

If the value is not equal to any value in the list, TRUE is returned.

+

EXISTS (sub-query)

+

BOOLEAN

+

If sub-query returns at least one row, TRUE is returned.

+

value IN (sub-query)

+

BOOLEAN

+

If value is equal to a row returned by subquery, TRUE is returned.

+

value NOT IN (sub-query)

+

BOOLEAN

+

If value is not equal to a row returned by subquery, TRUE is returned.

+
+
+

Precautions

+
  • Values of the double, real, and float types have limited precision. Do not use the equal sign (=) to compare two values of the double type. Instead, subtract the two values, take the absolute value of the difference, and treat the values as equal when that absolute value is small enough, as shown in the sketch after this list. For example:
    abs(0.9999999999 - 1.0000000000) < 0.000000001 // 0.9999999999 and 1.0000000000 have 10 decimal places of precision, while the threshold 0.000000001 has 9. The two values can therefore be regarded as equal.
    +
+
  • Comparison between data of the numeric type and character strings is allowed. During comparison using relational operators (>, <, <=, and >=), data of the string type is converted to the numeric type by default. The string must contain only numeric characters.
  • Character strings can be compared using relational operators.
+
+
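
The following is a minimal sketch of the recommended double comparison, assuming a source table source_stream with a DOUBLE column amount and a sink table temp (both hypothetical names):

-- Treat two double values as equal when their absolute difference is small enough.
INSERT INTO temp
SELECT amount
FROM source_stream
WHERE ABS(amount - 1.0000000000) < 0.000000001;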

Logical Operators

Common logical operators are AND, OR, and NOT. Their priority order is NOT > AND > OR.

+

Table 2 lists the calculation rules. A and B indicate logical expressions.

+ +
Table 2 Logical Operators

Operator

+

Returned Data Type

+

Description

+

A OR B

+

BOOLEAN

+

If A or B is TRUE, TRUE is returned. Three-valued logic is supported.

+

A AND B

+

BOOLEAN

+

If both A and B are TRUE, TRUE is returned. Three-valued logic is supported.

+

NOT A

+

BOOLEAN

+

If A is FALSE, TRUE is returned. If A is TRUE, FALSE is returned. If A is UNKNOWN, UNKNOWN is returned.

+

A IS FALSE

+

BOOLEAN

+

If A is FALSE, TRUE is returned. If A is UNKNOWN, FALSE is returned.

+

A IS NOT FALSE

+

BOOLEAN

+

If A is not FALSE, TRUE is returned. If A is UNKNOWN, TRUE is returned.

+

A IS TRUE

+

BOOLEAN

+

If A is TRUE, TRUE is returned. If A is UNKNOWN, FALSE is returned.

+

A IS NOT TRUE

+

BOOLEAN

+

If A is not TRUE, TRUE is returned. If A is UNKNOWN, TRUE is returned.

+

A IS UNKNOWN

+

BOOLEAN

+

If A is UNKNOWN, TRUE is returned.

+

A IS NOT UNKNOWN

+

BOOLEAN

+

If A is not UNKNOWN, TRUE is returned.

+
+
+

Precautions

+

Only data of the Boolean type can be used for calculation using logical operators. Implicit type conversion is not supported.

+
+
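
The following is a minimal sketch of three-valued logic, assuming a registered table testtable (hypothetical name). A comparison involving NULL yields UNKNOWN, which the IS operators can then test:

SELECT
  (CAST(NULL AS INT) = 1) IS UNKNOWN AS `result`,   -- TRUE: comparing with NULL yields UNKNOWN
  TRUE OR (CAST(NULL AS INT) = 1)    AS `result2`,  -- TRUE: TRUE OR UNKNOWN is TRUE
  FALSE AND (CAST(NULL AS INT) = 1)  AS `result3`   -- FALSE: FALSE AND UNKNOWN is FALSE
FROM testtable;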

Arithmetic Operators

Arithmetic operators include binary and unary operators, all of which return results of the numeric type. Table 3 lists the arithmetic operators supported by Flink SQL.

+ +
Table 3 Arithmetic Operators

Operator

+

Returned Data Type

+

Description

+

+ numeric

+

All numeric types

+

Returns the numeric value itself (unary plus).

+

- numeric

+

All numeric types

+

Returns the negation of the numeric value (unary minus).

+

A + B

+

All numeric types

+

A plus B. The result type is associated with the operand data types. For example, if a floating-point number is added to an integer, the result is a floating-point number.

+

A - B

+

All numeric types

+

A minus B. The result type is associated with the operation data type.

+

A * B

+

All numeric types

+

Multiply A and B. The result type is associated with the operation data type.

+

A / B

+

All numeric types

+

Divide A by B. The result is a number of the double type (double-precision number).

+

POWER(A, B)

+

All numeric types

+

Returns the value of A raised to the power B.

+

ABS(numeric)

+

All numeric types

+

Returns the absolute value of a specified value.

+

MOD(A, B)

+

All numeric types

+

Returns the remainder (modulus) of A divided by B. A negative value is returned only when A is a negative value.

+

SQRT(A)

+

All numeric types

+

Returns the square root of A.

+

LN(A)

+

All numeric types

+

Returns the natural logarithm of A (base e).

+

LOG10(A)

+

All numeric types

+

Returns the base-10 logarithm of A.

+

LOG2(A)

+

All numeric types

+

Returns the base 2 logarithm of A.

+

LOG(B)

+

LOG(A, B)

+

All numeric types

+

When called with one argument, returns the natural logarithm of B.

+

When called with two arguments, this function returns the logarithm of B to the base A.

+

B must be greater than 0 and A must be greater than 1.

+

EXP(A)

+

All numeric types

+

Returns the value of e raised to the power of A.

+

CEIL(A)

+

CEILING(A)

+

All numeric types

+

Returns the smallest integer that is greater than or equal to A. For example: ceil(21.2) = 22.

+

FLOOR(A)

+

All numeric types

+

Returns the largest integer that is less than or equal to A. For example: floor(21.2) = 21.

+

SIN(A)

+

All numeric types

+

Returns the sine value of A.

+

COS(A)

+

All numeric types

+

Returns the cosine value of A.

+

TAN(A)

+

All numeric types

+

Returns the tangent value of A.

+

COT(A)

+

All numeric types

+

Returns the cotangent value of A.

+

ASIN(A)

+

All numeric types

+

Returns the arc sine value of A.

+

ACOS(A)

+

All numeric types

+

Returns the arc cosine value of A.

+

ATAN(A)

+

All numeric types

+

Returns the arc tangent value of A.

+

ATAN2(A, B)

+

All numeric types

+

Returns the arc tangent of a coordinate (A, B).

+

COSH(A)

+

All numeric types

+

Returns the hyperbolic cosine of A. Return value type is DOUBLE.

+

DEGREES(A)

+

All numeric types

+

Converts the value of A from radians to degrees.

+

RADIANS(A)

+

All numeric types

+

Converts the value of A from degrees to radians.

+

SIGN(A)

+

All numeric types

+

Returns the sign of A: 1 if A is positive, -1 if A is negative, and 0 otherwise.

+

ROUND(A, d)

+

All numeric types

+

Returns A rounded to d decimal places. For example: round(21.263, 2) = 21.26.

+

PI

+

All numeric types

+

Returns the value of pi.

+

E()

+

All numeric types

+

Returns the value of e.

+

RAND()

+

All numeric types

+

Returns a pseudorandom double value in the range [0.0, 1.0)

+

RAND(A)

+

All numeric types

+

Returns a pseudorandom double value in the range [0.0, 1.0) with an initial seed A. Two RAND functions will return identical sequences of numbers if they have the same initial seed.

+

RAND_INTEGER(A)

+

All numeric types

+

Returns a pseudorandom integer value in the range [0, A).

+

RAND_INTEGER(A, B)

+

All numeric types

+

Returns a pseudorandom integer value in the range [0, B) with an initial seed A. Two RAND_INTEGER functions will return identical sequences of numbers if they have the same initial seed and bound.

+

UUID()

+

All numeric types

+

Returns a UUID string.

+

BIN(A)

+

All numeric types

+

Returns a string representation of integer A in binary format. Returns NULL if A is NULL.

+

HEX(A)

+

HEX(B)

+

All numeric types

+

Returns a string representation of integer A or string B in hex format. Returns NULL if the argument is NULL.

+

TRUNCATE(A, d)

+

All numeric types

+

Returns A truncated to d decimal places. Returns NULL if A or d is NULL.

+

Example: truncate(42.345, 2) = 42.340

+

truncate(42.345) = 42.000

+

PI()

+

All numeric types

+

Returns the value of pi.

+
+
+

Precautions

+

Data of the string type is not allowed in arithmetic operations.

+
+
+
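
The following is a short sketch combining several of the operators above, assuming a registered table testtable (hypothetical name):

SELECT
  POWER(2, 10)     AS `result`,   -- 1024.0
  MOD(10, 3)       AS `result2`,  -- 1
  CEIL(21.2)       AS `result3`,  -- 22
  ROUND(21.263, 2) AS `result4`   -- 21.26
FROM testtable;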
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0428.html b/docs/dli/sqlreference/dli_08_0428.html new file mode 100644 index 00000000..91c5d67f --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0428.html @@ -0,0 +1,437 @@ + + +

String Functions

+
+
Table 1 String Functions

Function

+

Return Type

+

Description

+

string1 || string2

+

STRING

+

Returns the concatenation of string1 and string2.

+

CHAR_LENGTH(string)

+

CHARACTER_LENGTH(string)

+

INT

+

Returns the number of characters in the string.

+

UPPER(string)

+

STRING

+

Returns the string in uppercase.

+

LOWER(string)

+

STRING

+

Returns the string in lowercase.

+

POSITION(string1 IN string2)

+

INT

+

Returns the position (start from 1) of the first occurrence of string1 in string2; returns 0 if string1 cannot be found in string2.

+

TRIM([ BOTH | LEADING | TRAILING ] string1 FROM string2)

+

STRING

+

Returns a string that removes leading and/or trailing characters string1 from string2.

+

LTRIM(string)

+

STRING

+

Returns a string that removes the left whitespaces from the specified string.

+

For example, LTRIM(' This is a test String.') returns "This is a test String.".

+

RTRIM(string)

+

STRING

+

Returns a string that removes the right whitespaces from the specified string.

+

For example, RTRIM('This is a test String. ') returns "This is a test String.".

+

REPEAT(string, integer)

+

STRING

+

Returns a string that repeats the base string integer times.

+

For example, REPEAT('This is a test String.', 2) returns "This is a test String.This is a test String.".

+

REGEXP_REPLACE(string1, string2, string3)

+

STRING

+

Returns a string from string1 with all the substrings that match a regular expression string2 consecutively being replaced with string3.

+

For example, REGEXP_REPLACE('foobar', 'oo|ar', '') returns "fb".

+

REGEXP_REPLACE('ab\ab', '\\', 'e') returns "abeab".

+

OVERLAY(string1 PLACING string2 FROM integer1 [ FOR integer2 ])

+

STRING

+

Returns a string that replaces integer2 characters of STRING1 with STRING2 from position integer1.

+

The default value of integer2 is the length of string2.

+

For example, OVERLAY('This is an old string' PLACING ' new' FROM 10 FOR 5) returns "This is a new string".

+

SUBSTRING(string FROM integer1 [ FOR integer2 ])

+

STRING

+

Returns a substring of the specified string starting from position integer1 with length integer2 (to the end by default). If integer2 is not configured, the substring from integer1 to the end is returned by default.

+

REPLACE(string1, string2, string3)

+

STRING

+

Returns a new string which replaces all the occurrences of string2 with string3 (non-overlapping) from string1.

+

For example, REPLACE('hello world', 'world', 'flink') returns "hello flink"; REPLACE('ababab', 'abab', 'z') returns "zab".

+

REPLACE('ab\\ab', '\\', 'e') returns "abeab".

+

REGEXP_EXTRACT(string1, string2[, integer])

+

STRING

+

Returns the string extracted from string1 using the regular expression string2 and the regex match group index integer.

+

Returns NULL, if the parameter is NULL or the regular expression is invalid.

+

For example, REGEXP_EXTRACT('foothebar', 'foo(.*?)(bar)', 2) returns "bar".

+

INITCAP(string)

+

STRING

+

Returns a new form of STRING with the first character of each word converted to uppercase and the remaining characters converted to lowercase.

+

CONCAT(string1, string2,...)

+

STRING

+

Returns a string that concatenates string1, string2, ….

+

For example, CONCAT('AA', 'BB', 'CC') returns "AABBCC".

+

CONCAT_WS(string1, string2, string3,...)

+

STRING

+

Returns a string that concatenates string2, string3, … with a separator string1. The separator is added between the strings to be concatenated. Returns NULL if string1 is NULL. If other arguments are NULL, this function automatically skips NULL arguments.

+

For example, CONCAT_WS('~', 'AA', NULL, 'BB', '', 'CC') returns "AA~BB~~CC".

+

LPAD(string1, integer, string2)

+

STRING

+

Returns a new string from string1 left-padded with string2 to a length of integer characters.

+

If any argument is NULL, NULL is returned.

+

If integer is negative, NULL is returned.

+

If the length of string1 is greater than integer, string1 is shortened to integer characters.

+

For example, LPAD('hi', 4, '??') returns "??hi".

+

LPAD('hi',1,'??') returns "h".

+

RPAD(string1, integer, string2)

+

STRING

+

Returns a new string from string1 right-padded with string2 to a length of integer characters.

+

If any argument is NULL, NULL is returned.

+

If integer is negative, NULL is returned.

+

If the length of string1 is greater than integer, string1 is shortened to integer characters.

+

For example, RPAD('hi',4,'??') returns "hi??".

+

RPAD('hi',1,'??') returns "h".

+

FROM_BASE64(string)

+

STRING

+

Returns the base64-decoded result from string.

+

Returns NULL if string is NULL.

+

For example, FROM_BASE64('aGVsbG8gd29ybGQ=') returns "hello world".

+

TO_BASE64(string)

+

STRING

+

Returns the base64-encoded result from string.

+

Returns NULL if string is NULL.

+

For example, TO_BASE64('hello world') returns "aGVsbG8gd29ybGQ=".

+

ASCII(string)

+

INT

+

Returns the numeric value of the first character of string.

+

Returns NULL if string is NULL.

+

For example, ascii('abc') returns 97.

+

ascii(CAST(NULL AS VARCHAR)) returns NULL.

+

CHR(integer)

+

STRING

+

Returns the ASCII character corresponding to integer.

+

If integer is larger than 255, the modulus of integer divided by 256 is computed first, and the CHR of the modulus is returned.

+

Returns NULL if integer is NULL.

+

chr(97) returns 'a'.

+

chr(353) returns 'a'.

+

DECODE(binary, string)

+

STRING

+

Decodes the first argument into a String using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').

+

If either argument is NULL, the result will also be NULL.

+

ENCODE(string1, string2)

+

STRING

+

Encodes the string1 into a BINARY using the provided string2 character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').

+

If either argument is NULL, the result will also be NULL.

+

INSTR(string1, string2)

+

INT

+

Returns the position of the first occurrence of string2 in string1.

+

Returns NULL if any argument is NULL.

+

LEFT(string, integer)

+

STRING

+

Returns the leftmost integer characters from the string.

+

Returns an empty string if integer is negative.

+

Returns NULL if any argument is NULL.

+

RIGHT(string, integer)

+

STRING

+

Returns the rightmost integer characters from the string.

+

Returns an empty string if integer is negative.

+

Returns NULL if any argument is NULL.

+

LOCATE(string1, string2[, integer])

+

INT

+

Returns the position of the first occurrence of string1 in string2 after position integer.

+

Returns 0 if not found.

+

The value of integer defaults to 0.

+

Returns NULL if any argument is NULL.

+

PARSE_URL(string1, string2[, string3])

+

STRING

+

Returns the specified part from the URL.

+

Valid values for string2 include 'HOST', 'PATH', 'QUERY', 'REF', 'PROTOCOL', 'AUTHORITY', 'FILE', and 'USERINFO'.

+

Returns NULL if any argument is NULL.

+

If string2 is QUERY, the key in QUERY can be specified as string3.

+

Example:

+

For example, parse_url('http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1', 'HOST') returns 'facebook.com'.

+

parse_url('http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1', 'QUERY', 'k1') returns 'v1'.

+

REGEXP(string1, string2)

+

BOOLEAN

+

Performs a regular expression search on the specified string and returns a BOOLEAN value indicating whether the specified match pattern is found. If it is found, TRUE is returned. string1 indicates the specified string, and string2 indicates the regular expression.

+

Returns NULL if any argument is NULL.

+

REVERSE(string)

+

STRING

+

Returns the reversed string.

+

Returns NULL if any argument is NULL.

+
NOTE:

Note that backquotes must be added to this function, for example, `REVERSE`.

+
+

SPLIT_INDEX(string1, string2, integer1)

+

STRING

+

Splits string1 by the delimiter string2 and returns the integer1-th (zero-based) substring of the split strings.

+

Returns NULL if integer1 is negative.

+

Returns NULL if any argument is NULL.

+

STR_TO_MAP(string1[, string2, string3])

+

MAP

+

Returns a map after splitting the string1 into key/value pairs using delimiters.

+

The default value of string2 is ','.

+

The default value of string3 is '='.

+

SUBSTR(string[, integer1[, integer2]])

+

STRING

+

Returns a substring of string starting from position integer1 with length integer2.

+

If integer2 is not specified, the string is truncated to the end.

+

JSON_VAL(STRING json_string, STRING json_path)

+

STRING

+

Returns the value of the specified json_path from the json_string. For details about how to use the functions, see JSON_VAL Function.

+
NOTE:

The following rules are listed in descending order of priority.

+
  1. The two arguments json_string and json_path cannot be NULL.
  2. The value of json_string must be a valid JSON string. Otherwise, the function returns NULL.
  3. If json_string is an empty string, the function returns an empty string.
  4. If json_path is an empty string or the path does not exist, the function returns NULL.
+
+
+
+
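
The following is a short sketch exercising a few of the functions above, assuming a registered table testtable (hypothetical name):

SELECT
  CONCAT_WS('~', 'AA', NULL, 'BB') AS `result`,   -- "AA~BB"
  SPLIT_INDEX('a,b,c', ',', 1)     AS `result2`,  -- "b"
  LPAD('hi', 4, '??')              AS `result3`   -- "??hi"
FROM testtable;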

JSON_VAL Function

  • Syntax
+
STRING JSON_VAL(STRING json_string, STRING json_path)
+ +
Table 2 Parameters

Parameter

+

Data Types

+

Description

+

json_string

+

STRING

+

JSON object to be parsed

+

json_path

+

STRING

+

Path expression for parsing the JSON string. For the supported expressions, see Table 3.

+
+
+ +
Table 3 Expressions supported

Expression

+

Description

+

$

+

Root node in the path

+

[]

+

Access array elements

+

*

+

Array wildcard

+

.

+

Access child elements

+
+
+
  • Example
    1. Test input data.
      Test the data source Kafka. The message content is as follows (the second message is deliberately malformed, so every JSON_VAL call on it returns NULL):
      {name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}}
      +{name:James,age:24,sex:male,grade:{math:95,science:[80,85],english:100}]
      +
      +
    2. Use JSON_VAL in SQL statements.
      CREATE TABLE kafkaSource (
      +  `message` string
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = '<yourSourceTopic>',
      +  'properties.bootstrap.servers' = '<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>',
      +  'properties.group.id' = '<yourGroupId>',
      +  'scan.startup.mode' = 'latest-offset',
      +  "format" = "csv",
      +  "csv.field-delimiter" = "\u0001",
      +  "csv.quote-character" = "''"
      +);
      +
      +CREATE TABLE kafkaSink(
      +  message1 STRING,
      +  message2 STRING,
      +  message3 STRING,
      +  message4 STRING,
      +  message5 STRING,  
      +  message6 STRING
      +) WITH (
      +  'connector' = 'kafka',
      +  'topic' = '<yourSinkTopic>',
      +  'properties.bootstrap.servers' = '<yourKafkaAddress1>:<yourKafkaPort>,<yourKafkaAddress2>:<yourKafkaPort>',
      +  "format" = "json"
      +);
      +
      +insert into kafkaSink select 
      +JSON_VAL(message,""),
      +JSON_VAL(message,"$.name"),
      +JSON_VAL(message,"$.grade.science"),
      +JSON_VAL(message,"$.grade.science[*]"),
      +JSON_VAL(message,"$.grade.science[1]"),JSON_VAL(message,"$.grade.dddd")
      +from kafkaSource;
      +
    3. Check the output result of the Kafka topic in the sink.
      {"message1":null,"message2":"swq","message3":"[80,85]","message4":"[80,85]","message5":"85","message6":null}
      +{"message1":null,"message2":null,"message3":null,"message4":null,"message5":null,"message6":null}
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0429.html b/docs/dli/sqlreference/dli_08_0429.html new file mode 100644 index 00000000..ec843133 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0429.html @@ -0,0 +1,1680 @@ + + +

Temporal Functions

+

Table 1 lists the time functions supported by Flink OpenSource SQL.

+

Description

+
Table 1 Temporal Functions

Function

+

Return Type

+

Description

+

DATE string

+

DATE

+

Parse the date string (yyyy-MM-dd) to a SQL date.

+

TIME string

+

TIME

+

Parse the time string (HH:mm:ss[.fff]) to a SQL time.

+

TIMESTAMP string

+

TIMESTAMP

+

Convert the time string into a timestamp. The time string format is yyyy-MM-dd HH:mm:ss[.fff].

+

INTERVAL string range

+

INTERVAL

+

interval indicates the interval. There are two forms:

+
  • yyyy-MM for SQL intervals of months. An interval range might be YEAR or YEAR TO MONTH for intervals of months.
  • dd hh:mm:ss.fff for SQL intervals of milliseconds. An interval range might be DAY, MINUTE, DAY TO HOUR, or DAY TO SECOND.
+

Example:

+

INTERVAL '10 00:00:00.004' DAY TO SECOND indicates that the interval is 10 days and 4 milliseconds.

+

INTERVAL '10' DAY indicates that the interval is 10 days.

+

INTERVAL '2-10' YEAR TO MONTH indicates that the interval is two years and ten months.

+

CURRENT_DATE

+

DATE

+

Return the SQL date of UTC time zone.

+

CURRENT_TIME

+

TIME

+

Return the SQL time of UTC time zone.

+

CURRENT_TIMESTAMP

+

TIMESTAMP

+

Return the SQL timestamp of UTC time zone.

+

LOCALTIME

+

TIME

+

Return the SQL time of the current time zone.

+

LOCALTIMESTAMP

+

TIMESTAMP

+

Return the SQL timestamp of the current time zone.

+

EXTRACT(timeintervalunit FROM temporal)

+

BIGINT

+

Extracts part of a time point or interval. Returns the part as a value of the BIGINT type.

+

For example, extracting the day from the date 2006-06-05 returns 5.

+

EXTRACT(DAY FROM DATE '2006-06-05') returns 5.

+

YEAR(date)

+

BIGINT

+

Return the year from SQL date.

+

For example, YEAR(DATE'1994-09-27') returns 1994.

+

QUARTER(date)

+

BIGINT

+

Return the quarter of a year (an integer between 1 and 4) from SQL date.

+

MONTH(date)

+

BIGINT

+

+

Return the month of a year (an integer between 1 and 12) from SQL date.

+

For example, MONTH(DATE '1994-09-27') returns 9.

+

WEEK(date)

+

BIGINT

+

Return the week of a year (an integer between 1 and 53) from SQL date.

+

For example, WEEK(DATE'1994-09-27') returns 39.

+

DAYOFYEAR(date)

+

BIGINT

+

Returns the day of a year (an integer between 1 and 366) from SQL date.

+

For example, DAYOFYEAR(DATE '1994-09-27') is 270.

+

DAYOFMONTH(date)

+

BIGINT

+

Return the day of a month (an integer between 1 and 31) from SQL date.

+

For example, DAYOFMONTH(DATE'1994-09-27') returns 27.

+

DAYOFWEEK(date)

+

BIGINT

+

Return the day of a week (an integer between 1 and 7) from SQL date.

+

Sunday is set to 1.

+

For example, DAYOFWEEK(DATE'1994-09-27') returns 3.

+

HOUR(timestamp)

+

BIGINT

+

Returns the hour of a day (an integer between 0 and 23) from SQL timestamp.

+

For example, HOUR(TIMESTAMP '1994-09-27 13:14:15') returns 13.

+

MINUTE(timestamp)

+

BIGINT

+

Returns the minute of an hour (an integer between 0 and 59) from SQL timestamp.

+

For example, MINUTE(TIMESTAMP '1994-09-27 13:14:15') returns 14.

+

SECOND(timestamp)

+

BIGINT

+

Returns the second of a minute (an integer between 0 and 59) from SQL timestamp.

+

For example, SECOND(TIMESTAMP '1994-09-27 13:14:15') returns 15.

+

FLOOR(timepoint TO timeintervalunit)

+

TIME

+

Round a time point down to the given unit.

+

For example, 12:44:00 is returned from FLOOR(TIME '12:44:31' TO MINUTE).

+

CEIL(timepoint TO timeintervalunit)

+

TIME

+

Round a time point up to the given unit.

+

For example, CEIL(TIME '12:44:31' TO MINUTE) returns 12:45:00.

+

(timepoint1, temporal1) OVERLAPS (timepoint2, temporal2)

+

BOOLEAN

+

Return TRUE if two time intervals defined by (timepoint1, temporal1) and (timepoint2, temporal2) overlap.

+

Example:

+

(TIME '2:55:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR) returns TRUE.

+

(TIME '9:00:00', TIME '10:00:00') OVERLAPS (TIME '10:15:00', INTERVAL '3' HOUR) returns FALSE.

+

DATE_FORMAT(timestamp, string)

+

STRING

+

Convert timestamp to a value of string in the format specified by the date format string.

+

TIMESTAMPADD(timeintervalunit, interval, timepoint)

+

TIMESTAMP/DATE/TIME

+

Return the date and time added to timepoint based on the result of interval and timeintervalunit.

+

For example, TIMESTAMPADD(WEEK, 1, DATE '2003-01-02') returns 2003-01-09.

+

TIMESTAMPDIFF(timepointunit, timepoint1, timepoint2)

+

INT

+

Return the (signed) number of timepointunit between timepoint1 and timepoint2.

+

The unit for the interval is given by the first argument, which should be one of the following values: SECOND, MINUTE, HOUR, DAY, MONTH, or YEAR.

+

For example, TIMESTAMPDIFF(DAY, TIMESTAMP '2003-01-02 10:00:00', TIMESTAMP '2003-01-03 10:00:00') returns 1.

+

CONVERT_TZ(string1, string2, string3)

+

TIMESTAMP

+

Convert a datetime string1 from time zone string2 to time zone string3.

+

For example, CONVERT_TZ('1970-01-01 00:00:00', 'UTC', 'America/Los_Angeles') returns '1969-12-31 16:00:00'.

+

FROM_UNIXTIME(numeric[, string])

+

STRING

+

Return a string representation of the numeric argument (in seconds) in the current time zone.

+

The default string format is yyyy-MM-dd HH:mm:ss.

+

For example, in the UTC+8 time zone, FROM_UNIXTIME(44) returns 1970-01-01 08:00:44.

+

UNIX_TIMESTAMP()

+

BIGINT

+

Get current Unix timestamp in seconds.

+

+

UNIX_TIMESTAMP(string1[, string2])

+

BIGINT

+

Convert date time string string1 in format string2 to Unix timestamp (in seconds), using the specified timezone in table config.

+

The default format of string2 is yyyy-MM-dd HH:mm:ss.

+

TO_DATE(string1[, string2])

+

DATE

+

Convert a date string string1 with format string2 to a date.

+

The default format of string2 is yyyy-MM-dd.

+

TO_TIMESTAMP(string1[, string2])

+

TIMESTAMP

+

Converts date time string string1 with format string2 under the 'UTC+0' time zone to a timestamp.

+

The default format of string2 is yyyy-MM-dd HH:mm:ss.

+
+
+
+
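
Before the per-function details below, the following is a combined sketch, assuming a registered table testtable (hypothetical name):

SELECT
  YEAR(DATE '1994-09-27') AS `result`,                                      -- 1994
  DATE_FORMAT(TIMESTAMP '1994-09-27 13:14:15', 'yyyy/MM/dd') AS `result2`,  -- "1994/09/27"
  TIMESTAMPDIFF(DAY, TIMESTAMP '2003-01-02 10:00:00',
                TIMESTAMP '2003-01-03 10:00:00') AS `result3`               -- 1
FROM testtable;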

DATE

  • Function

    Returns a SQL date parsed from string in form of yyyy-MM-dd.

    +
  • Description
    DATE DATE string
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string

    +

    STRING

    +

    String in the SQL date format.

    +

    Note that the string must be in the yyyy-MM-dd format. Otherwise, an error will be reported.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DATE "2021-08-19" AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      2021-08-19

      +
      +
      +
    +
+
+

TIME

  • Function

    Returns a SQL time parsed from string in form of HH:mm:ss[.fff].

    +
  • Description
    TIME TIME string
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string

    +

    STRING

    +

    Time

    +

    Note that the string must be in the format of HH:mm:ss[.fff]. Otherwise, an error will be reported.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIME "10:11:12" AS `result`,
      +        TIME "10:11:12.032" AS `result2`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + +

      result

      +

      result2

      +

      10:11:12

      +

      10:11:12.032

      +
      +
      +
    +
+
+

TIMESTAMP

  • Function

    Converts the time string into timestamp. The time string format is yyyy-MM-dd HH:mm:ss[.fff]. The return value is of the TIMESTAMP(3) type.

    +
  • Description
    TIMESTAMP(3) TIMESTAMP string
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string

    +

    STRING

    +

    Time

    +

    Note that the string must be in the format of yyyy-MM-dd HH:mm:ss[.fff]. Otherwise, an error will be reported.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIMESTAMP "1997-04-25 13:14:15" AS `result`,
      +        TIMESTAMP "1997-04-25 13:14:15.032" AS `result2`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + +

      result

      +

      result2

      +

      1997-04-25 13:14:15

      +

      1997-04-25 13:14:15.032

      +
      +
      +
    +
+
+

INTERVAL

  • Function

    Parses an interval string.

    +
  • Description
    INTERVAL INTERVAL string range
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string

    +

    STRING

    +

    Timestamp string used together with the range parameter. The string is in either of the following two formats:

    +
    • yyyy-MM for SQL intervals of months. An interval range might be YEAR or YEAR TO MONTH for intervals of months.
    • dd hh:mm:ss.fff for SQL intervals of milliseconds. An interval range might be DAY, MINUTE, DAY TO HOUR, or DAY TO SECOND.
    +

    range

    +

    INTERVAL

    +

    Interval range. This parameter is used together with the string parameter.

    +

    Available values are as follows: YEAR, YEAR TO MONTH, DAY, MINUTE, DAY TO HOUR, and DAY TO SECOND.

    +
    +
    +
  • Example
    Test statement
    -- The interval is 10 days and 4 milliseconds.
    +INTERVAL '10 00:00:00.004' DAY TO SECOND
    +-- The interval is 10 days.
    +INTERVAL '10' DAY
    +-- The interval is 2 years and 10 months.
    +INTERVAL '2-10' YEAR TO MONTH
    +
    +
+
+

CURRENT_DATE

  • Function

    Returns the current SQL date (yyyy-MM-dd) in the UTC time zone. The return value is of the DATE type.

    +
  • Description
    DATE CURRENT_DATE
    +
  • Input parameters

    None

    +
  • Example
    • Test statement
      SELECT 
      +	CURRENT_DATE AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      2021-10-28

      +
      +
      +
    +
+
+

CURRENT_TIME

  • Function

    Returns the current SQL time (HH:mm:ss.fff) in the UTC time zone. The return value is of the TIME type.

    +
  • Description
    TIME CURRENT_TIME
    +
  • Input parameters

    None

    +
  • Example
    • Test statement
      SELECT 
      +	CURRENT_TIME AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      08:29:19.289

      +
      +
      +
    +
+
+

CURRENT_TIMESTAMP

  • Function

    Returns the current SQL timestamp in the UTC time zone. The return value is of the TIMESTAMP(3) type.

    +
  • Description
    TIMESTAMP(3) CURRENT_TIMESTAMP
    +
  • Input parameters

    None

    +
  • Example
    • Test statement
      SELECT 
      +	CURRENT_TIMESTAMP AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      2021-10-28 08:33:51.606

      +
      +
      +
    +
+
+

LOCALTIME

  • Function

    Returns the current SQL time in the local time zone. The return value is of the TIME type.

    +
  • Description
    TIME LOCALTIME
    +
  • Input parameters

    None

    +
  • Example
    • Test statement
      SELECT 
      +	LOCALTIME AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      16:39:37.706

      +
      +
      +
    +
+
+

LOCALTIMESTAMP

  • Function

    Returns the current SQL timestamp in the local time zone. The return value is of the TIMESTAMP(3) type.

    +
  • Description
    TIMESTAMP(3) LOCALTIMESTAMP
    +
  • Input parameters

    None

    +
  • Example
    • Test statement
      SELECT 
      +	LOCALTIMESTAMP AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      2021-10-28 16:43:17.625

      +
      +
      +
    +
+
+

EXTRACT

  • Function

    Returns a value extracted from the timeintervalunit part of temporal. The return value is of the BIGINT type.

    +
  • Description
    BIGINT EXTRACT(timeintervalunit FROM temporal)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit to be extracted from a time point or interval. The value can be YEAR, QUARTER, MONTH, WEEK, DAY, DOY, HOUR, MINUTE, or SECOND.

    +

    temporal

    +

    DATE/TIME/TIMESTAMP/INTERVAL

    +

    Time point or interval

    +
    +
    +

    Do not specify a time unit that does not exist in the given time point or interval. Otherwise, the job fails to be submitted.

    +

    For example, an error message is displayed when the following statement is executed because YEAR cannot be extracted from TIME.

    +
    SELECT 
    +	EXTRACT(YEAR FROM TIME '12:44:31' ) AS `result`
    +FROM
    +	testtable;
    +
    +
  • Example
    • Test statement
      SELECT 
      +	EXTRACT(YEAR FROM DATE '1997-04-25' ) AS `result`,
      +        EXTRACT(MINUTE FROM TIME '12:44:31') AS `result2`,
      +        EXTRACT(SECOND FROM TIMESTAMP '1997-04-25 13:14:15') AS `result3`,
      +        EXTRACT(YEAR FROM INTERVAL '2-10' YEAR TO MONTH) AS `result4`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      result4

      +

      1997

      +

      44

      +

      15

      +

      2

      +
      +
      +
    +
+
+

YEAR

  • Function

    Returns the year from a SQL date date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT YEAR(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	YEAR(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      1997

      +
      +
      +
    +
+
+

QUARTER

  • Function

    Returns the quarter of a year (an integer between 1 and 4) from a SQL date date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT QUARTER(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	QUARTER(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      2

      +
      +
      +
    +
+
+

MONTH

  • Function

    Returns the month of a year (an integer between 1 and 12) from a SQL date date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT MONTH(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	MONTH(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      4

      +
      +
      +
    +
+
+

WEEK

  • Function

    Returns the week of a year from a SQL date date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT WEEK(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	WEEK(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      17

      +
      +
      +
    +
+
+

DAYOFYEAR

  • Function

    Returns the day of a year (an integer between 1 and 366) from SQL date date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT DAYOFYEAR(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DAYOFYEAR(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      115

      +
      +
      +
    +
+
+

DAYOFMONTH

  • Function

    Returns the day of a month (an integer between 1 and 31) from a SQL date date. The return value is of the BIGINT type.

    +
  • Description
    BIGINT DAYOFMONTH(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DAYOFMONTH(DATE '1997-04-25' ) AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      25

      +
      +
      +
    +
+
+

DAYOFWEEK

  • Function

    Returns the day of a week (an integer between 1 and 7) from a SQL date date. The return value is of the BIGINT type.

    +

    Note that the start day of a week is Sunday.

    +
    +
  • Description
    BIGINT DAYOFWEEK(date)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    date

    +

    DATE

    +

    SQL date

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DAYOFWEEK(DATE '1997-04-25') AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      6

      +
      +
      +
    +
+
+

HOUR

  • Function

    Returns the hour of a day (an integer between 0 and 23) from SQL timestamp timestamp. The return value is of the BIGINT type.

    +
  • Description
    BIGINT HOUR(timestamp)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timestamp

    +

    TIMESTAMP

    +

    SQL timestamp

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	HOUR(TIMESTAMP '1997-04-25 10:11:12') AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      10

      +
      +
      +
    +
+
+

MINUTE

  • Function

    Returns the minute of an hour (an integer between 0 and 59) from a SQL timestamp. The return value is of the BIGINT type.

    +
  • Description
    BIGINT MINUTE(timestamp)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timestamp

    +

    TIMESTAMP

    +

    SQL timestamp

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	MINUTE(TIMESTAMP '1997-04-25 10:11:12') AS `result`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + +

      result

      +

      11

      +
      +
      +
    +
+
+

SECOND

  • Function

    Returns the second of a minute (an integer between 0 and 59) from a SQL timestamp. The return value is of the BIGINT type.

    +
  • Description
    BIGINT SECOND(timestamp)
    +
  • Input parameters +
    + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timestamp

    +

    TIMESTAMP

    +

    SQL timestamp

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	SECOND(TIMESTAMP '1997-04-25 10:11:12') AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      12

      +
      +
      +
    +
+
+

FLOOR

  • Function

    Returns a value that rounds timepoint down to the time unit timeintervalunit.

    +
  • Description
    TIME/TIMESTAMP(3) FLOOR(timepoint TO timeintervalunit)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timepoint

    +

    TIMESTAMP/TIME

    +

    SQL time or SQL timestamp

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit. The value can be YEAR, QUARTER, MONTH, WEEK, DAY, DOY, HOUR, MINUTE, or SECOND.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	FLOOR(TIME '13:14:15' TO MINUTE) AS `result`,
      +        FLOOR(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result2`,
      +        FLOOR(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result3`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      13:14

      +

      13:14

      +

      1997-04-25T13:14

      +
      +
      +
    +
+
+

CEIL

  • Function

    Returns a value that rounds timepoint up to the time unit timeintervalunit.

    +
  • Description
    TIME/TIMESTAMP(3) CEIL(timepoint TO timeintervalunit)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timepoint

    +

    TIMESTAMP/TIME

    +

    SQL time or SQL timestamp

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit. The value can be YEAR, QUARTER, MONTH, WEEK, DAY, DOY, HOUR, MINUTE, or SECOND.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	CEIL(TIME '13:14:15' TO MINUTE) AS `result`,
      +        CEIL(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result2`,
      +        CEIL(TIMESTAMP '1997-04-25 13:14:15' TO MINUTE) AS `result3`
      +FROM	testtable;
      +
    • Test Result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      13:15

      +

      13:15

      +

      1997-04-25T13:15

      +
      +
      +
    +
+
+

OVERLAPS

  • Function

    Returns TRUE if two time intervals overlap; returns FALSE otherwise.

    +
  • Description
    BOOLEAN (timepoint1, temporal1) OVERLAPS (timepoint2, temporal2)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timepoint1/timepoint2

    +

    DATE/TIME/TIMESTAMP

    +

    Time point

    +

    temporal1/temporal2

    +

    DATE/TIME/TIMESTAMP/INTERVAL

    +

    Time point or interval

    +
    +
    +
    • (timepoint, temporal) is a closed interval.
    • The temporal can be of the DATE, TIME, TIMESTAMP, or INTERVAL type.
      • When the temporal is DATE, TIME, or TIMESTAMP, (timepoint, temporal) indicates an interval between timepoint and temporal. The temporal can be earlier than the value of timepoint, for example, (DATE '1997-04-25', DATE '1997-04-23').
      • When the temporal is INTERVAL, (timepoint, temporal) indicates an interval between timepoint and timepoint + temporal.
      +
    • Ensure that (timepoint1, temporal1) and (timepoint2, temporal2) are intervals of the same data type.
    +
    +
  • Example
    • Test statement
      SELECT 
      +	(TIME '2:55:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR) AS `result`,
      +        (TIME '2:30:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR) AS `result2`,
      +	(TIME '2:30:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:31:00', INTERVAL '2' HOUR) AS `result3`,
      +	(TIME '9:00:00', TIME '10:00:00') OVERLAPS (TIME '10:00:00', INTERVAL '3' HOUR) AS `result4`,
      +	(TIMESTAMP '1997-04-25 12:00:00', TIMESTAMP '1997-04-25 12:20:00') OVERLAPS (TIMESTAMP '1997-04-25 13:00:00', INTERVAL '2' HOUR) AS `result5`,
      +	(DATE '1997-04-23', INTERVAL '2' DAY) OVERLAPS (DATE '1997-04-25', INTERVAL '2' DAY) AS `result6`,
      +	(DATE '1997-04-25', DATE '1997-04-23') OVERLAPS (DATE '1997-04-25', INTERVAL '2' DAY) AS `result7`
      +FROM
      +	testtable;
      +
    • Test Result +
      + + + + + + + + + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      result4

      +

      result5

      +

      result6

      +

      result7

      +

      true

      +

      true

      +

      false

      +

      true

      +

      false

      +

      true

      +

      true

      +
      +
      +
    +
+
+

DATE_FORMAT

  • Function

    Converts a timestamp to a value of string in the format specified by the date format string.

    +
  • Description
    STRING DATE_FORMAT(timestamp, dateformat)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timestamp

    +

    TIMESTAMP/STRING

    +

    Time point

    +

    dateformat

    +

    STRING

    +

    String in the date format

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yyyy-MM-dd HH:mm:ss') AS `result`,
      +        DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yyyy-MM-dd') AS `result2`,
      +	DATE_FORMAT(TIMESTAMP '1997-04-25 10:11:12', 'yy/MM/dd HH:mm') AS `result3`,
      +        DATE_FORMAT('1997-04-25 10:11:12', 'yyyy-MM-dd') AS `result4`
      +FROM	testtable;
      +
    • Test Result +
      + + + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      result4

      +

      1997-04-25 10:11:12

      +

      1997-04-25

      +

      97/04/25 10:11

      +

      1997-04-25

      +
      +
      +
    +
+
+

TIMESTAMPADD

  • Function

    Returns the date and time by combining interval and timeintervalunit and adding the combination to timepoint.

    +

    The return type of TIMESTAMPADD matches the type of timepoint. An exception is that if the input timepoint is of the TIMESTAMP type, the return value can be inserted into a table field of the DATE type.

    +
    +
  • Description
    TIMESTAMP(3)/DATE/TIME TIMESTAMPADD(timeintervalunit, interval, timepoint)
    +
  • Input parameters +
    + + + + + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timeintervalunit

    +

    TIMEUNIT

    +

    Time unit.

    +

    interval

    +

    INT

    +

    Interval

    +

    timepoint

    +

    TIMESTAMP/DATE/TIME

    +

    Time point

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIMESTAMPADD(WEEK, 1, DATE '1997-04-25') AS `result`,
      +        TIMESTAMPADD(QUARTER, 1, TIMESTAMP '1997-04-25 10:11:12') AS `result2`,
      +	TIMESTAMPADD(SECOND, 2, TIME '10:11:12') AS `result3`
      +FROM	testtable;
      +
    • Test Result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      1997-05-02

      +
      • If this field is inserted into a table field of the TIMESTAMP type, 1997-07-25T10:11:12 is returned.
      +
      • If this field is inserted into a table field of the DATE type, 1997-07-25 is returned.
      +

      10:11:14

      +
      +
      +
    +
+
+

TIMESTAMPDIFF

  • Function

    Returns the (signed) number of timepointunit between timepoint1 and timepoint2. The unit for the interval is given by the first argument.

    +
  • Description
    INT TIMESTAMPDIFF(timepointunit, timepoint1, timepoint2)
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    timepointunit

    +

    TIMEUNIT

    +

    Time unit. The value can be SECOND, MINUTE, HOUR, DAY, MONTH or YEAR.

    +

    timepoint1/timepoint2

    +

    TIMESTAMP/DATE

    +

    Time point

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TIMESTAMPDIFF(DAY, TIMESTAMP '1997-04-25 10:00:00', TIMESTAMP '1997-04-28 10:00:00') AS `result`,
      +        TIMESTAMPDIFF(DAY, DATE '1997-04-25', DATE '1997-04-28') AS `result2`,
      +	TIMESTAMPDIFF(DAY, TIMESTAMP '1997-04-27 10:00:20', TIMESTAMP '1997-04-25 10:00:00') AS `result3`
      +FROM	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      3

      +

      3

      +

      -2

      +
      +
      +
    +
+
+

CONVERT_TZ

  • Function

    Converts a datetime string1 (with default ISO timestamp format 'yyyy-MM-dd HH:mm:ss') from time zone string2 to time zone string3.

    +
  • Description
    STRING CONVERT_TZ(string1, string2, string3)
    +
  • Input parameters +
    + + + + + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string1

    +

    STRING

    +

    SQL timestamp. If the value does not meet the format requirements, NULL is returned.

    +

    string2

    +

    STRING

    +

    Time zone before conversion. The format of time zone should be either an abbreviation such as PST, a full name such as America/Los_Angeles, or a custom ID such as GMT-08:00.

    +

    string3

    +

    STRING

    +

    Time zone after conversion. The format of time zone should be either an abbreviation such as PST, a full name such as America/Los_Angeles, or a custom ID such as GMT-08:00.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	CONVERT_TZ('1970-01-01 00:00:00', 'UTC', 'America/Los_Angeles') AS `result`,
      +        CONVERT_TZ('1997-04-25 10:00:00', 'UTC', 'GMT-08:00') AS `result2`
      +FROM	testtable;
      +
    • Test Result +
      + + + + + + + +

      result

      +

      result2

      +

      1969-12-31 16:00:00

      +

      1997-04-25 02:00:00

      +
      +
      +
    +
+
+

FROM_UNIXTIME

  • Function

    Returns a representation of the numeric argument as a value in string format.

    +
  • Description
    STRING FROM_UNIXTIME(numeric[, string])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    numeric

    +

    BIGINT

    +

    An internal timestamp representing the number of seconds since 1970-01-01 00:00:00 UTC. The value can be generated by the UNIX_TIMESTAMP() function.

    +

    string

    +

    STRING

    +

    Time format string. If this parameter is not specified, the default format yyyy-MM-dd HH:mm:ss is used.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	FROM_UNIXTIME(44) AS `result`,
      +        FROM_UNIXTIME(44, 'yyyy:MM:dd') AS `result2`
      +FROM	testtable;
      +
    • Test Result +
      + + + + + + + +

      result

      +

      result2

      +

      1970-01-01 08:00:44

      +

      1970:01:01

      +
      +
      +
    +
+
+

UNIX_TIMESTAMP

  • Function

    Gets current Unix timestamp in seconds. The return value is of the BIGINT type.

    +
  • Description
    BIGINT UNIX_TIMESTAMP()
    +
  • Input parameters

    None

    +
  • Example
    • Test statement
      SELECT 
      +	UNIX_TIMESTAMP() AS `result`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + +

      result

      +

      1635401982

      +
      +
      +
    +
+
+

UNIX_TIMESTAMP(string1[, string2])

  • Function

    Converts date time string1 in format string2 to Unix timestamp (in seconds). The return value is of the BIGINT type.

    +
  • Description
    BIGINT UNIX_TIMESTAMP(string1[, string2])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string1

    +

    STRING

    +

    SQL timestamp string. An error is reported if the value does not comply with the string2 format.

    +

    string2

    +

    STRING

    +

    Time format. If this parameter is not specified, the default format is yyyy-MM-dd HH:mm:ss.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	UNIX_TIMESTAMP('1997-04-25', 'yyyy-MM-dd') AS `result`,
      +        UNIX_TIMESTAMP('1997-04-25 00:00:10', 'yyyy-MM-dd HH:mm:ss') AS `result2`,
      +        UNIX_TIMESTAMP('1997-04-25 00:00:00') AS `result3`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      861897600

      +

      861897610

      +

      861897600

      +
      +
      +
    +
+
+

TO_DATE

  • Function

    Converts a date string1 with format string2 to a date.

    +
  • Description
    DATE TO_DATE(string1[, string2])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string1

    +

    STRING

    +

    SQL timestamp string. If the value is not in the required format, an error is reported.

    +

    string2

    +

    STRING

    +

    Format. If this parameter is not specified, the default time format is yyyy-MM-dd.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TO_DATE('1997-04-25') AS `result`,
      +        TO_DATE('1997:04:25', 'yyyy:MM:dd') AS `result2`,
      +        TO_DATE('1997-04-25 00:00:00', 'yyyy-MM-dd HH:mm:ss') AS `result3`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      1997-04-25

      +

      1997-04-25

      +

      1997-04-25

      +
      +
      +
    +
+
+

TO_TIMESTAMP

  • Function

    Converts date time string1 with format string2 to a timestamp.

    +
  • Description
    TIMESTAMP TO_TIMESTAMP(string1[, string2])
    +
  • Input parameters +
    + + + + + + + + + + + + + +

    Parameter

    +

    Data Types

    +

    Parameters

    +

    string1

    +

    STRING

    +

    SQL timestamp string. If the value is not in the required format, NULL is returned.

    +

    string2

    +

    STRING

    +

    Date format. If this parameter is not specified, the default format is yyyy-MM-dd HH:mm:ss.

    +
    +
    +
  • Example
    • Test statement
      SELECT 
      +	TO_TIMESTAMP('1997-04-25', 'yyyy-MM-dd') AS `result`,
      +        TO_TIMESTAMP('1997-04-25 00:00:00') AS `result2`,
      +        TO_TIMESTAMP('1997-04-25 00:00:00', 'yyyy-MM-dd HH:mm:ss') AS `result3`
      +FROM
      +	testtable;
      +
    • Test result +
      + + + + + + + + + +

      result

      +

      result2

      +

      result3

      +

      1997-04-25 00:00

      +

      1997-04-25 00:00

      +

      1997-04-25 00:00

      +
      +
      +
    +
+
+
+
+ +
+ diff --git a/docs/dli/sqlreference/dli_08_0430.html b/docs/dli/sqlreference/dli_08_0430.html new file mode 100644 index 00000000..f532ccf9 --- /dev/null +++ b/docs/dli/sqlreference/dli_08_0430.html @@ -0,0 +1,77 @@ + + +

Conditional Functions

Description

Table 1 Conditional Functions

| Conditional Function | Description |
| -------------------- | ----------- |
| CASE value WHEN value1_1 [, value1_2 ]* THEN result1 [ WHEN value2_1 [, value2_2 ]* THEN result2 ]* [ ELSE resultZ ] END | Returns resultX when the value is contained in (valueX_1, valueX_2, …). Only the first matched value is returned. When no value matches, returns resultZ if it is provided and NULL otherwise. |
| CASE WHEN condition1 THEN result1 [ WHEN condition2 THEN result2 ]* [ ELSE resultZ ] END | Returns resultX when the first conditionX is met. Only the result of the first matched condition is returned. When no condition is met, returns resultZ if it is provided and NULL otherwise. |
| NULLIF(value1, value2) | Returns NULL if value1 is equal to value2; returns value1 otherwise. For example, NULLIF(5, 5) returns NULL, and NULLIF(5, 0) returns 5. |
| COALESCE(value1, value2 [, value3 ]*) | Returns the first value (from left to right) that is not NULL from value1, value2, …. For example, COALESCE(NULL, 5) returns 5. |
| IF(condition, true_value, false_value) | Returns true_value if condition is met; otherwise returns false_value. For example, IF(5 > 3, 5, 3) returns 5. |
| IS_ALPHA(string) | Returns TRUE if all characters in string are letters; otherwise returns FALSE. |
| IS_DECIMAL(string) | Returns TRUE if string can be parsed to a valid numeric; otherwise returns FALSE. |
| IS_DIGIT(string) | Returns TRUE if all characters in string are digits; otherwise returns FALSE. |
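
A minimal sketch combining several of these functions in one statement; the columns score, amount, nickname, and name on source_stream are assumed here purely for illustration:

insert into temp select
  CASE score WHEN 90, 100 THEN 'high' ELSE 'low' END,  -- simple CASE with a value list
  NULLIF(amount, 0),                                   -- NULL instead of a zero amount
  COALESCE(nickname, name, 'anonymous'),               -- first non-NULL value wins
  IF(score > 60, 'pass', 'fail')                       -- inline condition
from source_stream;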
diff --git a/docs/dli/sqlreference/dli_08_0431.html b/docs/dli/sqlreference/dli_08_0431.html

Type Conversion Functions

Syntax

CAST(value AS type)

Description

This function is used to forcibly convert a value to a specified type.

Precautions

If the input is NULL, NULL is returned.

Example 1: Convert the amount value to an integer.

insert into temp select cast(amount as INT) from source_stream;
Table 1 Examples of type conversion functions

• cast(v1 as string)
  Converts v1 to a string. The value of v1 can be of the numeric type or of the timestamp, date, or time type.
  Table T1:
  | content (INT) |
  | ------------- |
  | 5             |
  Statement:
  SELECT cast(content as varchar) FROM T1;
  Result: "5"

• cast(v1 as int)
  Converts v1 to the int type. The value of v1 can be a number or a character.
  Table T1:
  | content (STRING) |
  | ---------------- |
  | "5"              |
  Statement:
  SELECT cast(content as int) FROM T1;
  Result: 5

• cast(v1 as timestamp)
  Converts v1 to the timestamp type. The value of v1 can be of the string, date, or time type.
  Table T1:
  | content (STRING)      |
  | --------------------- |
  | "2018-01-01 00:00:01" |
  Statement:
  SELECT cast(content as timestamp) FROM T1;
  Result: 1514736001000

• cast(v1 as date)
  Converts v1 to the date type. The value of v1 can be of the string or timestamp type.
  Table T1:
  | content (TIMESTAMP) |
  | ------------------- |
  | 1514736001000       |
  Statement:
  SELECT cast(content as date) FROM T1;
  Result: "2018-01-01"

Flink jobs do not support the conversion of bigint to timestamp using CAST. You can convert it using to_timestamp.

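As a minimal sketch of that workaround, assuming a hypothetical BIGINT column ts_seconds that holds seconds since the epoch: FROM_UNIXTIME first renders the number as a yyyy-MM-dd HH:mm:ss string, which to_timestamp then parses.

-- ts_seconds is an assumed BIGINT column holding epoch seconds
insert into temp select to_timestamp(from_unixtime(ts_seconds)) from source_stream;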

Example 2:

  1. Create a Flink OpenSource SQL job by referring to Kafka Source Table and Print Result Table, enter the following job running script, and submit the job.
    Note: When creating the job, set Flink Version to 1.12 in the Running Parameters area on the job editing page, select Save Job Log, and set an OBS bucket for saving job logs so that you can view them later. Change the values of the parameters in bold in the following script as needed.
    CREATE TABLE kafkaSource (
      cast_int_to_string int,
      cast_String_to_int string,
      case_string_to_timestamp string,
      case_timestamp_to_date timestamp
    ) WITH (
      'connector' = 'kafka',
      'topic' = 'KafkaTopic',
      'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
      'properties.group.id' = 'GroupId',
      'scan.startup.mode' = 'latest-offset',
      'format' = 'json'
    );

    CREATE TABLE printSink (
      cast_int_to_string string,
      cast_String_to_int int,
      case_string_to_timestamp timestamp,
      case_timestamp_to_date date
    ) WITH (
      'connector' = 'print'
    );

    insert into printSink select
      cast(cast_int_to_string as string),
      cast(cast_String_to_int as int),
      cast(case_string_to_timestamp as timestamp),
      cast(case_timestamp_to_date as date)
    from kafkaSource;

  2. Connect to the Kafka cluster and send the following test data to the Kafka topic:
    {"cast_int_to_string":"1", "cast_String_to_int": "1", "case_string_to_timestamp": "2022-04-02 15:00:00", "case_timestamp_to_date": "2022-04-02 15:00:00"}

  3. View the output.
    • Method 1:
      1. Log in to the DLI management console and choose Job Management > Flink Streaming Jobs.
      2. Locate the row that contains the target Flink job, and choose More > FlinkUI in the Operation column.
      3. On the Flink UI, choose Task Managers, click the task name, and select Stdout to view the job run logs.
    • Method 2: If you selected Save Job Log in Running Parameters before submitting the job, perform the following operations:
      1. Log in to the DLI management console and choose Job Management > Flink Streaming Jobs.
      2. Click the name of the Flink job, choose Run Log, click OBS Bucket, and locate the log folder based on the job running date.
      3. Go to the folder of the corresponding date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view the result log.

    The query result is as follows:
    +I(1,1,2022-04-02T15:00,2022-04-02)
diff --git a/docs/dli/sqlreference/dli_08_0432.html b/docs/dli/sqlreference/dli_08_0432.html

Collection Functions

Description

Table 1 Collection functions

| Collection Function | Description |
| ------------------- | ----------- |
| CARDINALITY(array) | Returns the number of elements in array. |
| array '[' integer ']' | Returns the element at the given position in array. The index starts from 1. |
| ELEMENT(array) | Returns the sole element of array (whose cardinality should be one). Returns NULL if array is empty. Throws an exception if array has more than one element. |
| CARDINALITY(map) | Returns the number of entries in map. |
| map '[' key ']' | Returns the value stored under the given key in map. |
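
A quick sketch of these functions in one query; items is an assumed ARRAY column and props an assumed MAP column on source_stream:

insert into temp select
  CARDINALITY(items),   -- number of elements in the array
  items[1],             -- first element; indexes start at 1
  CARDINALITY(props),   -- number of entries in the map
  props['color']        -- value stored under key 'color'
from source_stream;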
diff --git a/docs/dli/sqlreference/dli_08_0433.html b/docs/dli/sqlreference/dli_08_0433.html

Value Construction Functions

Description

Table 1 Value construction functions

| Value Construction Function | Description |
| --------------------------- | ----------- |
| ROW(value1 [, value2 ]*) or (value1 [, value2 ]*) | Returns a row created from a list of values (value1, value2, …). |
| ARRAY '[' value1 [, value2 ]* ']' | Returns an array created from a list of values (value1, value2, …). |
| MAP '[' key1, value1 [, key2, value2 ]* ']' | Returns a map created from a list of key-value pairs: (key1, value1), (key2, value2), …. |
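
For illustration, constructed values can be selected directly; a minimal sketch against source_stream:

insert into temp select
  ROW('Alice', 25),              -- a two-field row
  ARRAY[1, 2, 3],                -- a three-element array
  MAP['k1', 'v1', 'k2', 'v2']    -- a map with entries (k1, v1) and (k2, v2)
from source_stream;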
diff --git a/docs/dli/sqlreference/dli_08_0434.html b/docs/dli/sqlreference/dli_08_0434.html

Value Access Functions

Description

Table 1 Value access functions

| Function | Description |
| -------- | ----------- |
| tableName.compositeType.field | Returns the value of a field from a Flink composite type (e.g., Tuple, POJO) by name. |
| tableName.compositeType.* | Returns a flat representation of a Flink composite type (e.g., Tuple, POJO), converting each of its direct subtypes into a separate field. |
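
For example, given an assumed table car_infos with a composite (ROW-typed) column driver that has the fields name and age, the fields could be accessed like this:

-- read one field of the composite column by name
SELECT car_infos.driver.name FROM car_infos;
-- flatten every direct field of driver into a separate column
SELECT car_infos.driver.* FROM car_infos;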
diff --git a/docs/dli/sqlreference/dli_08_0435.html b/docs/dli/sqlreference/dli_08_0435.html

Hash Functions

Description

Table 1 Hash functions

| Hash Function | Description |
| ------------- | ----------- |
| MD5(string) | Returns the MD5 hash of string as a string of 32 hexadecimal digits. Returns NULL if string is NULL. |
| SHA1(string) | Returns the SHA-1 hash of string as a string of 40 hexadecimal digits. Returns NULL if string is NULL. |
| SHA224(string) | Returns the SHA-224 hash of string as a string of 56 hexadecimal digits. Returns NULL if string is NULL. |
| SHA256(string) | Returns the SHA-256 hash of string as a string of 64 hexadecimal digits. Returns NULL if string is NULL. |
| SHA384(string) | Returns the SHA-384 hash of string as a string of 96 hexadecimal digits. Returns NULL if string is NULL. |
| SHA512(string) | Returns the SHA-512 hash of string as a string of 128 hexadecimal digits. Returns NULL if string is NULL. |
| SHA2(string, hashLength) | Returns the hash using the SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384, or SHA-512). The first argument, string, is the string to be hashed; the second argument, hashLength, is the bit length of the result (224, 256, 384, or 512). If either argument is NULL, the result is also NULL. |
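
A short sketch of the family; name is an assumed STRING column on source_stream:

insert into temp select
  MD5(name),         -- 32 hex digits
  SHA256(name),      -- 64 hex digits
  SHA2(name, 224)    -- same result as SHA224(name)
from source_stream;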
diff --git a/docs/dli/sqlreference/dli_08_0436.html b/docs/dli/sqlreference/dli_08_0436.html

Aggregate Functions

An aggregate function performs a calculation on a set of input values and returns a single value. For example, the COUNT function counts the number of rows retrieved by an SQL statement. Table 1 lists the aggregate functions.

Table 1 Aggregate functions

• COUNT([ ALL ] expression | DISTINCT expression1 [, expression2]*)
  Return type: BIGINT
  Returns the number of input rows for which the expression is not NULL. Use DISTINCT for one unique instance of each value.

• COUNT(*) or COUNT(1)
  Return type: BIGINT
  Returns the number of input rows.

• AVG([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the average (arithmetic mean) of expression across all input rows. Use DISTINCT for one unique instance of each value.

• SUM([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the sum of expression across all input rows. Use DISTINCT for one unique instance of each value.

• MAX([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the maximum value of expression across all input rows.

• MIN([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the minimum value of expression across all input rows.

• STDDEV_POP([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the population standard deviation of expression across all input rows.

• STDDEV_SAMP([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the sample standard deviation of expression across all input rows.

• VAR_POP([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the population variance (square of the population standard deviation) of expression across all input rows.

• VAR_SAMP([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the sample variance (square of the sample standard deviation) of expression across all input rows.

• COLLECT([ ALL | DISTINCT ] expression)
  Return type: MULTISET
  Returns a multiset of expression across all input rows.

• VARIANCE([ ALL | DISTINCT ] expression)
  Return type: DOUBLE
  Returns the sample variance (square of the sample standard deviation) of expression across all input rows.

• FIRST_VALUE(expression)
  Return type: actual type of expression
  Returns the first value in an ordered set of values.

• LAST_VALUE(expression)
  Return type: actual type of expression
  Returns the last value in an ordered set of values.
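
For instance, a grouped aggregation over an assumed stream of car speeds; car_id and speed are illustrative columns of source_stream:

insert into temp select
  car_id,
  COUNT(*),            -- rows per car
  AVG(speed),          -- arithmetic mean of speed
  MAX(speed),
  STDDEV_SAMP(speed)   -- sample standard deviation
from source_stream
group by car_id;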
diff --git a/docs/dli/sqlreference/dli_08_0437.html b/docs/dli/sqlreference/dli_08_0437.html

Table-Valued Functions

diff --git a/docs/dli/sqlreference/dli_08_0438.html b/docs/dli/sqlreference/dli_08_0438.html

string_split


The string_split function splits a target string into substrings based on the specified separator and returns a substring list.

Description

string_split(target, separator)

Table 1 string_split parameters

| Parameter | Data Type | Description |
| --------- | --------- | ----------- |
| target    | STRING    | Target string to be processed. See the note below. |
| separator | VARCHAR   | Separator. Currently, only single-character separators are supported. |

NOTE (on target):
  • If target is NULL, an empty line is returned.
  • If target contains two or more consecutive separators, an empty substring is returned.
  • If target does not contain the specified separator, the original string passed to target is returned.

Example

  1. Create a Flink OpenSource SQL job by referring to Kafka Source Table and Print Result Table, enter the following job running script, and submit the job.
    When you create the job, set Flink Version to 1.12 in the Running Parameters tab. Select Save Job Log, and specify the OBS bucket for saving job logs. Change the values of the parameters in bold in the following script as needed.
    CREATE TABLE kafkaSource (
      target STRING,
      separator VARCHAR
    ) WITH (
      'connector' = 'kafka',
      'topic' = 'KafkaTopic',
      'properties.bootstrap.servers' = 'KafkaAddress1:KafkaPort,KafkaAddress2:KafkaPort',
      'properties.group.id' = 'GroupId',
      'scan.startup.mode' = 'latest-offset',
      'format' = 'json'
    );

    CREATE TABLE printSink (
      target STRING,
      item STRING
    ) WITH (
      'connector' = 'print'
    );

    insert into printSink
    select target, item
    from kafkaSource,
    lateral table(string_split(target, separator)) as T(item);

  2. Connect to the Kafka cluster and send the following test data to the Kafka topic:
    {"target":"test-flink","separator":"-"}
    {"target":"flink","separator":"-"}
    {"target":"one-two-ww-three","separator":"-"}

    The data is as follows:

    Table 2 Test table data

    | target (STRING)  | separator (VARCHAR) |
    | ---------------- | ------------------- |
    | test-flink       | -                   |
    | flink            | -                   |
    | one-two-ww-three | -                   |

  3. View the output.
    • Method 1:
      1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
      2. Locate the row that contains the target Flink job, and choose More > FlinkUI in the Operation column.
      3. On the Flink UI, choose Task Managers, click the task name, and select Stdout to view the job logs.
    • Method 2: If you selected Save Job Log on the Running Parameters tab before submitting the job, perform the following operations:
      1. Log in to the DLI console. In the navigation pane, choose Job Management > Flink Jobs.
      2. Click the name of the Flink job, choose Run Log, click OBS Bucket, and locate the log folder based on the job running date.
      3. Go to the folder of the date, find the folder whose name contains taskmanager, download the taskmanager.out file, and view the result logs.

    The query result is as follows:
    +I(test-flink,test)
    +I(test-flink,flink)
    +I(flink,flink)
    +I(one-two-ww-three,one)
    +I(one-two-ww-three,two)
    +I(one-two-ww-three,ww)
    +I(one-two-ww-three,three)

    The output data is as follows:

    Table 3 Result table data

    | target (STRING)  | item (STRING) |
    | ---------------- | ------------- |
    | test-flink       | test          |
    | test-flink       | flink         |
    | flink            | flink         |
    | one-two-ww-three | one           |
    | one-two-ww-three | two           |
    | one-two-ww-three | ww            |
    | one-two-ww-three | three         |
diff --git a/docs/dli/sqlreference/dli_08_0450.html b/docs/dli/sqlreference/dli_08_0450.html

Historical Versions

diff --git a/docs/dli/sqlreference/en-us_image_0000001238321520.png b/docs/dli/sqlreference/en-us_image_0000001238321520.png
(new binary image file; 32910-byte GIT binary patch omitted)
z2(wb0(9#+*HGf)V;n(r#Z>N;}ireM*Y8++hhC{&l;Q;=TV-|B?dyU}2HK5%?FUrcw zt{NJ~pZQY=`FSHdTfBSYv5fI?%opa7tu*SV)_hOYblUTvFbw>!xf*(fzLZY#(Jgs>}`sG?y=>3L`FV4FHVYbspJ zeXFmMHPq>rlc;alA+TlBqaZPzT0r3cB!(PEIc|)m`(0c?suaK_obPjGO7DPHjmJRz zGD}H4Mw)LYuju9^8JU4Dsj{DMJC0X!H=7-pT+UQAw>}J=dB=G9aab@eNvk1<$+H}_ z1EuXy(qT9?5=0T@W?Z~^hes?^zMBJTngSE6(>6dMm8&E~FM(s<9tb&>b@?1_qq;+L za#P=-Ti{x2HFSe-%e8PzC@kvNq@1r`9f-t&1SYXBfqHtzRNo=al9a8|*;(QMhmt!& zTqYnppZ0*lVv6UvE4+PMbaWyi!+pC8)b?t-{3SC=lADl5KPFGIhVDmrq1F8FedhsUHC{TR2H zqSls@(SBv7GIx*9K~mJ5^0jhgEWv=C=3drPB)A)8L@Hr_Bqql%t_SSOyI(1-+!J!dr#u;5X@&SJ4qO4`My>?L(f^4P(|G2F5q*eFOd> zM3-5H7Vh7r*ZcW4c(a*T#noto&23?X<$m7hW&G-Svc)#bJW`b*vp``PL^cg<;#k>x zaD>rNx%~<15PYSAL(-FL+Bk^%V32?;7YKCMbXYqaG&d{ zzopR8{qY<)kjht}JF9&Io|R!nkV|iWI(q2f&+AW$BY_FQa=%-rm%q4*Yu3l{{BzxI zf1S^eC*#Zbe_lAy{1^G)pdPMy+|!Ieetxkj<${&1!ap~E7}ope*Od!&|G9Z87I-Va z8NvDI=zwc2f@V^WB?~$SUNZl=A}r5^xKLZ7KDWE_C+@d$gyv?70Ko zf8ajfzYk2pY#b}e$b_Y_S-Sub*SvWZ1a|PMUvMbTZNr4BZ&3=MPi`}*95J6`ypn#zJ9i+b?SZxlcLXyrTc_ozG5GQ{jd18qkQIhU zf0VWKqH{Vxk5FWyQ!2ZVebQqxU~SHh&fUAG6;Apn#k)VI`agl$6i6=1OE;^jn5Z7v zk=tmHQ_#KS95N;6^cH8AsO)8IlG)^-_9uC=FF8UlL8p)$wq2Q+vurV4tDCbDTFIHS znlRVVv6sJfs=u}T%|VF`BlTg85{`?W`%#-zD^d3J`>u1}cXt|Br%{DinO|pP_0UU$ zA1aH*=o*7kIx}eA?qpw^0BVm<_+Ijn&1y`#wul;T?Spz}B=&9A*G4{Ilz(dO&)mb4 zbSv+RX-N0D%VVXkUS*Ayf>I~w7vk!=F-7wgWo?eTW)U3#m52mm^MGkKT|9Gz~5OGg5mbw|IJg3iOb-Jmx06{{U% zLzM2js^sV&>WHFXuEI-f6UZ#u$KH5V6V|G0=#yp}g~9o5RM12Br|{)fVAILUn1RmM z+v`F)#|(c9zmRMO0;zb0Jk&3jtGxR*Q+*Sf6JG{HDsFe0a=#!e2-<=J+B5ZcW+qAs zs;%fqN9Fgs0pY=m%c|E4vt--#()4@$eS_*Rn{=8|X0-xzSbA14f4P8>txG~ir|eV1 z?oN86aMPqhvTD<)Z;+{=pwO0L7b*MYGqyZcZg*W#g5YDBTg7U-Qbq zJ(X#;gBNy2WHPKewdoqzN{y2IiUgsvVPvkGX+~>=%FM0mpkpYKzwb(( zT1}0)0S!H@Lk!qD)&aJ|^HSNXnYwvXYKfcClB5@@_gWnhG)2>)9$3HwjBSUGFtVB8 zh2ziSH<~e%+zMH2)@+qG+IPQAh{lMS*GP7&feZu_JA8~8MsOAshJ>LAl*uS{c85#c zW!cUd=Wrd{W@XPTzd1H*4xiFGkX;E&Ac1 zjrDQOX{7}wK?XfI-+VA+@KhNpEd^d?-WYvM2w}$fQdVQ1y))6kO||^3aPr0}&uvf| zcjldFqQ5(-F^zmFHDIO{3A^}g!qeL4n@FG(#7PmWnggxD0Nq}X>Hs&@+ zFnwUz91rYahw3UEC-&rSkgN!CY}tRCL4s*E+5O_~pU>m;btJhnF4FOC+cL)4b7 zq1m}$@$4z*E5Y1|oq&aPN8DI+&n76iY(ZJP`xa#ob&tDd(wjGzb|81^dWT!=2v>89 z7HS{(-FM#${A21(3=1NBEpOiEge#M?)^!youA)^-xn8e^IhV1CEr4_-+LY$V?0>HRP_jI!J zdwR={lfJf9j(`%%S&KPWi;g8c%kvaPcQ>z-JtW2>^lzUqTg$2ltI%Dtx;KII3sIdJ zDknkPf;ydiZT>tWirmDl@|qLt;(Zo39nm@=X+8}-w6cieDETv3N=I1&OR~a!a^Zb@ z{6UKPAT1!<51C*)li*FJQHLr*ycLhrvrcZk+xddqNC1b1Uou8kh{>j%O(_;3-t)ht z{P$L=Dw&+C&c=^LtrK0srMRtb)rn3b+}z=JTC6&M1gUKu+0pJ#yqdjwI+6S9pSCDO z0u?^HKhq;My2M?(RegBJS;-JLPsFWaWpNJ^vZKV*>L^xg6Rg#@yZ;@$zj7RKn=xiM zMr31u6aKg5i!VJBF_dN zr?QrVB(xsGeQnd<*P7{fWnqxdk&~)rtFP52tR#L;bV(mUE?>hv{;qgQJ zk%3YSvr-k@Yh_+oFm*8mU+U^$+qBU~ncoz2gFg#+56OLh?a(NF;!R_ZZ-?D+Iuw%} zdd)*jGoWZwF(aroHzWPDjHlsSv$cEXJ5U{w0b6h=_`>CnQfgMGuFz1c2<<;%*3Ywo z`#7TcNGLvOa|m7Rdb1Zj+l^qAfjDA@B6H;zuY%*C2@psEP#pm z#7V(pcuH(*H-exU_u_pdX+iEYVx={23Ylro!aF9L9BjZI2S@6IRa3}L=52wnU@pFo z#bDO}QPTdtYTu8>Vs7L34O^j{urCc-efV^93hq2!OGN?Ko8VMYrC=WVCX6<$c7hJY z*5}`34SaE0zxIkIcF5(S!KtJ0aMx2;N>AL@VMH^dAe5DI9y!Wg>rk4GKht!MAT+=D2|1FcNt!Ckpr631qatwkt9fVBp+*z&munWyZ_ zO^}o8clU#f1Ml}^R=|N1dN)!sNZT7OkIECUOvuH-d}7 zH6sKqIsdXVHYDG8Xk)%Un~>saLYHGRngENkWe~}r?WDhGsreQC+wN|wDge=TV8U!B zH4{i#JH^lG=`y31LQC#mwdC}I{Y1ffV?RjPOPacfJpIu#h%8&;WflTRM`0CXrKGZV zD96q^`|4PJv;bl6$rOMbZtSXhRL6n-RC@`8a3-;mGhErt;9AhW zXgTPHJK64SJo@QK$SdI$QPMp--BCGs8R*1ygKH-Gm3+FA=7jn*&Cn9_01S_R6T*V6 zhjyP#omDMJc;?cuJr5&`g;Zv#F)d&#ti(m34Tkn`#OC^Iv&XL1LaTcvBQEVFxk2;% z)NfgT@k-N$Yd!HJtJ3DS$xUN}Y(^;=R{ zf)lHwZK3j8SM0;aRqJZ5yKQmmGh%X*O6lZoyHE0jCUy~*8h&V^e|is?+z)RI 
z{_`~*rGIq`kvq3uj;gR+NJ)|FpDa?jAm1b7?CKZLai(E0en}5C4lkK#3!n4FQHT8tvcAzHPa!%BxI;mp1_caKyH4wO8Z@ z*ckr7v%%~VfP%c)z;>GUz1y`Vlr0QCITAORNb7FUg6CDXEG2F?cih8#E0g3q>Oot* zkPKby4$p}>Hmy0txx*hJ2`0ofyHg~_^7Z7Ip;1d1pQahCb*w5mi=(n>mot@HG6(;I ziDiEkoyn+#9*X|x7%kgu3gZ){FuW|E)=m+1iuBqOko-O6rCVR-TXtuEMzEN$YcJFW z&c`_}-;Z3Mknc&?m3PDM%0kIhYO>m+PR^}7p~WRo){m_^cWgHL@BQy~Z5>EG_meX8 z+Y|I!;hw_#J3L+w=X|msL^E1zv_IvfhrqE=Ne9H8nChp+BS+DicPmAAY)w}swDd&G|*Xk{_X$e{h?IDG0qaDqM#^YBkq%wdd3;LbNnZ?@&q2(BCx zdN1Nz-7BOE@ph%Tl{sov&;*|PEAZHp;)MUREPQ`bK2-nv+y7t4fQXsYfddS?)Qyf# zYy3MTp1CzJ;fuM%z)UInajUk)l>NvW4za1j0sR5dFFrC52WoHp7i_TVd^j12x3tw4 zEWN}?dM}`(mOWCu1VtyduHNVA%j7H^Zl~I^`uqsqqr&35I=yjp=kr5+gyw>ZT~2+{(@_tV zYB@q>Nr}Fb`if@O-K7B6#u>L@!gp-4j|oX%e{^j7%MN$8J{5YPMd05pw7njTE$;^b za8ZzEEtDLsk6kpL7FJ3H!K;q(y?o!dVGBlh-Do~cs9^2pwFPj?$?jS52Wv}LYb6s= zqnx%Ekr&c$$nN2OmzY^xB{m($r(ggd zbO!Uex46W)4`b`o$3R<$(I|?vnneEi3dRsGyqyx&d7B>*57t(Kca%7e8W~N19i3V^ zdMH|x9%FJVDN|G>yAs!f8tDnBha4i^Y3T6bHux+mDsjzF=1!$|x2U`K1mEUls>;|d zHtF_FSN3;+D|8$xyh_962+ih|^(Mw*VpNX&l}e}!^VO2th=y_`HD52f#H|9!FQz6{ z8=oBLNK%f`hVa63)TwQ8H*x&-%Jtd?YuZ>+TZ1xrvoizzh2o_&qn7*D?@pB$zAG9C zpEvJP*K&C6_&;>)FY~9r$+FIGD=PviS2|<0t7GHx#GADhuKnRr|JKgFyspAF#md6f zZ&kngPP!;j^#+{vTcIV1h;IwpOw`T#-TDF_yFlw8Qv5`t_Ocr8yK+UR3mVu1Kt@U# z`KmySp8AQ?u%oA5Bo51LMpIxLS#;$2xUh>eJQyro`%trm&pSQck?}JD$LR;PHPpZ- zwH2bIbD(5|YEjQCr;y?y!q8$A4G}j7uK}6#I*m4p5nLp>MIXZ{N|x)Nw&+o?#=a8Q z&CbchDMr~}@rKi?f*wNXH<_n5rPIVrdXA}RUy5*+3NM*@Nn;(DiUEudkZp2$6BahW zUS>VaQ`N^B0~>$n6+}#tJq4}s7ovcVxR*N>Fyq_tRW3K({sg_!h)q`S zZkvAJaoxu9Sz-pa&J4ji%?Mk%XGR<5f6V_1ku`2#1u(S8pdZ-B(lG*G(cAdo ztQqLEy$0L(Zu^~xy}Ov8>=ao_r@~RbiXS`32WEZot^^B^#mF>abbBSIEw97!?&Q0Q z$}IZrTRXP@JJ2aJOqU+t*Q>K_p=K+&)E83=S%K$sUPHj>5OS>~&E_o3%Y108l4%9a z#!&D+M4p*%ZtB-xbR@3uIf#mke4QTX)!w~c9ZAE{SAauDL*+Q5eC&E5A6L9{K+6SY zSi_$g-m%-_SFI_%q>N74%O3;D=tWdd!Q_dZD_`gMip~>hb+6vSX;^*Bgk#o zOb^~F_Sr*Nb*IMEnsp{U%NC^%;7is!rUek(%L*$AkDSasqH;rE zYLjn8^GRoSTE1-xwt0`KYjkVmP?<^tjd@S$&VMI#>bj``)dvW-fyf#s_^PX@O-&ADHs zn1gC63Wk>!LCV_;Br8q7ot{V88x~v3t6$P>6&j4AIJYEePEJynx0m0~{ZJ%Jij1bj z2|%*83B$3o+cG~M)N@<+7>7q67ny4zJ1nAWt2PJWZ~<{rM_GJFmz&Pv0C=!x>Nzje zj2)U1z3EV)=16<-)Qry0t&u1`18t5cxQ(zL-7u2X0wa7))1-8VhE$0vlfv}0iFLb9 z_|-I?1PnU3<4+$#@-luMNH?`5$F31WJd)_kH%Jh&SVZx(AQ4}WVMY2P#b__`a$U_Q za7wB)W)1byHnP-Z1M#TUXnINhO@tPOC`UJ!n~%l# zb~$RiYW9f5FgQhz7$T{S^NsU6@kr3bQ2E9CFAJ~zz$S;^y*eNynpM?wV!I{ehJ=Y( zWK&M%h$wL<6lKsj=Nco|ESaOKu%uVWG5j?sfMZ(FCNcMmYbUGOv??*jt!c?cg|t(# z0h7hf8REM=eTLoH_V^^XCic-9{+hku~iN?ddD~XsJ$6M z8wQs~l-2smS%lTCk{fp6ON%)l6qQc1S&&mxkJ%w8d_yHxJyAZ$1r7(IlShJH&PNoT z|I9jn^$-`{^h8L$lAXujUcjk)L#+T=)u7%iOQp68|7QaQYjmI|R0H*h= z`;V8(}GapmKml%C}Q|JJO&pt1EQ5vYhu)emq zeqoN%bgUw5K4BYd=jk`NZ>8RsyVSUshS*1lCk-THT9GCm*SE)s9W_0_KwA^c2n`lV zd9u^N(=4fS4Pi#obT-3h5Ve!Gt{YtN?0x&BV&!(tIx5HEPWJa)*#V?<58c!F=}8oY zO@Zr?c{7-QD&8VX^l!G>Y}A+xwKaGyq~X7kE(|lDm)}Q+gG*Fd?v2HN z+MRbqjukKd){}DDdF@)O(vO)kge~_Rsyse+y|rJuFx90SoZ`T|$$%K5iE39W5)G<_ zGj4C%=_T=O9F|>y|DuCA+_i2{>t^aHlxd zCi?Z3DUPGD@ykhBT6YXOXZj|_+)$2O_EmK?S}L*=7A|pe?io?Et>0%`pZ%^lbWc}) z&BLU9J#$jiIs2Kb%+So7_YAmmxo%w?n|rP=Sf2Px$dya<+e;2>L!C)77)4VY#5K2p zGBL(ceNWH<0ih&|S3-+WT(D8DLFw*MTnkXK!l_~HxWI#YN$^jM{LgzFA$h>h1nR?; zBW(USrdNOSN5&7VebqKX*Exatd9rcZnnB$ic9Wwd8Hz~-8wuugRWBz*K{}EMtZa?ChKTFLLxMqHLD4(fJ1` zap-;9;^IRFE2@k?T>C$HAWzAZ5?AN%enm)fX6yV^i>q{le-kk|fgS98>T$bcYno;650wqw~ z(olW<0Mg)--tf(rEHT>#Lj0}MK=T)yyhlFGm14nSAwliufABG!=#lPfiPQPjJ)K^j zBR0ekJ9J*#X1Ff!^!*ubA{S;$ng$;#GHLp_CeUH!3X@^w;7O7$TG`R zBu&ENee#D~E-xC!*}fhD)VEHT{`uU5paAD{dpq()#ld_1Pnby$(M=9v<$*+v zgG%W2w$Av6vBDRgE}P`?yF*WrFv$}YktY4;O(Sq=?Gcx2mcM9_3K3F9HFodHq^J@E zslWS>g^^IQvZKKo>dYMhqa(o`D5FNg~ 
z9C1C(gyUp3A8>k?eiZNU7*YV2N5M*(1m!C|ylb5tFGrldgm6Y+^PGNB=h?@sCJQ@5 z(jwldnJ>Fh-X7O{G58+}q{}Df&a}NOB&5djMWP+#xNut98}-Kin*jHmVS;mCk~F7L zc^-_Ugw+y1AR%ero!HCiJ&%64hkXmNvx~+(oKb=aj-aB! z_LenJ03r{=@^{$AVib30-%Bc7?dKQez8eNLl*-K?wCWhR16wiuJhsOFTt&}Me>0Da z*tnHoMKEUFeYuNDl94jU6`v*s=S{#>J$J+9x3ZbH+d~^^;nDY0pQAe8R!$5-QHfeq z=3g5D7k`>dDek)|EBC73L;64QECf2P^K|lS%(3!hCy^z&&E)XQ1=S1Ycfllw z>P5R==o!xB$wvVzhV?HrH^nWZb4TtA=Nye2!$*R%v!|MFHe^$nO6Wwl>~Jh)YHvc$ zxaWtaNClG@s!@Zs>CmTIvUPDvJdBW|IoJ1@!Pc5PfXHA zzh((*<7u0=!30CNgfOv=k>IxB@*_sZX3??i-_0_Fx`n((H(|Ij=Bw&``3MHtwr zjQD4Hr*v=&c^KSgu(eUCCOE9fDw@VEpLreB7QWNFbt^T*pfoqByneG>G*OtC0I!}7 z@$%&sBt++(W-VxaQ+i;EkH+94N@2b>mUBBTu7wREKU8zV%A)|>*_@fY$~xRyz-BEjkuY&EDS=zgT1%Ij|GGulc?Wpeh|$`SoC>WWWFf<` z(q+d)!Yy5QpkH=0$saM_xT!1NY z=2}dkE8|qzlIA|OoWUy8#`n*&B){Pc2QnEl$Ow~x&=jVBg^%_&k4Jh+{}EnP_jIC?k`KD z_~s)J8NbQEn>i+a3=jBd5V2{ra@9VjH^?a5p%DU!4o}L8!-UzNkG_L@Tyy>Nu>|wU zV{%$mSZDP?KbtacQ#ugUpU+%U3o&;2Bd*KOSwWfj=&?=Nm+wOEO%%+jfd~bQ?4#Oa zKLTzlhmR z{_uV4ZZC*+T|RQ3Ec=h7b|u(4F5Xre3%2(+U`;xfLtQQSd7>JF7%6UD=WMy(eXSCj z9ecT-!dF2HFhq?*JCZD=>yQ5o!Ns6+Z=2la@`)(a7K21VKc}*Zj?oj^OEQnTQ+lERL^6Q zXtfPca77d~?IJ0kQ8b0qRsNW`R}=Lxjt<~p*WqSE`A{n2x0$=If!&l&`l~kIPIdya z+=T^%h3l4H9o;=#>*ghj2q)V2%uiIzC5~BQ;s-0ATfEz-BL!yENRO zY;5uZ{^=?jr=eceR<{;#&AWxukL@@IsGFDRfVw%}&$U*fGWdtZYnEh!qPxdM(BDJA zHqob$l!WRqVTQ}m)gXW1iYLC|{AfYrE3Q|qX5eGKO1~!Ssle$&M4Dl(q?F>mX9>3* zuGut+&+`jo&F3r#bpyQJZ!-`eed*tc3pg;iF9LKKTEs!*UNTo73c5WQNG9a>=|SCf zLINh*rpRDsJD~4=(La#zxLyKuU+Llk4Urp9nV8tthj$~EnF-!bi&20B1csW=V%OSy z;Wk-IXsJh!YS>-$27n3L#XOGH-#a@t4t}+Zw>v6+@G8zDluB|tOXV)UmlU!vP4+~g zQJVcYttZ5NOZSs?qAA>9XM_NJjAVLefkUILB;{kF866~b&&_eCb|aXdytJy$W(>+z z&nITx9+Cvzuf0eIhV|CDSSq|~@u}5>okYaZPDe`p(R0SoRhOj*gyCr>THafu^zQ5A z8mO9M(9?1|=Zu2GQnLHhl?cP1xlGdxSgd(N+PR?Ur^|*W^sqb(2CAei`9FyK%Vh-# z6>$2s>5*h{rP3Z@Fk=Mdzqu`)l4_hMG?{E!T2m4{t*EK%HGS_ib^YLJUIMo#V?-J* zss!5>qKK9pIwgCVq3n+a$?&oXr<(zG?#pd_ZB~s|rN&OJzxo24czgF+U%}o=%q)xI zsKmt{X!+$|?g~%fm2pVx&Ms!&3}o@ zC;W5kxkIE@{M)wy6TUhR`^e$J_p%z@`wq!(4n`yvNc?0?jsGLh1_aW_wIpe7)&Gl6 zWMD>&)WBg>H68JYLip(O*_$A=ayGw7k4}!52-t2?ni$3zU&B0q87RL0)9QcxJEz-r zsrU+}hjg3Dj^#9#i>qdtfUPFQlV?fgVO7!6utv1^@^4lqgAxv)Z>0{qgJA=U4!H3& zg6nJ=qJqfuf^(kYKX{VxBWo5N($J@*%XC0qu-0@)TP7qZApE{XN0Q^xq=VX)5%Y&6 zY!U)Hb(*<1*mscx=!T7B8mXE)Yt>9dM7cz#-&|4-Nqv8Gk5B$byA>m{P*E|hnSlJu zr1HlPS&V$rUDLfx!-Z2U3rC02BY7r=#>L*A0;%+P0SDd)0+@^EKDmOdJorcoo$;c^d1<2@Wrz(@72~!WN3%Y-kG1Y^XK1LmP;(Sw5 zGfUpaYYSSAmDj18U(my}{tk%V?EqE(hJJLDv60i-N7@-?SpnT5KmrqXpXldsj%g)d^glxBV;rb zZrVWHpW^$k_svX3U}>ksztZRalZ$DXv%T%D#+`NIXQLDCl1qm(MV*lRQnb@I;9>Au z#tWuF1em>8-mc{KYfME?fole8iLVc*kR-|_VeO=@W~ zmgwN#VdN%Nz?P~f+8kYJL3o)`=2|dYQ&(H%*zF=tX;R~v`4HXd75v~>+6I3o7!Np! 
z)_qoHfceNb3`AbTLN)!0jH1H`6@@OQtr}?-K9m$m-LPVF7ScE|haV7uWrA_kELjeetUE!{{Fk=743ss&s^}dQ znMJ^HN8VlbW4og^+b1CLe@+CTT9pYE9eSV?2%)nh7~~+tE^5@G+sp2 z48tg&HkAVG*G<9_KNT(paN9+j4C7UDH?zpm?rp60iQEqJ=GKmVvA2nrBJTQ4w(ATG zIJE;p=R>d4XIdmw18ayvr>MKXZvDhJuAof@=8XlXi`8K*|cBmRS~wB&`BXe}fJ53K zh*@;MHm0Js?@9F34QesyMr#4VigHleC)_2Te<+_am^Q7NK=0mF7!|{YJ-Cl+QH{VS z4#$wPF}ufN706f)>e`~mIP+VJf|NFH<0UCWTwpYS4$wGgm0U)Q>I-|2B&^}P}P~?@q=tl2VMHp zE=ih(AYAKDZfE-)UWLhdtIemsP^ZN?J6oBKyJBoxf!$E^8p5Jol5lPsbgG9%<&<6tHnB}$-LjTxocy9%*uBn zU|d}{f21?qJiug>hkfTr0#__yb3HtYSa;P7(MfAwrz__XZsj&ysPGB{u<-5*xm;UG z>N@Ymq#xloM7g~00P^*ZcK;zK-Y9(<8DTc0)g(1Jzb{UuWC{*a3MTL~^0A z+b~|%lGBhh!SqwfsOBA$ZEiTq)U#Y?DAeXL4GL4y5~Y&LA$9ug#W7f7i6Q<#qKwp{ z39m*8x@~I{a@;?g0cwl<M<3<^M7<2Y7L<@ zjYU0sI7xi(Jh)T%`N&N~5>-ifEy?0Ak1@{P71LLPTOTywi!1dMM~0&yVPijRVR=hR z=^TlBwrTh17^r`QPo%ptz?t>#A|1CZF6#6jkeV<_-{_*78G5b7-C!708ki}5GD!VM zYPm|?8g!ZpdT_(S)zPKGMxac*%t%gkL&ed8D%iO4>;$ZLdU^ZQAp~@IpEb?)1~%XNH@{Od(N8%{M}gLSfVO=jp~x8hAikv8S)w#Md3!dA5-$R%uoP zvWpAz4I4J#s02MSL+oua4NE-#aQj1dqG4}gCRs|J5<7n<@MwWTTJbiq${{+`XijWPP(bkR1B<+;@YQQN6fcuv7y+3Pula}P=v1nAxksf` z+Hv#5$v(f@piVA^iGt_In{lw&tq4UzmAF6M@z{M8lQxJ zEtL^cJ~>42)DPm;mH@cTcX$K=@=>F$s|mS6lW6naLci5E*(Aymf-oK~N^0i5Nj{d) zzSI=|v`;+lsZBlp8?pU34Az25`=so3f)sPS&^zMFg}o49LqcVaG;h- z?|A(%S>#N*L2X=54r`ML>5A7`87}d|jIX0@O+MoC{ItmJ`v+*VIw5&Io}o^LxN4OJ z-D+V;meT7I_a7HL)%P)05iy+FuljLPi-Y#687?H&nxQbOmuD&-ORrmdEn~QExbV?P zmIM?0_dxTP@KsHn;Rd|!QXQIE-*X+GO{;qrP>pgt znoil-%)T3cjobMv`%uAL76kK!ZUv z$8?&D{l-$jouG_%w^LUD(YZ?dESmnSA5+y|tkX;QmkP7be` z@uzVUyCD4Hn6$K=6OnO`o4p5>Nt+~hBDc`vuCv_8?9Evo4{aDmWjKlBwKEI?J?ieH zB>4s-rc!u*Z|hl#)N6M5jD<DtXu4~fOSZ)YPoX&%JnzyaJ;{oQAY;Vs_Y zwL|?u>#n&6H4ZN%6MPr9dHJMs~9E+neCB2ky&=1 z>sdS3uk%Js-KAHj&3`z0?or>zBd7o`o^Dx`C~Nd6Q8z7PQp8@ncs^%@!`Aj5M4r>p z>7-2Iq8bD(LJ4S2Omm4L&#|iTNx&|OsX~`v`?6%bw*?1rq&V8sdc?idT3wU=PJS^ zAk%!1WA#11^y(bL+0r)Ar#&C~&sYN`cW2zMLeD|aLrrQIATy;~%*hV_ZSI#)MiBa{ z$o&jYI#aX%Xh>j83tf`V`1BA^1K+)omcVI}vZ@v=Y0A$!g9lp)lW9#_nylb6D3n?I zm6!et0)BxsMurcCYp0ISL+ecaa11?S^gvK<+It;c?q}i*Z zd7NM%#?n!oY4m&K_~KDs%IGIk@^E3!i%FlUXpV0xx!wn>*t6JGx_MrZTowr(FvCe< zoBjJOV;r8P+G;1$<>Je#=B4uyv*vRotpihxDT=Z}ksy|ZiJFH+ zcWSvt)A#nw+7d?9=NoJ(<$+n0x*n*8M(7L1o;V`GKb@l9%U;g{`~{yJL&Vsiej`iw z-MuLfOvjJ>jJf5b=Dub=$^3jQpFEdzfrG+UOl7Ls0tQW`nQ0?LH^OWLS}d2JV{u{N zK(4i`^7U7{Jr%IX{9|x@?k!>?hLRmVuw9FdTngvI{F<+=Z4C{Nv3gHB4l<7pP;UAUTk5<#6^lC}_ zE+07SPV)udJL_t*?9Q6D3!&UULg)jMYG(I}l4ri(OG&=|@k7Ubo=|znr&#WMPy7WR zJ8BPc@-@%ymA)T=C}@uq5P|*R8-!oKGvM(v!_mAmri|TE>f}$Es}^q2t{SypFJQ#loNs4uH)f|5lzRXuH+)9!S!#tGNz~-ShtU#EB6r z#q91k;eRAgjH({OJUU<ynz- z(=lqbV?;la9`CCv!AjN%R>B~K!SzDi^+HB}7)>SzPoX$rfw+&?_H_fG*Ak;)(u{_s z+uzXN5}skj1vjK4aOZBrPj2}O$lliZE!Imyqw0zj3d!&wepdcp}DNu~U^)YNbG z%e*w(LkEc_{=myp4`~k#cq5baVr+&(LY9WH!-XxhxIzmHdvsxjT~=0heZ_ZX`qhC6 zJDm@H_Xc$#{R@$aZABRGfjjAh0=on?ZBLt&%Now@*>Y?-b$3s$#ji1DiYk(jyiG=W zJS&j86TEWoU4Hl0N6>5jjb~n-Wq@w-kRqd8Ju}SsLRGN-o|+CNvz{kI$u>?w5u5^0 zF%UfTI6}c3zvVMz>Si#+V~4EA_OqA? 
zm)xB`*m3Z_7H*)Q9*-fy_MFBQYupC9Yk=z`c;v=jz7ZQ_7ZnT&t{Ztb`qcO>F3@N{w~sgzC==?{(GwvXaAp##kIRwm{Q3Kg zP=zn%CUd}dIUR~J7WR!#esEyoXFJcl9LTFgllPBx0w9Azh{njP@uIts=zZ|22B77t ze2pdvy8M@RZpjV-1tSGQf0EA9DJz82F3$`GtzICO3~9>#8G5qm@c6 zr~I`K)09qYpvJ$`3iRZsh_&XNYeG`{yhR|W_n~?3>(V{*&OvKVagA{&24y^n3`L|}L zPQ4}adWo{i%I{P@8rJ&AFZ%i2a(XS|6kQPb@x)w4DO5wNx4jEAr_~T~)xRto@KXyu z42TFI!J$`A%xd?Pga0T-}Y&jjq|ZSeGQZ%)v!}uZ>1gYwL9*70GLog04G+ioU!F8T9aa;Y#-l~ zuoQZ}aDKqJQ8^+3P4c0Xx)s4>S-tXkk(|lN$%e+p6y*==k}iocBq%+ z>4eBDI9Au&At8mG%mU&1BE;E#=`gn#(1$A6%;i_uli-%Q!sDp&*^ney^S3lgm8-Fl zIDGUO##>HKuJ9XnY?BXl?)6daxJ2n!iMr%#x!9LXW#MK)O(&m#Uy<)w9?l5N__W1j zY%k3XL{xrvh7_|0BJ@bfuLUvqxc>hXi+#jMwNp-4!jyJ7qziCtK58)i=uHRe_>kM7zU{u zhD@ca29Pm}AKAjEVUFX2MpcjP`!fUIcA%N|B?(o{`5zlj+pKukxyyW?TyUIL_bL}s zGh&Lc@5hpMKyGS>wmI;+x*u=bFaM%;oevto6q+Rc{p)nM;oDc2>^AY6Wv?mb<}pU9 zI+;>~tV7aBHKz6hUOcOe(9))!J-dTvriLXrRANeFPHoTgA_|scu<(|#ab->CfW4*4 ztThkI3Px*@5FyCc6AY5s>u#Aamx^fQ_dnei*+0ojlr6_yM38l>;>=fel)nTkeZga(aHld%sGw*1N(c=lve8=Ir~H zG^tQ*PxG?Zy^(cmxf1IY!@-Wi-T5WPun)zD#LFI=RsGqA;M+^&@SJj(7w0*kVEw@l zQD-nxn||}B{LYO(c;A2RUDMXw=lIBP{r34i&f<*s;<9%xjTU3&CJI^o1*FD}X{T4( ztQR1|WoP_Pg{(jeG||)!4V9-%x9@~oS);; zO-{Demg^IIX<-Fp_j*9*Y2nc6M%L2wf6NiU7FQBnlIV~kM}t}?XAbXZI~j7qIJUvv z%_5a(Bnv2c&f77iI0q-TYh;YCOIM60Z8Wuw7h9qUdx2Wf?^-K3Fj1UF%GUVbpjv@8 zAE)3HdUlC^+NRntg73~C{SowhE6qR-rp<}zmI2(%F-UPZW=^~MpHPgMm8-~b7-75GOcu0Y7hfTb6*M1H2h?}Zjii6YTR3j~{W6SM z3SEIfhuOezTz)ppc4Og)eR>AMRYMMg;RmYj&ZR=+RL8K8(1hS>MPuu+H;=p{HECf9 z`4gICT>s*2Hyp0G?8&*5qACv1P7`d_4Sjys06>3I+*NjPqi3+62Rwax_Ah1kY?L1XwitCnb1n zb|o!vhv%8izyu6jQ$i@ZVO15kt7T~z_&Z#6?1k}e06Lc@u=4buAw=~{9xnnt74l+R zEOGbR<X9dI(>LVDbmO3;{I`1~HUslLx)Fo*50g@qMAPyPo zyd;P2LNjT6f-Y{aU*>~t*(ZPv6*tJ`TSCV>T!X27^8>(sTu>eTv3{It?(a`QGm3M% z%g%g}{t{u6;_xIn>LhS#PsSY>U#uT=^myc9(h(Q+gos=S@1(SwS5uohd<6zCSt9^l zDe|;Bw5PdlXmp2+Vu{`I(N+*$jtTX((97a@gNK)48t`g@@A*H*pa0qe{&mSc{!ISr zM1CI?B1{u-ZY8D^`4-qYJ1$|;YbD0IVf&}O zA%e?Glm$8(4!Sy)$+-vj5RSaNdImG%gHN!r%4&$*7ZBGcHf{0+;!o?+=Ou{s*x3W! 
zqrSyNPSR8()u?U@eEsgBbY~M9R5Ct?e^g71_)KW-&8VrB&xP(m~}1qVJS- zWYqbf#?3K7{sL$)SP7aAfzV%Ny$swW_u2MKjTPGZ^wz5uXU&!10o5HZ!`ID^_)Hp~ za>#FYhY&(frbPy`a0eSK_DRJH>Vm6Qz1+^fLO?&YhE8apQC0B%ZX2=PRLfBeA=^|# zwy2Kd@BO&J9arqSf+<$r-Tzw<2+L>o#eW)+Z;iLK&(FADzm2kWJ5j}Y!-g#}W)0`e zO{$n}eHujVFGH%eAPOF6MlchcfkEJ*n%>amH;wG`nW;U z<*L&mI literal 0 HcmV?d00001 diff --git a/docs/dli/sqlreference/en-us_image_0000001282578421.png b/docs/dli/sqlreference/en-us_image_0000001282578421.png new file mode 100644 index 0000000000000000000000000000000000000000..f4a29e3895e3e8a5c57d3fe596ccac890d26de0e GIT binary patch literal 32910 zcmdqJcUaR|*Df5D2nG=xBQPSxG7=O78&ZS8v4KKFrAQYQkuJR?I6*8RWkdy~Br3=t zy@j3t4n#%)1*ri71c?wJKnNj(B;O9sJkNWs@0@eJ=e^GN{_*```Srcm-g~Wk-S^sg zgtj;%1KR@MU3w($mYN(K{0%u$fNG>IyNk$9W^U`jF~i;+?I&2_T-qwW+~6~{8Rir^ z8uK3xk=*KzZXq*DP;_NkdsV3tY9gFZ%oEL<2+z3L7i%J_{N{O6GeydmWmSw)^_K@~ zyT8Sy#0vN|eoUtzKfj_H|HH!gh&K90(Q*bOj{nS|YT1mF9UVPoUP(OCdoYE@8^~qU z$Le8i^O!cn1;Y8p-glLFC83e|L8B1T#0CgN=V^qq&@5M%>-#G#4!QNCW%)-tebCCaL*gAq4g`8xYg+g^S^P}44cPgLBS zMKy2Vjd)1;^!_Qj&-b=U*e+~OOqgEfVk8udV*?3>YA6y66m5TxDOYZRb}|SC^~UO(qtLMp$Vx`o?g-`8==|P zjI^#Z_?E(q!Gq4&N`388Dh?MD@--*mVI_}J{fR-$uQ%dGPKOV($`vN_NXaU_d)sru z>Sj5ys`{xj5v5L!%-=i0`t>^!O(u_1omAcYW!@i)rB2beGXhrF@oaNSB6S@E(w=uz zvCsOf9I#{*gSDD5P#?jaBB-q@sfgH7!j8N=n<8tG5ZH6dM8psoF%=4w;D4GPk)O+x z&-9qLkzC(z_&M4CCG?snI%(&T1d}vNtE?7md{cR*eOWzbHhGCx-4gI3F~yAAMt$9{ zc&^c&yocNp{*9uM5kO5(!3Xem`qp(}R|^wW7L92!pGSLY%ZkM9DwEqB$uOkllK4X` zpI`P$8ARjAoJyhf40$z0WF-k5rI7`dqF@eQ*cJ6oRn#HkndF7gFd;qKSC1i5$x24r z1N&z3wr6@YnrH}L#@w1*;ylCQ>TX#i*rZd->xV=72g^#>OI6;_h=1ta3`ZWww?UqM zqvL#-(dAI&;8Yi~5o+8KvJezN0t=#6?cRo6d-ci)BfhKgHWFZY0X{OV_E zV!Q;@Zd9oP3JtP7RR64g`1pZe-`qj#(1?MvF<}*L6+Y&$kvgsY3H_;EE6iu|6CWoc z-!Z%W>*lHsAQ0{DZESBJ-z6};^?c158;DJ~fURKiPL26qm9n6^$3#R9UJ1P!<6@(h zKKJy^PCvDvcLX*0 zl#;$Qc}kgmrxN)_)VW=$@Mk5)FsMh}w>oAIdvUq3t~ec@>m070L8~LQaoZTAK5VJ+ zfmrh~eF{&ajg$820P)yxymP4&hn~gkc{%ojVg_3?62nxAqgya`JC z21VeC+2g}0}g`Q8L-6Tr~`?mw*YaXv5xGW_^E%+>i zS#=c6&vLJmvTJv=$2q1S0he>Seughz+>!ifQCHxi0Gs`Bmgy-Y_OEvZdl29s$V~}x zIB0lG5Fezjc~!^}oY>X6oJNb!eUe(c{bv-I>=2#*>O*~QEV2mM@InLQynbYQ6AZFs zyNnXNIMA_fvjrOBhus9by86voz_H2}1h;LoG_e=ZorU2XLs{88nlMAvTHC4CX2+8>g`BVJv?sue&S-?^%9Q6}NsQA9Sp}s9+19HWauf8L!Dv07tJ61}q?BD4JO;4ur6Q`%TvyeNdkWd_;Gnc__N zAyIaMJQL&nvmrZdoI^{jZyOfqIBC91nGBe&Lgwv{FGy?aMhTABrrIDN&JX>di)`sUdFt-d@i%su{2@ znG#|Wlljw^Ixe5V3j=k1=AuZJ}N zdr4?fdQ}*^$J8(_ggaLizPjwsnH$c&Mx7r$BTWtIE+vf6E%$kD-KiR7g^A>ul{mL{ zrO@7CIbA$q?iETbMfAnyBLx-;VtLW(u@99DJE72j!FEzRA+ zWV!P^KgPtVUXe{qe8;amc#LGhpMKYsPEGA`vG2%P4)|-Fvshdc;lHyZf@A*>y(|8aTn{ulO&)eA54Vfd9B1EYZEgS8gI$+9o>@bIAr%~q=;)QJjcoNEl@cs zu?w-h*p*D*OVO&3hPiO3?a^{hCMwFxT6Oo+-=+?4&(W_uRM7YqZ>ex_xNuhWSk#u> z-UDGfVLkLWlHKvfva-6{;rXSRypm2zS}2lYDCIL)XD2EwwblOB)&7>Wv+u%|4JcGx z;N!x1FvMHA&R0Ah2Wd)g5`MQ725|tn_wThtSEb%)dNh zCmsD?1}Jsy@IoSNNxC>(+f@xjRY+gB`xzR8-K?IO@1xN4n|-BE_rlEDbnK~#TqK+_ z;4TNH0DC`cnX=m9b|pJ$gtilNwLWwvT#U-5s%5tKI_^T1w-vi2yXAOx@0&?skqbk& zOcHbO^+r^u+wSZs!pO+dv~KBK&_K`G1*YoCL?xuf-pp-EH0Koh+rtpVd8c2S6vITV zNH!_Hq%Tj)1H46KODJ)kl0vMzjT#@qus9!8jaa?e*;JM zTI=!URmrkiNw(@=W+tgmoj#4lnF>xi^r$3KLw|2|n%2$LLP(kJaw31P7+k{BF6!{B zOh>u0PjR?Tiqh1fWPzKM97_cOl?_AB%~vMtV>rqEp@$H+Bou@ADTUN*oQ;mb$%cj)Vr=dL0fYGwSNjjC+)9CWSdlJ)`jK>*a|PEo3Go~;Bad4z1a-}EyM>J z4`03=hs%2CyOc(2)scpkmN4Bc8JB9PYA+==C(|d~dS|-)hps^0Q_~0pp_B8$-BXl! 
z)7?CS2**@zMji3!%Kbx@PH9cd^TYC7t@uMU;_}O5o(JlPLG`-F`04@Y$+Mwt%uN2+ zd>kYBvGbsM?LKO0JmZpFM>Skgb2YzYG3>G}Ud1H&XknBfh!^&3ztj84LpB-o>-h1y z&agmZ+7qmQ`q$IZI2Go8gkDbP`r->7I*{Ki>#0M^Z(R1T^4=J>ybdYGme+TBR z_vDTi!;Qd}M8dkhOw1-&6Z7oe>Ism@?s zZCThDajuE`eH?mA9v(>2DX;sZLITRMHFtVQbnPanouy^SCh|8znS&QU-{SAxxG@IV zpZbx7upQjE*-aIPvs!oA*OJ7@iTRbzG+mdRsNifftFn)ee;jc1lhVh+WfU>{_Q!lfxs}dSyh= z=Nk!*jAuw$!}c6+(wBYFxS*b6J7%hQY?-EYD&=)~UGf zypy85^H-38O)gLRvs5fvoR)U?oUuov75e&X1wlH+b3Ebrd=09x{H*h`GE{5V2`|}R zwV|Hv@O#A71YF?iYWm&L=~HMl;^x#${BDHCUU^dHgRVx?t%^>nQ?v^@{kmZ@FYXOY zGA(cG`W&_}o(;@ol88ZE7P4~8)2UONc&eFhS=r{4MyH1&voTY>-MsY58}yExmGI-z zZHoilm{R7U+F`AZHw!O`&iG}$sXC#$xBp`YO&fnE(&2EdNurd_#%zLGY9t*`Ml>s=gfjcyM=!>>&Yvp4t}}P~+^O({ zN55XTy!#o4npJ2B+sA6@U?eMiEtxB%TEq&1qH(;XaYZ}5-zA~R!nUxZINbCbNt2b} z(F9yLHE`H;z1|Vv;=i7{w16RhCEA|dTO{4pbZyD_@PVJ)Pgyu=2EldxKec*VuR5i| z^*z`6u2)Iyt;=_Q7q}W8aoE~1OXZVv|UpX@_xVkfCFL2 z4*X@UiPo#fgd)A0bTgBaaglu2&vp+(6#q1o>ZAq8e^b4IltHaKv3p-#9%6><`5!>H zfyP(lAGB`AQ{R=AG~$Dt;dpZQXa{FO445ExAfx_Na>i{;E*@pBIF^Ee3pYGl!jU6duqh5o7NWRaru^L z$m8b(kkIuUf0?MFDGl@4x${b*s!5@7T}WCKjdwZTM-id3F)8odi;Qf%q^Tz(ayQX=|UY{6ZM1#4ODzB82BgpxdTxF>L7M_r|ROO64^Q#ws?g-a_( zHI+Ax~Hd|xvjH>sg;%!=|!s3hu19c zdw~2&=ROnwe^J0pl3L#_bP{w!*db@`3m<90LhXhoq93=>00WpAy_89M_~YazfiSS{ zGRe2^dDJ2Sv$_heHD3|EH+|yzv*F<3?9% zzLX`B>`dN9K8^1hyeLPlX^OjnLiw+H4dd1?Jpu;0Tf&d&hz$NE8$*4(4}s9w-p*OF z)l~2yceKUM9a|~i2-R}Cx88VICdr(kBE3?DnGb;nemZo2H+cTx+Vj%88a#{0{T`Q5 zjr(g>TGm$u=0y$kAigKpN_6hV)PqL5bms7+B^j7Bs;aV)?ci}kpUEPXr`T7n;lh8l zf|^HgQO-Wpb8-K7%PvVhXR%2h4-HE|Pi})Z=mDAmqG*U}d@coZNng8^0i1>m$$cF} zCm65@$CfjpkcPgDIKqf%DT*vryzSpNxo?FJ>gnBH4eJE6DGsDD394Amd@Q3FE4!9l}mfuT21;4q*tY6(dZo<9r`Pb3$alN+IB&Y|Ipgqq*s z4_@^}g!&$d32=Id4{}v$*D|I#%D{;JGb^CIMk!*$-YwY%Pwy1b7cwfIwm^bAl2DCu z%^&s+XAfr?A^NURtAa6w&zgeE^3@@3>Il7TE5*;*M>iHn+=s!eG3tV*W@@+K4ZXi$ z++)|zRF&a0uD(Uf=2KyVMLvcak@B-TsibYjRIZCtehYC?d9pk;Lp5r}CwLIBu1H2z z6oNEHTnTOFr3)h;ls@u2?tw4D%*_@54A(5L9Q?WX5$`FCe)OwH+Q*uX-$3G1vcPZ{ z&SV)rEhM;PFs~YslSL11@?m=*dV6rVbpWSFEelbck;dqO#>SL)QOjfu0J&+RZ&9^Y zj)F66!KJ)}?TMn6Z+B>-Z>pqAm~SJ5mA?+y#Lg5KZIp&thjR7g_j(+_;g)lj>i=8E# zxtiFb=l(Km#;AvEi_8swGrI?I_p1vEWql%L{8SKne`n|GJ=*CCu%hv*=J~GIH=7BB zFUTQz--65`Y`rPE_z_#VXgteYFT;tqDyPYQe!S4tJdg!RX(^;*Ij+YF>d+`pnRuMJCQD**#SJ@-p*F<;E zpNSnscD)Wgm@RO|?_NxUo+M(>8PuS)Z&7b{budH+4%&I`i>NX)y#CU_aajeBN-D6l zDg}%X_$Oo`dRed`n?P8czaReTx=3;TpS6-^jVD#$tM6@|=sOBKz3LRbSS>TGUYg2%ci#GII={a__$9gQVUE|)ZjH`JhuYpk>d$$-h3m#GW?9&xI06qbd9M32G)vX9HZi81-75^sEj!FtLc5!4SELsm6jFgovrT@a%7F!U*TQs8W;#3C*@ppLQ6CFE4kU{ul{c(Ob)qdj6J4VC( zjtC`Fv|OUauxB1Mvs#=L@jZWebmR>T$2mK74=>zupBnfu6vZ z1hp&w8>AnyDT@lyxX;f)Xivc7w>@qq4P8v4O%xDZsV%~Ewg?WXJ%KKMdm}_LYhcA< zINnzjC-J|<&AxS3Rc{Exx*3&8cYhzK6bCjytJ_{e2zxK?aPzu}`YrM2V#&XQ%3Tna zrniYf0C3%emuYaLr&#vX31;j@AiO2@5+gg-|9xJsjhoLP zaYe3Yo)xP9>#08NZ006$FvD%LG!l4m5~Fy`ZkCZzK9(PXwG|UyC8~D`F64Vm8E_Y` zqbNcZZWQqCHivG)>+N`eE2I(=$+`Hz|?rq=|7e?r{7 z)M+oY+1ap}# z>#Tnf+vI$(EN|8&gIeFye{&64Nh#`&7G0>FS)rc3=lJkY#c$Mv3>4_MgYY_Kw z({EoraJbqB$)8W6A$b4;Lv;2Tf;pA8MHF2)V zV(`>*oe_;28Hl&~RULWb5+)t|`A*#BjxUYCobH_X8aqtnD0l*+6s{e^I z)cUS^L!hF30SS|9ljbjB%;**>m7nO}Q9*(uMet~xtTa{KyX%mz@gFJ~JNI~RgBSa* zO`+CyNoZQl8yCB?b7!I!iYzTw3&uznJ<&QCit&(2ltROsXlj|DbGIQWS~ZG-()_@d zQyWqrdY+)feZ+ob#*U|C#8-RGmSDoU8>aWHO~leJRHN=mTvWiK2WPB;*#mCM@NH$v0q#1zwU`JW`RaZ$t8%X#aSfH;nM zVz{aEkUZC;ee_FXbPWA%$S;4+WCV2JaDkeA3IK`5gWg-@Fpn+9R}Q~(LTY?BheoOX z-Rva;MAaggcC6J|a^KmrI^~7v^@`64;+{k9c1?!M$f#~dY+p%}TWf=pB!KeLBzC$32-AyVD!=x2!wNMdEDQx+X_DlQo}C;H_=I= z)=r02Xx6*}KKR9A#_F9eE565tS-*kRs&wa*5>SPMSO*NlWHow&z1quE01av_7@ObczV%LD0v}c zO)n4;j`p&y^d&Ht)%Qbr4rh?b;twXrBSFI 
zTkiQMBu?-rrj`P?sMCkYm5T8EJFDlYtwPs>2YeW-2tm>jjoZmh!r73U&JWh>$x4!R zwQie;4zNQOPJWi-e}jfakSEVbmo^z~KN(A;3fYFRnifqcv`omO{QLwE<~nvcyrsZ8 zBgA*7G_MQHrDn*6yX;fw&wCM&)1FBLm#cl06KZv3sYmmS!1RCyzzrXvCpcd&!HgqP_RZVntG^E6w>%6XT z#aakv_J?``b|lhmZ3uS1z|-HRPjXHblJ%Ym`^PI{$}<%H6Jo{5>s_`N1w`CN}z z%48q-nEEMSKwO3$+6ntMrVPyx$j7amyJ&2TT{eiQByZW+x!XwluU;A0+fyq|EX1u% z>#jvqnXeI?nuQkvF0cdEk3>#wgF9eY5#5#viFuZ!r8~^tY{7cex`u}Pn_>G-g_Wg( zXwzkCINTzdQ`;mNE&;u$XT8(#{zgYt<6ApTJktP&NSwjp0`e5$=i`|D-V6P#by6|2 z`QBGYCisyIn}v|JG}jZ=&LQX|WC9;}Xw5n%F*&SA3I3^4JJZbz%&c>w38NjT#?A`; z7tZzG2EM!2HCX&T2*7(m7#DkG^}T4C2!zDXkaS-R;3q%;+&~3IQ6#Ff)l^5-CRjV5 zuUtYl{jlc@2=*GN#(to}Papi@OSAyK5*IM!>_OZDr{DbYk0-5hI!Oy<7EFD!FM>-8 zkWHzs)18N?z{!UH^dr#v88{Zb>h&Eq(D+U(EF2niu$60px!o@@r+5Lc{rwyWDVt2j z!J99Id&^))k0|5;A;;RcP7o}(_}c|3x0plk`?B4LfS+0f%L+m$k|u|itkASqo2>8s z`y~Jx?fdH5sJ(ivq|fA8a!HTOCNKymEZ0y40EVA|Lj*zfpSQT22Np11BQyYn{BODK zpU|A3QUA8`M>||-_f&N=s~2`ciuZ^|9ZmJ`0SE=UeTkiybJg-i&Ro`9V&e%Z7!bmP?!#VT(ol`@(aCf%HF=(~ z9|5AzI*4ZF3}IvnAfVb16EI!U&)5Tn?1T%^SXypMZDgeJ9Id%2(yCb1T9%1?4~0G! zvzX7RH_>vYxJr{{t02xdW|^V1nP9hgArZmyo;P^)z=}i$h0w zQYVu(Ze=%ITkxGu9{wU%+0D=Vq(y#1hyb4ceFPgxvHQNY>32Y;r`5{0i|cZpQ?1c{ zych-dU8q6%CS18CsF=|y_TC7MgN7h_(whA@uNxcM(6&CRv+@AqVNCcAP=M<>vn3#8{oM=Qt(7|dJL~+UcP5!IJ&h8hdUw$A*T*0+P*Dk)tK6%2Zq6d(J2NM zFn+ag)roT+v?~r+Y`vcv!0?KFxQscVJ;EKWqcxy_ebBIITedXb7Zd@is_6bZ;F`*0LxhwYg;?$z z{gwvlPiR8VARAR?c#>Oo;T$y+fz5)7G;-m0g<6*!`{wA{( zo}O)oMoVe*iQPdT=_um6lKk4I3V;WU9?mYOz1;-+0IcCs`Sn7hj6!9`Jd0G25_xp%Z@v zKpGN&u{WPz1Mq)$>;Leezb1^qnPNMyn4QdZ2CjH*Ce}WnbOYDop75jQXFIce! z)17Wh7w<>EkHILk)Nu&mM~BoOqD~E)7(1<$JSuc?>n>pgOurOmX&Dd4M9eUb|NJI` zG&$c%lzSn%2=o43`OJxcveqkgbnBs_~nGQ+kYqzrB=nCsP0K zXx|y~hufX9fjf_=zBzi+kLw~V49%TuSADQsPyXviL+yD?HmaKehU3HS!xZS&4jsDMEn}#?al=PY^<*OjR2gw#U0Wd} zz5{tfhJ1$IR(oIb*s*?MR_H;Z0&FtylS8hpg*5v{vwuowpLs{oAyGFDcZHFrs-1Dk zAIl-Jh2KijD6~`Jj$DR8&CK|PV+(9%%m$4$SE~#!-#(KEulFG9Sd?JS?NY8hwq7h& z+&Od72JQE*#E=p77lCkt^hztk{^>?&yqnf0*c4KoeU9>o8X}f5{@0!+I1`6kRh~OX z=CBaocO22m>eZ^F6YZ!kffy^g8I234<$uO=4z&3KMytQFT2#2^9Wc?jVqYogYiH-0 zIBCt;Jn<}^c7Os%6*d|I5L0)b3MC!9F^fG`C)_D~j>Fw>D>4v;uJ+Rt*ubZj(xD-; zhCTZM>Bp8%bQ1-N3Xd6xr&#geJ{)djsMdLQ`%e;LKal}yrAjeYgDq65f)55yN_MP_ z*{!Q_Gp?BxXup#+in@AMd&&6#!Zu(!9G}3`CgLAoS$FWKwK_cU z+?wz^o~8NuLqgcF*rilo00c=X`F1ggJuWWP&9M7P!~W>oCahqLu=Xq%U~0-nDCYed zWDFM5aibQoQ=J7I#;M3&;|g=O>Jbo6r{%??11?J~*iVKOd>H2N5`P2M4hABpi%YFM z{DiaEwP}g_u?eu-!EW^}+)nZP2D$6x>n>T65}5JUkq6BABg z#sMhzf8(|WQkY1@cjPyYD7_ngLhpT8l#02t;82rR}Q~gGVU4tScZHJ8Y8LtxCrU0>- zXrf=JVuBr`*yO1yi?UX_D+}S&fR@9_O=Caer{R52Vg zH+&9lC73fA%UD?aR{73Zuvn$?rl6qwW(Y};Jhj~AYZG#{{Y_w|D_)0Ig~?!UIAMYp zb=us{Cf*n$D-&ayMh!x5{?y-ipkRkUC_sX{EY<(I)rJMXH63)s&8ycNJOScvRBLrE3o zV{wBdXpjwBr`$@fR0fB`f@*Udad(K-C!fl*j>izU(mw7-p@w^I6cY%3!~er8U9M#N z|3GH)@arM4Pq3Bfn>uKPUJpN=83o@17SgOR#2f&MdUbK3wwUD?HakF`?zwB{ahY8c z{C0{-`}$aE1$BD+z?K?mQF#>5#3Ik2Jcfhtks{4$sozd`#qsYI|KgKZX~}B0bsrKI ziJ06xnsHcq)Zm_ta=pGw(!TArT@ujAT55ceVeCXGb#**^FLW*t>hR?B=R54Gs ze-7g>_IU@`{enQ?yCO3w=Lkuv>hQ%^mU^eX?6Xr@1|5MjIJzb2Dy!@-POhr9H-t0W1R<|^X`QFySHK-YDi0N(uWt7+{%Eznq;XzgnD0X=yCf5V|qlC zNF`8G6CL;Wbor?XjA13WI-=tjZFBpEd&rT~&0!r&mMae^oWV{Zs9wuqJOd?oN>Ld> zA7`--879tWP^kU#nXm4CitDU_WXhLbRLr?Jh@WQXxzaqoyjUvY;+NW_XY548rR|KL z;s(#XQrhs_EJ001a*F*zo>zh)$)CHG)TaGxBUGAk$na+AO;1p4RE6aAH3xVWy4$9N zZpmhT39cX0y?Y3;oduNUfD#tFw@`uF`wysWabJ4h+hwYIe?!ULjO|Ny^H+V%rTg-$ zGRrB4UdCXDtPwp5)MVZ_%0VyYZSzVk<6vUcfV3XuVwyZ$)qZhI`)@`24cN~=WYA78qV;A|B93X=Go z;~YF)9JO-L_cGp{;!h_{mROeglnI{9?UVVbW%@D-b!J{Qi~lV4BSPCOx;#lsjP#c} z+50jAuv>CuV4bE_aQHp?M1agdLP{pJl&xvJ#kv~l==pUAd~e9+akn7HqAD4e$Jgea 
zZrksh4&e-NrYb5;OqATW+c+Pj{RY&DHSFYKbr<=E%05<-<@$Tcl+h5#UZ_v*iG%e= zoB)z%shssCJF6oQDz$H1oO_}sOHR`AtMC=S@F%=cDx2Yq((6N0$ay{P1T%@4*bP|v zVq%ZTk{N7j7?Ka28crn;_!h{G)#ZZ=a5LKIvvNW8)5?{R;|?5a4; z+z4$Bt9b+I+g1{)U7;1g$lwCPI=niZHGEC+^F&1m)fo>qMs1)5qjfYMChmy2rRnhA z`NUaTb3*e>llx50DgTlY3*t9^(RTQGZan)%e7DsJZ6?Kg6zF;^-hmJ{Z4z!9HtY<~vW z(1z9{cRFr_xWT2Dt*D1>is~I6Kt1F>{~`es*Q$fC2_}pzZ0a?8Tyh5N*D>9t1p8VS z8h^^S&avvErh@eB1hIeKV#z!zBFdju<7?$-$t_4!A}S2$S_X0OONRVw+2Qy`Mq^JW*Y4=Z+AP7niCT| zBo7eD~(9^eSBPj_MJ zAuapm=YczKIwRGo=JH~0YfPu!?`vUV3oO_U8z)knd9ObKf);Wg^k!MH@d1X%LC7?I z&U};kZzuXwz?jxp?^Fk2e6yE53KhxqR?hanZ$;kY9NLThL>>V=(xgT7A&?kDAYKpt z4nM&cg%|H0dLS|HCgX;r=>8}1#s6^I|F6J?qBu}nTMvi{jejLlI%dC}Nb3~-7z-He zphKPGkcq($+#ud&A-dvN1rm?I$jd|IZr^lF`P6{p8i}%8#kqQ6AcVUEz#bRHyn7G; z!UAqlQH?-#YF_HvVUPBskNjbP3`Y-DRSDQz;pf+G#NqA{Abr08#P_%jq)6`oY-`w; zwd=&H!85K0z*LHTc3^kI%xFCz&l8fH*gqJci*9&Zv@=;h$|e%oo>gp&MCQ+C!Uz2$ zR;x}9lKv`TALbgn}!plstyLLWoW}Wlz z*PL}5-62hH#j^e7qwkhi6-qO&Bxc0_tondQSzmS3Y4fjtu@9(v>RK=8^;XD_&JRWN zNFKCRqp79*6TY;NJC9pPK@mnpVd&3c!v1-X9ia!2M*&pbyhmo;SZ{8fCD3pHRiq3L z1R^x?LM-Lx?}ibQhVb0@kk21aP38Mqjr4%(W!E-0$*@DT)RVz=fvYzt`{Oe{GLj3M zO=&kglgo4X-@L;VFIK9vZ%IHM8P19QI~hjeEJVk8(>h>j#%DW_@|c!iK4Hm%NwJzJ z{$|a71{S$CJnn+|&;`oJkGAA9n+{R-)4=G0l4@S>cXcRbx z(OT2@QQrpGr?RIQs+AP>$6nuCz>O&0C#Zp)Qo*f6pwbns^-6Jl{_FCnHSR$i(GK1@ zmus>a>=T2$2ODg?&G=`;`H%Mj#9g)-I9Us>=;-`ST>mEy_N^9!OzU~4XE>Nk|1F#_ z=Y zNQ>B9#k+7gzmyXia^HUfHM-mX7W*8Gv8$mI%Yd$-j{%={0~F}xCnU<|)vEKI|K;9- zYD!l0KDvce`}-ZWqfhR5wvw(6TK1h)pgmadY~cAL{@YB_or=DY?c!0Ixr3gY8_v2F zpO`3Xa`k&{>?<4&?H2;waZ(ccabjC~@7!yU+d@|2px9jR7f00|`qT?8N?7~FeO5Y& zM*B)P3pH5#&qm{{_;2&AQe{1|2`+?+e(~g7RFGZ;k>BG)@w>mnq?-(I&8vTehZ>1O zP2czq2iua~xM0)IiR|zRCDEaISUGNWL@MU;Yd`KuUf&w|L+eirco+~zZ)cT0oM{bH zKPB4FKG(5l$-_CBXT&6R&2aq7Fr7<*DrME&x;PY=Z-N4=hRSmr?t*<%kN$3^T-+F_ zcT}1wqr|K?Vi_I?mByH#^7WXW(}R=Z*YW_i!_pQkudlzaOF-8@o^VBSoxcd+dLv|; zGSt!Y$$c44Ul)>xocG_c<>-pLXU##-zx<-hbtzAm+L}q0&e*~}5;zCQ0e!?n)~`9u zfkVSqgZIafz90E1u^dI1rgFr&-y&WT2>#PG7=sQgIh`SOFM#JhSL{p1vgtYO*co04 z|Gt?2f#hq;YyjJ3+G4QBoul%?$6kEwPU{ys`qa+{`y2i9kr?+Ci-^#jv7Z%pXPNrj z?@vg9Lc@Ne>IUb!SZE#il%gEbO(0}`d>)O9ydd9GekkQR(ER0qLHtO6j@5r9p6_Gj zb{N|Xh0etH>*a;M`?f_w{OT4t_@A%!NFN;)Dz%_~xv7?wT3S7|ElyU`J|+kNI8_fK z5QQFY2gLy6*blVIZny7F?r_Q-UKvZ-fjW`mCu!=D4~RmY;zMBj1+m69{#5W0J*in0 zUFm7NKXfp4chH~pm$IU+Mn0GyV<8aRzvhaw$dakCh#q9d$8tryu|)#e3)yIfLZzDv zfsPn(V5Vz3Jj}~fo$85GVnZXD!^u{SWRZV0cPHu19p$DaW|*8!T&VXlg|k% zsa{s%FiGd^9VOJ8{oc4}fno|1Rl~DU$x(ZbrpkZiEM=Wdc&;4LiOgUG=0A|^uRfo6 zjHODoXfgf}C!FPd#aK(h_E8yb-SlJa5&o)RT+)3@9s+p24QcvWYW;I5)r*MuHGM|+ zSM(fM>((945!H%PPk$`t^Prq9-vy}o5O=Lrt0pTo^iSCS(c}(np`OK>>y%sD0&AeL zYVrr`C)*#@_|5kS1W?WUFDI~G*Ao312qO?WV1g7=32O!74OPGhVvfcOjHV742{GBE zC@!9Oyv9S}*0=0i)d5H5lgG#)ko|wiCH-GvruY+&a`3scbzo=oa0JVSMV5u6+l*Ei z)ZKj0JNxf2u+-)mGJCm*6r2-2mdn9@2hn=zI%hSmcdB>xPQ;5J{p3&7bBFT|ZrTKK zalZ$iUIJcO%&JYN`5kccFogIa3-|+Ce+!<#i~mp-{$CXQmTj#4SGB$#2@mxx zV{i5pEYFKe$ck$W8^RgDYdi4zPCDj+3ZL&8vL^eNoF@Q2tQY3`#dihNukFZfX3vIa zF7l!Z12YyF7LyKUl=XKn03ief9Y5WLV|}O5#TM&owA?nqo`djkZ>(lf)DR{MGYiSz)2hmx13SDf3_&}}MEDwhl+Ec4H^F&7={Ix!@<>2>T?;ZXt zGE$G+9AS07%{Ro!`qu~k?)39<>2+#pb(gNHlm)58%=r7rvMM@CW^7-L#8PoO6LpaE z)ilhQI z*WwWyFin$TeuK8jeMpQE#^=7`v?AG?+|x+}vq;XiX#j>6tniAu+G;5$6FZe5|7Xy`4Bv1d_72&vn1z1g=iXjy$8 zJIhpjc_mHs<|gkuq0I~>AGS6YtC#_mG(sSKF& zq2jfhkw_re8Ep)E)p^XxW;9j)x-|H~hc4Yj;!O7I5Cck`@6hwMLyplk^?ul7%wMN2 z<|ch)l;1`twTHMmP43Xrzgc43Z|zVPv$RlBQz)tt?|C9J8dqOl=qdeDv zkxdbMQ%X=;2L^c)&#)id*f3kQ{O6_aXkSd_)}QWCTqAP-h3E(c#kPno7BJj$g6FjA z%Ael3*Irp65I4O&P`X-|JLh*)1}RK@A=Y0mej59_$~j5J(^Up`>aqAk726*)=6G?2Xv;GOd!@PfRBZj?D52abBX;y 
zLtx)m92L@MZ0St0qMefyYE>}A2m>hQI=olp%Ze7T-g9*%)u zW8shY@mPot(up+<=LTZgwui}!sX8-wrw^O->yL{aN7yG7B639LyQs_r54$ch-Lmy z7-o~z$#`?DU?0_>(0+7q#fUbh3uytW@tUxX(sUTp`ry%MRcRUbz%Js|_eb6yzg_$Z z_M%@eeyqaoUR#Ud*|;YQMgx_Wv8k@`>8z`HU+wX2jMn@YF;#*3D$4S`?ln`S3&mVfeA2+nazW zesO-Vl-@B>6T?t~@BvZ-jgo}AZI@0h|L%G`pa1@JAsiGbk{~CdeuOj&chzXza-mOJ zCz1g&`@dBd9Ing%OyewYZ~yk)lS)7tdH)X~5XP$KzB9KpzT~w5(vF`Sa9k?^Sd(Gd z0~vzKc;uh_j5f z#K?uV>7PgLVgfkLDeq*F%Y>Q`*07l=t=Q5-6Vif0uOWoB5C|?7hYJTjc3-l|{3j`C z(EHa{fl5956Ug9l6$)v84D*^4ZYGLHpBq8>mJ`X4I>dFY5Tqc0%0c7^=DM07#*!uJplrctGdcI*txNV3ks+axlR>NgAr zKdzY>fB9+2L<3Dyc4vB z?qBy>-D~xGtJiw}Maj**=bU@?-e>Rc`*!+P-Y;WqKF9e2C}s8x^-eMToU+j#$6z#H*CfUF4+2%^c< zNP3Be1x9p&+m2ak=MG5+qn8_$FkF2oYd-#PMGD)&K>c*7v^%o^h#W|x<5MmQs5}TbS8T}c-brN#4E;^GY(vkz-0r85JOXZP*ZHiq7;0zvmaSBm>1M?X?8p|m`ilTDx= z_hJyOC@-iD_iqm&)qW$8z2Au*G2DA=NmFBuZ~)M=yIW0tjS{p{9_|-0^s?r z^d%pp;Y^~%a;S*?>J{cJ-4C$(n?ZfxMDrb3m|KE%p0BQ=dIvFjgY(a}a?Q`qKbYTK zRCEN&8WKj}FOZ_SHS13|4^^$AKI^;$O{y<3XMKZVezvnOT|NTQ|tbz3G!i;>h70=ri zG4aGfS=lE*;l+9X0~b*3P&@>RT%LVTdz{8s`4LNE={x=V{oTz z@uCl1o&+4>*qP)z{7|iQ+Te})#PRqSJx&$G4*omHw53qbwaAudrk;Is?ts zKW_3G+I~XV!D}cxGk7E1rnWClQ7#vpFPA%N3GDi~5*rJ=I`#3iBXNC!cNfn#`Spil z*>1V+gs{_TCT=uC-U~)$G9a{f@c40)Mmvws7WZ^;*9NA91;o1J(bkUdBNaPKT_=q8 zzsHM~giDwvzUE&Jf!8gg5$9xvVU2r%P=9~a-TUM}xM1xBzgq%Q~0>1tqn1*Oks=?2W zz>&nPAg^xSY$47M#=78Y=Pz+vsxp~WmVNBEBpqs}Gi0 zeWNshQcms-joj;QF985+1;_a{gZB*pF*XVO;eqm!X;)u)KyIykwF@xU&6h9D8ZE9s z9#~fyG>hj8X6DNPAPZGo-Tm zu;0C0P5Ct~1SE9%D^_E7vhX)kZQ1XBpDm=9PKS40wtp6({EMIXytaDZs)LxgE#)4T zDg3&(mgJ&(6h&3_x(ZWOr@rc9AgoQGg%vdxpm}|A%}MXXd0GS`E_#$mzlp+ zLjXvB#MIH5(-4?7SEW997%0_<#T&>e_I71w{Ic(Y*I=b;v=Oj3{;Wogd&|y;0hdg& z^005}v3NR@hqocLr>p+dj*C0=9#f7{)iN)w-U&ap1#nMOlDrFdwK~*Zb1wMOlk&UD zkDo-8{kvbhX$^Vja!reUvoImd46m5{zQ(qp%6zZ%tJ*dek_KheTH4t?;w0k9j9G4e zQ+Bnj1`bvv0bs;{ntS|`aqFQp#@xtI^dGbcY*BHv$WX(Cj6A&X4;+SrewP8Jxb%>Y znNOeQw#MPs@?KF(3>H4Ks`GCAwab=c`E^hMez{lQkt80&^zW(6 zN{c_;Y_CU@z-lVXrnYYUDYT0)TPHVjy7|5mx(N25923)3*JQ(UW4L_+L5&tC>3UN` z2%v`e87j0T9baycN)vXc^bHVnwOZ7+ zU+xu4dsN`Bun;Ts0_s|YGQF@fBT^&ynJ^C>vg(|-E%L7XtIYl+|M zl+tTj!TjS&U;7?#inZjp1n6t%kb*MWGl-$OhVn;Gg)oK|igfhdP56S&ijN)-O>n^& zH8+R1h`7<`lg-scAcG3TDH`9cR8gnE_KZ3SfC6LAu0xJ?O;R>$upO$A#AHatPaWDR+psnHmp6gpN;jKthj+ZR@J_p*C z?sSuht?ls~O_<0L?}m4e6>{VN^rT3?ntr{^5{{X8?o-5GZVRRY-8}Nb(y)ruNqkJ%c>U!Mw9GX}ZkiN*alU#De)u{qVAhF{RL>|Iq=8|Y$cosw4> zRh&{DfJ64TzlJ`K`_SN<_VBNMk&g%(+?3u}=aACYo`uMl?t^M);PS^bF`-2_FT zW<&^~HUm|WA8GoE@vGtAWxcetyJI0kvq;{@rbV1QA~FdD?p!A`u?&qV)tvUPqc>3bCfd+71R3;2Wh)_yWYhe)1~yXLO)#ggsTFt z03ff?uADd}YS51ns`~76INJz!z~)jt@H1w(OGw7)q%bHK8f@; zQ$3GaYj<_*qwk1#p-;tYFF8Xllm3i2$4xIX$xJk%`y-ZOxT(Q$c>+Ou@cEwk8i&AJ zhK1&gkde{UNjcX?h-|8IseYQ;AaS{Ms~jznz+xhzz5)%6@jUzFDc9H5P4?WC}d(AN7iK~0|faBdupe)POFn~5aCOqS~Isg1e0^(x@{;G#8!c1sENFQo3zddQRG9XavDQFX@Fr932TNNmuy=Urs5* zFb%&$AEtjXuC$Ib8%M`uL@E7M?;qWKVmyyq%1YO014`Z~+s`#2HaTOqgTxTjuolK( zxrM8LxD6MGcs%pH|6L6Od1xJ_lsmAO_9`ZBoXfb!cQ58WG?P(?1JZqjQSKn27AEej&HU{dS)tW+JnQE}kX*toUcC0AMO)y+-aRojbb( zHx-STz*f(yy>gCy87xX0Elx@j_F!so+|$Fz+klAw0c)xUBLs#ec|V=xRpuIQW-p(< zUN#*!Hdfj6rO(2Y7<(r>GVf!uHxkbcNPX#@82G&MnyZhQa$3eT?d*J@3ocg^$4H3P zqFNX&Of#IL&CaD(r3$M|3tPqr4DTU|Ant+250wQ<=Xu- zmg&<`89GZq&;JO^;yc2sw$ydB2RKeUFDO7FO@c_K`69%rVLH1EYoIb z=)yL=H=ICsepaYHM00^=C^E8?j=LrF)QP-t&kj;t{M`l)`&&V4%-eT!HI9k_eFD4i zTtkaNAV(jj5W?Mh8RuW}CRBEIGY?v{9VPYd@8hryfiCH}6wVhXW&oDYAps4d)(-QI z!O~i>F}23p@oXlcH9o-MV`6-1!55c8yEAgH2C%mjqBZyX1(OR7Xge9BD_-ZYaZ1?k zF#|!4*Sa|%h;yIZD47$>8}U)t(7?G|&3?lm0^}{*onk0dcMlpT=k|3~4{NQn-E#chgrJb2LSN?=fo-jg%=UL_+M5-I&JIVP#f?rL z1c1@mQ1gY-FrCuQdBbvqs)ZYE6!|diLOLxvwGJ!K@%|*-8>^>Ve@o84DqONvdnyiA 
zA1*Rgri#Se%ARqj9ps>9a&nO~>8PjD!ow*8KRFcBE7ZmJuw3u7=&Il22gSjB)WbHBumS?)?Gr0!%X6AIvPYN(a} zZ|=evaX@Nc=#y~u=ko&G&+2?`7o`r_ZCt7>9O*sR<3Jas(o2tlpe^d+WTvFt5b!Kl z)2Z;8t|Hve^X#C-Yq@JN+4o1i-mCT!IfSd%k&pP4u%YvIca(jt+AMo23>R7m&aU=N zC8%sgxyHarKprMg8HXom$e;qGU!N1L7kL_Yu4AweBq(2^vZ#LO+n0sov^w$uwrgC& zzI0v{nc=)=nwC9jgezrjHqGpd4}Sd7a`1Edu~?4UEcou01TR8x5XZd+%D?tNW zsYgA3RMSEgvZqIW4ojk43G7?;yRIOx1(;$1ho!qjW#MubK2f3v`##Fn{qPflwH&*WS)_(OUPtKx zAPp!6AxtMSl~S#Eorj>Uet%%oGL@Dqc==65w7$`o6$2L!qa*DU+OR!U@2iVuZlgF- zKZqqjAZo-^sZoN6ldD+@7X_h!Ub|tCr}Z~!PJE8($$YsA`qQ2d z*&503=dJYNuemwKX<6CH7Qc`6Ybi)9Tl-@`zJ{6X z{RHW^E9mQQGALi!U4O-wF_RCywn~L8|66#;U-9WpfE6LvY85dM4GyG;Oj_rkK`4+i zI)h|zsYMIJu_WTX4N``eOG6&_MQfOe@@f!$oSBtbpqot$k1Jn1A& zugrk$+5^8MWv-QfO&>p?QFA?L09g~R1qXa+DH>qYeDMnag=pEVC6n5rM&}QCVQ&Kh#dN3TijBIVMol}&k{-%5OMBBeyLmIsdlBk^K*GB!)nN<)mv|h zT5evz9FEYH8jv_s$#8sOR((hJ5DHq|wTpWr5p4A?QjpRgoR)BhN(!qyX+3L}*%9jH zR6fNM#H^pF(Gsz39_2hKo6IG} z<;qUr`P3kOwEQF&_ajOvQxj31w0zWHb;06Z@@}2cy5a-4nZ-(Y^Qho=?+82sJ&cU? zbp6@0#L{~QM~}SCgze#6p8cru&;q_%--uqxZV0Smh7Ee_x#DJO;-*soY6&ID_WKnB z)_#;#N||3QC+OyQ0>Y4zkLopm{XC&^Hq=HfAsolQ*$#3xN>nZNiW1aI47#DNacs$w z)w+KR^WuYJo~~v#bEUB{>Hz21^#jSKlWai1@dVkQA9PZ*OoQsz^UI|?G$h&j!TMZH z9T#B3FD`ahTNAOobz_$jT(U}65qjcU#u_|yr!!RZP^i)UR98}6W7*!SkhFqt_n_6m z;Rh$Ep~Y#Oz|#IX<^kI9*aM{SHzw9H{a*EVP39nzJzvR&H-CzhIVeY}lq!;YSra@o zJe5Tr0Z@;cl+o-h5K%`qkSO_b02hk<#0W_KGlHS6o!rPG)A#LO@!F>eZC6 z?CA{58cYnf)d)r?*#x_3^gF7r{601q$LRD+uf2qtT;@L_sOn71OG)I^-uCGMdjDbO zd<1j+xKkKCQHC@6qmpk%V1{`LWV%5i=XTcZs{DO&$FEFfDYsVar#!G#0L+2?s;52! zB2oupr!NBBf12y6D!vr!Q2BZ831RqtSG~H{-gv7p?EqID?bnoOf`b*=fF9h)=$uG1 zn&=~%N5`sEvz(;LBrM&3ngP8vtG>w)n?F>1avC13KF*F{r?}OcORY$HNxpj#HCvF; zBEBKeyMEy-91b2u(ZPk{yI z?cX5_?iyk)SyHgW|KE{g|J6K#=lHKMa&06Kl&0u=;-x*>QcgG6&fu#5P?r@9iSkjR zU<~x~{(tylb|%~T&!kDZh6UD0!g_IUI6s8|`H(N?g*k11H!C5;X6uD^s}Gyh8%aq` zN$|W|$gXxXUq12zXdpX1xj2(|f^>K#!?Mn)Mh>K#A|#G_9Z0muM#`?pG8czLHYLO+ z-uQ4@0nGHbzoSS#X5!J#eiDDaa77C%TIfb5T~aH*9Lq!0fP;)F$xm9tr@+ z)Jq(GTd+Q%__ZFZFXA>h zi+wPAKxuXR(r}d{+jbyT(wWSmar>@*E@S(?T33ZkW|7B4<|q#?#dg+3=VI0 z2xn*UtQ(i+I;EuY?u(Z|^2_PfE|L#wI`WSJ4rv=z4f)d5r6dKerEM0dZj+-s|Jv%#h!ii3iIUx ze7|L&Tkm!>aqsT7W6d8ztx)cEnb4_9Uwx(bR%ySwJaCPf`lNSl7!Lo@AppmfXc-m* z6IXVNGiO!=gXKp;fReh!BK1W$jUhp9^k@G!&FZRj+ho<1KwnEUY;A9HYHf8emE z0Gmik>e#>ibPa$IY>DBuf)5MsrF!BS!7p3;?_omo2IPTmC-A-l-vJrZ$vYoqs6{%M zs0!Yc$REJ<5$<1Pq?(q2ahYpOhtA*^=DN7Uv~mn6q88v*|C= zvFN7*7LgO=>KA!qwnZ|LSfkC%vB?ktvYg#+;o=_v7$RULr@*o{b25v1M4;$&M>9&$ zsejO2!2#s@^AIQ$ft3eP)9n_5nm_O$5e?F@E0>tV;b^VqdlnB8Aw6-GXeZG6AV@q} z*bl0}FM{ySZza2&j6-60OPwSE)8%=IB#B*U4j^L0j1Q-0ga$>0!woLtRj>MPL89#x zG&InryFY8SikCtg_wHV-pVm$iwxu6bU6yD(fW-QDP3ymGf|>H??oH0U+M03aBdaJQ z(4h2SOT5A^Q|@F~s&dldRsgDroBCJ@YQ$1txut-%p_GTrJg7n{D4B98J9az>>}-kG z!F_Prt@~O|kh}Z^{{1T80AS`<>;~6JHAMi+6LADku)7b4za#*hh?itkk0RG(1M-9j z3u+xwO*8jJQ=4yGzicTyA@P2oJYX7XBvf0Ya}z>?LMyE!^7y2 zR?s-~uYteu!=yC;pm>!lzTrnXsCz8SrX2qjoChqCQFqU;BV%i-gXyK%sL6*XoI9Kk_Z}DBqSIVT%SEJ)U&@p`ty>1~Qi?lUt!&RzgB$G| zXL+=KS9ZgzzyFrPz4*?@EU!cwVvCu+%I)o>g0_J5=Ne}zuG~uqP_z_;u8<;thhud1 zX+dW1>5ETrQ-V=N0zOs!ytv14!PV);U$?Sydq?C(AX%SvKyQ@Nuyv0)!lCEXr)5{%TDCsd$H79PL#x>>k8T2+!1rU?Yp zP)jBSCA8LGJ|88$wU~0;A`gBlcElkza-%NPePiuxnmkI21QNU;PtUC)%du5&Ortb3FhtE(`e-4zKw^6DBP9 zrDV?(n+(>|5F-s?fj{lk9w-K#Vp`ZLvKksuv*TOkGU`x_?#XKOOnrYIZmCj1L>; z*g9(nu6VCvti32xjcF8oCYebn^`>EQOMcu0nv5JOEgj>4;#g&{#2pAlHgMRwL-Ocy4aA?8o(IH(m@tv_?arrD)c|8Tx@)B6G{trfH&t4m>P+3XnYK#Ai8;T{0(bmLJ=VJ z&sd4)Y6Km&7#B5hC&oU&qT~07Aj}Np9}GHUewP&ZNjK7f};({dm#*;-}e9^o~dRX_@Ha5yRd0 z$A-R78B5dBmSw2NKj6D`_w2@swGd2~#LFy;5~7H|z%1u96`Dsx6u$P$Ic+!7<{ErK z-<{-~Z8uHv$2*c15;5mxAPZuCbuub`-*Aq>u&(;bbJ@Y0Q|Mx6H@kc0#&?D<%zP1& 
zhJotjPFf%8R>&@RGZl?LTT{(1V3Sk2!*NbaPe+cR5|7$vQ_}lFJ~py1kRt5Aw4ug; z$l|e-a%o7k;T`4a{j$ErwvEIIV_kn7I>)WECV0!4`B3m@(B-Aa`J^fpSTd zri+$=#$kR-O~Dwj3$ILi;Gjaaj$gm_2}c!{{|JJr7{FEAww)0^1tk+DD2?xww-)+a z6`<|o907UtRYLz@7IizXgc^A)a+9>rM%c~BWyjjQk20iKAeBxRmuQ^Vhb`asdZPx+ zV+iqkMOgN7o($)$$1_rd0+^?_XJ4NzmrC9C=8{`ytKX2tI;e4c()^h0^ZgLWNKi5% z?0c3QyeUrYepL^3H@a(EoSS%6>qO|`kuyN<4Rf;DRy;-T2jgO+i33=00+fWkn{~BM zi5om=`UkRug4aN_(knE#X~epMm;Y2U**>;xhK$*$r@L~e(#VE(pzD(idQe>*9@q3B z23GO-#@!U5KL-<4Z>P5vjv0?MUm6*#_p#9 zTl#(QYfu&&U+HStmLsz|0Rr}iba}2Vb+?i@nx;k1q3;Ae%adrmt}~psxl2@7goXCA z)%WgunGw2=-iWat;{*~TimD8#ejDxPUF&~4077p^MEyB860+X#xoc>tBn|e!Cxr2-8~|DW5I26$ST|}Ue?Z^5%=(MQ(fBfq-$k(20SR7< z41|vI8=-FTFS#BS5!$J4Z0aK;UPI=7#`Ik#gLqzsAOsxBN}K7kzX zB%KwFy3()uH2+ez#&hpCca{lc{BvL-67~2!L3w|9*Kf`mFapjlZb}@nxxV76m)o{- z_sWmZXAA=tv=qLR@-P6(1kYpUO7M<7D87(e!dtr1_*X2TANSb{DQfGUOs%GuGItC= zr*1Dk?UdE~2w9Fz5fuAW-pd7#r;_-mlJF{sXP~7&r8S07fz3h!FDVIGjuh6Z2on&x zNzJ)?XMvKWh3EB7ThO$dYSrZz<*!QuIkVeQl3CmAg+Vm%Y-{cVXp8#JDd)?L5I#kxjy9E4j%udrl^U$Hn1jaq9#NiwYA zm~V`r$cqocj;Td8cg-B&FD5u{35uM{dwq+oSi&jw6Lqy|E;>GaDXRiKwcT4&Ahew2 z8n}#I2@XcKetC24NP5iC^O|SQpNE}WXJqxilhyl1?_w;J@7rq%k}f%?gMk4?g!9f+*^%!iZj|N1lP^^wv9kwI+01L-QMzs zKo&e|q_MeW>0>GWI0`m&6g7tO$765f^8?4F${eVr`_7Z`C`qn3^!g_-cD?%P0gd-) z$X*LHnEb%AmzUbQ4;xyfmWgkY`6wX?aV{@VY5i?+18z{>+Z{^p1ED11P7Wxs?Jd)^JKBzAfY~G}`gb~;v zGWX;U7%Og|u5b`3s#ezM^#CVXypXp1kTV``y6S8Ofr#HjI!$Y|bvIZq)e|RPZR5rx z$B@oE*Z;cAW)z05>51X4x#t(KEb^9v;7RNccX28H2db9D4xO9!d3L9AKftgNPv&7z zNt3Pu7StB*X)wqpTpo*3%#u$z#_ux^t;?u1Uu&svNFuX<2UQ5(|_a&DiZwyR+ z+gfdMEz<^nX(8g=exFw{Z`-;@M{q+tT6(U2NkcT!Yk(ZMywxnn!6dG1e(Pn~ZjA z`0y3{lRKOk`t4ch4M@W*6!uUgdL=#8p~vER$;Ao<$u}eGqZ~B(5t9B)uFj}J_VZ$A zjY~76CsK4s<4}`0F-%?6S|s_pD9%nb7&Wah>p$yn-V73jqQ`=GdCPA!R3_|?nz58q zMUiu=mxpGH%-DY_yH61pJ7`s*?hrs=S&I~G}^sMCbZY zA~ejl*=Pc3V8UN!Z8{%rt|tGPcJ`CMII=V)WAq+5L^i&_51LGqIo8@p8_6#J{L2C* zATPItyjJq3k*qN~EcGH%#L{h9&4Xl!Rfub3DchVFqtk4X;OgTlm28CJUy{**h_$g0 zak0xCauB!RvzM~IA&$Y0ig?apP>zvfJNBDRgN@xAB%1D6vu2biP;{` zQIW!o%F=a5zA`!^_?DWp5^JS)4;N2TC0mGYa@F6W*qu_rYEMNLGS^!0)5z!EDsKJg9!5arx-KE@Zn~@9=HT zqvy$26^P739}d4~zwRLLD#6A7#Cp89h4;uH;`5=K$4pEc1caYP8WB88vm0%*NNQx5 zS}(Q470pIjN_fuwJ~na4l)i{@w)c6uhgu$L$?y1;0dkKu)RtIG`gKCwtl;J30cZot zV3YcRMO;DaT}O_Z+d|^<-cQxwM%qNUwTS)7TdK$r(_MAWgg52nhpqipn?nbI^QsHI zZR;nfWwk^}3`Z~8f>^%(&SR^?12jbP4O>g71498Hm|ZYrUcZDk*}8qe*Elgmx?z?N zH6&64H13(wzyncL0r9U~L2Wro*Y5X7@aU41Q@`Ysq>xL9+{mc;(}P>4r(uH=7(MEf z36vA`5^VEaz4E9>c)F<>5A&8>7P@N;Li6d|-(@f$t>rCvu2Egp6?jQ%2@NVAeU`&U zr+>VsK6Jo&ig~Tp?bDuaLzY|jIjN5KaC9CL2-`-KnTaIWv7%Ikqd9pAud>;|v#4}g zg*`I3Xs!oc1GC$Q6ht{e2g8vk>#hWEx?}??5nnkWH z3?E_h%@`F({p_RiqPsiKcAD)Rn-z3;%_G-Wf!)`j@KgWebm2Fp+JCRTCD1J){!z%z z5$xH(-U%UqJzFeI-PDdShZ0RyjtDx1gjZgzl4I zcU_N#tHGf|xbAINB@tYo3+=k8!f8SMIxXaCz{_PW2(BrB#(1?BS(v-oiJ*O+mp5zCX+Mj|mgnmFu!cJo|xW8b-8rg6HowA4}w z(DB{u3tMuOYppXm8=uqXu~-SIV%d&H+m8DDKnnoe7H^`B+-4rOkNfA8zIyPk{7yseaf;S-+1HTw zr6cFZRFB~KT|izcM>eazjkzP%?(g}7JaI4bNb*;jO0H#U-D*|S)E7y%fCyWlufW{P zMI^)_x{GU>%HK>cn<;?hNCtlE8%{w!8!uJFB(G>X1r`uVF~fEviPT!=xJYkpf%2bc z<*t_(m3fHw^!4Z(dBhOCy$woDh05Kn@vjD=w)eY6S;ED7u?RRyISM7w+^_7-@?~l^ zTBJzxsjKuEN9CVQVGxWIe!qy<7HfzgrYYFW<(#RC#Z=A?d4t+WFFj}2yhD8xiyRzp zm^>)0(LcR8*V7&d{iIxl{1b!(ll{X{%0=V5Ui;tXPf*Ry5@aPABfm0`7N}YtxiC?cf;?4ATUJGXJt^%8)&J_X z(OXk|2N5j=)vwIWR%!kD=CO8#ME56*s^BCxB|^m{t=vc(MuOKjf;9>QnNBiKb*XM2 zuevj-Gm&++i?Vh3ZPwO#UrK&%R#m*ec6@Ai5w?6iIyP#Cs43ggtsLK!jE&?a5?W<` ztO(lfcPH*T>BPt0uO>oijo$@DM2At2KhzeI+NOlit@EC4p2=q$YHz`;G_}&0Kq4py zTb=$p4D1SO89)a7ho$9ie$j7n8h>(v`aGOcH77Hw1)H~hJM`j;c|-8gx>fb0GqySL z&K50SPmvyET)mXw*X8v^uAi0iX8`3UZG-Bu~F&G#kN)344;93ez> 
z^g#T3efeq`<9D@Yg*Th^8VDJ!IUmfRO@|zOxokPqHg7ooOmoeSd}6-hz#-Dnc~6|o z?+zJJ$jw1&nj$ND6iM;)B)g_a_a@&u%H|ZxXmXiPYj|zL-20RAyiYIMFyt^+;sYNV zZ3evrN1?L6rHi%jY;O465V%z~s&s_rgVy`De8o0hVvM6La1|?jBik-Es^6m{iLr`(~oTS>|l3hR1Q|O)D^xe+I-|~RM zKkayG>p0ARaPW#vvU8sTONwRlr!h-w3ICs6(m@SdN5=h+*FP|cmzjgY8Z^FJp{msB zbM%q%3WCj&#fYn7#fT!E+qa7;)ts4<^9R_}@>KcS_$*^&I*!%G3iVkR5i(WL8bH;-}Ef?T>4G zNH=?W;lT!``9wQbf|CX$uQ6o%9DzvgoaH1^az_R^DOXOCNa(Jh!9VGBf=O_ULjYlw zE;1&WS&DbBXng=jeQfSU6y|}d>sFhNz}BlDUM?0r+XqK+u0WsGcaK=k+>dC~Kds@^ z6RTe86C`_WC{p8be4rO+nIy+bYLHPv^E3&sIFP1Gh>eBRz4vfwUqJd%n5HPMcZHsE8sFQqr4slG z>;?&=l^D7ScrOaeT<+%~bmilvhtcOOw`>JDR>wT^7(6D9*XdxpBX4q=2tkVcRRaId*0lBHB z-OBX`_88HhTbA%T{=SGA2tHf{C-v=S{WY>hOTGUB{MMdW@vH$@i)QdPSa$-+Lh)YD z!0&t*X;KdyzHk~_m+J-%8on{9IGjqS)mtbT3X?8CrDg25FF?$nqt0u=zRnUDFh9)r zYs*=-v1b<<=aooKM1sY0)O(f=B7v9)saSWTbvbYQY^|i<D zsj43Y7n%PA*PhseTlEq>Ds$sh{jD`Sb(%Y@Y3MgS&lX6nMI^XgwPS8H=hZtcI1{rj4O{KCM@osGPwpCzrM2Y9* zp_Adk6QDTb%ecsjtQGnyBz-PHY=FZXK$>|j$9V}NSwShC>sH+o_Wh%h9Tsi>&-34gN)8o|dcH`>KHXl&=zK`hm)J&ebE}R1* z<=xbbj=gKT1h*8!Tek)$C%-e`%*;T~o0LxDyXXU8Y2}06@?I*T4Qgt>2+8P2cx*IG zS_`lH{y@Of;=@G_BR4{FtiOkUGJqH^kThE5qIeRrXBTJsF^Lr4RgKQnvuM>LzwR)5 z|A!}SRZ1PBDi9NgzboZF*8o!D^H;U&0%nH4@ZuSQ+x&eMna2c>K*)QxJcXw$fuobI z;G;f9y6l)H8}kztf|6cmaK%N19eF+2`wwd4d5v0I;`$29Q{Ou zFVZO&1WU06-wmyLaVy!cl%?Z>!yOp(B^(95Pe`+ds;ZJdf#~M!yidWtB&qN>UX#3g zyfe?90;{XhH)P!Iq=&7q!OK8|Fn5mM^y3sQ=R9in58zF_evEn}+qhhGfJ6Swu{UNP zqA2hriwEI3xetVQb532VCJ9sYl|p!U{j4L=Lcf%TQ`Aj*p5ysvM$FK`rQh8 zN%)f{a=r?LK5`qr^v2r^M2IE-)UMaOz_|FpG3%iJ`Qd{Lq~j*I?>bt>H|=$L@kL^~ zCex$wedN5xd}CVV^Acy-2uGD8|M-$1MYzU$kMG7Mc%Yxq@hCr*&Oi~-*o#%qra!iC zsf3oeP%5PVMgO!O7?g6K;7CdG{1klb%v!qN+HV?|sVB&|$Sf8~2`zz9-2Z(?D1Ah+ zBXMA#iZ33n1OH%M%k*Q>2+xqIa-V(%SNCuGHwm4Zn!0YK&ECiJkcnjLqI+cAE~y0I z>3=lOe>_ApM@;I{gv{ZW7?#W9cDA{k1UIIC%MyLQGKdVZj5%}<`A(R#ccy_Z7LF=e z`Ff^gn~UJq>lRGK;s2r5i`r;#c6*%qJ1C1uMux~k-0AEazaBYWlSs*_ENVO3#df>`Ik@;3_N(eOY~g;>K9+@d$hgG z=ufK_eQ06PHrB-gGMfQ!s}iwk78WFg&?w30f&h4)HbFQ& z6=W``hG9`)B6Zvwi==?Cw~U5Og~W5ag#NwZ*IF>I^kkrS;Cimut31y(qZ6y{dR!D8 zF`57m_L~?2w^lWP@~m}@_QsjnGDAwvXvEs8xiJioY+1Q=it~tfZQf-5;rG|u*j(W# z`6|nd%XU6@)bB#WNv0t037(toZrYjMR!8dFrB8t3S??kVtJ* zFC4EduOJK+K2lmrt4c7}?n_?i zQliqEkU)-^C4ClM>cK~qII;z))wnLPvY++~+KGv3FP(}v6(`a}#W@HGU#-3O6$#Z| z(u%YGmQbEeJLx+s_?zh7ZX8cD&;HV4leWPXt=~IvQ&+rj?wr`GdRQ3kRp1)r*`Didn z6r0ul+B0DeC_u?d{*Jym-%b{Qq3lCEM_e^#YSSA~FP~;K!Z5+|;j4|W%wR4I-6R|d zCj>+t_+*2=#z4R7rB`*}kZMY?3l{q|lI5H>?I+s?7}}d!&a%cRsj5l&$oBmjvkt6fI^f&7O;G^^ zV7&SK?Jh0YmoAtaY#r+(m7Y+i5Ro@$Pvip9^LM*Vncwk@0mTyV0hQq5@Bx?f(j4lB zSgSK6o+QDX4VwWTbDJ#mai2SthZxNCx8Vrd9^UX^%18B!bz8aLwk+7Pvb9HlTxIA} zzuxm$BJCRWtY(ugu5zhL82Rq%EZRMtAhtK6yUH`%Z6@^geM7^?&AGL4xZvzd(CnIbBJyjPlvPkHTEs5=HkgM zQ~o0v9f4}B+)ggUqpR0&Bmm0D%x;scYAIYEBkKddlP3PV1ex^;a|lAir`-agDhv^8-8hoI8&NjL z4f{#7oTqmgk$QMWu&4bojY4^ETI$9vUAAC)kcP-*i8$(ng+wnS?3TmU&QFf#U)&ij z&vnDLw8mRoBZN#K*7PJ>^hAyG6dct6no>U=R6=`qHXfT}uuB!V5E{g~=#})h!;YTx zMi}lFFz5XI4CK#EC*cube9*mX?2|n}Tf1fI(a*Xp$q8PH;8^O{ z%C*?v(iih5W{=EfJrQbh`cx?<31!|Nu@zctBoNX&9-?J@zd4=q{W@oP)M`rD6?VQv zeAp|ghur4a1ZN;IQxQ%OQAj`Q@cNl%7sHUCJaoVXILh_l29C1%usVwjZp`kJuc5g& zr?m1v`SaYru_hcq2=y4F^~l7Z@kSe{`Q3uQWOmsN^Nf|~D{c!8x+(scu{&{cC@N?e z2%{|B$ye~wcB z#By;gHB(#SQe)JQ`0S_R96$x9x(L+fX#Ak1X?41KRsQJ@I4Ur`)oA?riS`K@NIDWV zl1KsK?}$vSW_=mRnwmwO751$mO8D*-;|(>P` z(XTWUEFPk|6O+McOIMIW+H_dW`yeSIHRv=R7qo43w~R@@M2VI z27SPs9BJsMy>7WUS9;2L7R>B;|?&;)vuywfB zbd~ALNL9wYRd*Z!VJypVkxM#20oE6zuo@+RQ84hA4K(KHQ>mMKI6vuTVP>ff<9BhS zo<9;)tB>^+B_0n0yxN9^&;2+lowO1^*Rb2wu+>|+S8cZ1*p$$qP5XQY>!~LG-uq<| zHoj2$>L|e;-FABV1UEgg-9_$G>aqUEQ>5cxSD#JlmpC!igmDm>dXi9SVXicfq~&r{ 
ze1NmQ019XaJKejmlHoutgE9Yns4}`9e5Y&lZ}jpLU_8Q70Gc0838#c|FH<=yCA;^I zQ}_h|Yc?!Bnx`klDX<7eEb}t3i@zJ8YvdQy6?S3j5K-Kldi0y;&gMBwm5>s?6Yv05WOYs01FQ95C6QBvlhUfe0B_IR0DZ!p*yk zXKp+bF}t%>$uMdc3NRsO2ZUi(NjPQV9c{iaQZNSCMM{FmKlF1A_|tWpi_{WlHf>rQ zKn%^ad`|soxgI|oc0K3k$NumC#iwF41&#c^*?CxV5{6S={flVW$m#77Mu^Y4Ce914 z0bH#&Kavv2!Y5R9{a?!t8%I#WWQz%#te}3qcWsY+7PF`eexu)1yz2mlNo-Aqxq)(^`sd za#9Ls;rnm=ZB;De!;JY}fbm9Uyl3xacY6Hv83wr9K#-``Q?<>&Ua-c-o=*6B#truA zGhe#;`2V4*3@}71Y9LTnDKrR8A0SFSTLnHC^H^$(f98Jzi&4&CB{WebMLDS7SLWNF zeBpe1a#!JM(8(War`Xp`l3+St-Wq@kvtOm@Uyh55+d&`f@avY0CGpyD$mJ|d%CO^A zJNi^JYWV)Pj!`}CEjVOkA=*thdL^q*r&yFRgJe}Qiu6~5?yV$@<&S2aN0!H}w#xr_ z2;sjI$VjU7HeX3Q;-ZVdFH<0`WsA8by*Lj~vI7X+@txiqO7r07_sx7@k&&Y@5#X4v zn4iBwz~Ap%hjE4(ozDo{vpsoc-8lcS%P_dp~fz#9cZs^<1+kOem)avzp(>8Dj;Hz{dKxOG> z#8*Udsg|p0bIsq8Dq8sNF>Zp{B`0k|#wW0drBE^Rk#=*rv=N=94DH8u7BKqN*m4?x zqkATNYGzOQj@ahIys%#6&U635A3}_s%<#IUG5-rVh-?Tg5yYR6ac-V`yY--I+q>rj zO*V-dvPQz4Er&%?G6tZce-$$R(%hesfko{~pf{nzsB1CPEPMXkOrws*kqNUd@f0F< zaEHOiwwHP##h;O6d$_?GM&4(bCxJ*WH7ih(j?pE*I|68r*OAltEB@A}ngIRi`ULd~ z)F}yL-5?YD%33g!16%a%q`+qM{x(1}{%C~jpHffG&x#Q^P@xCIB3f3lIg^}OO=0dt zfg*)@mw;oa*QX&D7)f7Yo>;FsE2ia4TiMU*{>Kh18d=3cK*$FlrZL@}wWhRO4&C!ElTc0P4%Wxi)&59r*l^&|L{>Fw^dI2A z-zUmQtxo*kl`##59+)EaTg%efxtj_!2O|pmp4}I1^63SYHIDMX^+-zB^7d*18l6Ea z2|yC_9;r&Va)@>+ahinwRi{=s(+fxCqC7O1;HZSz4=z2Gy$$%oCwk|%M_d-Qo%m^$ zXFRD4gHz?+REg5^WKkl>EA@8W+N_^a_Y}$BO~0qDZMl#NX)+y!HiKZ5P%BJ})Exsic|iKL;*->wIvZ7H`$ z@hvhkkrd_?1kwijopL4&T=WrrW%Knq;{D8TzU;f~xZ?8jGz~s~x@H}W( z9o()0Yp6>fZ5UZuTwKzHtQ}HTnHa>3C&Wjd=~$%cwg2nP96LjxYlk8<{@(_6&bk0u zjOXlYutotA0z2fY<9X%vyP;cZ+Asmj<~x1Y10r)i90#do3in(^a{Pz-fk`+2V`!061o3N z&S))~y7!Nkbbzsc65`1DJe{yT(H;%Tll-@#;4CCs)K39MQUWNojL4a%5$5Cwilnds zJmipI;dhBX<+hkTv8{wqwS z4FIiRfsA|jOU2ycYB2jNJz84F5{5Yb<&{y-v}m94)cj&1QRf-`*OB*A|DS~ux`d@| z2m8+>S?mNJS9!$T^N_G->XyF*T-KuPd=g7SZgoss#`b>b0>&2pomfPH{w`y??F<0k z@v1q~Mwd^Lyq`B!W>I;Ll!5WTXAHV@)}442j`HH<>Pk~oaC7#ssu#a7vkBq=C@KGG zlCbgps#n+RG!V6d+~6p&^8o++EBVf}s8>MTjvz>JIBASWio-o3Lg9i~IAz+E;dHw6 z{}Q;p5t56#EwNZ0X3|lvLO0=z!FJe9(7lX(T|%J5g^h^+&W{2Q;V1=1PFL<5<&isl zn}bA4I&L70c>`gr<=D1q11GE;IuG<5Y=QC!z!e?-(9lsh?fZ#=ET zP5dIH=Sk$zQ>3XLZ=BSWo>UYip7NO{6;8=t@+||sd5M9fDv>jus~?a4@5S025&ZNf zJp3{cM1P@={n5X1%)bLSz$t$4Xu~F$05e6xgJ1n;G**a*tGk@|UsBeei1&X%gy#T_ z1OmXXj!W3fe|heon7{IOH0|Ssfk#Hk0kd9D%>s8ir*;xmm#gxz&+s*-XMJpKASyQgD z>G8$Rz4USj=xG41?G zSai`O$=SiEe*3XXO8>-ZDr@*8kd1zm+eK}F*XiA1M@HS!)1uW>`HZd9c5R&PwY&ec zZQ3MZ`J!*jhftlT+cmk0aqwWo_?cX(%>b2ECSoYXp6B%&muTdTnXMt8Nq+Anx$MdK zkv!q6i|x)H_a0$iR_$4`Z6s&AtQ*zuJXS%egXUN_7S>|g37%~EFD83tH=PyztW?;u zT+vlkrRZuS&QiJQ^A&lzGkqIALDRO{n>K8GYF?NY4NwI4R2+Q|DG6E7>Z#w9ySo8S zDmItQ6J+36y|U6g5uc}GG-hGdfud%?QOUu78le#v0N?kls!+SH#YXV`IBgQE&ktnH z0HIbm|Mus*Kr-8@VE|A=jttQJLyX@-T6_4b#2YKOFQlpRNc6j*i>j9QQNp~ideCNf zYfi~*NmY;lLo(%~#cl&Rrz*%*8|^F~CB3oql(Muc_Cql{C;p>bM~qgf*XNa3r? 
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/docs/dli/sqlreference/en-us_image_0206796795.png b/docs/dli/sqlreference/en-us_image_0206796795.png
new file mode 100644
index 0000000000000000000000000000000000000000..9d0e1a889a75897003276a14e77053e2b0ec127c
GIT binary patch
literal 34286

[base85-encoded binary image data omitted]
zNAqg0H{9o_>fuyUM$#!r_I5fQ7Y|d2J$H9Y-_F!Jn%rt@+XpA%8N>}_;l#wm)>}>K z#6K6_+5^yt{*?r*f1nYUYdsK^VGjn9$+$rk#`#B)<}7hZ@cgNs6ygW2Nxem-i(QY3 z{XOm@qmEoko@BVkEm2_^X37=gSuFr{NGv;lsyNyzF6tGvaOYnu>l%6+>cF{&`cO&P z@E6C!FPkIr=@pE4WoPbgRSphNk7MzyaO7gD(S1^S68&Y6r2E>c;-%oQyfozRq}EjZ z09H%u(aVJ;GI_J>ZW%z%UPwZXQVlTWSh*69e|C)`sN$|20+88$N93l<>haVkBjKZ| zu`+R|BZh9m|2#;*NWo!x6ZH`w5&A^%w#6*f^D~|Q89&`Whk(o%8LHrc)$X>bq}%W1 z9@|V#ybw}?l;%w)Jht(vh0X3wgs%A}nltPEe2W;*hAp3wSuI7uR7OST{>?F*mjEy_qDX2e0ke!HSx1EryG<8L76GqfggV{ z-MaOlX-I5zvVQmX;J}c$fXnLK&(3z|d^U2^-avp~WRDp8_j1cakt>c^GTwm08;w9f z3E~Lrt?1$y0@BEK8he+UVJw^W4i5v{noD?j(!r?km&sqHgr>5TEhOh1D@c6+X88s*#o}N508Y7 zTfJFYR>dl!X!!2;ejSjX4QimsO6tBUwJK2<52bAwH?G0Iy4{sYv&xJpePpivq1^S_q%tAnV+k7Z@8MrZyG2FLuS=GFIE znxF`J&sFS*eaB%p!DX%B$nGNfpu%P4I~G1I6+i5yPVQVRmo_)T;@9t_hl(q=yE#hkMbv5^ z2m<7hN9fC{q!9j*meeVgK3ejpN~||B{(aWyLHyBrWm{T{MeH>Z+~8=IN~F+dg;Q*E zPQ3a!CDWj!-gKhUyhY47CLEuI@R7_=sINga>-TR*#*Q7D+)4e@2bBML>f%`tgL}z& zfH56K_j|5ml94=cO0(NZof8Frc(0-v{ zrFu!aIHfrD-n9Z086qwqrj{m|uX(`s=qt$^8-C=?&RK+I?1tPnc-R$3Vt zioG$9@XO@@F_8Vxc_*(={?0*aUY~WWjb#G;)V~fIiBRw&2eY`57k9rPepVjRwLMjS zs?tkC=X1p#ilc}k&#jHbyrzdH-W|*IUv%Z15@YXGc|CCD3Xg~CUV|1|J0*YW#pNOR80K{E+Hx{LqJwTlwxYD{KN1*86oEUyF+#C zR!_cnA5SMl7xOft%C&2#WNyPZwFBWI|MObNKp|%0d8>#}`)(dTD;c@SHj&!fBpEKi z-fQ=QThhKPK6Tf)+w+k&?-DgfH77z#ohSxg^0mF}-y-f25MFZ)5kA8eJALw#gjF_$K)#J4aV@pTzNEdnos~G=v?mQ9)vf#E!cOt6M{ve4}X++ZP zw{k7FgSW(=Ohhf=qK=S)P34P!eM7dO7aRCqA|5B#L66k8+{QrIe8RnCu5R-BhLpoKC!#Wef zi+Kf5>(h4=sTH-gTW7NclM@rDl<5nTtWYPJB`z-)^182$GDz@_r%`9-*?P0~_|Mgt9_4Hn;OFlV4tobLeSg(tXj|?9~ zgDMy8@^U*hb1=qP8nMLCRf8pKgM+{J=Brj?SF8+7Z2duu=?ysrIM5wiZb z66Df|r|dJ&03Vgjg1iCEy6?AX77|&)J7NAj23~9oaBqDX+Cu9?uakd4lQ-U)r;RqW zwZ#^lB*_`HNnh1IK@C80;!FSI}W&_^~pv7prtwHTkr|`bPC0Iz7c<=7o=lY|gTMIVh zbu~pk{W>f0-HrK&_&ZJ8gDZipXL*zH2fB5qo9|pPlI8)$*TnsJX11WN?05(3{laFe zKx4@_a~-s&_o>&KxeU5}YL125WKa8Ux>Ja5yYFeJ59Yt;nmU={o>%S(w@2OAUKO|eN&2ROot!}Rf^$R;P<22+i-AM zd!VYQ(DwzW;e$3hx8h8IG3LRx;$O24Tb1*!&!K0fqc>+fz5k0o%nd(uKO@~%9{FZ} zWpJ3@LD#0^W{ET(YI-}jZrN7^a5{SITcr&=ArXpNK}|c?QRa5_GT+DD(7%)Wf`D-Q zcX7nW;vD&wvcz>Hs`*9?oEDpy5!99e_3(UHcrS{8pz6%fvx*)K-nn67k1C~57gU5# zOelS{nNV!Zo6`daMpewbt;(k$7d{5wE{<(-);T6Q%#QFHW$0!=@pP%X7Gh)4kd-LV zsQVs0LZ)alg<$B$Uwr{n-1enQiODwzv-BzCBj3|<#Wn^C6%?E|Dc8ER;TI7fbY0aw z9yRAHqYYh*V!Qjmn^Z{KD{q`eL}4~!IOaObpB?1h+AR2u4wPGAE|&*YjxKy>%J{8& zHmCcc#gW)_e=N|@$8Z^<2Q6VDUNp0372)Oi^*e|zloYP}AWDJ(WcxD2iIREp z))6F-R9{=ITGL3Y{znP(1pMj%x9PF6MWGFFd_o~=N>HP7s{8NeI!jOm6^0(E0 zSIU1HO^x^`%s}R0&l7+S4+gsY*$X0mZFJT{G|n>$inS;dfYhD5k~o!e{iF%S-IWSB z^{2&B+CnH5S{Gu-?sUf;z6P;~z=?h@qkb>Z@E&DTLIJrg0og=m$}P^{f@Eh8s*usa z`r1tG_zTs!Hlemd<%a;5%xxOf0_t6|6O&HuBPT${FXA*~fA>$aH$MSsiJ;d*Zn1*| z_bS&z0HfQw=b2}fAK9UHhh}!si{&4M*B<68DD+jlYk2F>lWBsA8 zR=6h~n@#C3*4VN2F`3Ts@Z>Xc3vY5vPJEUaxI0L)IhQUq>)&hjty{|&2`OLJmQFr; zQHWku!tn(*MP#E~Z+M1baMVu070zrUJtru1CTOoz*Q`wRcqR+_eI^@vIw35IrHvP0 zSH4TNFVTEW3AX=3Qg&5Ul8Rd%v0s*VUPQe6a?{_CJsURF0!B8!zrVv5qW8XtGc=}+ zZ2uA}OnM^{(1FUA%-IU{hMXTUwbin1Z`zJ!Kfe=jp-1@M#IuM-GKHkSg7kl-6&enw zNh6|mOF`3_ejRZE>#hPm(BW3b*>3%ao)o$Wm9w=Ueo|wZ3QbE`S-o$%+{Cm%am~Y; zsEsmkVMNtSu#eLP2Ytiu?u1}p*d1oF*!yP4Qd7xh<@rC*21p)mB~8t*Jh$ z@~`J1B$~3PQkXBKYUZlaad^vdbbZ!BHy+&b$>C21MJ$xhYuY6$csW%_i{eiIGpI%<5;MdTXl~Z$MPv-IV)mv~6L=_>aQQc9hOW zD^1!hnn-$wQb&F&D83Uv35vO<&!~~Rsb`O z@=y*#Nzo{oozQnM1g>poVHE&!XK##_-46CFv)Fpcw&f>{jyixrq)|SYX^z2X)}TmM z-h?u$Zp}<#1Sk(%c+bx|&LC&;hH_Ayy*9RPlT(+v|GdM89Je6x+QqAvLA5uggsGpX$V$l7HxS60OjBM7&M#AV3C$Bi*@FkR(KJmuf?Hu3<3RDMtJae|z&3N*fs~>ALpnu&q;Kx`kGdOu>Z-Rx$!l=EyQC3TBQeh_ zAjUO2_is`qy&n<5otJyDhMxK2DVBZEth5Z(w!hU!Jk15BDSlFe7gFuIM=l5fl+RS+ 
z!9LGE#cz6Fw8)7uUV!H%+kfm#<=QquQ>5oE`3^1kHjr5>qOmSAJLoI9&eBV zYR;N01PglA->=X_XFNuKll63)78j+84*#$k#M!#XLh?n7r>BU^KH^ z#}89$>7miys9-3l=Lcdbe>q={HvZ}j2e zn+8{O?hraH`u6PW@`%k17%zJKb3=uQUcb7xfjevdq*QU8xz6@D*Rg`<&f@sOV>#!4 zFw(}rd9R?$5{W4L&NqG}7O-WJ0Tr)T0Cpc;8+g`=)CQg#Q9nY(OZIBPu40Z2^FhZJ zf|C<~e~kcG2tH$yGfJk6fDaS&eEv|P5ug~oBovHSM({3R;+Y-UnzsSN)XM1Y3OvE$l7>lbCSZe-~KU=qsBu}?NFGJ1COiH{<*LM zfJafJQarS6(7eF85cMs*I4tn{D#L}0tL8U>2P3Q-&I2qd|IHrm00m-ZAEba3+J5ja z6ADXTk*hOA2IaIwqq_@^eM=cIe`bVFoT=@Iz>vEAxcA>k;GmpAg)fo#SCTm%D&7SFV0N`ODX(p|VA#ejN4}3F-IQfcq(eXl5p&#NeG0e2+qo{sB`Q+TFAEAM<-J^zKqK^WIb5UrS z+^37Bm=X>CSVt@G{vEXcU^Pntim>x_UkqSltV|(PF1pw)DQ@6T6Ew{Kke~tQD2ljz z9gK*>FkKab32bDvt5?pc9z4)loNr9UV1J_c&9Cp`{YU@*afkq`r9WyNWECS>(5XLw zwe*!4)nchU%7L|<$HBfE6C?D!GmqlK`l+)LI>50*?QA(0KT@x96<_}T`lNiSIx~zarAkK6j2(J`=KJ8$`H1< zUFrJfULH1279NoO>bI>b`93lB_v@}U5$Q#ns+`lFoB&;Ha5jGBx_rd#D-;YDH>naY z-C2r~H^K@7#dI#>tDQ=n59eFRQH3FYHGKzFdZNGU$P#~~bXyAr6-vd%`}T#DT2@4a ztq=cr*fr7M4QsDf2xz{we_Rd@r;F%q-|OhS^XEsckWpIh>|9`T)}2)pNzEGa)Q{-q zQ!ak?fS*`!sg}W>jHer9Dx%HWT@AAqvtJFe^Z0}@FcetZECry|wz_y=PP0L+Byu%9 z2oV}1UZwez)%ef?fYp*qAvABM|JY(Lfpi@4ou66jza=`<+B)8!^WPK3_l4`ILH?>$ z-97Z!(51^sp3UcaIZx(;M`JRZ^8xK*i8rL;Wz`^E{e}ZU9VHy6n7MKNb?lu)-gXcB zRaLNFMd&4z-b>_RRw5To&hUx=Q~Idv9a4ZV(~~ANnCpE`&PTM>`PY|TB))U+JOHI7 z=Tckh$q&AjzxWjLXK1H-qk1Wa*5NctCy76NfVwpyCtk#-X~_&i`i@ve*z~wjhk0Es+19t1BesB5B?8P8=4Hy zg1|Qlz^SZt4~yT%Dx_!`j0w~i6d3}Ss2A?u9;n{w3<|5Jt>$$5*x7(Uby3a8xyH%) zV>D)M?mcxnrWk1i)86aPeJ*ocT_=xipxRB?UHVL>x>A*2Rl}~ z)NjxyM=*vrzV_eY8gH5KMO%DB9FWFq$?4*~8vH^VUx>%1r4x(clP$V$(M!fJN~b=p zwZ`kyUDZS8pPX!WUk+g)w{)BGv~MMwJ3(li@UqP{>r|F!u)`%GB2S0M?|{nyWzZx; zl|-HWVd8dJL!@|AA{8s9sG2oZWhx&45iVix%Z@re(PYL8=ed!$|-#ZZ@B;xtpNQUNg(Vmd?W z+cJp;K83@Sk14AZRmq(%IU+Vk`}hNNJvhaYWA#p@gM976=`h5!qr8*Y{89cJw)!f= zn6O_aY7=x{EYvjb5z6f!WawR=RHF-$I84AP>8UR8;BpQgaiX1nt5<{m zTmYrSlH>S(`g|#lTRb4Fuyp-X_1HOo{K!t8<|~7F{50pBI?nc!ZZBn;9%47ux}bLl zB3l?Th1M@7)L+_sOKFxV^b4vh$R_YmT=c?Ht$lZ*!rriDm=Fu z=QU#g%IjBz5W21PZQ#q++>Azj_cQ1^g@`i;J7FDpj(VJ1ojq&4!pY}`YTkK152LJ2 zL3uZQz-H&={9xzYQEM3I(;>NyuE+SjTtE!sAb&duNtz|BXpm* z;YG97u?K74YPot^7fAv*P|n4;wnZ{2S|< zoxC)Co927n15Kh%p5n8`4u`#%AkZHKj4|pA#hQ8}n23#Oq0^Q8g(sW|blla28s*mQ zhCiz?`K%AZzHPkaI+*;B|s=?nC*Ej$mTXdi+(NCexR`Ji>iwhdf3uIm27& z-#J5!eh7z~;fbTwE7wh4hBQO0JnG7ZCZZ1b_X?xE-cHj8;E}Yzh(%`2cWgBSQ|8MZ`h(h*ggum6qb)VY$RMLM@nTpk%I&+YKG^wCo%;0qh>pTf7c|g9#iS-YaLS3m^{^Q=Ne0Q5j!l=U^_hp1@?A` zoy9hzlBY(!HXIUHv65FE%Y95+JuBb6N!yUe=P@1HnZsO{7}3FpTOZk>o@ns|(?;d2SiXZpx*et|NFwXXm6!Nq@_kLSt~I*19VK6Fyh%z_rtL_};+HTxSNxLL}$9^NKy8Nsj(pOjLQ7C(BDA$MewU zu2%h^`Qw(9)xLA5&+E3{qU8Jag@lL`s`GKg-r7(5wEiVWF`gFKl-|pS+=Djw&);3z zhGWomA^qlh-ri=l&F_Lj5<|N!j6;glQ{ea+U|Zm&f!nVL zNei4h!fuuCWjiBMsQJV?|4|Mkc7obyykei zlZX&V&vr_Do5#w3mql%x7%`pmej9bZXNsjEIGsZ-KvX|pTg>-Oi4 z>G0l?r_0mB0Ao9!v<9&oebo25!pTKAnHJUsu7i5e!CzdNhkHSczyR{5aMBjiUIcj-Q&fx$8J+!-N``lv zt&jpV0-Qh>6~1;2o$N^pQ1x*}%_2TZf0BY&KMN7&NI0bVoAQgw%^`EYP_c>c_gcmM zjR09V|7r{g$FrL%#m15Pm#<*QSX?TtZFqkm{p<*T((9=_^QhrPtY<1@gwP_9r6I^M ztYqUwwi0r?f#cIT^Y=q#)n(rLo%3L-uPdNJa@y?^9<#jPgK(orz3|6}zhoxIR~~Zn zSF9lg(T{F}iWAE<2HhSBz z2y4%s_1(jfW35U(osH41BCU zM=uhyk(H!aJ#>AJR~q~k*;tb z7kpndjBT|xLWh*9By!&o921AtxeCs`Dx;$FmE&y9#m~2HM^#@K&p*4caKE!6 zZC|cstbljSi>F1eGuF~x;0DsBg1-pE-m+9WiQZH zEA5eF=m>^K&p_FMW=Mx^3G(3R*1peDxX(s|8Ua~20V!4ep*r4iHceVM)Mk^ae2qcf zrSXWfNTz?i`JnPX+ci*(gnmZ*P#PdR{1MXvHZm}tU>)g4}bwN#=tySfK@A4O2;!oky z2_g3z{}kJ6SJ7ebVKo&y@)q^X6S=Es7ehLvIwXI5&8O|+aE$2=!Om2vv-dVRYw*^H~ubrv_-LQkjZ1U9@Hq1^4sL zPAi$Hs|xB};pIy)EfRWTUQg94tXno8+brdKXRfr_zXW&YyIb3^UfTx|T-D9bJr(yb zu!3h#sI8>A`b2+*VG>yUa+9pjs`+)C3ZSQ6v$t@u=Qqy00@O@vZU4N?k9}eOiQY82 
z%ppICxWa?jA);<3<`J7D zsLA<;vsUR@174IIBf-L>=gJJr0CcI@VBx!)-e*j$H9nlG5QCNu>Dnz|Ze`&vgCm=s zv5{I${s1u;`jQY_P%r93p)0&{SS$iI&Slf8?+uNg4f7P~Kx9Xz#0F`37(}NhXw4lv zPj&n#D1?VhQN_N80Wt?pOWvUWDVI7%3sXny&=&nRJ&9k;PDdmc4sWXWWVu`oVH(>? zDNc1^j6%K^EIKu>m7_BKjMTxKpVM`@Y<((obt&rO?gdPecVQ8*kMiZ-kd5tmj*Hrd zv|e|JnC`$ep=x)o-}ToSWw+h)+I(j zdMac?Drj@q=N2{kE>{=SS6+!w5?CHl=r^m`d1YsR@8YbtxPvmGm=pr$_6_w_Tv31! zmX1>{^n%eFZ7x%tRvh}DY@ft7G2jxtBrNSK>d{XPcti8e$GF@7Eemm;?%%BwbQtRx$7-^- zUTE^`5BShS_NM0b*tbRBJBqt+{KVdAU2GhToc6XiN{tOCOZ&CuMt#4?F{0@2E$*9k z%=6cJD@sDDmqcdT8F*PJTE@beu_E>&OIS|KYL&91j1*Lp;c7A8Qa*)5X-&8$!e5EC zmvqM(LmUA4e$NF2M!aoJzqHEIV76O6U6N0i)+~i6 z&^jJLbfQi^Sff!8X=;76JQvn;1b1G_H`I?vBn^6oBCvR+mEUr~U5CoucW7jT!-9~E zn-(9My;+2+H?m;LvlF3X43yq06BNWFKgyuBzsgL6s_kY6NL-~`T;*<*ylSp|g`Ts9 zw^76wE|gd$1gQH8r#LJ}JGoMbOkD5#&QD&klVIIjJKk>{Effi7lF|u!CzkvSqsfcF zBYNj058c#rE_??#2+1$r>e31v;?(3~w-1b3BJcVcfu*Z1hd=zzVAcqh2Cnye z`q@`U93WE3@Xw`Q(8CK%iHbQzL<^6D;SRm$6ytUi9z$-cG3+r%Y0|sVIMd=NcuBW z@t)P^TY84yI@6p^49{;4(F~Ro^%X?(bG;gEMzMV+SpJ`!Hoh(&=Jnyjy=}RhXwm{u!-iad!@fnCt6<6CGj{&c!k2w~^b9O=>F3G> zKBrTRL&5g!?RT_Z{!u4{{+Vpz20ONB5XpnC^gN}JCJiHZGlj4CPE9`!k?~4wF_#ii zsLI<&j7gC3?&b*MJ2>odN)35d%BSVK^&6fW)*7Rwv7P0MQAOoqY{NJRhsAGE58g#g|%@YJH z5-pG|@h>dztu#8}=iTscPVH2sEO#d~m{VlVTTh)-o?rgEE%<`jwn$`POByD?YLf2h zE!yorFO`jpF%?yomrJrO*u$JWshXtZxb)uvnGIUNCm_PwH@*Nt3qoc|2M|2~p(Tqh zot_%68>a~z72k@gF)oTVDlv9%4jZdDbrP|7uX=uo)_U}=V!zezR>bC?Y~gKM;C(?A z9c(zcUHTjXw~?fqw}4~Keh8I3+iEv>^ZJ2DQ!egPIt%%=qqNd=eyo{P6VpZ}UoJSz z8JZ6A2wYDU6tGP!A8Ti)w{EMzOKfno;1y?j=Ukr#APmbG;vc~BAPc9bkyF4P%P311 zkp~`XeHPimIt6s8Q+>+WJh-~j>bop$OLgg&$xlCur!^aKbu+Qy<0V;LRq6TEln5o;b z)2%r7t+{q#nhHxo51nv}Z#(X}ID2_z207WN7#)7teIB8$cfFJO0gZ{na`5L11PmLu z8Q%DiAwQxjKwk)fgMVu~uY1IKEZgu8T0)bwbp9S!S4+vGFq!sJYiu?|^+}wy=L6m@ z_oP0UX8nR&wB08y)0+lu4Ee3vy{ES_BEE^@m*w723uCgEHsYQ4a-HXpt=^f zAQx}%N*VPSqbE7Z1l=KSoQxB+_bj8^WAn;5!+hX-R=oM`lRtw+i_IeHLmu_zv@fVIpQQvlU8F7p6goB&Ur z6aBQ=a7*wRdZ>xlR-~PTR<^_2nRF9|hMO>+7;{a*UsW<{hU_7pDz!T%H*aVD6j<=v zC^~$;(l{^vNzIxCZD9_JQ{9A2zi!4IkiRj1M0NSjkNN{w7@_#%t6WRC5*UG_`U61+ zG}8!`sIf}SUs4*j{mReEvpz&^EjQ_%mskb7V#}Iw`h^i=P+ysO_?AVnaN&%>XP&Po z4y60@XJjLp&@*IHq-oN2jlNAak~F$BkZ6z&$^B5Fsa>U_$O|aq2)X(Bh!0+elVhUK zUv5^1`~R@_mSIt@?Z2>61|mo|f`r6?G}0&yf`D`k(nv`+2&gEHGy;Pnf^>H(-Q6%q z!_W;w>}%BZJZr7@*#G_Iecw;}%N%nIH+NihU*~!LVlQ*_$1bPSS*WoU9$c|$7t&F* zjxAvfW|_n@YedO4wVf+moMVY>WSw+ruUkGDY3nTM;ju|J8c~K%<|-(%vL(m$;?6A} zJBBuL2&D0*_sU3D+4^i5^{6gvEO_B0Ax%uFmj&G?3r~Q*&Nq*I^9}1V1=vkmreBOu zaRN{d1WT8Q$^P+X#VJt6={9fW!a(=+kBiFpG z9ng?-y+qtZvHw}iCkbOcXT8qc?H?{0%KdY2AoRPlTT!_AWh;!vF5B3i@1lc_`&hN6 zm_1t_x9YRaAoZt-fD8+T!fQeHu)sRZac|-ctVaQWu>EhCU`4*q$;^+9ew0K$||%v)1U{ z(;995V9%yU`8!I(i$`fuofIzjJif6X93r9{^5h;>c}121&`fK2LI^%$9*^98z>2m( zOeRA%f3yD^UhSE$&xIV_oE$v-1Fje&$qbi5J1Lu_O6t~|`+b%rFOKegyF5TDxxn5f za7YrcFlewicQ4A_Y9WRYdkf|3`oHZ}49Vsq&0ve7_S8#$w#U0F(z6Tjwcie)y%RA7 zCQ0N<>=?g*8czTucPu)Q4(1@}^GF`Oo0 z#);CjkC{Dipf#6>vHDZ_GefaSv7S}}$io9;@V@89>RP0IDub3wLMHRdCnL8) z6N28hd^mQI)Uc&Y$$V}WCpa^Q-bylkDBomLrq4^hBYz=vp^m^iyP^#1s8TE;r9trO zr^u$-8+o0~0E{o4i2fS|007p2Z=kATLV$)R3+mwmly?hWq1r=t_01UR89G)W=P5%D z7QKD2WVyVPit6`;)aYv#ooDRJqD%%~CGjZ^V)Z_Y>NBE?!CekC7DuVZT)zlHjzD=cw;_;y|X;GArk4o^U= zTL$jMyfZHQacV61W~B{TR6Ib6cb&^tiM1~bmOl)4Q2ZHJd6l$nE*i`%C9?Z*f7mTu zLEt1gZ4Ub2Brd3vw8pYNPDP#0N|zU%n=dH;yM36Y#%lY<--x29(5DOFH|E#J98AGK zYM}m6Di-l56@?73(tC(r#25BTprf#xMWiyB1&sCoa&G;`QFs(8-8?WA{(JEupgLt7 z${Ls>0c_*I$>id!{!qie0vyC2SGkwc;7`5RdCL9%28Zd10QW)meM#fj&hLufAOJlG zWci2u0gr@%_mM0%fQ}@B^Ax%}ONY_Ed;w08A>9Ua2o%A$?i_vq z<3R<||Fz*x-2tbjfW9HTf;JDJBfr%)iNJ5VDxrjEzp`4yD7*^Y5SQs|@k!9lWx3MK 
zxETL|s)$p7UMnlqU?%u=+a8zXM|l)FVIBHX-^8g<=mLf!%gmZ5NZN z!DSWG?h4^=F}Os~ON9@erIbK6lnG_~{r4-2gR@v(`_r{=@w7m{)MAI%>;pD9sA)N` zF8)`4Egb+olbKcw{z*Twb#(0@GvT0gXr zyF107nbppMae5tO_Ul}K?tFB0{aqmcIbIKt*{AOVlt_VYmZc`11+m|H<;Fyf{@&5K zZfz+_2(GgecofIi18e$kRvhr^3Rl*iqa&0(@%++sr~n*>aJ;nU<9KLI{L(?_rXQ>@ z;8`O|)WQPY*%rNCR6dX|z*q-x0P>wKN>J#O_@48hyMW!x<~riz>t4B64OG(VjX9(s zoO}(y)4q={b@|A3UHf8sQEB-oRwz3kEDnaRXFuR&68Qnqp##jzEYdxjDX7rVHNMTu zy3=b11ol%I^?^Veq!Lv4o_mJzhL3v9EbV|YXrTtx{v*?+LDAI(?z*2$jj#BgcJjT{ zs(BtBwGwZ3F{lG@Mh2ZOJ)}(n!h3vogWt9qc*5Dp;6FatFFn4_|EfV7UqIUAi(Rri zyrU+_U7c#Op=A?Yxp^8Jpkz$#YF3WqrKA(GeE7ebq9l+?{K!lE(mm}P%0!vrEdgx; zrwyh#yBh>DRw&e5VNKZuPO;Hh3L9CBhTsnOh25fZY+Ou8Ia5jix$w1+_AUi!YxLD( zt9pd??cS1Z^~PP$jW4AiJD-g&b*(k)3kTcHDq zc)qyamb1O1%Y}NNHJOuAvsxKL)W@+LRK3wYTIY4LSRiu#yH_0T(4QmnB&8|Dhs7}A zw7W#0;_+@?xxUS<*{e`^cdA;*BkddEc?l(^7z9P*1PGM9ISE(3e#oyOh|aW_ zJIS6L6cTO*oaAWt2d{|<*4te*B)tCpqTGGnMb9k)g-4gAs4zY$`C_iQI`Ik9vB+Fd zc(QCPg7-%hyvFfTtxR9Xm&6Zhw0#Y&p2yxmIfZr>11mTLMY68H98TLl3*4VL+VbQf zAZHMN(Y2)m{dtac?)~`GbIH6*ITvTD!h5m>IZ1&cVFEA826m671G+}oNjBdmB7tce zJFWTF4MOyY_aQKQ1psc&^-Da>eUpy$vChQqdkQTWrs5RwrWBd`B2=zsXiBm;9QL&6loH7lJ7kRiVar1HKhE##EV5(z})&QC^#g~%j!`U z|BUx4CK4*^`nzksPGJ>Yqz@e3U$e&})6}RVVXCuB!L}PS9VRU=&fg!(=w`pJ2sja& zL1hW9xVW0$h%dy16Lh)!Nzl_4Txz%7k2F*H8&Qc77D8{z)aB!!e(p8zO2gs#hSO9x z1cQon7HLJ77BxwUZ2fHZ*XH_$`xKgb0xQ!nZm%Hiydpnth6W;G5^mQKQ{lNy{2^S~ zm-mm*QOaj_s$$9p32}rrvsIPiH|018pXikw47{;gB41>tSU$)>?y)OKSIS0U6Ak)} zlm-un@b(yKjuj{!jVlJK>f!m2E2q)}j=+diOW+1h?2MUyY%*|`Qe(yqBZ_Dqt3upmVwY%sm{t1c+lKJ~lkE&U$OluX}p!d(PPfR?m_&3x5yiV*##uaR3!-BT^ zaX0Yq9(FtQ>oZz(g^xV>rS4|X>y`h_CYAK4G;CmM2DWD*8&RtzEKs%GR2LG z?nIPY+>SDtM03maBn9I0y;>Ln2_E(1FBPPB-g(z)VOlW|SYikEy$}Cd1Ftt%`njb) zWJFItAqbX7FeJzIAf7(JQFYmQE09p{-W0pPXxsM_T2IU3a&y1Z9`?+UAt5Vvz}=KY zs~d^RKX)o-srMK`Y0W}Y99L8H(KZRvCc?JSERxdDm-iFDAYV9gs(5v-?jN*sUwu3w zPBkIEW4%Rlc4U@*#^IVjY4jtl-AW-nhxG9Sh-3@jlN07ne|%XS`2on_CQNF>YFG60MjR$ z?&a|BWe< zJU8Vb3{o3!eX{te>A8M!;!53)&XINTuJ-*D88aRz_D?EKR~C+BW2e^-tjmxs{mh_{ zwU0b*EdGgI=b%_aNe`kfE2Es(Eyq}UJ*SI6#*1h5U2JHH71%~hOkoMn?W+fqlY33- z(4^L-Qsj_UC-_d@K@#2ud;K+qy98w3U89}Ncq@%5o|qm+UFJDfds;H%M#w|XOR|s4 zR$XzJX;X~McqZ4cTh7-x8|`^jzH8HhtC%Dug)(x9ng+X_NMjRAc}&{a)z;W&`F;5B z4J+79Y`tE|w9=j>v59lRwgR?zr|0WqCXJt@WblZ(sv66c5#38UYte`SXdl*EKUPV@ z_263s6!Z_Wz=HHL0>TF(&{Vrc!s|nk;-+1=7ulx$q+Z&S*wQUtBgHwZx^OJ7uJC!E z9aYY-6CG?&@-9T$8m45?8TXbch{fx2d0>3M`yesB?;i0@{owOduaBj{Ibov8c2C1!pJ&=eU?1S|-1SUefjsJ5)f1GWXuk`ygnzgHgok+n%yhTWdhN zcd*XAG@1c53;DusHjK4<;$0@9(_0^o3T9Lcu_iUJSfUj>H%QNHr0+)L%v`Q~gHJ|I`(0~gyI zH31UN)~#dCMQ!)3T3Q#eyUSQQpn+Yh>HgKbkOLt}dxqaHRU?Y0@VQv$#_!UP(zJQ1 z+A3-WP6kPFb~f;iF(d6?q@pwc*2jqgr50!hrP zV@nQ#Jj4Bmrqi5Q4{i@&k&j@3A5x7MiSaU-ueNwCt(W*UTcICN0!4Q#LB)`STmW%R z!}bK5B{69!TlIYjZ30YIY3}lFk~_vTF+WfsizLOn8?f7t_UAW~g%HW5rSMUgRl|0R za{^$Z7qxKxH^k*Pp9jQ4ynw2rHQzyY4^W_u8|I#(2=X9!Nny6N{U<43+)xz6C(A#) zCtQ<(u!nBC4b)ep+d2d1JpzbjIMU+D! 
z@pA#ewYBTHs^C95)bJ;&q0)YRP%~p?Izs6YuDkYFc#unRFQCvH$i^EuvJhL!YKE#=Xf<)GYr~liF_6k=+&d2mTEK=CK z%EjIz^bv9xd!B*4iiE-Lv&-~ok0qstS7#a}WKHX|Ma;WUY>B{Pqju7@<|tUyc__oY zTq#%776>-{V}k!AHUDbeOccjyN3x|VC=`%(xoNW|5xKHX07rsSo0q@(T%+8h5sjT7 z>NFrYI*pVhGBog74D+?5$W(On0m{6Chf29#zY69E~5k&pdt+x@G` zXlel9Vg$pWx}j!|$dU2lyA0L+Xtrd5_KY?Ra->@iVep~uL^GKX{qF9PoV!l_P1g0> zq^|l}xDS;#j&$Rx(`4`gi0NOuH)>B8#zOwcT6T ztP0q3M_Ol6-WM6jNbHBu86VB|oL;DK+hgj}gWBR(EQs~ux z%G(`C`s7*NER92fn-JiFER6!EIdg)0i9oMqcgW!n>FuxO)vpDWU`%e)A}K7K2D680 z0k-v0{gKf)8U)L?5=id9ef=%#T}+Ti0NWTnV-#2~H!DfxEsmYo|B999a;~jR?=sxS(=~O!2VL9==<-qN6g`nOJFXMy5;;73V zK8%s}U1)ot01=waf~b=Ao^i2fd)0&d!H%-!&CVk}yQwn4+Ckl|a(SMsTm_p9Io$TU z+kH7SYP3~&sp|acr#Fc63k`P5BA1QNc0=dHb_Fg!qy)~3UQ2;T>;Pob@7&*i=O$Rr z6r~%aTFN)}m80Zu`Eda`u zI9a8j5tbm<+@_*DEdlJgF4K(p}&+~qjk>ivY2c{Jk}8*3-q4#{O+ zf?`ML?s^M5Yp!JTkG4ZLte%p)ofjg5Pm-Pzk;TQuT^8RE99I#Vw+}vz{nm=1EL?>g&lv zh(*LYbcesI!rR5tVXie&X>0`3z9wcHBwo5zxb6GJaJXqAuDO==))Q|vGk-fXh*sIk zx>yf{w3-pi06~5D{)ck?OSg;n`QGNpgi!{bnMt-nK5Us}?iJz7deK2glSRSIjW0Iy zPK&JNX-~!OF`6fhw5NKdZJ0hFG+zTTF%EH%&pI(1=Lk zZCS#8r=+GHp0SYE@;9CXcyBgcCfkm%x>O5k6Fq?dtL`_NiF%^)BaMqKKV@kWGH;{{ z5Joi9FXGxypWPY=hjdV|-G;~atr~mMR4^6Qk1fxl-b&(Kr zM$VgS=>YyTvT#rv>5U_9)_)AwnT1`B4e$$PBg$h(M=q5+lS_T8YNp)#>yTnB5M~IZ z_QB%5E#H+oFRWyq@;@7iKPmOQeJ*HrrdBzZ`Q9(GIldWgLzV6}TwJavT}}-6+OGC} z7|9cJ#0t$k2iUJLc|3CZxr7xb`>hdA5_&!jsV`X;nH%N9m+eQeoGGKV+&IY1Zl=_; z{qYgQlPl3}Bc;aC;kzzVxaB5tCm}bC)FV~s5}LTueH2J8n67S!FD zgF}P9)_%FdxT?HuI?j=!{V?a2w;Y<0l!+6&5xvWE1rK)8+a)Hq7S~y=q)diN=`n&v z34lyYb;n{0aI2TMINQ*i8Ta|FYKqv4?diWnx+V7ITx$0Adv|;``Pr%FOxEcVw42bD ziTEFg1EFQ*iVj3L)x@>7#% z*K+xRF5jL7-V`XT{jK>dDEzj}ta9pK=r(?RnxbrOEnTQ{W+ilph!y31gH&>O-%L!F zk3xw5a#M-UqDu%?n^@C(2Q^iYK3ZNGc^8mJYb8}aus5&zQ(>~y_`|k(*=E$6Md#v@ zXIVi;Rk3*pBLjf67s3oBe)k6U-SZ><7edpASjzAH6y64hNVzvl9PM!}nDWu8n<|z^ zA+#NHH2en3`7R81&Q?G2Z#&OrwwZsd%4FH+9LhcKa8dj?d+^uIn_}=3IFs zTRkNlUSl=4P%WfPvky}v7B$9jDED~gqogjHO{3pg-mTf(@=P`_D0tX~h>e#ah;gpu zk}AiP2#@J{w~9sd6(GGXS5MX?1)yMkf1qH04XrDFqNuGCMe}{S`I=^_UVtBbEuCH@ zt~GPY`kdva4nPXb%@P$q3Ska<;{Gy9DPy$#jJxXYqQ5eWS7in98=!COhFI}ugT$Po zCu#L1E*tleABo(*qofhpud|hQH`85p#>w}FTD{?j-E-vO(?}JS%CyVo8es*Mrai9L z%GH-6FV1||E?+XUy0!%agQPLfo{l&v!&q5O2m)lWZ)>EVn?kUYcXY}aCYl-~j6U74 zkT4+}&1zd&CJktE;GMk59z3FGk%JN3PDbopJw92+6 z`o0rtF)Sdf2T~t8A}x^Fgm`4Zj)p_F;^ic!{rdz^c%vQw-V&h^*#Bvn->E-h+19H zDXW$W{Sluj87Sj}jw7SO=5e-iuAM&293vsOsoi91)NT6QxMDYQ zr?u+R2s%mF%&!eEViUh=n`q_64}D@mS>i2tS{5>yriLv&oWTZh51iFWzQlefhX16S zSK=tw;f4k6PHRnvQReI@I!%-yWSb(LlT~@?im$~+Ue}(%r`nsHETzFYx{qyUYdUUc zSvgpJ@!>`iKF~*@WVrHflQ*v|(`FdI^nB?W zQD=YhO#ZasX%k8Pjydivmh9J+-#-t$k=Di_{+v%g5Wz+36mdVNM%EB)>$D%-_Ra5j zRCwlFo)-d>A;F5tesgm(0hyY8CW|Z)S}c;QM42Fo zPpBQ9Ag+Nwg)hOXQ&LOBHZk?R7c%QmA2jar^X%gNeLmX@dIx!n=+aq3-w?~015d{7 zgwiQP1?ggwPBt=zp8NP^Y%}x|7D8^&3l@z~d{Uy9>Fa8oi;@sAbld^cDtD$tG8^1VORpNozOw&p$K)B0^C#QO; zvp$7$<$A0j*|GT+t{SAcixivPdSo|6NAnuMI4dqRs7_-o*~pYd-T~hba$1tVt+uGI z5hzUq1CyNUiV8QdnDCx3L|cnrK|gtLpRsKGOsMOp${+?>{)nJDJMy(aUJ z*Q00WCj<4@!KUThCtx=UE33cE;E0w~s%46QuGx^v(m#b}i_YYh7FR-BJl3b3E6jB^ zg0{noi__NZ7N1E$Bs>nD5h(kW*39 z)XV05k`q^DOWo2k{%b$;Wp{k-t4r(>h%e6rj{HkMe$WjQbgb9(5Xl#-FN1NfaMx}Q zFXxIRkP6m#72Y!7*i7TpNuPO`4_T%aS0J_#lup>xe!Uv+01H8vl$L5dv>B{(7S!rB z(>(}ia3c+<2O*-zeVN}P5C7N`qd+IG`BG*3_kxH;9v#XDvMJ4yfSle5)6aHHCX$b; zD#~<`!8$)KIPB#=M`19(w*@BP??M}opo9|x;`FqJa>Rg8NE>9w-hhKoF?dd!K}Fir%;9ycy#9#PAGzlAQ{(qC}~N-Kp!sDxuF} zBw`a@z;d=)$ByseqZV1nLU~%Nm%4w22mea8zeys0y{|`YR7c#+5X;n$cLmM;4d3x3 zU-t`;Ys>B^xQe8quJ3Y;F-#0!6mp8OTI{q96kl&A(3P8!&9eP*immC$x-XW=-`>hyng zt4IU;>hmx;_L=sH?$^nNZw?FeHGQ_6yrmpQM7)F?*;`C?HK)xD-Z@#vx;CPz=q4%4 
zQkT%fZ(D0beXAF>35>-|yH;}J0{8KQgq3g=_rbDp;2k0E^qeFa*h6VJ7#$Ya1m0e3 zXXE*yVR8cN@*jU~QoQp!_2&X3;2waT-BL9I|tg?+Ja&U_5R0?(CmADLerp zkbF@4ncdwU%)ljI^PbLdiVO1m+gi5rclAwz0y>`4OPELQ_BoTvlgT$C1uYllz%S_v zr9>A`W4||ve28Pc`!{F9MBHBGi`6=ujp8aCfdll1!e~zW!&){pV?{4hS70- zU6e1^h)d%pnas@AZTjJ|n3MBOxoIlSScBvnZnM2)i&M5MOY9-%aO}3?S@u$GTO2Zx zZS6~MxbYGJ_jK5XY1g?_imD0QjX-?dYhTfk^<`2Y$5NQe4S^a&0W0t@dGs>rUzs$+ zE8y8&c!H$}$Z+{hAxolIf$yKI|18|*AZ<=RM=yTld627*DH!=GGnInYuyrGxAC%ut zn~TLM83i_dC(q`_w`ff-cBXc>R=dQEhvCoV@H$^-TI?Lg5!Y(3POAoKcx>svL$e(J zphB0tyXAAakg=sYTBbOup{v~}m#M=A_fmL&JqE~fuMF6CI(fh|xObkJy0>(sFVL;o zQ4o0NKkLqp%Y`;5K#)ccX?zc$o0|!~tun>4oR+W6L3pFKPh@$g9D3aE&QN|ghDJ$w z`0N2L!{W zGv}J-`w<&PcI^)pnHC+t+_M1quCBDS{3|4{D!i<^#gH=aIl?OO(W@}U=ll4zR6uh}!OqSTOjvV2aJBit}Kehg|)SkKVnvS|kjZYl4jJP!|cPC+ppwn*QF0NM$SW z*tdndu=V!@z*bz9sckAx6R4wTta1iwkPovwmD9Jlu}7jh3$_4{a0e{&s*~k_Acd|m*laD@B_}T zo)#A?Ro4)sRh;5#^hn|Io!@{7g@a|z&G-pd=nuu<+77ozZPPkO)a^>*=B*l-#Zz0U+UFu>x|J~lHqHI6h9G}7x4c0& zvHcvVWyCJPIf%%dtrInS1F=I4^6uwpq*0k`B-wV4YtDT38g?+?q#ZgE7VwB&(w>>7GGL6q$u9)isJJTOB_dO?y&E@2v6*JST2~E`&s1TvKF|MsB^_e1=UhZC3Drrc6PZikak}`B7pNf=`I09^}OENZI79 zBku0?UF4SDeb+wG$F@J<)ZTPM`J66>4ZoDC>fIEg--Dq|p9|O}p>G}t*1h5Ns(MLM z$MHc@Zt13UzbmXRl~Yh>RoE zIs|RZRu~5(PAYx8>~n>~wI8;!K)<)V(ZQYN8iN}ZLIg_<*>oXgY3A|@ehkLQwP^f* zEs!V#Xj$4rkd7zK`N*YEu*NquG*MuMW|(+Zy-!)ML~Z@!jJyLkMK`Xwx3N*+?INuf zu0{^^HQP8|`<#z9rnh5t&CXXYs5ad8)H4aaiK}oWaUZD>kqvH4F*J7Xsoa?hGmVJh zFD=Vng6HnOm6D)7xKvkK9hpNtzz}(#FNZEB@iJknhruc21~U8`wa|-)L3>6%?53*~ zciGjkfzwGjC^)vhl4SnJxb<-MK~=O`Vvy_ zUnezMep#h%_yTetTk?GvyMH>di&V;IQ~jRQe6N7l8s#k%#5nFHmzob_&gEkv7t(ju z!?L4ZxQCN`V1hs&_Nsr~*($Ly-GCe$SRdiWhHT_P<$+k(-$FVz;a|(PA0|GTH^OPN zK2HqIj+bt0$G6pxqK=_jtWTQw)em%gdYDvdSo`km=eQ{FK9Xc@$0d8z8pRfK^%9m#A$m%}0dg9dh7Z8bbZWq^KxdLq_UubbPDCQ5H3~ zd>vLYckXUXesX#C)s2k$?$R`Va*dnd4Km7+{?CFMF-o9YD6zpGm{WIrSR8$|QXN3I zOMN5;{14@Pl;Q%Xj2`HSg2-U;Cj&OM>h*VvSD0vLpTuVQ;0MFaTUM)2q)KEl zZWqUAWCSI#MRVD$kFa)!HKDTo^Pg*eXp3V9019QTp=POKdxuQ-Spwm2HHQBTOK~w& z$CiL5l^&hv6vYd^-t@zpCKf%uX=QA<#Ny(Te41J((fxl@okUGT22fhAqUHwqFGb!; zErAuGOSrwnvolg~#_|Xa6Y9u9qG4Iu|6K5Qt?$;aB59c)sz^%tEJ#?Zol{Dfui%Mi z|I;FAfQz+$<{YX0uLqw8+F6uwS|Z^xCOPJ{iuW|%g$$;)@h*W9p8bWh`0amPq6-B~ zzV$QZCm^Eq;_;P(((2#+vi?>s?4txN-Tf_l-T}By{f|XkL(rj<{UA3xsS+NIhPDBO z-^OOD(oOyqzye6{ASjyas>Kjupg~Xv4FsiY{p%n)zb&0L?NioQJc(fBre%kBJvKx+ z#ITDqsQnRr_)`ovjQmwQJ-qXOxGad9!3?+xKGq^t#ZzlhH>+D7{ZD~<;3;KnHG%rb z`rt2~nhp-q)hQ&z{zv-nl{o#j{{IIBRMdF+pGv^s?_Y6HA1ut*F?0ZEyfB?eq3~ZF zD~Sd#eXZQGRE{OtvNTG|@JhTI)jrie%vAp<7!+*(>MhjA|JYl^elC54plf)pfAotXDVfT zpCN<8GWYnngVlR9C-^sH#B2gC2zR^TGm^4%%@o7UbzmVB zAs-1!{n8i9^@hgADz@{Ts}s_9t#e|O_r!TO^q_>bHiGEAqVo@J4DU=d3=GH=is@gN zuPIbB3fv!l=42rclZRGa?^TNGtSC7P>rFeneOqv6JbzjbV5XTxW_C{2NyzenBvmDo z!HESDB|0PuWOII?8OZr=QJt;S)YaP~H(NqKmh&Hi-Jv|Hdi&@0`?0D3F5Pabrut6V zBjE6oTN@y>d}Lee`m;aB?vaV`hDG$dkTQsfqVR28%@{Kkm|m2R(FeW)gE~R=Os7ei( z5?uKMZ=JweH*GOhK#9_4^+q-vm(;HGVTW5CqDgv3WwhGWnR=`3w;w-UisHPES6t}u z;=OXq=Sy5-Mf=8`>csnr5!HFoqvjeUS>X{Yd(U6C*cdJoh#|#x+IKbs#10%^WW-e^ z(j#oVT&1plj{eHcuk$MVo%+He-F*I+UZ2$yvH7a6zMsO=ak^4*=;td=MtvE=PLjoL z&-GpOCpNWA6*0r5bwiIorajj7A&Ql7{%I9dt1$jP@L%C1To3SHyP_Kn_ei!<;mJMF_ z6{Cx7#EQw-CV{mUrT2pa4U<|tul2W>trJ_pY1q}HFi%$*<}JkPUdCk0DtFk=ZRU(4nvG@IgLQ4%a;wT>Nh2Nr=SN8 z!7X?By3B4nAL_%0w%k;qYkQfr-pr27Em7Lx8SQ)2rHQVtWt+(MUS#~`YqIzHz{GnN zjiW2=`{C+JhJ8GoCdYPP+LvbU8e-`PXwV)f;QR`x^r!pb0Oz*S7pn z4AekD3aSk2)pFICuPrYT)o)Z>JxQ`R4OL@$aoJ?uK@$?A5bIUUt5@w-^AEUwzoA>GsDzo=}wmW!n4(Gh!L z^w=(@ZvRW=vr3NI=>2qxYFHKJlFE{iwHs=dn0piVdU$kSEc+}+i6Jp*_CyTLw1nQc zKO6pV^2OZH41SNR=hjAoK?NIEmS4N|1)0^$#0IOPN*dymeh=ApOzERRAlZsg@<(vv 
zL0(YY4-VP9$7dVqQ@>4Qbsmj09-6FBFdHUhY@_D1IrV;F1=8kRS#ZsT&HG#_$j6mU zZbPs28IIbs?;0+)?|v?m>+p6tx9615Kq3q4PZH`Eql|BaJ-=$cM_BuQ^t;yNS8Y<* z(0Drbyn)*x(1Is`oqClJFIS3H^C|hFA;2s(EtJ^I7eoN-^NEy`+0o#M0*Zyh))NH^ zHnMPpGT^tm^wD!zU!=4!LB*^#C&U6DnJkRM4vL}PoA5ddx_KhI489BK%TFroAH2@# zkGKKlg(q$f8F`~k+ha;F!{W0D0Xc6QrHv(8J{D<{bn%O!1;=gO1Z}bVda#~&z5+L! z)Juro3+-igiqCy`xuicnR@53WVZDk^;P?b&-PiW!H7uX7#cFb)yun%VNv4f>wDQgK zxZDaQX`J}HpPp16#bJx>l+#==z$h=9F4uoTKU|^q?;nqa;kk=?V)uSOj{gQNJ6AEX zF3{kzA4%Bs+Rdn+-L&tBo$IIJ=OA;t=v_Jq+}js8I!f=iSU5)tB9Q*iep~m9 zxGPb?ROr$7PMweU@D#0Neb;o`Lsho1t$j9j>-N=r=lAB$qV3tM4?l4v0|{-IK!T|$ ze>_>x23Wn$z3noT+oz*z8)7_branB}>y4+d>}u6V1MVNJZa4zKO94;2i!sG>+F-kz z^n9zlh**~)=5mGlkbUEbBYTu@$Iv0|m0 zU!*8Uo~n|NA(5&}Do=c46QeJu&Lhe2zQl%nYm48)0(qcC`rtw24SE9Q6JC$DM~PO* z$gU*@zP0XHN_19z&Uz~E_ibsC&~(7a`8th-wa+_)>)uqS5EN!sCrQnPZT%$ZaYNj= z5UX7U19+_LI)f2t(G^~WmMzTew}$O*99mrRhWijvd=iFrQELg>&jmsTl0spDqir^K zJvVt*)dx&wCRR4X&$}-10U;u+Ey+a@;ejk2%dPT5zwm>POJ)*OUZ%RQYQ8=0ZADCk{HMR)tHAA@yrtaAkMry|0e?XP8&b!G#l>U>+x8*dO7 zq~auH;kaGmziG;Ww5nJpDD#hHk#%ckA`~8=BQ^svoKc}grl44}{U9EKm zy?ywIcfDY1C)9j)+sg(V0`q(;xB-Y*TAm*_uY)J?Mf7~yug)Pn5Qdg>*5Z-z8(?z~-XUv6Je9#COS|0O zf-lo4^~bfi6FPod5>|M!epC~>%8;|Pl_;~}_4ysUz75Pl=uMk|0e?gPJl@j`ON}f2 z9g7WK&Ix0g8-cMU5h#A0I7Q}Tiftdc5jXjtxs&T?wa+~JW8a6+#Icc?F5)q@Z0Sc} zreR1we4{}K)ehcp#`P~y@NZP{E)K&WKfqXk-nSo6HF4aQs;>`57@|B;};gO zjkP(CM(F2&Q)rQO1Vb^#HJk9*1^|F4$<=Y@>^xnX*k>y78!z#DV5>VonrCh>5|liA z;7jezWm~g!aR`bgUpPV=_wv-)$an)!AK}?;>DK@?mRtSR)#JiJbxwR6Nf&9b>|2~X zK8Q-qr|?`PaLMuN(wyvq-cO7vu8V^$kxRz4GR>3ft2o|v9vl5oiopxevClojDH|t=Eo-kj`>EU^T-{TK zm#xyg<=$TwRdjm%^22?enajcYXYWET56v%#fj_ZYXQX6{MdSKpMv1t6u>F#kFmhkN z+Yfv(&&O zL?dKtv+Y`1nXHvP{#i-ei|E2vh1<6#UTuASw~4ozBep&7y9eKH-%&oFgP%8_qhsEy zst#tDCv~hLkd%f`kgz3M>n^5m);_rr(hrC9ih9%R!5GW@RG}|=i}h%n1hez86k3Ku zeW}r_n`f1%O^IP4O#9ib!6DEHx3oCkT`LNgfcS>_z5A!Cr;*$UnL`h%Q24Gc2ZP_bijo#WI)!E9qENf}h(+%jIcoELA=rYo0(_+%Hzq zEzu9`(9w~qlM`0Pf^_pGrvrYI=>(CSbp`EgvP3|p1NTEJIXAhCyC~w)tEVc=AG?Nh z0hp5>jezC#WXkchdFCxFCMwMQzEj{|x{kAD5KIBinNED= zW*K<#KzyJj!Eo{OFSP-s`iFlY$>P%}Tuc31VC&Z#&Dw)9=)RTeAon{{)$m_c0lIm` zN$@wt9S8gCLx3$lzn=|w8^D@z?%yH;C~HE+0tXe4`ROSEp?nV^2T7nfE8<}X%culTDECqio{~NG&_O8kr=QT|WNo*SPQGcR zJ$_GAlG|;Wkz?_Hi=XGCx&)AC>pNk*f_?=+wdy3{vNaow3 zZ!rBB_ip@6~9xK2^cV5T73t=&@XGk;f=~~9TKH|Vs zRuhSPe{EQ*<+FkETurGU6{0ngdomgwEYG>$pylZBvi)WP<%SPCl9CxBuTBOnZ>*ht z3l2#%x2XGMZmJ;$S8svPa`gl~7)5#XUh&hx&&+OH84>yAi(x*TRW>T5^+4$+MWp>R zhh8%R2a(Ix;Lu!rvgzG!2$>_NJn^HwBK*YVS{WOTQqfJ)$o}!KFn8@a_j5X9)v@MW zV4R^cS@W6zCPT%+2Tad8_Mf%7`@V`f{z96;$mJvW5qiA$`#!o;fYpWeou#T@DeL%6JqYPyboH(ynm$$sTJgL{=VV9{HXTjvi@0G~vmoO^PfQ)3UY>ZnIdtUp6WY zn3#E#Mr&^gbIHvu$5%$ndqb>z?0jcravP<@*jH(?>5NMalI=6+lN{|H*7UVYwcJ@V zi##g>H+K3uoo;@|2)G$$Dl!SPd%g{I^eQWkllxhj>eAeM@MH2 zK_n>tsy~WTT<4KOnQG8+^;F$iLVJgNKIA_}fYxMj40>R`q=ZM-gp!S~?2$>xxu&{F zbnjUFZ9)96X7zYah^xnoYs+2xR61Y>7AgP$>XcSySgJ6rovDetLXe9;=%GISHrlbL zN9Ad*YC(L7?Hs4Kb3og1R=l$Y|Ha8nE~zBzG42^~mt0bQ$$k1O<7w z#x(0uMm9UEhh0@HPZFAmG(wpS!2^~~Q!#PHHn)5Y4zcC5=v4!ca$oi4bz`5WYK4Vj z1+u;v^K?Z!+YMZcsoePAgI+Nd>D>Mfc;CFI&UTfF_46zOl4%@ewo1brkI3E87aE!Siqn-I9k7s?f=M{2m1}Fc3nARFoW1A-Dd;$^=2TC+qPR|0aOxjd&cd7 zAMDo?H%}^FnPNfqVmGx5pp{K{x?dw=vbej#OQy9RQmje~uh93T`$vT)f$md>3s=;-k6|f?iQF>Nq z8EvD{?K_76!#zmpyPWnE&Bf8ALUK=i250ihK-wJDVnq>{`<*kbuSv=ibVJ5Tn8U54bKBadwGZ$8l{&>4Ba^?0(+ z>bDi#92}W%ydB?5_gp~Hd64WJj~7$?RBSED#>3TS&c@b#)<}vu&1zXL3_xDdF>6#b zyJ<7oIXR7|A$kC6%%<*r>G-6Wx3}u8i=b@9mj%X*5Sl z=I2xOzPX<9bprQ^XT#QVB3x+n#l;Z6EPQ{#&of3?sKBhl73By!V4BqYs>-zH>E zG%n`cKPtN*d`E%NTdkg+!Rpy6#)iTFV(%@ZqKw*qVZ{Ii=^814p?e5vq)S1%L12&; 
z=>`Gmp@vjSK)Sm@MTQ1JI+YPdS{eq1d2i!6?|Z(y=lOire=R=D8s?6@_qDHmbYq><0rmJKfIe5wyDJA+3G|uRC1Qp;ABhYVK?mR@T%W#>?&AEM!fTC zX8kEP{IH>mJjr#K^(}++BPapJzIU&zyF?KxdFSY5lB=AnYngi@8TtW03|N-F_s8-F z8u8+dMZ~a<^-gFPAPfIhDKJ*EoBCxBWlv60*LqMmBw@;dCr-U@9S%s%@>=jwuz}+6pYrNH`Q|LfwX#h z?P$KCk)hBiWbhmhIsGhYf6Q1U;Z0z$Z$&EdAULvLRL(?lBk?m!gje7hepK07!&UX( zl$)tmui!tSu*2X}TXw)(Dg zpM24*H$yk@*aIlpy)R9P8Qz|~^tZ+WUL$xOxRnW&w5!=PlSD3>CU@mGL!^uK>Blk8 zVM||IYoq}=g+MIKFx}RD`uxn6O!DGXe_ABRY1PX<8b*48=DwAT*Q%E!(9Fy299DVL zNwsMujfbQF&**YYQK(*{DgAa`30JHIgKpC)0ts~DrQb`NBswx#9!L!}m&~Xhh{5&; zBuRb4c6~|Xr)Dy4&z;%4A@bqVJO+eLv&L?px@V+5bb2ycQMVLY0}e~kF`E*s6y4coP`T9fMQos0{szAjc1#4x%9c+Ws_*+PG zykwpeuhofB;qzld@87LV_r?p;4W_U5d@az?0Q>iJpv+Zk+!EledI<+iqsPYpoERPF zb&2{-oo;bi3v|-r=;e<33@@O#h3XiBp8{yCft~2tvi=gRqk+oy1%zQnrzP-$*Y(nC z!@ZqI5>WHWQE`6y&yQ)*S?e`{$FkA?^5s2ugqGNl7JNKB*kt!p?4%~?nT_#CKubS0 zWe-qU5{6(ymJH|6Sz89U*xRfPjSyUF8$t+40XAO-yM(UG6UFo7${5(x&RAH$W$P^~ z2j47XigkRmh5NLh70Jd@c;7rqyKBVzQcv;etqHA(>6J46HIrbopXz}s5f}R08z}}d zsJ!8PTLAQ%cpAvQHofZZ%=@uw+@0Iw)3NK_b9uVf$s zjdAL4zEE$zC%ggy{ND~1=q>A`LIxuXhwDdI`grdmH4$1Qxi1%^mss3#vOBVy21QeE4_Yw*!bt(!fHMqXG{yLjh3CL zvCcE9-%*KCQW?3aTNM{E$zRyH?gJ!jBo6w@Jv<2-Z?g5X088FM7^tvau8+mEnY0N5 z)q^Hp7d-#znL4=&yxa0L@b=gK_u;>$sK}vPZ)R467;99*s+h|WG|f-!B(|LgV>CY2 zYVU7tgfYye?Ibu2dCk>n0`-Uum}bt$W)clN(_SN0w<@(baT{H`U9}#mUnEoiWJSM{ z9L2~9e~D}ICnBF7o)Am}jCGh+jc2tfJLV)(mYmzfEN!UV&F?;dS4)}CI*mC2uy&?v z{h!Af+Z05hW*$rG6UU{vFVbU~tp6H?H$IM;@ zlLIA0r@HX~P(1|_XoOPss$DTgm*$EWU!?#xMn2ts%XOxLn~PVko-$-5f62H7yUTkw z|J|78n-6vXK$;P1s=K<<86z(!ca~>{1fSmcEyYOp`~SOd@PrH`=<2$mlT?&{3UskI z+N3HV)ILNyS^6KJ69LRzA-}yQp4>ztV{!%8@nE&b{*F%;c>}Zh0oWh1!ok&d7Zxx| zU)~eFN}8qI0+YUTTl#D7t-GiNu`pgiSM0ru{!vDQ+IkPOHH7JfL?l1r0Ac(mEEn7q9_=JtAGIj{ZYI?aHtr7m-Q@p+3jeO8YUO( z`y^&5!MLkeIoTly4aBA5=Q#u9a(HhKu3BCFy!d3OKfFT<$`uM3|LO`fuCNsXXtyNl zUDzs_Qzbm1)zVo~S{}4j1Cq$O-RUvuk~ZT|jtklJ#FnzF&T4US8UJzAlKWnG&!Huk zC-E*MI6RFgIz}rCHWPTcZZ?E$ml%vxP%#2i;_q;)HM)$C){AD9cwfGTq)%xAyP=AZudI-Xo7l7tLcbs(aNV?Duy6IK{o3mOiQ1Z?U$L=35p-wRet)H&-IZS^Tx*z*&7j ze3NHE&8MP2gJ<@4<1bDoL3bB^5Npmnj`=5Ryw7mlp=h~+o0`$y`{Mgqd_h1HpYG6m z>^-hMYrd{v3rjsvHOZUQEfzfKH)|98qf>_|eJ{5Gh8Ky}oU3YU8=j3)V-YI1uXTa; zkLH4QrpMX3j^bqeA30ZzNhv+IbkK*fLL%%Ixd2)8vT>K;r2yf^0u|4KRqun6YSB3I zn0mCiQA)8%5eW!aZv^nkS8qk<#7))#I1va7>%IW)L|GUaj}(^!a&9$#P4S<8IyL>jziT|Wzv zvc>1(1}X2Tvt-fo{Wc5+-9G*H{Ec!GrMf@XEtra!3aB4=3<FTcmAJV-2u01Nyal}$3XlWC$&m_y%97BPShX}>o?dF{QSqE$+u)JrULHM9~T-{8BJY-awC=N z+ObL)UUeZBY&W)Y>{-%trZ*K6IfJ1HJCmv^P}9vwo{n+Ic(O}*yh&E_&hRvSH73@d z$}CHbhw9j0*g{-GRjal~s*@r`%09HMaqNq8NcnuPj_`bL5b^?C_%_3c_R+a zmEEx~l_EM@!|af&pB*l-Y1CBOh>wcmYg>OfYpSlsmv8~h&~9{XqIm!t7_(yJe|5uA z9bI)4Sl1{nBU1iw#VdPSw(Yp%$RDAbRd!s2)!PD7(Nq#P=q)7Ae5c~6<}_iq(68cK z*i)+|sKvZDY3tR20TG_v)J>0f>rcl~tEw_*P|4^ouU7!>qf#4JpZ$=2(c259MiBiy zF(B#(15~_p=E(7{0Z|fZjX#h+Aw{Gm1;CGh7^J|nxR*blC28Rw^NUr9a@w{Z;i-7y z0U7zw`^Xv~1$noPbde-O#egCQd>JtQxrg1s#>$H6<(ixUF$mz3(uuKhl6(8QQF$1Z zH?cM>llxlnbe}4K>G?;4#nmy)nd+>#*0rHa^0s$tAGJ(7j^6RjPn0{}$`c|(9r^z< z$xsZTSBDM@iV)e zcO+%SN|J3}t&(lj23k2q@jBzY!_Nj$C5W4inlMr7hij36%w|Vq$YkDZ0IjuU_@$&Y zLpb(}kR4PRfbr7zGvRFjU>?96s<|O;6piL(Kcp?qpt`@(UyxkhQb)+@K?IXrm5v#B zWBWzb+e~TxL`ofgqXaeS2`0eq0U6e3L<dI$S=@PEE{A0i zkb=skK~gd9qoU2E+)a^GazAkqi#L_C z8wI(qsJdfG3avSVsm3^5Vyj-wM2+cBae5epc)3+{%T0-D`9320G$F zdaC3goe+UG1iJ!4HpC)B=UzD>@U^fm?_blIIO8`ZdMUH>(XESob*>Mgmp2EA3h*gq zAJsfOa7w4Itpc{xO<{y(CBQ~xa7-`|x~F!^t{~g>;);aj*X#F{9;EpYM(!oFct2b2 zYq0Z^6P_O9VRHd|Sso`0`Zv2?SKMrG?{Df{4b{F)+B@De-KppJ^%qo3=I7p2{;l$8{NHnaI5DyGM5*Pltc z&f4W$J#ONe({7`Xn2tWL5Sspe!Cf-QpBtq`0F$#?RHAP#DNcL5+f#Rh9ZrAl*OC5p 
ziy6h|I+Ng?*L=`C3euOxtTeazo99~$d2?^>WTJ#_lVA76yM!q5O~K>=upwPdEvQE` z$w4D+-~fGiYYN-3Cc69baFFT{fGSc(`F<$}*;Iw3D?*)~6(#}b-atcv;Zq=YiEq3w zA4B(>BD&e_<(Dlx%Q>l^;nB4UQ7v`}`Qc2OfVnX113;Jko>LxT(CIb^fFjXvZ#wHh z4`-rIp4g?}l|15BqbKp*D`(v`ieXA(iH=Ss53dPsbsV$#rRu|>5Rx#q*|_^1o7>qP zm-It?PwHdYU|H2J@VB1Hp-!)UaNOB8b2>P4Tfp14CSt**VL>}{a@30l0S<36@pfP& zyzf}mwTMZnyJRA{FW1Z{7aL*sCI7Qo%}Llj%a0);bSY4KF`=esZm-P;79ke3$1ep$JKFI1BC@E!~Ws|XKT&{|tvh>{MO1in+v{$ue zjy*H+AFJkWp}oeb*C8I?#eNh^6LDmoYH20;4(78`P^6FcWsKhViKBV(bYXR8ka-%g zyS1a>P47iiRfA)5`*6Lo)d#t9ts!FF>`zzjp|ZxlW~|K}(g$_@DoFgS#X;p_BS1|# z;XCxBoW%sp2F7B)?Eek5kQ41;|pyZS>H1i8pavOe<1hWo(>rwwP++y;%RB|V+mPEPN9>_5_ zpbQU;WJjnVR2?5I{DWPmRJ=CO!hY~Xs)}CpvCwA7K~mTmfAu*PHhEiisqk}ZW;GM2 zk4GLTs(p`GshWb1Yv`P>$ZXr$Wic6ocQDs=dtM>QZ}sw{uJT5W-di(W2@M-9Y|8|D zn6K!iQt^~Z!Jm;=*Kjsxk5LS@YoZbl?z#y}1ZUg6^&nu8={~X@ zl}If#Qm{Dh=oRN7+dqZ;8av3HyNc-_MA8i`Cf3!B0@RQXIm+;gyL8K_FuCa`sEa2- zgjp6|gfR!)@J^Kjw6%^+lxqtvwf7xF&gKpimJw85oB*xJDLu(JDxC0=?%1w%lqmX9 z=={?~^#@mst8?v)k(9#oMJ~9j(yM^~(9u7{Q9-oky%-3o!zGlOEw`qP9H1Zt_uq-e zCu3HC#2XcWN!3 z`g@7voaBF5ZH8|d^&M(@Bet=5os! z=?=p~vY^dWiBql(?|UrMiy<-j$~cn&nQK+o9S$9AplE`-ys9$q{M^l6eiMc?XOATIj0FHbLmb9|@%r7}x;a2yB~Nz7 zXk_y8P(Q%3oQg{Yp`{oCttif~Gx%G~@aXtvB5X>~EJljc%0?!(VdB^4+=2W`IjAlZ z^&w;2T-cy?R=^Y+aLrpg7^xQBy&kl`iQmimu-9Uk6Z)%PQ=;UjXZaChYJMErnsuBO z?@Q3MPg;}}U-w3`CXA|wcU1{eS<$;1sub#K_pZd2e5P(Jan$Ef`0|?l!tZM-uF?IK!X&Ii{G_t*dtX1S>0PtbMRu{E#e z&3kLDK)vJzDu-BGlmut+XopLtKo@F(D61#2?q2@PO{7ioSe$WGs0!lF!W#h*Wk~#W z`n$#Z^_hx1iN$K=+N`L)`b9!4I8nYTLsiMv!I|t_otaI*A1i^SgGO_rWH5 z8XK&b+S^~33|GErHH5iW_;Id_9ElNr&Arn8XoLLOL9qT&@eL8?B-{v0(zD8G`7y;w z*e0{DCO9cc4pJoFil0YWXex;vpeBb4H`y?jVg{K+rQm{bKeLV&`{tGt-dr77uLpCj zxy&~Crr3HB^5R5h&y?&XR4C7N6}%dy%(s)H0XAMOk1)w|=UAqYWGWGc8T{+r`V=2& zip={d*#{!V1RJ~!OH;rFOcWSJcylVfJv|e(^O1_-uUi8?a&*cN{owt2<$r=D1qjyh zOS8~UzxS!pN~($6WwnXaC%^22@$s*p^{{0lzpuqZE3W+PI-wrmw$bg65R-3U3aha0 zzTT%w&d7^9MCHAU=J0EbZf+j;A2QH`=CFr#(YP!Y8H!2b#@V74jq?!-#a!$ccM)jG z(I@%@r@TZ|yvh!gJsq)(lcP4TLhA8kkaCde=ixuxrHf>g(?9-lXFy2=4IdRVJj9xC z1ot$Jj0s9a){NXi32$C7Zzb(XY&Xlge`W+ZI;khYratkYpKjorxUe0&KUx)}LPjw? zg6Nyb%+dUHxZX(vN(VjL6flONgYJPZYSeQJ@CQVd1eig6uY|s{sZklgj~K(cRkPZaggFqgGMdTX9`z);F4 zLfv3YXxsHEkk{=}hNEUoo`@O6FEN4A3uwjg|JoS0fS1-|Un?CI@JbY>9sK%eGC{HR z0r1ALKmpan>rC!a=;P{U)u3u)h=$v?+XXQd-%p17KkEd{*b%6gv>7)9Ope=Ja(V*H z*IW z#;!d2mx@C_1lS}LYOQ7Nk{oJ9Wf;D#AV3qPZ%+UlW1QP5LG|KYI??$jE!<{j>6xAY zlJtOm3X2LLY#$&E)%$sX!!ONHQH?13*)cwziC^$3q#XPRY@I5E?E0z_FEgk(Pv7{?xzAR}Y48tbzXlz2bi&oDGEto( ze@>IwR)b-LW(A)s{7Hq(l56_8i3(yp^~HqrdwgYIXhA-WG8@g66W1#uv&7GoITM=| zSAiDnj0y&}_zMp}nFcJJV;#Sq!&(r>0&tE+G%mw~$nT=#QmH zO+NyzxuNUzuad1K{2IKLvSOPY=gxKDhEvyn-%$KSbX*SN8b2d+E_Y|G`?T6l#vv(F zTq7os7Y6*??+AFiH+6mY@g9I2Uk&p9tEFosegv5gP0f}A9^gm^e2!_3$X<}sb}WAH zq34wFx}lZwV%pTsqXY)`c+UDBpj?C> z*`)t%whS@;J%W#e8ow`WBm_S94vjK?Q32+7GxioR6FMh`H-Krm_jHcs-~3Yo|1++s z)A@a25FB_S+Di7n9L_c^P8=9`Vwd6n4C8-R<2SVtjye};X6E6t`O41# zHud1vuG;gr&B=Rk~!qobN5KxHtTM_5u7IJ9Y=1`woOt^7yt5U#YH{g~M^ z_d%aapsG6$Mq4pG61Hv1%9%?qybFqPA6?xerDg=}7-N@}Y#nDCu{>3QRi|R6dy58} z?agTm+Dl{Du=$D6^^=DuE`3c$2z;4YiL>=~i5ZIy?~`vQ`_ot7EsGa+oM)d;=S&ox z2$wJe;Ojh%BEoyh<7v|defBaDb&b;P=-oVq!mdk1FbY$GS?Hsx?mk1Xm|aW*1pS_a&clym?DZ-m&{xo`x=K1mQ0dl4 z#a0Ubnzk*VP)6s|=)J(uYVU;6X(Ns8Os{vFz%I`^m`6GO+s|+DxU->)^Q}H#=EP6? 
zvUiroxSX>WFmjieMTTk@XY6;s;zQi6t?aS6g{Orp%~49E1e>Kd$}G~jLj}~c$11&> zuHMx*VwB4g%F<=9wZ$@a<09YhNhZO_8r6u`u$8W z8|kvwEV*$!8S-+4uqSsfYy8-0<{-XmkIiMN^PN3gBCNilP!U|PQb3QLph?d;8$8|p zI`*Yl{L(UGqUS=Zq40RpeLg|9DuH&^)8%xJ%WG+CO?Ey(4x-_@bV%{f+KaoH_879f z)a5`##|vV3IQQ%G&`LWWRooc%SI4@&Hrsifr}m`LT(hr<3V%rfVM!3jM#y8;!WWJ`ySozGYLmSSi-z2*Z z7rh$!muA-^G4vbwNe2bG-NGW5KdXzfmubmqlW2gcC$(`a1S<$@=_908*K3JC`UH$i zAmElN2#&Y)wP57JsgxzD0f`1_JG{luhp%5bGK-4HqJOWB?!cx2IqK;HwNGv(zFDWN9#HdyKPkr>547IpWK@9K)HbwkJ z4wzJYW)CjVPx`trS|eVq$n9{ZfTp}ce&;E!mOL-$tcjC)pZJWwTAUc%d`Icdp5xe@ zd?pJRnexPBS5Yvg{VbB*w#t8AYV%R~)}us%Ib!=@ z%jDRI?LwS#^@O>mw1o+Nf~9$F>+L}HbQnF4ena6=K}+s%H4&)#Vn$xZVWrAFxHVBt z=@yBK57~g|`tjKFe|z9QUV|wlM#ZHQ&_lHJC*p65H_%BVQ}B_IBC)v+cUo~qRyr^( z`P}>FcZ^1n;WxvKH($-vZrf%F4oBp^F}?he-i>_)_m1!RB^_JGMalWo(eztqb)TrTgRv*(i?91iAIZ*z@hYpd0zlYzTxR7`OJK3o(=DjF`pi$Z6TOJEJ)?; zoQV$@S(4VG388mhmfm&t6b5yy+f15T&4u0~ZL^Z}`TvZ}MV*C8@*IaH#_NDe#BZN{ z{1udTJR=QUfY$1&QG#`iF(K^KH`t~oH18MF@lv`?s@&?%MPdAM2|4^*6M631;42u( z+#P698*9k=3&||>`U9y7LO2$>{bHMe-mA?I--U?6co7)DXs~ThQRR*N<}9^vQw2x^ z>biTDhFx;=M(`s-obAK?k@mrNdwH|VUFFslc%8Sp0$o%Ng(e`zuZtCZ(Ty%v`b3nj zZLcLzJfR$tv@%zP{^_Denj^QCtFYxuNIFFUwT#{_ z*>n{IS*DKSP_grdmXo^hk4ZyHOiA**Hd?!(*t6LPGt+sB^mG|Rnap^)T)%BRIbJ}0 z?8Z2`(~Dt-ySi?RLH?M3+=(A1R1w4@K3h{ew&45DXZF&OLsS)HP*s=p3$mVPl;Q-> z7r#R)m#a)s+p?mQy)%4^J}2$|xwU>}==sjC5@ZZ9NwDJP=rAHg%yU@MaG!*q=!x&b z?P}j5)o*W$mt1V=e!8en#iIkwX4=|8}LFXDGiY@Em_K1_dt>v4*8RZMChL4{?49`CeBus30d%(`HuKCwRqzIpN zH8Nj|j&S5_(P*UjAmdy>T;Kq9bgVy8_r{HKcMIAI@B7nOm}0D5h70wwH5`@~NY?BY zI1byN?`}6Uz1W){T;7{M&P|oQN^To%)S$C7qhnv`KE~Eqbu<=wRnK_th3?mBN?2;a z1m4=`kvt)L=!AbJXJNu=B&ct)X(P9i7twXYdGc3~e@E_C+iB3`@r!`kUs1=`(d$;G zAHq{;;VOvU1Juty+rH)j&JZ)G{?;=gNx7aPWai^7un9DoUVODDvarBh<-UHY2S)Ki z=FaWIk{d=#_@4ab36_LPCJjHnHQiCle^KOkTevvG?9}h$yU5rpN166rlvWpQnAdCZ zr;c>}dka1D+U+ymJ2@5#{Kg7C^aHPL}31-f{FKZ>EAVk0ZiP0lPc*ZPauPWto z?-CZEax9>-W&V?R!HUIoO~>MAuS9QbmV8N}vnhF;&0L9N_a3@9XvU!15`Up>>(TjD zCDzutwCZqoeEc~hh*FxtqvouNAzuj=G*(_u=#JL5x+ zPkt&hf=uS`!kzz|tr>A2wAGZJ9PFyr2^5)IOoXWfP;X!*u#tU|TaREJHc9DoZ)>*~ zx>h|94J{|Q)mYy&fxi4Fd4#$;xJoO{$j;?G3_(&(o?iAlo2i?KF9iE~f1YK>7gCr4liGUVe-N*HZX4y*;4y)^l=7NYIfnf58&sR15Xzqm z+qa*Q_H0`gzh`kat%r+fbwN3E9`b9flxTcOi`QZGoWeb6zwbOE>RFK&$N@$kRT8iK z0Wk}qBr}8zhyp%5FKZBG5?zfQVJu}cecYJ%afjG(R^2=9bv@ksEBuZ8VTYU&@$wP^ z&UAJ^Y{H>V(Aq?fKQCC4c?~X*;FZy2-E;brd`ug6v+J{gFSb4fe`T$$h_Ioau);0V zV5Zfbr{U1pu+Xi%e0QwD0*riocJ_DcH>7^JtAc2vym$WZ@cftPGgi4aF#Ac4dFYS4 z@=II?Bex0W{?XF@FR&-JSJ3yof3EnTM*0X+1+8_T_Rl zm-#%MsP%uxu$l*g{zZsO~d!O{% zP>xd_H2LK9%?|-v9}tMWD$jPBX>(N}ktZTdpv;#KnTh^xeQ%s{E|3JN*44|dE1s>) z07vKSNr&4;zFrkh?MurD<1uh(ajS)Koa8%o1jEyn)@hT#O3elR>dH>41^`Y1}-u1d1IDU zOlrAja8R-r^J(Jz6B#ARo}pQCS|U4rQllc%NjEdNb)jA)pK3lT$Ba$w zu=^2>hk^?ic{I6bWw6F+ALd2J!}t!F34Ni>Akkgnk6t@YL;7WlgMqG&j@yb_te|FW zC0fvIYmY7>3X!8IWSm)m(NsQ+jBM;+meW^cLx+|6yc% z$TEe+yx4=~Lnk%Z@7^9LY5J5O;x;gQz6j)OUs&wTWA??`WsKYA8~wwyTX#JYcJ{#3 zPjO3bP|SodI>)IaDtEH6t3}7QC+(4d32LnX*rzr*R$^Lz&tY%~MKVl4TTzQEduPYO zmBVwVb>=ZQ{hs9Ugn#?R*a|@VdP!t0P7D8pdfzM9KiEGs806>s!op{TA{Dhpl`zVA zP~<584Vfhsx8XCIC(ysnGPv0ejv?aHE;fZ=O4FBzH~qCyL!P;9qsPnh;{3Bp9wK7_ z@;eHB&;rHoZ51K4mcIUFy#&7||ANM()n_Cy0eEfwF>ad-{j7Y@_ z7h*P#K(LYecT!BONUyw*SkSQD8^%zi{; zaj)k~?ac9MKe;Y@WXX+@sv8*R(PO?&SNp&~W);K^e!6sJ)1t-w<8DJG2=CFIK z{11bqhGGLH4>SETwr4v6c?&PEZ=z#eOtt+kd;J;jQSo~yWsZY*#IqAjA`kcZmfC-; zwVwWptd5YB4)(a3=c>-uDb^v&s)mA*ci>u~`yXx=D7qPCH=|z=ljI=bBb4Fg_ z>P+pAzA=S#(VBC0 zmm4b+D>)s<=v#{4?Nokztff2|(9bdt@1+a4j!Od}XKvWd+E-EPJU3XnYM9#Z7^UIE zI6oEAwy-U){*@`y{ZU0?%DM4QrtK&uH&n7dH!rSVp`eIJh51=sE6Bi_B*V?b(tU3J zMnDQ|0VQ31fh@+|O>p4L_u!r<*hF7$i4R+L-SvFXv~i`ai*Zkfp?IZ^EH?M@c$%XS 
zTgs=OYV1eG3_A?XXTzp#>w`G(ySXdyMEU<(|Do65hondndwTecu1b_-kW^s61-~7x0|r+lkyMb~Xu_Elhc>9u=WO zUVvGY;r@}o43l;ke`(NdUvAG?IqOgnC$UmNOP===KmBYm)I2BsH=;)&o|J06Q;7vJ zp5;Cn&sCjV5-etM>I>tOm>2R7u==njKFOno%{DegX2n*!!>TtaQ#dM<-c`p4vkPQX zr~EvgR7+X>MHY;5{LGE`IV=6K_sB>Ig&LB--JDGc!ddx2CIQ-TQBagxbEwG#@|M{< zRcfSKOLqx8?Q z9gxRkq2jLrM&4bN6I1*nmefG5r6DI$U5SCI@@H=i`zuc;p*Iao--UjNyyvygBdBxp zK@0-O#)l2j0(T%_Yqt>F)o(%rCE~%)w9Sq&f<}SC>tdCYDuEVuQpMSCx+>`=luCFE z=w?Fs9Dpcjm~(h%fS48pP0r-NH91{2lA5v8E~9N*TwPxiM(6qMv-msdGvTdtyHx+q zn|d^_p+Ax+|22D?;&8Em+SwE!>#-JRSsrt0o}07mZ8j?r3BJEiOD3N9ko2&K9(E2n zO`zTMTYnk6Gsv6)9#DxKMt%b$pXCPTh^ zK5|Ag8wG2YE0=d`s~~W50NuNG$ia5&kMG69`U*+_AE)e8S+^w6x-U{3yPOm8vl-*0 zXEm}T=Wz4J9;`3k7C5jq$Oe3bl{n~tu%w3u0{7GA#41@K+TZflF29RSR&u4h6w27e zpYDt9b+z<_)y>T^WcKL#43ITYMVvh=bNry6XRCoYPG+%23>d67HR)?9CP4YVwE&h$ zLucg!EKf<#G@lmshwY?hZIh%~|ENVM@xbhAu~yw*k_L6a`7?yW-WnRif#8(;_tj)1 zs)sj#$v%iO*H{Dz9}j&DM!tJSGE*a}(RJ2KJD9q~nVCU9 zE!fQFOEcamlJ6KB*xQ>hS;P#AJhIS;gK~p`h|14!jf^0sKt@@UkH_jX8gxMTxFoPP zC>o9Qs|}bd@|1<}PSvxs-S{(g>PpuJqM%L&ebfV@=43){-_wQQK;oi)vTW;P3`S!{ zgb~!|$Q-~0MtU5>w#Z&ghMZ3V{xQ^9+~hxHY>%%E{Dce8h%~#DKX;HuRNYH$imDnR zo}v(kUpkV9nC2ggzxCc(uMA#fg}c@PBD?y{y~1A9{viRE)b^2_?!mRZYP4 zzZ>x>{&g@jyf!UH??NL9RzQ+K0*tGoDQ$OW_hi&-+7G^zX?uE9zqc}4B)xSsx=RqG z@2v5*wz)IAUZk~n)VIF5zWtTE5+vS$BoT&mwE~!Xp2-3xIX=#q<-g>(4#^`36R5aM z6dy|pcjW+EbFPC?2z7k8K6BJw&c@MNCpYNS%iHNzsZUV0VEYXmx9>AL;UH@6ueVaE z;I19oijere=t=;V;TCWwUOdUjKOhaj*Z{eRf@YPia$l;eAb`VpL-n(YjCyu`=Movy zO8&4(VuN_$#2D#7Oa0ui``a~fzR#PXqxXOn+Xt2h`B@*}v<(Ha*;F@kdj93niV(hFb#VO zpymf822|n_sOtmND0J3}3>fKCk@R0p$OcORWh%S&Owis7mBpQ$IoRQS-e{fw%Mg1heai#-7NlYvSFB)DPrpBAmL!%{$3>>?Smgl9NUqm8b;SWjNukRtxj>@_n1@PCr1+%w*DnjU&;Dw5jtq`fLF^m;cXKqMjtE=mtYo5F*y}S%2Yj@`Ro+xsc!N61d-5f;a?`Zp z%IOz+-HX<+0{n)`?QB&DBj03pzX-m6pyeV69&+*RQgY56lQlB`_59GOOU-i_^(|(q zcW?yB!zy?{3xBH{_0fuAyDQ+uE`eFY^Vus2+tKH-OA7j&^^M&3g;R`{uz=u^0226L z$jDkEj5Qma8ydwmGT?*h^|wuZv5fB9;cDJ}p4BGP-5z{GdHfEJfZIa&*b@nw*2s%%i_TADmM3XH`w@g0_3Z6{83rp^qVtJ$l~YC>$Fzr$YH&S zNI9=oC~e&XRcXmNX~Cp9RsEJ-D#tLKr#QSy0`$(DGNsUt-|4|n_8t0c9%YDwNnr?M z?|@`Vm{vHZr{qR8374z?Ef6a%uED1h_gJg(E)xYm^e0Ic5v_>l3%2Kpl6pr)KOB77 zTdpW&w!**fHWIl5HGngZ|EmEZ-Bc0M9C`#K@pa`YaM(=y>8N?DQIt};5#uL~9#^yb z5qQe!&)-0Wq8}1VmFz)Fxj`2vm6#L7CkS>KRlz&*(PO$0^UlBy_up=5tJ8L@TZ40$ z;m9FMJ|>X9Pc+?{{#L2wV9pqnWs6#mI)dK+ehC!@^==2v%p(6jKR4L0)1(96oblk- z%@;W`sF!o=UnYr9Gpmh%*l#RW&92bbj*&OUKuvw{jiEsJ@!h8fT!6-S04U7Hzx7xT zzOeq@fYzCtTOYz)qhjiQvfnH91U3HlP^9-$_m-N2FV0ru0MDyfMRZ{ocC&vm!h!d# z1Ft1f*tPbn)$zIGfSMj0#|%1v1f`k}8Nuf$lo#gC^XQEhAPyJLF3 zSjDge5K;2u8FAqLpny6OdHCoBWhz2$eB0_YJ} zx1i)7ye%m@+84z@6Oj)sy2s& zTU>1~7l!pT^K!0g*J+Hy4C>E8Qt(=C5|$y!`~1WBfH`A%{Y?ntple84O`s|y4aEzS z&x+M6V^Th@tRTVJulko^ZIx-W{Q0?LrAkjYy6vw37R6y$@ zdL!*D6-TRLV9tiMNpyG>guwB-nIdd^_m8*^0ri4_k!LFfSk0Z#OQ75$6WU=bU$oL@ zmplTD}J0q65B-w4kB@f+6)pxFzo(LwGBDD%u7rkdRMQPFU`2LSB^rE7XY~DQ=E7 zxll!8Kq0y-#J&IZbKkSp19sIiA>x6IetlKLH9f1 z0Wak5-h25eli0+AXY_l8f0j8@)tdWb3v@$~o|d+ZEZbquqor2rL_Qbmmwf8&gq+E+xZ*mfhbrf|VZ7Vq%IyNqm6rFCXnxo9c=Q`( z!RPp0?|aGuNwSo|_mx$YRJ$zm8xnIcPANp$VcKHV7~dFDgHSD0VR-uBb`q>!$VWJc zYQT_r(QCJD5z%_;_v04X&@>EuWj0ySG5Ki#^`H zSHSb>8PV|o4%u6iN}Bb1TXP+j1h_Pr`;re$pA3j<1Y!2 zQA!Zf{gK7j1m|rbUHfLPIpLCVv;AZW=u{}|Nnq(U#d(o4UN$0NT*EiV#3k^yniz&Q zJ%5@rYOK4K$k;DCs`zG>kCmahn=2Jdf7R{V)a;1*7CK%aWIDf*Aue;zns>cK@~%5w z8cV^TG|B-*94VC`9vM_y#<-+pG_rSRmT^jGN10bpyUcf1ml`Wuge;5etl2D>LBHF(k^R1w)D z?DB)>(i?llIV)w(HE-ej-W3_b+w18XJiBS!m)ex(H{4t@ehoqOY0>59#1yS(%P-Eq zi>iI7Z&Hb&PqTi%i$44p#xvK92;=%Vq^o0t{-<>7eQ?*#<%F~mGXN*C(s47-x z*_;m#XMB(Fhpto*XE>4J78;%K;-yEYpZ(J&pTn+{!N}+U{bT9Q%_P`SkA|brItH+d 
zmp_Zq|1C^?Mu{`@03@|)*Wl!o2!+)>dkV=pIyJiMW8dB01|Q$Lu$!AD9mr6EphFo+ zopK@PA`Ti#79|RBt)AfHCH~-d_qftt=?E>=Ft(RMJ!IF%d-ar8NA6x)_r^vi%sAlk@Gsu#5fc>0W z)QB9Jd_rf7L?x~F9&QJdF|(jVwrvOHB;^K~lrUMJZx4vhOdG<-7h=Zi0MD`EnLb83 zTVwC(uzm9-SJhXdOh*af^^b`_W0aNPpCx_uh#8MN8A{7TLjwj`lTW^bN^b1Mm2f;6 zx(Rm$hF%UOh>cim*X`#p5mG$7v~OJFmpl_y*s*^h%W{v(>!Z)tn+0Qx>_)6wM)a!} z?ZIxRjD@BmZ=YViFD85aHns`q{;&w;Ne?snoL6M=lv7;^i{&5qnX707qKet~B}J-@ zxJK1ew(mQ$>^8);sI;xecF$(P;LWZt5E8aWZ}RC_&X4G1x`uj=I!p;;vf=frYx#n- zy%XIQX)w-iVpsc~meW%r2?faXbWPFkH&%cU^1Pnv&{StKt`kpEQ&Ob!pRi@**^oh7 zKH)Y5Ta&4WOa41(G?;2&aMuAVDHF!+A_;xxV_CtraPn1QPfB+-? zBL;|M6?C$rtaPzl4RUp|V@+darr|n!%syZk|LifA0VNJginnjD&SE$aRZ+4q6XzBw zU`IG<^w_f?5Oa3Fj!>(Zz7D# z;{T!SJ;R#X!uHQ|Y@jFzBE1OGI}wo@RC@1K1TjInNRv)%^d5SR^xk_5Dumv95fBhU zZvg@cA+tT_y#JYNKFsyb_k74+Ywxw5a^Jt_`d|Oj9Lv}%2jEqf;!`~I!NdGXN_<@# zAFME`rcU(c&>zG%@(=HC>JQ8sWv&ViEs1MJZX-y_lWT+)rbjN`War2bGlV}-(hl|< zrBn>1W#nW;@59#Jo`W(CoUIXE6Ygb zG#|**B|;0K=`BQQ`^C~mzn&1;z8h%sT%ZhHoHH#Drn{M|qSV{z06UtlkJKa96XXM< zJ6faI^oFAf6wUqqrYCN8r1lfB%YAlG2y&NJL&W}L%123-)w=Rs89hCD#C=qX5{U&^ z?|8+k`yqYBPoK#kwRV~yHIn*zvsz{EE)^x|XkntE(DiYdz>sdoGdC%}pOPv<&94O1 zm1v`Yzsb2*5jA=QU;6mmOt-y{E}@Y%3Ngr zY~Rq|fq7ZxHI$>0sjCXbbE$h9C3`B)7Qfk;%2^!*>=QCJ@j{!EqC(^2c>7 z`X=u=m+ei+eck+m$zCfw)U&wl56WXUk`?2;y7r>9kgWF~MfGNNY~YxSk*ZuZoFeWNdF40Cb6- zh@lyEx^>|mzGm%U!)-ju%!QbK!BU=nilqn==15X{8r|{KpHdU@HHq3kHlp6O&8T9U zEUUVaZ{49*=Dba2lV4+ODLMDF+b?~{y>+!4Y}WJ>leMf5WVKNa17oym3CC5AY}JlF zktul7IBJ!$dG>#;2LFBt_wShifjw*C)HLzH%z zPax;RE@;)Q?PUBIo0t7m)gk2(%oi7}7{B*E8JZsM;cnV_s<4Tigx(m) zI{N+$#d0)MMib=YPc*Xe#ZH&Ed%3^&_a`_Tq8o%|4(wEA5Lj)>PpK^xX^P*iF~28e z(MBPuvDYHI+uW2!Ls?wV-ehi~X4I>b<;tQg8HN)%!WY>*VE=9;?6M0%{1$Xo{l@|s zhRsK8wK7nB%xcK{I8%|M3#D_}M-!kMM)vKtW$xPJ@K?3hElYQ2o|t-}f}ZeayZ+3r z=hD2C&8XlW%%L+H8ltHmU6QqbB=GKDHr>)flVfa#9Awup{qv~T8}k!<0QO+uWN*~e z+^K9^@)f%MmgMo(XiK=d|P*7+{(y*edY=7hSDq-`IV$o8}gb` z&esc-s2lsqYrlUI3LJ&2T~kw$dsh1cxUQ~-Nh6c_L+=XO;)>$%Vr`NJz}EQe-5j9` zIi3a`MiDfzEDQX>3k8JChqFFogtOfCZ8OIAh*MwK-S;$@|tk7pQq%v&_(Wd4;pzJjW``wi+5i7Oz0h#mni!Bbq=S zm-tG9Gbc9zJpJBmMqxC->hZc`;MUPr&$-B=qoHk*!y&pYrcXX7m>`$nN)^D?9{NB7 z2K%FAPtd}(!seei%_qBCWO8_Z(ik)vB_~79EY;fA(&%|}#DT4Q>4LJkOVmkJO~XPo z0<*)lE#2SDXlGPZtYj$JoYuJ8(bHA|A%P!eA;YArtxQo>sf8T*c<7{hY<{Mn)Peu< zO$rCZ2Qk$Kiei!21UvmW_<}!Fg(dwG76v6O;w3WEq3iwfX5uhQ9C5gQc=V zI(PO<6huwle8qSvgo9?)5e6V1>D<%zPdiP2Lw2}lL`7_7Mc-R9l=YuJ0O8p ztki6vj8~W-e5w{D5M|ncmsI*@53tWj)_3*ecmKnaE_{7q}u?y26x` z)X!eVWG%5nyv%6a0tU5Tg9Ic{KThUC$bkr(2-*^8_r45dG<}Vl!=9aNF`W4Ty6XXifb>vrXB z2F8=@N@wy_jc=MhM4^R=>CeYwP4PiI1;Qup2rX+HDa&n<8&na%iL`y+HziK3>loGj zjrj1CcJy;$i)>&^RnI*^XtFj*NIXODP?AMpaz@`CSpAqiI>Tc9Z zRyG)V)d-XBwx64rA$(=Ky41$^i%t^8`GVgO6mGk|^Wfa5HGFT7Q_*)d430}?644v>))|2i2*`5)5`(ker&4$XzJ9BjYhEh-2C;>4-x|l z*B~^SzbU@6h{lt{Zcpdj@$k@UIu?7gLPI;rTRt6=q_KCZzR(znXn}baD%7c{E55MU zTOwBPpS3DeIqY5{AZVNIw&5<`UhZJ)57{QZaq67Rj`Uhe*j*rtwzvPNWrH`C z%8E=b%qZMWMZ9k(o>Mz14$p;E$e#VuCXBDq2{8vRtxtb?`7rRyCPKY&A1wPDmDzB0 z&DUj-<#y;ne?^J_&gWIsj7CJHFY(lYjXXM1Uo+nFuPvFCeho#tt{&B!VtIZO3YXQ+ zF(1oyHQT>BNG3#FURk#DwGz?jM1h}zEP)zhS-b9#{9Gow`lOaN{O_l*l+4cB{lM}l zXDn^rcf86@3tjgJ7X@-BayM~L6o*v462)K@ds^cc(7)Urg{?r+R)r=q>-nT?6G}g?E*fpvY%Y#I+5OHTKV@T|!F_QAXwanRZQ7~lT-2qfmgeg( z7f*)>?TfQU2xq+UYnZduZWAujCCc&gzl6sqANDx0-AMJaf8TZhXfs?+j%uO$NP7wlTYL&^g32*}yyA^7!*f5R=T?}1Yc`6?Y zZoyt+<-WSU{gob}3)=cP`$eC+@AR(M-8?I{%2oN~VQs0M0%RPR#|N6jekvw2v*l_c zfi%KZMHK79$s%H=g)W($#d>=`j>5y@Ni%+6ixVBM z2*LM<9DKFNTrDjpbNB+?0klfynGQM>zp^xIgZCtaH+$UT43i{$4IzS6|?ZVq>W+^IezID{Rrn zKuI%05wxxwWZH6X?u)U!+Wv{qq0%s=NqiAmYh3a=4754C69DH5`PnzEL1-08;#0aaaW&Bb-Y(cvT8 zuKar60pPxA`}p#|x5l2k@)rCsQ}-g{`I}=YpKxg)*h!bLVAk3X*)DV2DOBS)1*9PM 
zXY}_N<%0}nx2n5og9!`4uH*`mb5|ozi^y{n6c%-dY4$>5VGgJt%(ETus}PQF3q^L0 zC-0X1W;T^g=V4%{%{d*(3VgTXxrz>$V+{xh9UDWm&fL&a{4aP zSFXa+z5JhMveRpa{kr4ovb%!98=z4YRq)ZY2923*pSdOBYzP04REmBN z1w)y*`-rHz-CYVi!KH`;5)i%7t{rSg`-7U3fZY^Hcv|11cb>To-_1?>(!qXl++f8^ z+RGN+6d3Y5R@XW}b6!1#rv)IR=z4AH?0Q9?Ubh_U*{H2>eMM%cC~CG>0;L?EdUHD5 zdzN^jzu4?Gp8bsk$5&s`N+F^FN`#s^4p-Z~uD%SKGmmy<;WZ;zSYaPWlOJzBx{-1^ zj88vxJG!9hrqF+RJN?D`3Q}PYII8YOPGxvC{nYR+Zw(p&ew=Bnyy*1W0oZ6`KM-jR zYwumd{!tSa7-Zh7X(ibBMM2VnICiCr7YFN{G8;bv7imO<@PFEU8kJWmoRIkN(4G)V z{Ylj>R~tQN;(a#5y+(dlwM`&Xu@$-X;DuxMPkMp%NMDDiPrDRF1sM1RE)L9)2cKCk zfu!^UbRawRXjzeF?XFbA)NK5_(F+IBC0!<*NagV}g(FcV5H$r?SfYeoPXl{5x#QYP z?afZiRSK8hyHk}{oW!Y_dS|Ak3c=Bd;SLT-Kc2uzc8pY%`kj%@4&v9Jd2>#=uNg$Y z+kK&vMNOG9Je{n#{>7@xO68sCH7(s37G~4T7RPiVRy&uqK>?VEUVZ_N8;!cuK}tXAH;W?(>qgiY#D?+fs7ZTmeXfNt(K zsqOZi0WZz1T+A}-SKV#(z1W2lf;%!rXGB3Iix6vT@AO#vbN021AxJOdxo$W$OUw$4 zg_cL(ax!O`W4-aiE)FWT^abyHTl_pLlk@1=&Nw|Z~0Iwe~=Z&xM_cqCGwd)ITT zNmK;ubp})oH4^tm8MEXwRXb`QmG_`T1gLMJG&EQAlbcEap zN{jFqe@HA7Ltbc3q$;)<|N)bKO24&AG=T%eIF$i#c8_^JtRA6 zA!Ca)2%!>q@%J3sSXQ*SG$w)tg?1a=3dmTDn`Fty3?}0F*g%hZCYG}e3E>7HoWBg{ zV80XJ8r{eTM$0>oKdiPcI0B(ngbHY*pt$C)j97=cF_` zA=ENKTUY4tXNCXC5TQe?Ka7-}p1*njcwfo?(K-N;5Ck9+Kz96* z>z)Z0Q0f!{YLt_s+Dq{z7#YXFryiHXU2Hw7B(mkjboEOH@$xX6_A_6;->p zN#YWc@Ps0f)n`I=%Dd=lsn!#?i)~G;TGz5`)g)YW*iW2wu~71G<*Fw`ow0z=%IKF~ z1){j(r+n3etPyajq|yw^wq1r{Bg<9OPh7q`{U`+^eQ&Z#*E{Db((&;qM)c3UeTb@vngYq9aC^oybe8fdY!8H{-=$0YS6}V=#{nr}CS)W?-;w^5i%S5c z6RWcp)lAleB!4rNVpO18=~#CdwRDKr(D4UmJXDTn6_jF5)qw_lIXRP}goY!Hx7(^SGEL23pF%Ma#v03Qs64 z6v#PYZS#uv#t#G2U%L5Yv0nQVCnZ^x3`+BLY)x!qX7W?YrN}+ZGi5eUbi`f~vL`|K zY-L3QGvdRL_C3bWr#7#@qWZ6XPLs=p8OwvDhz=ZMAB3ryv7m}h8ZNrsAMu*ux^sL} zYWZ63rB=EF=A`ngCX#gNW_U#eJi=o7$BMPl63D96ZOnVyBrKh2s++GpV((rpJA0?C>N`70l4*i*l5Aq}IH*TvxHc09KytDesmzw_nH{vvKW__)Dgc%ez|_CvtySv&^6p43&0aY~JIDZR(% z?A{DLCPkf%=P;`4hU+u83<=-KOhUJr@HRKZDwQ1%r%AB$jkBt{Wv8NsF2Z z`8>7&v~iZ@yes>IFBg`?1kA8{?N7XaCk5dAw{v~n`R2l`I(2PN3M4wEqHPB(K>1kB z=tYVSW6jz@KKeVd`Mnubyv>YqqvuHoel9fX`x>ZhptEKVx znEyf?j%@2mA@Kpr<#RWn(P5Z!|L-k-`YK^;Rv$qqx^n<8187r+bLbU*C3obx9hMmW z5DXJNy;(+tvIAxI`MVn;)>@*l0(GDbd?E>|(_9Q5$fh(Df-iOiZ5Q~LW-rkh z7KiodH!dJc0ICT6Qf3xw^GMZCtYFoJ?*H`bfQ^l!paf)g^pAQAn;uTiYM`_DltHKJ zUDKK%E{Zo-;V$ox#Ypl4elD&WEzI|R80ITnVfCX&&ASWI3QkCuVN!oudplR znsPv?%?1H=)edJi+(IPGDJHH^VggXZ4ZmGBo`oCX*$ zDUP)_yPJZVZQVz=h27=l`f-j@SVrLsTkVrTpt7IVLtn1|8u}}`;vd=MQ(vG7=Mdt3*i!jObm0Qgy#`hT|TzT3LMbke99jb`f zzg2*ZU(S#%EsTHuzUpIAt!Ou4H7wl@zU`+%A`!WGOsbU>xw1+bn;Oicq4M_L4~P2; zuR%wS<*i4L6V=6}oqcD9Ok83qEP^T9! 
z%z8IGnjOf5Plh2|l>Tepg<|fF&LK#_w_>Fa@WIN(1FJFZakuh(obeFOLIt&EZF;EHuSg$t(m3_pnbQnx6(~9US!Wb*pkGFNgJ0 z)J>oTcZf#5jmKx4weE0H(?ChBq`Nc$x5$uC_MwN1=3Tk55o@v6P=qIu20PJu#|cT} zh(MDtvi5Tue+=UrvdloqqaW;VJ)J0@2%6-fG@BX2Vi3WyqNv%JF?q-hoSk`!fGAPmL zLg3g-XKnI^0fP z$lLSj(dUl6?>5iht@Hd>y|V{oDEx5|;@DUgj#5l05f1uDueD3D%*f0V!Onba|HX zZ@}gfLas5cf)o#hDl)<5S28&$a?%A(Md>7y$@<{Xf%dqIHl~j}rkQF=syFhiH2heU z2?EOBy=o!t69bsJC)#;v{)A}#$$syEo4pb&yZ7TGkc~n8bTF&+4!sTnUyCIz77c09 z_T6fCriBUDt6E#q<)`~MSSoadfLVoPBcT)KL8bEi(FuXa2!y z6}VcVirHFjSc19)tI)3SOGrRJn%=mdyhtp5j{Y)<3rQ_mkh`g~63?T`k#j%Icx6J!#V2Vc&^F!Ktr2=mTZ|m54G2STOy+gRMpO-pse!f>3l%VtB0oBn zBF|QYXJY~~|%K;B~IAGJi11hGkB4t}2GPVUG)r zI>MyZKkFLH8#yyoXXkKy9EMN_@h1cw%AA(6crDHwx_?}+9IIE0Hv5tlWMbK?kTb;n zJ{4~~P|s(oplI5eJp4VL1?_^5(xa}39gJI;7Ul^P)%eSqGRft{lJRs?{#Cz6hDz6{ zF|k^$OCcowA${GMu6cyry975@DcJ{yj<7>#(}~xh-%?5Mn+|H6FdMyNw+7Dx!bp4m zE=t<*H31t;6Ox~OdNZFitRLH71dRujC*GB^jk_+$?Dvwu;kVtPC5fwPU3q^uV`w?o z!SxicTj%=}P%69ia^?tk#t!u+m#>ucRIq2a)2VG^w^lAMd`vhv@-Ol453#QqY5l>% z+0JpNSAP&D6n+U{F|QR+4rp_uCX)%-$J(<@AE_c;CQzPxHTYeE6wKx>j(4^&Y8h=e z8oU_Yiz8nn9TW>YXdv>Z1<&e%g+1z}>Xh#WB_M-^#N2#fO8)doCEx-psbTjWPj5?s zA@5yb1Aj%w1Wl8*S)iOAD8yvb`-1LSY-{>Sz)K&E6VKN^=yaSpejx|5WfNMZZGA6T zHHlWJ@63c%z173QhcaV`uz``z$LMU$NDZwyj?yk%&8J%swP?}XG*anObE*Cx9m^o# z4*)KOaGtTN?G+p*Dvhr%nLUmL`gkg$1KiLjMch>d`$5q81pRX>R4}OIvml%2 zyH_10r!_#?Uhd^l4a{THxS=EW_f(P5?^n1=_l_egd<|(Y58X?P4I8hgHTzzR9$Jf8 znmN?rg&#eSdiy#mIp&cP(X6wQmpmHJE80oK-~TLGfTVF3((y$?)0ej_|L!fs#rBEI zI`ZGAfa)?SrO=@Hm}mxRPnWLIK3 ze6Zi;bg`CdK}F3k;Lzx9UaGeH+)}Mi-D%!h5y%dojt&G=Os%{{+Si1ax|@{mErDT> zs)PBZ*)|jGdUgdBo{KxTv|`1dbZNM9)A9n9?Y!EplNsMW5?p=Gxf(XhjkP|-mN7RV zwKgy_lHn=&4Wj|+E=|PvL?@YmbOvtAUqiUZTfP$9HU<90nkTdpsQ%&P_FBg{K&Y0wwckmNH_3`nBa|A5+ZU%Ey;cm<0~)fi>7QS>#4N95BXgp1n)NhP zs)qb#p{#2roLEi@KQbFEaSQVfn% zuE6xcM>bry#S~_j@{?de!6Y65PKCz-*pZ;t&M5DV^!ZzJYt$?X0w9t1d?%G2?x%D7 zQ=$~zUe3n6GCL-V{!2dN%t2OW+>7HU54TC@T?oEbXNL8!^Md5O-Lr>1^ANZ+snhd$ z{5zTO^1B7-Xh#XXh}wgp)~IupnEXRF{*d$-HDUl;_`^~XwYgZyw;-;Y~A1jNZ6 zUWre`@o@}V6^5R zjBi{p?vR}_QlC<%QiK!;D+Sfxv3fgowxY266-k?$X1QN4ce7FKdThc{J46mOEGKf% zlsrk&yMBww$D-H9-tEWT-7nTK1DkkrluR(8TQ|Uc*tk8H_)~ko-|LG#wY#oZelPwd z-l#sa)p^}NW@gEIP+>(kU&hDqO<;#bo;qfshye5@&DHDWs!EZexs*9*3(_1!}a_ya0TBdnHniIGVZsV#1~M z=L$G#g=yZX{G%sV&F6IT@Glu6oP%Cv-RboJrsbws@IUI%zex=MHm5{5G-C8KTWS@f z!s@|Lxzx8y@z#wV9OKTI_V%mdIQl?~o!{U%P9h8}1M+3eQ0p>)Qnz!%M{|DFGQsVE8(lk!H{dSK4%J0VW`Fl?2+x?apSKO?#V>_lu$U+$-^Zc@tc(@D zHqjm7=e`X)N_}>RJ`JE~z-KCpkl%AEay-U2HBfH0fkV0knuR0Y_E-r18iWn8F#1ZS4S?iv)Y`>=30pLBcWIwM-qT|YQQw86-VE4r;#Qovx zy%I?`&T9_A$o%Wq{vUWnm99`uh|!bdSNXPA?nQ2y2i?ysb3^n=MDvltSf5xkn|rtt z1RM4XytbL8+kirbRJa{_nkctbkj*L&j^inCwP@?u$Nb#1@zUuJvbHmHU|oqL-|t1o z4RF1OX<+06?{loaurJ;sC_hqmdJO>Pm{^sGgrzZub(cfANea zhBpXg>r5;9WCW6Ev{<;8e)J~mcx*Kq=i-6=sPe6P!tEx5f-Nt6v|{>u)@Yc>PEv%= zhCrE6Mg^R2d4`xG_p`m_kqCE=^3!$pUlOxi%ty>H?0Y53D76A4smZZZJdmHMy;9}O zQT`sxR&OfSz7*U~2@^{EG3>HGxg5Y67+-+wsdapoU#-GNlneQ`0|pOiNB3#bdK`sx z9Xqcrts)^W64Yu9GHK8&V`an(oF(`To>x&jlgl=zF`V~)N!+n@YecIy6`u2ubNuN! 
[GIT binary patch data omitted: remainder of preceding binary image blob]
literal 0
HcmV?d00001

diff --git a/docs/dli/sqlreference/en-us_image_0206796813.png b/docs/dli/sqlreference/en-us_image_0206796813.png
new file mode 100644
index 0000000000000000000000000000000000000000..479a4cbc210e38f4b6cfbef99c55893b4ab828be
GIT binary patch
literal 45366
[binary image data omitted]
literal 0
HcmV?d00001

diff --git a/docs/dli/sqlreference/en-us_image_0206796876.png b/docs/dli/sqlreference/en-us_image_0206796876.png
new file mode 100644
index 0000000000000000000000000000000000000000..a13b0bb97ba1020d8ff12004f7531cd581da6f1d
GIT binary patch
literal 17792
[binary image data omitted]
zd-#=H|0}IT6^XO23Z`Sh1;_d#AE#xqvWZkmynKrXiWbAqr`wRxL8%On-P|OlUag#$x#8cikFPs=Z(=SBf<-?3f{t;(web z|Mdg(8d@1)syti84({kQHQ*+#3RW;+=CEBqjS$D&%-9GmZo}_0s~{OQvFe(A+LuS( zq<<#A>mq1&YpJvxd!+m4+$dYQ=O?2vqc$_^meQjND z?)778pMHSzf0kCPdSEuhskL4Wv!{TZN9~Q52RHV**QfJ?sU&AMLVsrKR_@5QDP2_m zSrL%+4n;a0+7Xr=cln~RFNozSzAvULm|yp*Q?F7u`{;P`jh_A3B6x#PvyX z=uAO3H+Z1Gi}rR2Ma}XCA!P0$W-flp!&TAjrbZz9!#t{SdrKTG)XlO3+>I;2*(#qQ zm}wz>RMwDdDEV99W!~_zI)2S(9%@X@4XVC6vD&(=lX8pcL8$SKZUAY@V`@eRA=TlT zKAKP136&qZ8Yn9!K5@W|?We}u0?u2m(UR~A1xMnYED7ttnDt8A+(oaeWqS6aPabF6 z5}%wg*I36HUC*&{IowkgXrpRG#BIVSm7b&`^K19L5uZ5qW1_8KB8O-xI2IXtF(d)x zb%8P9-k}S7hr7;k&=;;yD$R{W6&2O-E9?2LsSh}|sHjzIAdS5C*Wfw3dreQPu8(cz z6)q{k+_#%N)uhq=jnGKMomjz5NZ#;z(xyJ+obGGK)RcMpH3}LeW@Tw9gk| z;t+uZt2%ygJ^vwgeifLybp}k`kbc-Ey*z%~>E{*`V!(+=_4uFE}+JU{ACDk6nDNhQq}w~2|0-I2J*tK+}ZQ0%B; zXdf%g_B_p2uoEaPbZmDmROu>g*$R$4MKdY_tRMCyD1XZ7IFIQw>yfrefFzH(BRn&S zxZTTc+JVyH{gNMoKnAO6aim62Cfo2@1DUw;Obd$!-_&$%2PMTbw6fpRNe zts3LN$V@6?q+Fkw7tU7N$*i{&S!U&@pU_ju;>MgUa;r|i(atk1&dzr|A-en7w4NEwQYo&S6w~|{%eZJ Nu|t*zvHP8G{0I9B3ngM3`+KOq*Ov1$~xtug~&3tEG@Li zR<;%XnKhP)Cb=BF}9*1yES+c{wIUIH! zc4TAYd5-)y=cWC<<7{m7_}x3y54f6j)ATP|QDa1%J63KF-;3Ve$ailScKcS(xmBNb zZQF6~%<<*;em?yPqU7$j3#U$zxRxFuUg;aYw>i(QN^BvgHh-Svg$MPUhQzd8N!KPU zhe_z21_{EG>eO%2^jvcEicoo}InBui*{rm^z#J)}I7N1)6OAj$Zi?@@b?dNsi zE^h5_I=b_h{yzyUjyX>CqSHq6y1Hzd7rZ5MI<_?Cxdfz|RED|89Nfw}o9yv>4Z7JM z1KEg5sJ`Yxp^U>rUy5Swp^ghfL93#r;D1TQtScsQ z&rvsNzgql-14E%0?FsT@B?X4^X&d9abua#WH;V#)={ab(Thh)+b zXK~i!D9!2FBswWH*=Nr=SMeva8y;CA62Z#sd$5cB$c>FIyIUmjIyVQOOVKCB%jw|5MV+QNT&03#aXJ_40XEv#5>M)h zFVd|fdFY!WU?Fw45FOWodcKL)5%iQpWEiWgpsZ6XfzfM=1dgrgU{8WBH!E)aDb*s)5 zQA-0G>(`4`$T}Z7CVYq zy15^D!M?X$^6qzgoZ{}8Onpt6J{3eFZ0YRGc=+fUi#Q`4CTdg~4kkEl8Lf*P7Ppu@ zTgxpf5)@Hz2XQuNqu{mA}@%cu_@HWt@EzY*9enenacy;P6U){awE=GG>BT zF0uSR0XkL)l8Cy3GlA>mzD^2_<*EtcO~Jb>B1H+uFO zlff8%i1KInnG53&@&7lo9&UovGuT;?XJ1IGQJm{YdbVtAqASH^viQW9x0(~LRD;}L z#KA2GBKc>lnm7gGTgBu;7ngHB*;S*RgqGGw^`Nc&c5%hfI+OR>dltEalR;YdjpCcf ztu1&C#LkUXJW{kV<*@L^tpY4UzZ&cC-dLk?D+l(Llto#D)Cn~fabjdzq}%oNg&xkg z$PZPM#J)k~GK&-l6L5H+o0J&GE0+4mSKh|dTx&Ippjm>lGd|H$AIe57V^2C9c|%#8 zW%MiXxsu@`h`Lrow`slQk{Tm?&7$ zZNb@JieU|*vLM{b=>s5z+HXf)w=ZoDB1Ce$%|C1{L4P-!Vyo^vIx#UDn@ z__q-;l((}9gu%XAC5A7pmdWH3vnbm*pM|tyNxqRasmc2m&q1w*5fj*1zML5mS82_| z%2|>i*c0sThqBGF3HxOYmh24~7YISmFxV~PFrv)Ajp(P~$a1lwH-dV&(Mcy6jWJ69 zWepfGd0sJx?J%M-E7fL3L=%45 z-s61O46CnU#vBO4u&!Z-VTk;wnsx74&8?Xc#qgVc+1?wzBpHLOYHeR&m|xcLheiLe z=pPpSBSmL~o+W!)bB1?asrOJ>(Xx9=^JNu|w5EZwu>Tij!B*z#c*1bjsF5<_h01ko znXfkXG`82He^@i{P=~zD$`y$$#x&6wb=%%JCBV1+dBDQF5K*-WL>aj?S4c=GAa#})nOWf1k4)nfREH}tni6D8s9Y^^!V&O2LJ z;}8pk|8#&l_TEZpnC-4C&w9s!f&St3>Znj0KL~XPn_NGZ>xy*Omy~=d`?(mVnirdK z@JN0%tNH&7uB@-0_{+6P957C{ys0|N?X!0uSUo!$EU_?u3`OculOC%r*$Pv3`o?iW zqS7BEbwk7>F_yL6TO)?IS<$;5gK~m~0*CAqC1)v$zQ7Sdq#aFvFu zV;u}px|FUhz`ICBHdW2T+AAdAD z+5IRbrEArKa&6xtJ#a#`r?EPrHcMm0@xt55jnk9*3`bfKsk7(9hu)9fWxZ1$2}NIT zW@G?&)Z3gRvK-qYG;x4Q{XpyI?52?rR-PiZ*`}M-%uBP4ie)t8^6a zIr3v7xL6!ej);FOgyA0>w$0Rko};nLp0sUg3s1!Z!fQC)uhi> zY%zG7l=tD`;WmZkvC7kGMov~|9azqf7`PNe`uxZ~Y4pK7?Pj07i$~n<%Iq_1JCPxA zs>Xl6^tP&zioTk_=6@X20Yf>}l;OR6NrniDJojQqupUN}OewY(Y1Gc`b(5O9LdMwC zJP1rX$0~5lh#1A>6n29CTm^NLM%(t{bTu0H-A*C)(uv1F=I2}OJEh4WtL(<@)gqhzIxyG^;-#B+HpqO#hp$SAk!T2E=FnjXG9!9aW z2@|@wC?kj)+IXE%))d`^t0Yg|?Y_t&nywx~yk~7H&7B9#V>3Vp* zBnuPVk^&Ni+1?AsiOo@`R>#+Rv(TXuESfOG7wwJ6IWs@nn1G0C@j+kK0+gQMuY zn8<}+Bn~*op4?x_Czcum&M_4eEmkG&HsTyZQKH$v#SYG~Gs;U?M!$*4J}Fa`CNl?> z3BS%%Zkb{^qy6B)F}(4|Irhgn_QyH)$2s=LfBVz+|Kq>?6JkL9e-vU6bF3WkUPxnr zw}!X&yFK5DMB~J77_e&N+>F>=)_f`wjq|@Xw@<;C5smx1!tu)Uy}Jr}U9%6J);lpl 
zVTK1?M|&H^yS)pmR)o2gQ7*nZ$zs5g!f+jtHDD)U>OFbEY0^TQ9Q7j`Kg=5B`*|kd zFStKWpFh67Kfb=;I6baGYXgduB{S=9lwf8pq3?K5zteF@vzq#%#E5+uZOfXY{ ztDaKcM~<6!cNMrT)vr{LTF8-pZ8hat#?forz1t6Y#~W6TgpiHIqR+9IXqcOrWq#xc z#pGD7X9WNrv!;7JrjJxr?Yw(kY5u?_%GYc;uNo$PEHvpc>$t>V^ooXzU=B*~T(X7- znxK;?DG=P56>Nwz@4x9s4t^wfL7RmxDV#s>W9;XyuL*GXI6d9Z`-0(|Y~H>!UQzCY zINrl4D02>r8F+RTRvz!M7EoVSyDTJYT={`3B)<|0BT30|56|CW+1jFqs47452E=JL zxvh8!M-;>X@^eclEYm5{#=V;ullf;T+0~-$OVAs^CuaO)-C}+g0__oi`1u~i>OEo?0aNz%oee951rL!J z!Bwey=Mk^K`~~H>N?4;61~9oL@44mqF)I)p+&=-)*g4kSR2Pc_&u($4-v*Yez32cq z_e#!cl%f3?OV9ta^zVC1{Il!-uQB{C(Tu<61LV;@Zt^UQ&BX}xhP#zXEqhSL;!qch zMAXFC#>N&=yCIsJu3qCX_FOg^#0c{OVuxR3mLnyR@P^vXSuh*Yt^!3Q+Mr*0OVp#Q}2!T6X zr)J&nzHG!B;9}EVfC7M#Q92V~G!r}eya#4}f?M$j>NWV+UJKvzoISIQbpR1-?_DZP z907b(&v5m{*-Cfi6@X7!8=n+I=K^ZSsml7??4%By9xQt6WWp&_KCIcj=gH~W(GW2r ze2doIzhVw*6hJ*Uh%5`90rmXBX#Bxw%y8r%q#FZU_Xp`VTfpJ|fH4`U;6Gr@8A0*i z4lhi6t4{4y87fD7Srf~~;=C5hDj1JJyiIgOvv9rn!-_+-+_mNAz2*UGP581iOMfhd zSdPT%%kX0Wg_VeWs5j9!AhAy;S=`m88RdP!(Zy?B^mm;I|{aE3aHEvi{zVT>tj$3REJPB|B}xf`007l3-sn z1YyP29|lxZ##c^?WW{D>J|j)!U)J@vekc(ZeqaRM9_<<;C_vFUp|s;#vW?;N$bWW3WX!W;#ktIJ9> zW{)5?Lm;`>kNkR5Sui?`w|~dq7i`s(r_4!|MV9#R5h=O!&?R*(aE1sZA6EDoBE2Oh zh7UYDpCpKSjZ7Wd$<$97+Ib*Uk}oa!sRr`HZ+jZG7V{FzzPBT!-~O@#4N#Q3A70P2 z3xpk-p`Rev5Be4H|D7&jZhgIL(79ke3nb_Ik!8&#=7|8D{BMMoc_-p)j9CQ`3r4%+ zgC;pW|HKHHE~1t@&T$t-;TOCdyf}a#ZFF!zHz8S{}VrQ*e$rZOAJ!n7?LD;{k3MZsJ;=?x_mdj z43*JQ>66M}Bs&wBzSNC`R%6ks~83XrmQeNapIt$UBjdZHRm61NBARUiX*k)E6kB$K59|ZBZbj@vPi;dRzB4ywntVrF<@Fz zjjMOsDC_06LRZfx0h#t&MxXn5csOO|ZSiLT{NM%kH6=qOLp%X>{h&o3?$%6WH-8b^1DymzgBCX6!PrK62%uB+3!Nv z;FSkp&jaIqHroaka*aOy*yv+&W}Cr+w4RlCY42G4{GS)ob|U>` zA-7+G|lj$=(RliJXk?TNQC;vHeqFPr{>R+_?_=RdF zJ?%DigZg=m-7YKoJ!F0Jyrg;^ny76t6I)JwF>gWE5P35K=kC}5& zJ~j)EFCh$gY)*XU($_?w?V!>_I$`qnp-jubfiHz`Pad^gXw94q5NIA<&}Zqn>9ZTpf>^yJW`VI> zy@zd?Ugmd*C)>yH@yFH`${h*A*iKO9v3nn?w3b2?DO@RPi61#}4)uG>m=dBG! 
zYHD5;U+z^ZC&g?*1J4|)Wf@;a{>DJj?e{=7O?QTf_=wQ=WJq`~dB*p&TBq=0OU~nE z0r(29?&0FL<=&EwH_R`jzHr?xyOB#s0&g=ell)i$lm&@XVfzu~h;&5k44jT4NxaEe zNnA;*o!xqNg8XM8l|)j{@ye0S;5ZM8a(byY+P!7V#K+D%A~Y1S6P3I{z4ldFOk?+$ zD9`fqK8FGl%bU8*xY+eo@|~wP$%^*)(9;{6x_MN+HBAl+u@lo| za(A9aub4+jL9)TqCOJ*`jR2-OvIg%V3*!mAEz&C7)t(dbIfj$MLv5mE1(LoOYsfo^%xz>{7-bXa)N|9Sszt|q!&0H z5rJet6ga@)0!x6qq9#x1^l?-iT8W2}kYo}&syv{1Pl?rYN_1B6v{##q}TMrj4@yht`vv3-h+VFYTzcm*8!h@4RUuWpU zr`1i;#*-UHekeV^5;-}460>qe>h13x6BIT@Uo1L_kP*>nZJ>{u>rO`yUco@d%8Z%} zyZ1v){+G71RpnqV3ahI8Ysa(f4Imt=z47br&DNQrV-}s+!UWDIxv88{<3bp?WybJ0 z`ZvSlS6a+go)PY3vEF`dJlg^R?qsn*{>GiN%sA+n#f&2ofd8rP%EE}DYrAeXjW5gm zr15uCBo!$=A#?a8asv{vu>Z5{IfUU@?76=+o@J8&-B@fA3mKrBaau4Vo-7W`GRv@o zj#&*mGQ|A|T|Ksl6i9tM+c=RX|7x84?TE~_@PKYC7T(_)&oYyNZY*XpF%xEK^IAz3 zp_@P0{Ig9x=$OUSt1|>~k(`x|L=cNgPykq7D5~16>OJ*2lDKuRCC`x~N4iIBYWiv3 zahY8qedVLd!qN7x-#4HMeNFkB4oSZ~Sj$oS+LC7(t*yvKryyGdiaf1=L&6)*a1$UA z9#YR_o^bE;paM1fxeJ>YkYnchkYioU?K8%#cqrc8!zaBq&VTlnXx_~9ofLqHds_gZ zU}Xk;Ou)a@LQ?y_I=0X*JUk9Ru$Z_rZtPW%YnScpUFT4CMhXS%WhOA6mAtBg?@BVg zphNN?cB`9=a-$hqb0e4Q-Q>x&Y{(@^0N$tcrd09bkt_ydt)KuzM!AC_JX~|S4fqFl z0@$Z~sa-buIyAkmxObc}86=D5PZxh^@rN(|oRL5B;*ae7bC3MF5dVL#9Na!fc9}Gz z)l5$^>MuNSY|z8d!a&iN>H304+GCLz#n0jae#xKT zFe^?BfcY}u@z-{w(_3I7&J<7BWN4np07(*6p^eu_<2`T#R0hGYKT8-mIVP{)%a~5; zO|Wd%?tY1MO1ugS{aJ`W@Qmr9MGVSa1nVfUK_ru*#oL1V{j-unWd1y5gen;9XGCP1 zybMB2@@TMLXR0ZjIJDq7({nfo(;4IzVZeV-p#08MlW6&1dJ@+EJycRq(5fh4P!L=!GnFZxr3`Q~ zp-WZJO_kE!<;e7npl06;l?6>hM4)69{9*`Va$R>yOJHA!!7%-qDi3WB1MV+-m0;Mj zaxeh*6JTPJSHO{2aaNrh=`%HAHg%H83d-vl z)4>D&wC2=_pg?4ec0h)o%PI@?XYS`3BoydZ&u}=Ofqj_St4HkSnT!dG`NCe|_;13x z=b*Y+_R2jlopH|Ch?+pOAcdM#WOG}<8k`~W5pCpWK(p_xR3_a9^~ z3I&ToZC2-CED8zO{@n89tS!nLnTtZoF5-`IGZ!^`ugc>8FE0ule6o@}eeQ+Lkerfx zspy#)<~UGB_eg#>vz+Gy9Mxf}$4aOIhZ92LQ57Cv#5YeGt&zo--gq<+fK@Y5CsRf?7ZlgJ+sxqn}m@32z59@76U1UM~>z@_nKlKq( zRh7Oc2&RIQl;CEdg`9aJ4i!M^iS4tQF#6(=ZMobdT@`KH+*jD#Q|NW}Y%!bF7mdj! 
z1f9T=ElpKgT0XfD*h$YD{^&?@I@L$A8+E(ro}+X* zsVkP*mfkXWWu7`72~Nt<<1%p%uo*!|wX^n}kFDw`b34D2vKBeJNNvGK?OaEd81nT6 zl+{f(SLL^(9i@+|HD$jI2l9gRS;ll(T#F$vvT#b69-2fy?*MO@AH>CL*`~As#C>=x?G^7NdaMC6ZHbKfNN%0&0>9H z_@Yoq068qMll-tW{#NEWdW^oX-p2K)?O2f4OY=lOa%1V_TomkX#8Kl73`ZN{B+5e1)c-kU1oLG2K?c|xldLN(8$fq~9I`C^Z zmW8w(rX8sqp}J5PK5gI|Jn5=rw}pNkQc?`xEp6K+4W9BdyB*gt_`LEw^ctujyo%_I z_U@B?yBNjYyg;vv7^0sr>8MUk6zHhzAE=ZlL=#T7EjgbB`1ybSmPcQPJFwQJV9B=I zbDzs8xlRm&r+Lbj9N$@qZIT&>ppHcHhTsg?8j0}DFPXZGDQrzHK3$y;uhoLh@UlIz zOEWF42u6LAX!@z&kDM`Z<(UaoW|Bz@+X|or*n`i2(5NxU3JPTuIG-1hJ%D7gtXf{8 zPC_s-Z35IU3}J%v2D(o#Upj~Q2+omdrpL9XoD?a4e!bl!0bu79U8(1ohc^a(t>Q#A z;@Aw=o(<`e@Iv*<8tspk%g|CVIR&%F(ZR?G#C(F-KzrXEUhHRWn8Cq3p(RPAV}{+}UCTJMeRhj_hS@_^dm%PSfIz0YPI+XJC;5^l;wc*;CDx-@6XcyHc^lbWlnn!zM`wHnQDtoBUTZc#Oo`Hur(u zf;`J0^_B?nd6AgEAi*}by=+{w@_Wq?Lp}!?bB-FzW1Lep1xbl&dz}*D}E->u=p_c;C5f!t~vJQ8swe(7_4HE;cGLFml7r;$6O|JLkx1w3Wn`J26)Py z(D2X;C#E5iN`!{Nt2>kW#`>iZ^Lj3^89F#cw zldTKu1$QEb8XK_+bd0$Bwsmk9BxhEaxV{6$(bz~kh{#41fQIJ0wRf1x$UJB$ET+73 z8BizJNb_kjH2k>kqLesJ>@Q!%PI2npH4GkJZw!01Ih5N3iTZ?57V9g)2V6S$H-M#a z7aB@`{9a(c4hIeA9#6aoW{-|g>6&bLXc)!5PXZ!Fi~2jZ9XDCTeDgz-D?2PCA{h03x|p zQB6?+wt;sU$Ds&6r}kUb0R>wG<7a&8*tRe5fy}!25)fFIp<&9!u1e+x&V`2DSEE-z z#XcF+b#W~<(D1b24lvQM`+>)<6815T-^DU5#;Or-F3146jdciDR!0Ll&&O9U{=`UnvXa6nBCW{s2OnmJgS$XGW^9iB6hYzzxwQ{!kd~5Dg-9FLK zyimDmI%}}8{X={2)O6j6yui|cHo==YdE;9h^uHpQ#H7jf{T)vh(tDT{3Y`}Oc=2Q| zq|+@6sf6-^+=;sx@h=TFj$8o<)9)lf&@SuX=ZSIvmBe+4Cq?jQM2iNT)ki(ywQ90G zvvI*Te>;6|>xMfuZK>Vf^f*G_Ir3m$tcs$P?Y3r_4Ka_~%AZ94{y+uo!uLK;T-{pB zL>MmqjxeMF!f^2n!f?3Rjy^;e#VKJ+kSg zfzosmmFu^{mHOgr#}}>5;TVb@?MeFM_WbFAn&%b49@8hYXc-OpTX}<3go+wf1zg5% z+nIZNI+wea1m~UanQn-kbTpstE=&}yFzuw%=^xka1ljCclnw$dl4*>IW%=5;^-kX} z*b9EIjrZLKPQZVXR9`xgQL<~><{|7uTv zM$L6m5mQP0}R= za$*DAIXRPw6X5)zK2(eAIpg9zc(!grEsk)Tl-w;O)j;2VE}dr2p`xr(9B8M^^Ho`qp|jkQgR7yrl9vzeOEWIYBOZO*{Cbi3 zTIN%GGUCiv^dGq=FWDWzFWWfZO5ex2p=z-i7aD=viDsy%^osj8&}ol@Vyg^mc0%0( z0fYPhA-!+EI$fF3_vK4$qLUO}+16vUx%V7-wNK&k^}`?K2wU90c8jm^oZ9z3V5$C; zAfW3Gy;OCO-5SH&v%ByTN#?2ceIWlSP<6}eA=QkJgPI_z#Q$aAG1UGQ6B?Q$F)4TOWFIB9Y#doYuni`16OSKQOeMSONM*m z`NSY8Hga(3*K&sE8y&_EdSv;#YkY``QnHCEAiKq+77B}6#23QttOPFn6i| z^dnCVtz8HrPEq6J$IpPTqprd0WE_=(M--LEtIb~c}m7Q>dvc-M1x`|)t{!2hl&>@~A6gV<j&*sEs{9TmS)0tVc<%ecBS6I7$$a88ZWi(~z~pgR4I4OXm@IrZ5X zbl~6cK$p_T4PzJ}%?{}VK)c=(2huQ-tr4@Rlp{M#?!=c}955j7bwT;F03_5&-7j^G zjTnJ2O0|m}Q&BYs(iGfTxe8#DP9g(~Jt2qbw?_=KJ8)?(r1Jdkq~`{^E>H!XTBhpq z{R42QK&Y89-V;ImdS`&gN)IdTN0bWxzjkt)g2Rq4>8AE794P^z^>B0~!-Mix$U!F7 z{xb$iiZIpMHcu4v)jw>=AkF_IHss;=&Pw@!!aJo4a&Gh#P%*6 z(dh~urYyUsI^Ubz_q6!BT(3~@zu4Qkk)dA|rJp6|wi+&?i*K7?8r#lmssmBjNzyIKu5GPxoQY zt_9DP$giF&uyv1s#E@;{Jtv9eFUr*Vr5H0-j)j{-ki?Xia{Fd&AxD$x?i7e(%~r$fjuuD`~(b8&I- zb4_6zBZQm|i04_--eD2S6kLA)8V&ooSEZRP4;dk73>pM9JKm3vu~(2^Icu+)K6bxi z?$tgZa$Jx+!>^qU#GE0rESTZQEN8r3Sh1@Ju6s{Y3o{4sKM1`2oQ{e8Y3Jh%B3Ir6 zmZ~aQDu~db0oL+!&lSSK6N8LJ0=3i-W6g+yy#j3d4CkDY8GFEt4$pz@nG-=mBVRCL z^*^WcAH?`fOIFU)g_J?{NFDHhAY<)?iaXNibjBN@pMbe!ZYXYHW?tq9dVXMPvTU#o zY`S|HuYe|j%VB0ZH6~Faj3^UkGZc)r$#{H$;i$-h53Ih;V1518{ ztKbxK$Kl~46JNM0;l_V7_|!Ydd*Rh6v_j zSD4{|Bw&4$IUmH46^jMq{({1MROH1+lQMs+k2&@zGF4R#(n5SzJ49kkPQuTlijOdP zPz;=+GP~n$GStTRNPfvNB#|%P!RpP)lmaCWOx=h$wf@kd5S~GbPwW}J$s`DjN0R#~ zj36)OEE2)Y9>0J@FqflO%!lk0nE0^c&H59r%Sh3Xif0bFU(|=CjpMDCvRW$!OY7yg zFb?N(S$-s@59<~L;>*1GVuA1uR`*BVnTed*ZLd$IgLxqV<(Sg2EhXK=Ek#I)alB5h zJM;8FB+z6`aP3v3lMW;miC!@+CMxYk&!@SGdL{LT(tXC_IsC{Z@k#=2Wa7Qeu~Hwz z_mD@wZRdURnNgMuc~aV^J!P3$o?0W+y|0J5_ip9SPBbq<^(jUeWIpoJi5(4CoBIGk zw>)uMiQmMEAt2g|b{I_xV}RR<|XEJeQxPMHmC}?q)PcOG zEL4sT3AS5&@2$!Qo5F)bnRM0BsS$H6(z6PDWEQ1PIS}3`Q1G`cFcVrF1!3+4vJwXt 
zd8>ENT%=39v6v3yDa)lR@E){j=>|c2xpGm}T0nNt^|&LZ zZt}i1AmSY4Dy>690W^aTU*O9)~j8{Hit;DLpTcD6{#A#P_*Up_kTZEl}BHxR4 z&yiI(i78e+9PdZ=lTfb=vvrmRl*^Cz%yhYSa8t}Up~^nHqJ@G)DgaoLzjSGoJ%x_U zvj1fy83@xeN3zvsB+vZlUkFM1?luH){f~N{5rW@qmy!L1*GKXh8GvJRX!uS?3k)cC z1kTv~uV6_b$@jWA=Ci1uc;O`Le)v|3<+OVN{lg2!)av$9?-aPGIn7f26jA_*bRF*I z>+*YY7$Os#snXfEAFrK*Ldw-yaXm`b0xEL~3ae8;yIE-$A_Q_>bG)+0u_mDww*Pk zN`mfmBeZ@ubM!K<3v7(~49B?*7#SQlVQoL!CEH%g9NvNK+0m8w$StK1n#VzYiiwO{ zCu2KxVb1J|n-nAv`}_5q?p4guJJ>aR86_3cF05s&=gq8khU6C%_q*R2L^sOHc_oen z$u!AfPZ-5oizJaHL@tTsgQG}2Zsp9!9DUFaf)B9OGKZhvWUR+(Ry%2vqXZRz%P<&3 zvV3D66oCc(#gI|B9@JO#yAFll(ra`WBGfW3-0xyg=n@cOB_{O@1BZ_j z0+1JkKRwqiP&<5Uu2W}Go_|-fTIz)tW*YQo=3`tbiF?S|;{geIIa3>2yY{{YPz$0N zrgUn8k_NyDs1Rq~r!3!aU<@i6S4Ynkz5Xa7KyjjwAi|Lq?7a!CQIB7z<40szNvfUnERM&UY)6FzRbnvHPj)9S_kCL zQ-pFl*o(3)6TA zb9kQ?Zo&v~mB}}nor5=8+2_~~wdsZ2Ql}P=_q@~`Z3Zi$R=0vPSQa11_zaK_j3+sX z({MA;h8r$&U=fqFIGUr?kDL`Ql$V}&wWll+0#>0g#UW&hPhpDZ6nD8XrU=Mwg^G$M z03Hb-Q|x=jP8d~->?y17{kT!@mBO~Mj!Wch{=-)A2JIpTn*0W)o(#PHzDIrw07|g% zRqqf+D5FueO0a7;`FWxaB%Cu2(T`VDD`VkODvS|^x<`nym%T~+NTp%_%J&M*2Py?! zRy>^bf-McM%!#fXUVgZ;0#yM9!G1cLBr|c=5Z(mRM^9;luVp;hsteL5!Y<`0uUNky z!ZV5Rpc8cE{NM^3F$eXi(2>1IAF)90fMRfm8VB=)khiAK&W76of*J54y`Fd;zZll2 zXjsv~@g@Uo`V&7*j6yYie%TZGr^ElrG%VntcnS$G0BjFfo~E$R?g7;dm}feilJv3e zJ-)c3$jMUZJ8Di5<)B>lBb#^UHx(1^gYXw~kOS$z@gJmr!Iynf{jIW7lLvC+7)bG- zpkq;BY}Evdal66?2pP$+wE27~{U1js9WUq+TLn}d?WxW?JLKauRWEPSe?pYHacM_!$d zlxv;7!+c{HDz>2s(ee!rAke_^d3!+$$CHuGk%<&D{-I_tEfD%KCW%xMV^m&+_8moQ zV3Nrt+sZobllMg_fZNBnD~0*8^bF}+%&W_Y8;OsRVUyo}sDT;w@5o`c`bD%PymUl~ zj?nLXLI`wQ2U;!$=1^{W69#`3y9&Oyu+)>Wg?I9x;ZEd>tW8R7uE!@L7lNDRn1p&Z}WS(2xV~lOfVc(Cqah2 zZ|G+D$9BWAb}x)i(CHJJccf^JqV zqyo6ABlC@$neSMDtRus3H8V~NfS;{5-UL3^F1AIklpp69$-_~y$!=wyV@bG=B;VH~ z?+89BWG1u^LEcbTT!rRtUtgbnit{_5MTf8w;uhVuj~S?kM1|R!B_dmX5(RELCOgiI z;{fitO-r<4`te5*ney}EpE)PUF}nc`ZhYxR9B{Sf%;ikL`5Ue}5w{9gRCq6P?$4`k z4k~7iBJ&I()iGu+^yZ;i#Q)dx7!T9mzkv#e|ECoEnST#b?k%GZ$m_GtkR9+3XAs+F zUai($I`&yP`KemNs9hF)`E>QmC-cx{Md9VB@;>qr+bF1npoKCq_ZdJG zd*neiaB(mmR3oCv-f=$s8D5au)(NeYP#Sd#Pk2U~;eEuw$NO{n>n0|sBB^s227xKi z)YRw1Y8)x1sK$PXo#2!va6$sv#)nxgN}Gr)iODI2{m6uUixPmAx;fFHzTl7<3uSP4 zVh+H%*^P;z@6wBEYMWz{gBZoE5wy4zNm&9AiO6K+@DG=zZ9xh_Ud%kiizXLL^D#33 zB7tWW+MPjvMRC~DG|ej|JH_`xwMRibyPNhTR=)a z^XlUeoJ;WEL&ht>P_lvh{vo-9TxUdkBkbyS;5F80i(4lT*wC0Q|cH_ z9|++lqjC-SprDRLm*RvU`HgtsdpNx3jXVU@ExNkoP9p=0rQQc^+Q4o#7-CnMBs#Mk z6$;cQ8d}5;txFvq&N^Xp3|S8mN$leMthXB~;}DaZKXXx}0@mq)?kae+=O7v(Ee&j}A8)^={XkYJ|v5dU@kuxgQy8ld=L4 ze-exc7Fq%BO~RX=~efm*mdC3J>)RsEYv}>@Vmvc(-$X2 z+-g5$AM5Epn6L_p9kT16A#)aymGm8YrP`tBSaQ{6!XbDE6 zNk>!{=55aOBZF&k%m^ks2q0$u*nM=zlPaI386xxTL}T}Tmc)Bth%pyVm`_ioCt5bN zrS`PWrx&`ujl}kpruGyci-lNTu-_92NFnGHW?(fhYB|O5Q1lk7wVhU~e3GfG*4HK^ zgZWPoAcxe`KUzC!95c_Z5nzPG_sbm7L zS`|Nj;hlGFY`w0N&o}VsKPwyGb>j7SjB};?JBG~ z0Gp^yL8`9(N1JHQg*?7viYDl*y>p#9U7jeazYVQZab9`!X{9PondeoEosU$uJEmZ{ z4|+IWbOCu!<;cXi88;fOtGoEz?Bbxk_}Za*A-6P`!d94d&TtiIKTzXxRwXdr$63sOWeleGrH&EsPX5m zC1TGNd?xMbJ2sP7N_0JcPw*kD)rKS$LTnxMvpoQfM?zmr*G;3ot4%_rl#iWOZ3?OflK2{9Zm3Y!>_@)_rng#>YfI#t|4`R00m zLeG4{%9sNlQ#uRu=q+?D{fmv;Zaa;A{yestV&;9_&h=~ezzu?9EMC~7KnhBPii&tm z3=WnQ8;+A7^p|2USlAiK0mek8geMXT35TT-E+dFL;$zt1$NS0P=PNo_q@zxK@0*-X zB6y08P%7A!X{Fc>c0!hTGa#uYr;OC_%1%h%v|F@|{&_83c|n4k)v1s`a|1Hr!XB7j zW&}!R(zG2WPUNWwr8)^@a6f(bTx+$cUy?zg{7_ZJW{+=!ZJ#1?osO1N@iiVjp5{~U{PPBrL^AU0!dEwqt1eP{bW~JC#=N`x z*J_a9{&c?s0FQ5&1>%);zR7u{o_h1`{d*jS`}dW%HP?)PIu_0Eqe-Q`+#C|S-IH%b zL~C=1U!f4$W8^jYx&5eTyJuo~x^t!Tz41VtLvy%>N(F7jh0_}hr;^bGH^IZ93IOl3 z$<3d~aEg4lui8qFCI2PQ@p7QQy5&;H+a|U1qwN*pI$NrRbObZx^GlWqX=)LtY{mythPKoN*Q-r}z2hHB`0vR6U1Qcwc|0#1$%9HfB*i!<9LP-Y&IpzrL1?yL 
zE&c)~E93F`U(&Y0b}8G-IR}M!K0a+@RE>--w#oBmiRovSl!FY+}l-(V8ohP2}mMnd>PVNRLs_m02YYQJT;?}pVMhWmev;UGfk?WNWf-M8Xg zpXA$XP0l%W3&%pZZj@DVE)=huH2VI-n>LrR3?@x?os5D0&x?y%mbDdYwVotvRD!E^N~Q5AK!^Nnv^$_)@lH8NGx%AD62#D8UhcI_F6cGP zk9=)VyV7uTifxoh%kzRM1GOhKgI>$~-g9A)r}7em2~d0X&<#@ZMdqsxS0QIOY}~Z* z6$R-QG>}(r5_su|Lki_GOs98yZ)eyCu9<}={YMLzhGjoVJ8cF&=66g-cze&6u+ab0 zpLfPs4<0aSim^MK1Bjc5>uyk?zN!?~^@91B$16bPlukXeJKh7U36CccK0V>f5Ga!t z?p!pMDmNLAxYoncQ%nqH05Fqqya{%6C=FViW%Rw{g&MNK<-Z)5SO`(xqVg=z}FmfUq88zi~tn<$b7ux zI5>H%ybmd+IU@_&U#W1$41y~ip+SQjSGjwJGDJ2-QjsM*@KV$bV zl%FgRq~bkAN_p`yGecUHiu>vuA?o$V+S&bswZm~OZ6^4}ju6Fl)QwJtREII@*1fMxtJY`xd14kh>l zZp`J~f$t9IKao)}IJHVMd7Zz0cP7pL^O?*8L>orTuldZ1|8A3-gW&cH)pS%@^6eN# z!ZH6($IubUap_6sLY(2hItaN4E!V?vOxVur{`2x$mvUe+x>rXa{l*hAMR5@ zeoDp%clzles7Af#d6DK0z|EpfWs7lu@tkxWLIN!A1;35GzYA@Sd`DFq@%YULp~dfg zkf{59DMxbRaL+f^Rr+K=H;_Z;`l&T1K#VO;|4FteFO|=oCpwt;oh^#LKtzK#+6#e9 zXsqz*iSHy+kZKc6mLn?;=jr`b0RnlvSnEaqdkZ{@s5Z*#X`OJJhQ(s(r?vEh+EPDn zJWF!-(p1+F?yyw?7wWDV5eJCG`aNruSL@x1qm?v=PqX>ptZFOdD%K+`%K1Bm5* ztvXPc_Yd$#O^Om2y)kVW#%5^#i0cu8>-3XoeN8LD7qghAQZiVv=?}TkE9G?9KuNIj zS>1C=UG|NRlW-MZ&!^xfkj^c_J+?|Zlsmn@N0qY|7hp_+wEUK>Tr)i4_+e?o!+Vyj ziS#g5H?t}$fMQ6iK2Iz&!8v`9&n;LGzT>70`AVWkh7P5@0yo=hVbEr_{S=}Hpw{9# z418td&}OEHRb%!5z&fSnRg*wZGn1#EW}+WFG{frKfGrrRxKm1FkP0qA8jSnZc_(2YfS3&ODU=^_A^V;gL=7nP z2<2{m!HLT)S@CPV{3B4HMa2LeVM*afCB}^>sWHwQ@9jhk2yKUe*HE(>fN6}Q&3>Bb z4pLX|C6-(=o4s@)j`nQ9*k}E{)||M7P8;9@CO)O-9|DD!lBwiJ>d~PX1YnxuXoDh) zcF*uW0)5z!6-RR+b#doD=(R?=UcD4~EkMu^$qQUc23htVyQ$8HEKkaplX-X7tSJtU zb>>+`5Z8R-%jKd!n|H@hX=E4XwioTu9OqSbj0%99H#qaGCW!n0u7BT>Z!IY0%g|ZE zA3u}iV`vPkBa5z1L@Bf~aGE&9t1O^c%(B`gaAtMHYX2ZuQJSs5nW=>nimbZvp36kb z!8De0*$|8H@xJQ&4lUXHm@=^vR!62WA-39m>KPuSAa?e%OmIV&7RA{DrxF}(Pf=Wx zKY7A%+9zvs=M(Nt&VpvNlH6jZrk@<6;6;D?utGDls!X#Q`dM@sFMT=vEJ16gI-IYe z3~xSNWE?vAC82wDtHM$X))^KfN!%}dx`Nt);}i?Pv~gJ2g4f|`)P~>W+i}P{z}%JI zZrJb0b=}Sv!Xg4;{g(v@3`PlSvSC2?W<}U4!mDTTJKm1~dx>Hb>t+7~|EoS%+|q0C zy*Q45>uju~f&SlK#>st>-{q{adYt?GGkd2DI*Y28uB?QjxTM%vLj}s`%1Vw!Fzss( zQdm0eQ6yg^b;TH&grb}{=@jCuueWhOVY#Jg_C;hLMj?4l$ZUt+fV zJI7&tzh!N@5AIMT@E@?hiI*?z`qdQDid z!G#z2?A+;+^+$(pp}wjI8{T)4bJ%0U6<-P1eKumokR9(-b+vr#jw{eVEWt9!*;CTWcZUs)KWkgKF%0Spt{Qna2gzGk>JH8KOJ(F8K5cP}nFOWnZ@W& zOGYcd(}IT_xMldVdvR$g_JPk^m-u>kT-Y(&8&bM5Al$=s{a%vN`njbB-P$=l%#U^P z*Q7XWfJx@1;ybb(<7Av8UcS*vO!2q_^!^SgM2HEEJ(T4wMYfsolJ!m>lUC~Affdt? z+44T)M#f}W=xz{^;Sj}N7(B%?#cm;?lmF^1svKPWiR+*#jgf-V57F> zf+prS&UrQ6UUi-*Yw}+6G6X&@M;%Tc`&EZ>t37xr*%7a@KNW1~Y9%{w4MvlLv6kMn zZS4W%8z4A>RFmrk9lEQGm;%dn7vqBQZ^FNr0uLe95&Sf8!SMXK5&>}YEAKxLdm8!- z%{=w)wI#O3*!c*(<3vIo*DN74*d|u`I2gGL_QNyERu?dMA8n1XgNIhiiFHGOt#hco zehtLgrn-8%Y>C<>wqu@HP1TuC2K_lnP2Th)I827_2Q(&$G`bM>U9@t1KjP3Gnu?SA zsQxd{*`1F~GY^5-Xk`5z#6KX8zy0isOeXUQ#B5$EA3wkc7rr+y)QhZ#)_{NhmU(-V zk}aQxB7t!Ib#JEOx7t<+rOqD?t3Nw9taX_{o$%wIzlk(^Q{xb0NAY$$gO=kU&3pcE zIWr``xt#xM>r&oC9_mGZe8%m}W+bfi-~G5Qd;+gl$Qk3$ddHyh#*^`k_jp!&g#5wo zIc_EcSj2CR;F@oa;1A7NMhJwNC53474JEIELG|rB^#1NU{+aXo(`NrBRo{m}B1?@Y zJNe?tHA7m>tQJR>#;jLg3ClbE#1r@bI;yd5xOek@v7I?LhMdmun&gPVx&9-jbMGI7 zHZ%ibMS>hk8lCXho$hR~K-zR;Eo^B=6WD~~TZ65p3h zbUMRtF#ccfw0{7~!6%0|&RrSso%8+MKZrt!Oc5ksjF{l4pCxZBbNFvJ8ppL7<>(V* zG@txF%m7PoKtc0I)F9>iiC@#i(udp296en-=w26|_Y8Io zdzW#0@8}LnI`-KVdK|ub6{w>4$!&bFr9d}UOa1|MLZ_7$JPGwK?QT748PBz-lS7id zC;C)o>2xN;`C;dJ;Q1SGuq==W%ahZ_D-rTccy?AXx{r;6Wsh^4JV z=-AO((%Wt+o-O1JFI%unGR}zVP&TCUp;qkyq)O&Od-w#kEXtgkA-2>E<9~{m%=u8- zK_RJJtdM%<Mu{fL<;$! 
zy_s?fylo!m_AetLA8I8Aj;_*&4>usEHlHeruz7B3`wg=xAk}sm#z7Gu(qt+MXurQE z08`&3<%>ZeRIz@48Oo<%47f}RXkwy^djw>zzGeW`c@p;IBGSPokls#xoc7(CFmxT{nNUWmINy$;fT^Vux9s`JSozu{%)$NiH9 zi~(#m5hjM~7}$g)*XFmxOL*U(6VJ%F@3(F~6ZkIQ_esH45FBe>#np&aG`uTHB@iOcK5Uz~{#y<)qM*%h3!bG@nL&x;eJb5h`2j&2@RvCRFOxr}-RU zVwCcxBS`*lK9Blk)13{tKky{5b;!7{UDvV!q4`y+-KI@FtYHJnMeTMVQjLn%|56dG zSN{rW`)qQ|&&@sibc~;y;`B;a`#Zj$cBw5P|LyAbarxeNJ_?jW+KG>9fjBU?|M;so z+-1?d<@en%KTPF=bO+q4HkX}S-Ej7xX}=;C$_;;{=I-IXFo83>A4}`{aNx|&0HzjT zujDu=a0F9){^TqHS&87_8!bSfY^U~|Twl9V`tz0jW$gKt9?J+hg`31M2%pr0)3Rvi z%D13qI!{lmgp9UW_wOn9*`QT&JHBKBX(jx7>X|98Q}pDKPgQczviO-=38gEO7Ny)> z@D5l8ZM^3&ZDFp&f;``X0`(G&F4`oG8a)_tT1*XM`aTtR-ij@oIXS`iZE3X)%s)-rY@z|xh_n~F>MWv;9s6iqC9Pv1YV zW2u_Bg$8ej#Ea~T_GVqI+owBK7jg0u^nsHeZYw^^37v`NBbV0|n(`(K60X1EA^)-| z)=p|zk92zZ{GEs3*D-F+=ZQ~djf$Tl0H5{}2J#t_LKbh z$xf$U-yP+&8oR&1){Nh1_>*ZUZmsOdC+jGA4qQf^uR;umzjWE_)D7bhS-yQ(FIZ*^ zu(m3Fcw>fq-!(w(M-W`N-wG4fVQ(y_&N@2{yS$jZ1M#>vfays;@S1UX(_t8{`?!6G z-`F|Y&&}=2rO6YNKOk%Oy}8IJWbLf&M!ym=N&J`cujDPT3> zbj{s?1K!EqQvI^K=h4BQjyOyI+OhFcT;=>*Hw@qR3yy8?0e*+CWd`D) zOH{FE5~P7t{J9zp8$d1MAv3Y8ZfC(`Uygg1F+vz4EZ{L84S3C{(21R0N=Wj#%Kcfw zX>=-5dbQ-xGgk@x_Hmb$KB{Li@>u$pNJVu_c6eCWUW$K*vVX@gnQ`+)gYTOeXFE!4 zn8jU(J#l_;3*}I)*8pl?8=+B4r>c zXL8dJ?FrsvA~A)Z|k-K=sydl6RLzd#X+gi%G#s&cMAl z-ojGUm&amE8nPk<%oWf`SPn#AXIJ4O3r3@2PmO=kSJd}AJ65f~^Ia@f?NC>l$r;Of zh_fD8b@);!JI-KJbeN765%j%M{328&e}{wkdGK?(At|B&uVGrE2k~8`XYVC*izTEO zO_~xdQu-=uWtVHG=493(9m*xn#Z9|N^<^I&uOh@}-1i=#;Ep@Mz!c?ZQx%D9{GPJ` z>n;!@*&x>c`dVlE!+Q{DiJnP|5BCouGnGSPW_S~vTAMlT*DY+Vlo#5e&g=2%O+I}l zW_I@;w**t zg66->5m@NysaP`0tJoIlTXmgst#;~)j!B@{JtfYzQ3$8R$hITN8{jc zQ_82OLO5^=b}LP6CUDRTM%d6t&>Ub)M1e9r6Wm9O#O?HfNP@4s$+cjDs_0r7auEYMWeX?QCbSTd6pDC^& z%64|0ClW>69vZ-%XHc^t5Cuc508?m%A{}AV4n6SH&@ey+uo0MD1|^z99>W-YlX8VC zj=RB3%6DjrDcu0Fb;w*C#e+QnDSvSGL6Iu-=o}~HEUV-uCz=B#@Nf24*QCMj{;O#Z?l(T$ls68Rq8p0W%1c zrTD@hRL>!n^*lFlrKdHPw~prfzA8tSZ|{YT~JuXoL6pzR2WzWg|>^b?Fph%l~@{#HbclrjP?tw`6a zUy#sAJ1!5v1L5$ktFLJzjf3Rh6}WJMNzLJO?F(YmSs}2cnAoCN_hidMtKa6lQbJOcveRMA+KI$4xds9frGl17`;|&$lU8u4{P4{V&Md6h0%8*=_0@ zefa4~n_r%+$;^WvzkG`oWb21`R=u)2iqm}zgeNJNs8h)txKQQ1fOf!a{;?K|Hs(qb zz%8|p4=IQ3TgX-g@7)(RZ^;cFgimk0d#R6W6P$C5-SpYjY=6{+!r(b>om;t@I+)hh z@q=(V{IPxGPYcCtoZ%Jm0iEg8l7`WG*stL*gfuMqTbX0+w1X5eTz4B_n zfaehO3g|gWv4($r*k!7Z@W){&A^Ft_aLN*FZ|_8bh`=|5yK>L399FFS*QUfo_~#!} zt~+)nI%=+@?x!gK2c`J`h$zFJ&+&3h_;o8x72qd_2AN&d*{A~QY$dg}KCq#~ns3aS$gk&T8eC^}mSS^gbZpwJlfaIPN|a znpuO9F^wLMIgiJw4rrC0d{LW*0;>ze5I@5bNQ8gzIC?>=kjEfcYkX_Qb7eZyPvc~0 zCeXq4gQ(TJxB*EgKp{cMc|Km^d&j;6|!nX&G6z>P5~8oEW4zkP}z~6Z>;X z>+Q^aI+z!UZe~y8(z1i(w;a=zTK-G zj4@$OHy{H;V^xX`|d*&P-)!AlZiN*AVLif8VQYCmgRAiR)yKkRl!(5Ra5acoPG4@q@_x zg?5sW>RGTbqilbuY{6#cS{v2OG>ixisN8!0 zaQo2xC@EM4D3J2KO@mUywk$0~+GxrdcHo3Zo@JMovSUnjg6Jkn_E^0SDZN8kPlAfEe0Fr~(zO2?&x&=Kz|YmW3b7 zl5aiTql0-%bUQ?dlXEOU_@45zP2eNmL$tM0h@gc${34kBfsV#~pV)E9l!o0SFb0RP z(0uw}bbFc9mcwv`9VVfjs01KXD4eg693n_aaI_SGh|4G95fb=}YHA2U&@tG{KeJ6e z+H6FM$oD(GLLM7JIjsYI?FI)-f#Js$Ci-U%oSXSObTEArZU+isqESC5`g28p%IMF- z^gmiXK`=6R@}c;7ajMJG;-*RK$&rqV^Lfcs!^KI@`yKf_Mm{O-VPiiCCeOjYaN77G z=pZ0%CoZ2_?9u-6MEcCAQY!Z-!@gv~o{?z7u||Y|gIpTnljVdQ(ko)ca2aFRpU5hn5dYBDt=l$vnAK+~cPVP71^UiS>^yarT@>5{#YR(I z)HqrU_fb>kd~RwTskpQPsGO>+UE1U0(ikEamo3MBpHGrN47E%1@rum>6QIvKwh^PC zc#~Elb@iI|_qig7^#74;V?PS-gowfYhVotKN+EVuxo_U6QLl!g=C9@+_anHIpb+2K z>Xo3>@B7)aeaOvX8b`PZP`4~d$Wbc@ z1s*pfgZYD-TW}B9i*TvMlwOh1Zrcq{<%Y_xxst0v?o$C3G7ibKg z-=8~aAAXEPBxZI9V&TGs{;MADVsOzR2}H!)oG+P9Q-xdZkiU$~wWCtdsCA<6j0dx; 
z@`PalV-(40j1#5!FitfH(~F&0Gt^vbOCgMwf8hq3g|g1dF$kc6l%z>TLREG2=?Uh`UE(a4|RH+0p=zE07iCul&p9zue(R%`3mE6+K0d@Le>J=*c^DQJ$}&pX*3UMEE57TWXr-`8X*rAnbUoaYy+YiS@ zUxXm1HoY2Ha*;-pDS~PX5xr6il2}*kL~n*jG2Q+wd;JO^2`nlqY>X~?=5vo$IX(Dg zFNxU2?(FbA;UP!`>WG2rGF7ECBHn|_EO=ko4 zLy8Rfq{bWfsk2a|^sh$@Aq$(&c?W8TAmh(V-X%^${g|*NTluco0=CB)I^OZ(Y(1Qo^MBZ#0V}R;m z{@neGFMwVkV7bd=enP0W=ha&Vhyb-UL*tANh5RDSIPQ2rR2+>#pDX~7O#S3W&I>hd zIV_2y1$Em`S;sUOs6gt}@J zxC@BNL!DR86D?G6_u1>0O%b#PJc{+jd@YiA!vGzGhMPlfs!Ac}ZWoj}@9xvX?lmw35)8y0$%_y@sl674JaQx) z0zH}LFUpI|4}NOJ4WGCCN*L<oK ztIgy~M+d-HuY><>@T)2i*MXA-S}G7{IP(H}SYogXz79d+@IM#k=fWT&^;2Tt zaQ!?=KXuGc<@^80trWn*`*?7zF}TsacxpX zT#v;-VD{@Z90&eUnrgBjdYK!5R}kU7#thiNvSv)|=X5I4+mRioxFULvxg4%xMT%ld z#+^9a`0>XJzuv=r@YB9WM(AxW2R`-DzEE>F=o{N-oHpNa3>u<>zp(!-A2K^E7%24) z4XMQ^&lBrB;yJ%sC{Mr?dU8`$8oBWafF`z2Rz6v`nU<%J0^M^e&!LBe z1X4_D@u?{XfYjTeSKI@-M0v~NsS{R9o2{14et9P!?6#kDdgpp)$kZzQ^1~O<<0>Tq zP`GBeKci#`dEZO+ayWQ;b0#d3Cm3vvQZgdBoA}EE+R;7A^Hs`nq$|_=7wqd{ASc>U z3QZ{dY2f-{X=h0CMjZi^c;!lTu9`61Z-Tx&O&HAn0Y$ETJf8hMawftVEeN~v7zCS6 zFGY@(b}W39Cf)C{84o#P|AZ(U=9a~(3&`NT-;IHjdKERudraU!+LXvw+kxT!*5U$a zh00sI)AA8)(cg}uYcJN00$;8MSh}OOq6g3q@q`B^*BzC^VsV}+6*$oYZVU%JmQlPbg#$bRA71zrB@^MCK7_UTZ$}L-5?_ErJnbs@ z-f~oRj3GqU+0;lZ@EdTS$^ZaG{pq(4%vxae|9+HT+@d%`__|)`sq=3ZA6&70M{1uS zS-`&?mCt{60RiTrpaJ=_RdbQFBv|Vzf6E8J*Rt+b5hr|blx)aRH-ruPZ%0iA@SRfv zAxH#LW+iLX2b_UT9d8;kARGlJ?r8%8kWx5xAZ74Bjq2AVEhGV`=!uInr7pjR>qJSZwd{-ewh-PY$id-VsYA2%=m zn?v?+aRAj(YOE@&eGd^$XmyjcBCyrj{@NZQbtfIt9ZbRAy+U~sJbw3WLZdLjF z(!<9{!d^$~i5o#)4zsDEtk7goS~)FR_UX}axXPlWK{@ULpqm)KC71_zyep1Ks3t2O z!nL-dha%CE6Sqr!sM#- zDK^AoJ3d#%Q@k&a&g!N~it$92pE8Pqf~NJ_Hr#M3d79Eh(TEHZ%g@>p2}y*_N4jZ- zR-3Q}+|B9G11bJhTXMcmkNZ-UbZ+usuyl)(r?-~yfH+&yWvdPA7Cc+w7#)041(zXC z2o3THo(T7?-nEzFacXnS zWAavWzLN#yZEK<(trGO+DJxmDHmo1CMOsEx{XuJ}Me^ROff31DK{&nynUclMkZggj zm7Bz4Madle&w%@2sqKVumPT|(XD_jXN@FpH&o=hjC$dY(8}B`(ej=h~;+_s=MD}TJ zZ0%cndVBKo+~cnO`}^MsJhhx0pjaf$w69&dn%%ifW+LcBOJm;qEX`X&gdEMBCsbY# z3+0!S9}%l+3zUrG{5vcx+XWW*7H|xgm9G3%auHq>f6w7_+Px&pd(MYQ?!CD1vZRMW z3wOJal$L6JcyaT_xsH!pO`IgfnSPEGdrlnvTKD#)YMg;pCyqQf@l%Oi6Nw3i(vQLZ z81)2Y&!{ic7zfMq*~@Yx^ZA*ygjpeD{=q&aF0y-U8kX;OtEHV>F{KSl62lACX z#eC>JnCIknMd&=S5$i*zJqdPJq<$WMR#A1}V6dnT=1qT$R`1ky3%wUv?KXty@Ys(2 zVt=u*TAS&Fu;+M213tQc#@)hR|JGJ1Y(aZyek9CfLBKRzJflq${#_*$Hs29gpFHoS zg%?k+GA0%kwrp2iv4^+*L;rr544%z)KzGVM$g$hl$k5N7@~Vo5;yA{S%Yb$fB`Z#j zU4GSF>E=(GSg}{U_+0y;Jud3hVche`3QF6Z;QkUf!e=+jm@yyQhN+w9B4cm)Fam`l z&p*50Z#%q;V)_2U;i?ORGACP17$x1)om%7m*{KX<{{-Nk70$NIxw0T&2b(OAmw4u% zI6TqeT9e`b0JjONdeOkz>G>wV{+`-gTO*F+4Vt!RA9s6Ug2rFl`Y__sHtjLs4cPpw ztMJv>x}GPZgw9;Q#+Y8GWzpVhgTLrdGF%eZ$Vu~BMop45{fnCURyl1cuuzhS?$ABk?ji?GWEi_MSNhz(V)j80nlF{u_ z#JBsPZusaK`Gt*TWnF^<*+q%1d%c-=t(=P7&K~yP@cHhHS&f0@OMJ3L0-tsIGq=af z2)t?Knl`v6G+k=4_c4d6cH6@1^11-lzN4WSe`p~+eE!?D}_-7l7ejdVzTBT-sie`Gk zSQf1=cvy?YFpb-~RF4>{LU%73$#v}t!Z{TQjh_*Z=|7952FlP9i_f(UGW}U=TV4xZ z5GlEM;*qZ%#r2qL-&&E5Yv6mX=VwNk^0N?ImPK#X7H+F!lj0 z)jkJ9AU4$w2SJhWIUdC=V1Bc7^Rai_jZZjm1q*)fGgz1yb4^uNiz~34$^2wvBOYTW z%85ICai4X3l|l4}^AV#}@nS)K1xar{P?+_Yqg59kx73(8X>ceBlWwcOac>}I@hmx8w{3NU`+RR$7mSVKcJk0>WE!1?w4C9VH?cvYWKw9#^kO=Y8p7wof`Z@ ze%~l-+K+Qe^M~rF%41I`7tS&C`HNCRVQ`Om>COh1ZhO_a23R)FA>(-W@qQA~W1_yA zG@4R=T*iv!5*_;x!H&ndz)L#BA9=$Me_g>PPLLB^pO0g)`9i#_m31H92dX6V`0D1yEbH?;cAL>XK%w*+#sH%o}Ns;p4RE+Q))Bur0+BK(# z?Y)0a`x#iqf4{W1ZPtdZb%s2T+}P|Bmj>=e{ITwC1O_XBJqBy!xj0z@+?yqcJ)^(Q zNIMQv7iK$LRZGP|fqCP}eX=Lz7=0sv!3sXK64P~cZ^xr9C(X)l8ud}Bk-eioW;-D&gOaJU@6dfMQW8Nb=Bvl89N2EhJt?%(mZmwJKSxam- z2aiixv8r|D?U9B$s=}Ehx8~`4fc-=I8hYt{+QW7j#_g=i5cP;Y$Ea}g4?{(?9AqNY z<60lcKhlWY-Ocn9J3CSW8K@9h<}HxP>WPocLCAc?;nk{xzJ@ZW)19Wi#$|0l3XF^I 
zJ{($LRLbm7)zU}VLOVbv%GLIa!?q*+#tT$8xUl^c$N!JSG3Gq6Dprd=AJG^t<2P<- zo9J#JDTJ0Udo7a5dadk=mi?#hDcza-;KRW~Abr1c%}#sW#5WGmhcw5A%dC304$H4< zNPEEk@3z0xDL0M&hJTTDTbH$!hunw^{>cG}aJujH@!dwDF=P-+@`E{UqPYR3@a*6$G=~`v zry#!IaK|5W=Gwjv4wI?JW3-m17EV*0up7z_H2+b^v#;_Zui}!kisxx55=Bv^=#%4Q z;m9WD>z+a$7e@sBLwr062kh%dZGmT>a6py^4(C^Uw00C+aPH5#kk2}`-tBcuG=ld*e2rz8*VUxm!;G0ZDPt`DVCnt$~^c=k#$WR3zy zOLvOo`zX#esYj=RY=fa&@LTjBIG7jiK=-$e(6)kSmo*|E0XpjE7XRGhpQrd=Pq7Gx ze*vSxiLqA97pFddE)Vm7)neGgjvPDEQ&0w-tp}9ZM!9`UG{(z~Bc8{0_w(5xcQN#1 z1Y_-NO`RwL3DJb3?>A9kCOBm)^3-p3%4L+6OP%p5vl7@Ns)DMSbEQ8tHmueMBte;X zca<2v6>Nyg?MtE~5N+zc0xt8chMc>|WzG$kIbx7+fha^#bVkuF0m3tPRKsX1g7UJv zWrXs|$t^MOIXWQZ_6rgZFeE9lejQzczPl|4bmxIT>&|^(*s4IU>J+I!BOFwdYJx71g5$`YI~Dem@KvlLZ?no0@YE8PT<2PFv=J0g@M( z-vG8IfMmc@4>{l?&1MK;ZD<0>yJt?bJAx|C@3rGayo*>+O`7!MDd>oFpl$p)m7lBf zKS)+CLgi>7zOlKbr0P=Tmk`I`cU{vJujf;&KlZ2;xG!>%HPxGk7H!d?>??a|i+F|S zwB6wReaqOwG4A7alfEWD?BerU%K=~fM%|PYF=9nAxs%HpK`C&caOa0+e4%tE#0jML z8v!wgAmLdaswGP0;Lth74=N%rv)a0lr{!d=#ii0sY-MqBGR{#Nz}_{L=aokZJm(P= zJSnL(G-S#4JhCL}ZN8>@F|}E8(aUc47N=G%e@~UqW4=}0F}-q+E^Kj)FglAi@sJ{2 z2fI%{IxQIvaL)m7^{g}OUDrZ9MQ)ulW(!t~)4?ei7jD@i-lsJ~g_v>d3&0&eI*}@e z9Qc3pV_-K)D8-Ec9Q6ciJ9*@{+y={&!(+S;s*InCv*nbnaj00}$hz2E^UQbE&PQ4E zj|zr)NHS7yBl9qWdEgrCJp}@8#o;zNW1v+Ke`O-u(=rpg?oP`%*7$W-}-)k=(vVnyR#L{XQdU5o=h*_TBhK1_Qc7C=@T3 z%IFz>lT%jX5UfzB{>*(c)619HoHJT=R?H`-!zP!el|kbK&|}Jmv9yaJ?6&@qG`=)ev0+A^PSCXaomZi<2ND{v+q5sSH zm&k;}sDeEQ7T`j)LpPBfZu`|L9&)S$9RB`%WTqpX>U4N)l>YwQaBOa836(j!7Hfdt z>10)ym~|i}-8g<=qWj?+l!J`R6m<6Eb3F-9jl32tke~Q%myYQQbXU^h?rSMNkbR1S zPlu9p%5tLbFeOk@)>sKBe#>~2RuVKEB?jGbUJUl;+MCYGknXG#C%zDz-GIfM$VlD> zR~T6cJc(SVS^%oEv2Wlq@7W-wx|=KDu@1$nwBCV4zw0BpO6V$`^75W;K0Ge#Q|6yifL*6S<=Lpb=(C>V=*O1#_E*A_+mW8=m!JGlrJVm zHa`%L5glkhy2I%qAjB;i5c;$C5Ovuao2Gzzxr^YU;Z)TTx+idHIiW}>Q&F5idyr9) zF}S%{Xksv>BJ$$E+B;(0-5(zk-mT-nX+BK5d}QxBn1z6DDRYgUmL1yHV;_9OQeHNn zfSkp+7T)-{ckp2DYE*%R(es*xswFZbZjTo5%~MzB@{@*Xz!2{vKXH36lNhW<9ezgzmB6a2Y?KV|Sg z=%;^tNb;AGITwB`XZbI=**eoZ;ypdy)M*_aiYj4&$D6HK2cm1a(b)Go2vZmo#c$4C z(IW?~B|;e87p;2J+LDv-tPZ)wJ-n6}lDn?2Jj6}|et5Zg^N`2X0-&Y`^W@eUL`T3K z!Mgt75gPS@%VTFQIo)Ig>Gt+E&PW6p8-)`$Dk4=4hTy4fujbu=-t?~EO?BOHYs~?# zqrTp{uK>IpoUCmIgm3~h;cA}gu}MRs<+*TYI2KrxA()jQ_5eW0M_orBpvWP6HTBXPJC7>Uz4uR53>Dzo8?GSe?0L#Mmd45|PAkq1cOQ9pS|HoqSZ%vPXwP(c6n)YV5PA9#KC1;)|@~Lbndnm69%> zwim*&sy7j?ARcZhj{Sk$S>qFLCaL(6>DlS*SRT$;O+VG<`}k*-B>TZb9Jori%S#l2 zW5oW^nh#G#Y8;E`6BMU3w|XDzHa+XMD5qkTW&=Mv*Lzr^8BD5jXUa-MbvNrtk#|n6 zh?t!n1y^=6kMH!C-6VH!;FfY!SnO4Yl+@uSs!d2ldcqAYWVqF^fdMf7sIS7Yt~Yom zAOa#)s7(lj5^K*4S*(v6j)(lh*hADDqS6~LMomM(!YA*fl>yISJ6>fUBQW&0UR;F! 
zDslc-%Cc}lyt@C9UhI zax*kDJKt{$AEFl?2PfU}?KHVlG^g6~M&{64tdEE&=a+Ytr_#dkBfmBD+A7CFJD|YOlE7 z13A{lm^Tk-T34i3qfpYV93VR|iGtq{L!s+d;vogNn@?+}Iq&^`a}IkaKFm`57r^P{aN&=k<2o@Py3tF!O|~2a7o&v?}>|aK&U4ou*M) z;4v9B87`^R%_@1bSKeIqUo;1)Z|7vxPDeQNyosO-%{)U@Qnf(k7W!G_w}Mt}D2NF0 z^UTA~bT+H$VBWQCKLXIUk* zrjEt3U%C+0l#6#dntKt}|MZ5n*Wi*gxVT}Hk}x5G9lC!%f9kpPTF{9`Pfs_O;}X7i zt6#ZlGCmEsHAZB2H9*@Gvc^Gu9ZZW4>}`8$_hsVA9dk>d%3i+`fwJ9OLJlk8)+|~j z*;+`|y2{ck987EL&*p}!qW{i=+NTSV{fTy##TKl973VadY;7as)w*hG%!6eJoB@p^ zLl$_osgc!mMY=jxEww=gujR>>_IqW?MB2k?H+8?gRf4#N790@%%T$<6O1|PfgI>!5 z=5#?^aA<{jTRoQKUN}-R7?Ivu4fkMF@-=O!o0nVmB-0#P#t6epVAO!)HY_E7?E4jK&>LC1?L6_t5qRk<=ef(=jcc|4 z!?O|=X(Ozm;)Jmh2fUINYu2?-OYHTp_8->j)3=s#S~}~bL-DT^wf-q znKr=Pmb9(*U6U_E-b@VuIqD|;NsfF#NHWtlV12xO-dK~%t-`M1JdAXrE+csxL#{$0 zLiA#X^Th1Hk%Q;lnZ%Dzp^V=L)0HspZC6=x99q7*c3Z@}0P~3IjXZn54rXgKYHV8Z zvZZZ48yn-F;yvRkYh}L;bi{MXY3$&q^ORG9 z`owE?wkFGoS0nu$#pfzbIV24KvP{$b9rv)2RxEAYCetWwIyqKenL#+lH1T*CKyJ5H zGA5jAL7M&9^nCwmE0+-RNxUdD>`fePN;ovRD$0j}`I4$uxN!*&c^{Iw!tQK?OJE~H z#hdDvQPSy?Imj17B8#hzPum7t4vK54DJo+(9vL-0Uvm7ln=rE|=Y3@DNS(bV>)q%7J_3icO)1f8ta@O;fJabyz@v-Qy+4_`lPbH5_S|Ey@;SrNVay{;`?P zKmf7`B0qtO!-p;@Ejt$N??{%urRKLhZSBhq@XEgXUQYXuRg~FOYfx`CYdOTWsZIOl zGi+L>LHQ{k0}EOO`m65iMy*uy3yA8o#ErO9f4N8R?y?NpFTDu!8QNGh$0qqc3yT>$+lD2oSvjXedGrS9Hti&*C`eV90EOE(S;_3h}(Y8P(mS zAKPuhqvz{4fa8TwYo&YG%PP%m70f7E+4ni*fxFZ6ShqW3_)jbS!0@N8AiOG!Lx7!* z;4ED3Z#fF}KlL9?D&Qu&(oOln>ogCM{`|VHP6Ac$hfV_VjtIdFNXdY&|2r?_hm!#> zmJ!lsmYaD2tWjuc zB=i|0?HRSCq)2ny$y=(N>^Lt$7f=Zn{kKs-<1hqO@~MtK1ICotoD%N zt$=Y`Kkg>;`B9rRWll#;PXDHf=Y!s3GRLLJ%r!-wd(ps$2*l_%=O0;v#DV7UTHB=o z3ARi5F^Zdezs@Q?%Anrs+n)t?AUT!wNE+g6PUR;A?I#2c3A`aPaqdqjLlA2a&3+I~ zdH)+r{hL(%mTJ7{R~&S@n*3;MCs~2Da#4AyXho?c(REh0XkFP|NXknI;8^k5sKAw8 zhQQ>YB{7XM^=8}DL;FvL0U4pmsarcmxAU)>`{fYz81D)|9R_K`ncx+iD-PF2&V;2$ zz-#%E*{9QAIzu2vtHqq^I3Y+7Tf{sjJe4PQ*b>jk+1i=Ejq~b4YpKSSQ-cLVeEsb* zAyBIw!nKQn-*mofH}XMN8py){VboxWZ<%sE($*b|2OrTg$rvT+f_-hI{tUJvEw zs|PW7BrgB7UN?}t7UJBj$0t6mMz%&^&s-?A8M+2MO+N`XM&3arv1|{;Sf|p0mk2(V zhLt~1_7W{cm%26=6V5AJMmTl+71($#2}qc_9%@}<%6J7rS%@zb80e6^ycvzfV{|0EF61MRbd{3ql*h%%?kGK4I!PQp(T<|{~h@T7o zQ^Nn5N{NEp;_#HeD{HW&tEGTmNw45Hre6ZA2a&~U=>o9FO}|%k`tfH4m1T6lXTq#` zeG+nEhj#QlnhRfdBe{g1^f`s1syL)as7^ffZ!{Q0LUVbP_A_OHS)`CIvy_nym8dcm)Uf+Y`u6_kj~=*FAo$T za-SJ#dO30=2ruIla}W@GKphtZNkTbr zvH_TzP+uFeVnS>}S^0CYOuC-t+M;^qPLey|fX1OoSXzB4rpe3VJdxgAWum6Z`%F^d z!mvXF!FpIhwK(QN#^|~JXAV0_N}_Fs??r+$`oEnWYls~HrJ0fZCZy_ zOQUN(Ud>PbNKv>SZY(~iG&$K}>74!aL`wc$xGU?Lv!7sB2S6OhwJ{A9)z0;@t(q$;@xX2elhmB#Ywkf9{xDtC&=R%v$lxfp&zCZUJ z3l8Usq_=zxX9qF*uH7NlE#d7=UIJ4GBO_(XEbuD2j+PP^Qfo+#Q)H)ASL+!2!y(nW z*C$N6&81bt6I5K|DbSci`iuEdE%{!eSzobmd>loAiU<7kx@_> zmo`(BAh_JqAg+#4ff=6|LN0!7so&Z%Nyj}m9ylmf{bHox8V!%bE0r*3?8gh3S(P0a zq+C|)e_YQ&m5$J%Vumf5Ss(bKCl z+TS(wOjCa4mo|d#H`^?)2F_X=-!6LpTX5C&!fnm?;|;d4FvYA=`@YM>H&w-}3EiE^ zrI&L%EoWZ6z7m}Eq(pkSfXZ-`v%l_bR7~?zoP07fOyJjAM>AW25Rooq+bB5Yh@qAY z?7X-@i&Y3j@D31EV?6ID^I`N>ajOpT=wKFEk81IdXPQ$i{jUs+U6`th&ATU>drF7W zC?a~bl2S2!a?{$vvDy{`F1J&piLtYdZSO3N#6PsI*BV*Tl5lSHW2E%`_gQP6x0_}v zPY{m9t|_hpR*{{}i}dosX=%Lp)^vVqssr^=KDvw$>e4!Oabv+b8oj$-X{^55BdR6H zyD!Z~#VO;Sb$>_N!{5Y0@Serov%g}atXB~6ADZKE0|pEDo|6@w#O3bFJFWucoNBR< z(h@ScMCq405=Q^9U&-Sy=+lu;zRV`8L)l`bcApp@hhyoqmC2|DIpOlhj_4RTSh3?o zE)aD~-B%t4Vhzqy7cyL?(;fyVy`M0)Epjt#;3j`85h4V};bvr8ZL$-f{g`^E(nx)E za`+;wGxo_rAhE@KioT23S#zFViZB%2hjKVna^eb#N*i;VX-wwF#r_SxVhxS{yWqUe zMR>17H7UmQeNy!1y37aF`0WZG%8UJWbSseec7@cRCniDOz_-0L^gJ=L0WWSov+bgy zMZ81$xbF zBn!ydh3vTQhMqQs*L=oL_%bLXNrz)Ann!C)O-{^wrrLZE$%WL4@klY#_f4dwYT+`Y z599EamcIA#0T-OakJHv+OkMuvQ&)20WEWy;bSSou-DlRmYP3l{j=k7G{<%hR2I84^_YJrDWN> 
z0juUU<2V-L9_HGXB5&xabVmVeFwJ>b2Xo(kN5VSj7DknSf<#!+1=oI1x)}}q@{>0g zjoS~1&2+W)xJeKaI3bFD>$qLpwz5*2DfYh2yTmkel-Ayv*?BQbrpV|m-}$G>CHN&Y z{o-m@hvK|bBzNY^!30Z*t2u5T;%KD989S?SuRR{WGo^A$^c`0Qeo#-FL=;bgY%@d_ ziZ`b+94vzba3UXF;GhyU2hX1eLSa2IPV#6_X+r5@3-tg4>Nc^AoTFy}CLz@7qJqLr zky)d}ub*0zJL1wl?}W&0SPJ$PaA|pueN--!h&A_RG@Z#>LFhuNP@is6{mN*i9Vddw zn#5i(?tg=Oz2XqzWYJ<=w^L+`M$>e?alcc6Z@=r<(F{DnJ~eP+-dCBIN_JInN7fA< zMLe1y#G}~_3||F!1#>n2)_MWc(8ZTxeh+bHBoe`OKpcxX@d6AqklDz&*hqu>u?_C( zzqlWN9)&t7KS#lO{alH!lJj#V{_m{B|Ji}y3*Qd&+vnKT|9kVi=^$uP-Ob3{`nq>H zJyT4+Yi9gDn_rjgvvVDAl;(g0hZ)Ru zk~KV+e}6W6GP!W7>AINqXG%c(A}&k(D6~Fqe`a@&g*B`s2iIq?(h6wMhN{MT=GpEK U74)h%gfIYsr>mdKI;Vst0HR7_F8}}l literal 0 HcmV?d00001 diff --git a/docs/dli/sqlreference/en-us_image_0206797032.png b/docs/dli/sqlreference/en-us_image_0206797032.png new file mode 100644 index 0000000000000000000000000000000000000000..c266b5ba9e78d313701027d52a7a0863c278d27c GIT binary patch literal 145844 zcmd?QbySpJ)HaL=0-}PXl)%u^gLET}fFRw%&{EPh2!enNG16TUD&0tn4nwDOBPAd) z)R6Cu`g^|ht;Ms}`{%dT_lHt*pL?Hu_SxsU_P$Q|Gc|>~w<&L9U|`%;Qk2!iz`&-! zz`&Bgy#@TnP*124_;SNlQ$Yr!q>p+X_yNmWT2&eYqdXe_>;*RPGoG`ezAFaCz4ohr zH#(dOAs85!c}lXO8Qy~YQ204w-WY+`Qh>q%x@ z!J%yVF=b>^%iI8hXGuD>Zsm>dP=YglP8K)E81$qNf3c!g{_!VO0ngU6vf>}1Dg0z_ zM({&bxQRf9ied7-L`)T_?PSwahlwGcJPk1ai`8k@xkdS(kw{;L?Z{&$=NPA>453xe zK5_2L)3jB6gGGEy!q*sBbQm{qr7-?K_3h)-)~w1Z*-vk!D-BbIq^~z_{sWeTCB+@Z zYM#0cIpya6Wg0wlScDWC#QNKZr*D?q%my?JZre7W5?8;ucH(+cnB~avEsp?xr6zUH zh7Uw{puHs;@4PwPMNDgU}W8Eb#m>u*A3&U)~Y|Iqy@t&57eJ2 z-kcFL(w>}=G(g!(QMS*2jT9Bn3mzVv+_T%J70FNUQ~y>mC#g3fSU!js_2!28QktZm z7QuDP!9f%lRkSe#T5`O!Q4N|vU$mX%t(wg3tcwo`-72NbbOT#`Zt88K1DmwCG^_?A z-n=1Q`U1*!VbkO1(sar#xqh)xie_e@B`MHSK{}n)dRKA722l^!tk`tp*^r^3guhfu zSev0CJC$e)QwB{X#P8RSF~Jg1m5y9~CwdVcIB$hjT8E=t`t{x?3x16mlQh88_IYaR zq?!(#f?MOgh0f*E&Ghy?-KFyMcsERS=&9K=Z@CnwhA8%2nT~}ck4wDXea12hk34@9 zsLyw=M?DuWQZ~aR1}Dl+=9)+d^k$49rH~|+m9MN3WoUbOv?6M)N+MdYza}{ArWAl$ zkBNIhSa-Y~J?n!qa#{_vH-Q*I$95lQ?=JQpr4Q73x|V&i7!HG>j?FY!uG`xCion(+v5oh0TLDcb>en3jp)1CLL-Y{ zXCCPK<#L36!>69yf@r2my*CQ`JziC5rS-pVJO>bu3?xp^W8F~U=#f7-@ANnD zO7O!B8VQNs^7YnlQL>rZM4uyF!;!4nI-3u@cdH#}Qb*zIFeiCggn+nk9O-qjBSiI6^CTS7ekvjnkf`6 zf7ZZE=GZ0i7g6h<=*Q0>2^W=Jx@T_T#DWFYhYE6neX2@1{a+hYV@|2GZS8HJ zFca32oD>^Q-~7&DDfdD~rEjdEv6mRLT`X_HUDPs2Ki#-Ch4KWomVwXoKiM zd(Fh8M@u8MYsiR7==sO=76S z8o#^0Sj#vbFy*tE^}G*?3no#J*O`Bht!n_GGb zvC#W|S8xmDX=I?2tp^^my*>}DL}Gv}@;Kii_1M(F*V;j7gMC95p`3~bFV{?i+`86| zf!mm2uy_GtU?`!^JJ&{e7AcVJ*&UdHpYOw*KK%=KpjFU zNKsWXFw}P?tLs;#nC>-F&z6rKtn)6m=lWS2NCUndvXX%mk+39Re~48DT{)&V`2TN? 
zF{775Z1o;Ud)nHTRaJqTgAm~KDvwMiea#=^-pmm z*9L)E0v5DgVl$RW(iwQ972<2}S{B4)(aH7I^7^%mP@GTDHW>A}%n2S3Ln2+)<^1Fe z`L2zgR$ieXksdI1qgLgi>%5`6>k;!b5*5FPupMIeqgo8C@T7EAr++@9XB`% zcu)oQ6yy zqq>aZIQ2L>YVZ{#(DfA+B`~a>zC3blc0B~J!0N*QbYCAse`AyX?#jYS2vx1`IRGod zU06hQ(pslxjcDBtl4lOubbivh`1&(FvLHGaW?<%0g6y{$>Wnf8+!HahH}+KbbYLf= zM0s}`@Lm``+KKqZmF=P{10=@#6sc1UY}fD8)~C2}q!e$)kGFpG_Fk^rhQrzyv;3GH z?~XQ@Z0>^=-=8t&`?2M7(li~InLL9PKV;Z_<>}etI~y$fgOA(Xq|T+Fc52R4G=Rrh zCYStKIV4TVY5B=@!POc>tXF$-8I@N+l`tD8Bxw7~P2@5$-ej9La zA0%0-MSERfKYlO8fmD68R=sed9}ep?AHXqoVY`n(tRf#CW}?^=0rPO1`u2ooq3?F+ z#0DdESCEFxVxF$)95Mn%?$q3Z_RRG4xA43~>htNafm33R)hUN&FRS3{_d|lNI}h>I z=GPU)PE<~z16liRS~)resL{Pi1IcN_Qsi1Rh$SKHjpM6eeVum`OGAAGABc%iprt{B z7N^c{Z7}BtXJR6Akrfazg9gV7M(*SoFLa*dOjiV~p`SJ#-ru=VVfC1I$WY=i5$E^# zKBFmi0?eN3a-47L%-y>F^bMccfa*_tMh5#H4pL^y19O9T%_ zAbC1E3IupfIynWcw??4DY^fZ?Xv^Uu|0Epm-S(rfz7p(_db>QWke5TxmG!TSD=FEmEl{qX-~~$oq46ZggJlbsvPLJeH}rN@eqfWj z-E4iTX6ugRN{|JJ(TbCTX@qS{BEUIc53(ORKg6U%Y@+7UPyv>4-CHG35$Ju*D`t1272aC)JFjeml ztT@+=ttW=*aGxUL+jsdeSVhL2j=V%20i3k1 zp35QT%H`4Koo|zOU2G#B(V$<81vr=Ggbnuj?1*kV{Zgy~apQE$;sq+m(EXKk6@IigA*_00! z&YagR!0XnR04`GN(NBO$taYQ`v~`70dHRr_hNix|XtbBsf9SiQaD;`AFoQpUa#uh` z4wP*-6lOfv5d^e#XI~8^bKR6=me^;%@;XqkEodHf`3!ZHqss8ok0EJw()Bm4%V$#a z!2nugxuqA0EX?9ken(QgKhDl9VBoX(BiEOSu?usLJ>cvKu=lgcA8;;5n=*jaroE=G zJzD9W-2~ucc(#?Ce5H*My9n1-^wY~@i9EBs2SsED=NiOh@sN-2dg_IjqK$ekWKnh16)%Qeo<%xxd`dj2#sTTWl=K26(9t+v_` zuAiJbQ|&e%Sm$VApNoD}dZzv>tnOFyl%#6&UW+jIx~yC9y4<+V z(?T==j3suZ* zD0@?%-%XJlf{s@{$p2q_;7kMz{Cto~uZ&_lQA6hq`O|UNQCNR-HB-6z4YRl)=LgUh zyV1AwTN0PUd=kmr9a`_7ef;@591D5u8|PGq2v6}SmC%LKFVku+3ny#SC%~S&<2hb` zHax|EyL>`K%OZ?vlqB@vWd#;p6^tWrOBx29+lw!bjm^yzeFf*{_Sbo0hsM4g$)QQl zOHP@bQ_AXoM{yeRP=;p?(;CY|(mts;6JO^fJ+rI1@HdlF#2=~-pk$U>IViXH<46|t zl@>EKkDHE91C3MFE@ms%PqswG@zRJMoWfhjX+QWesBO~T2W;bNQ@sxMm<8eiQ9gC7 z@j8}Hpvz|`yQy892@kqF+zbpCJMCM=%--cJI~g89f814dP;tzK1x==WD0m$u4Gdv? 
zjTCaIT$Po_^Lw|pnJYqDq?P>BLgOQ=dU&ex$r%iLsp>!ml9ob!{qu56uaRDh34+~{ zc&QrK`4JSPwz*8)aJ8{Sd zhJuH^rWWpOxM6bMh(!=+Dctb=o$F2}46J+xz;>G2rQhHUMBbz4$Bj8+(d^y9mmTk2 zWXz22U7PAMFhEORJp;usWaYnLUAJhjTOn5)dZ`*imjTQr#qeD2I#m0N?TYES^gXcx ziY^21My|~bJ@0{+aoE-_RuWPe4n8Fzx-Nk{4g=WJ8_HY?IcdmCdC_OLuY29VL%=^< zwx22D0g@}9g}xSP^9TZlujaFT{)7v%&}Qt!x$boww*kZDY}bblWFZ`p@9?ij`P4UL z%_zFMss@=gsS0M-~DsTuznSx(^7PApbq&x=o0IdF37&dIbzPoa=un zolpPCa%qB~1Whp+%RMtGwO-xYa z$s*uTqZNw3;ambx&uG6h`~)sX$z%M?xyDtC%tG_73P=B~Vj~Jx)~z&W27#Oa&R-ihOs+~G@8;9+06@Jw8o31)x zD3HrtnhuAJInAxFKPZht{a2T3TKz*BTU@NIkAOOFqKUu%ld(;jdpf5$Hw81mCCp19 zdSY%UXZQXg>?ODLA4dY;P`in9$t(-!LXNh0ovPpO$XoA=DPxHcnr zPgqd;R4aU4dX7yc)+YTIdwairWP1m^TL?D%pY+zs6Ig{DU-#2qT%7Gqb&Rd`bd6Ek z^UsUB6m78_k5noEI058O$X*=W$r^;#T{zOc*S>X(!kBr^3#C;)k>e zv~7wuo7<=)ogJ+eS^022fDp&$ z2%9cFIFCga-9=GYa@3lb8vHu@77qLTSsg3)hUBVY%DL^^+qWK9{1>5F!dM_2bf1sy zN0c$Ah)8p* zS->kI=G;+L`tgv(yio*<=v&6|=wOp_g;m@$=4_1wZ-G8X2}@}eu!TL<$+KR|ILn;( z$6>?##1BfLr&|u?jSy;D-|zSUJDdLg-lgI|HVL&Q7vyhz&<|Z@A+l!lQW>WaUbWvW ztl#RSMeGOOK5Y$iKh^Ar0Opf=IY4!ny2R=mVj|5v>E-64-G4dPKqgBOvgr{pN2}rM zzmy~34FHckx4|MM@Ou8aTF?#uEaN|unL6;0IH{)tE04qHEp;sHMM`{{=W1=5ta|mf z-&(womDgVoKZz9V_g~pCu^Ab(4Yz@Po~V~i$WX2@At~rEt+80K!hQrNgYO30N5GJE zzvSeJskx>v$_lJa6^4&q8nLFK_&s=L+h0ozWJtsq)`lMVaey5&CPbb3&#`^$ci4T} zY)T%b@mn;xUf|*II)?ruzgeAFB75y4f)Q{1D-FQ-@=l%O#^*P~BcN}NCJ&bcXU8>$ ztfH}Qp0xTM1ooav9-@Ic><_?i63YX1B8C&c7)Vl0m8DNfz31BSuPHU8)c$(b)h zl6r|uQAJL%OVD^6o-d^0EGM&*rGY0OdrwnPTZIq~u(DX(e6hEw|9j!g)%Py87bjAB z{Zl@d#*x{A_ zp4CU8P**>%QHPzLJ$a?n#g}`RENy`I@+D&YqiQAhcDcWSa`nfR%wnqB;|+1F2J4&> z?w*s%B8Z+V1tM7obu98RTNEK@dx8Oeri7hC<_t@A7wBF8$HyikyK8hqD{}HQA4l(- z0uT6W5%@5I?fOhjw!{}qI8vB$Pd;JcMlRphVLWViZJx|iwV<)gYa(kRfUKUDWo)}8 zq$9_tgpYn=iyy(K@amuLaCh}+ze9Dxka~`gpY@34TO^oN1#*fB=ufubUISZ=<$;c?-aIw{hSaE!$hP8Sw;SEVHbbuO?-R zG8nYf6*5cL4!6#Us49=+_|rx097nVIQZzbp9C}E$c@lnEGa-gF`qfKHQ|1o}Qv>`U zw$|TmxW7L1Fh4e~G^Z8{Fb{uVq9SNl&y-7!{|$-bA!-yaS)&N?({;Ttabq;jDLL|8 zU-er#7l=f~#k(tgA&}*M39<;}!NG>RSOD#*l+gWG8jeI~Ucb)y5FPm&$w&4SA3Fc5 zbOsJH#}HAtKdz{xmPHho=Di{HqSf0LDTnvo!uKNEaoD=q4$93~--q6^B6+Alp62;c)~xygtZoywqdqht@d=hT4)#U{8Bj|m?fMU^AHeh1Et zy8aeQ=C4F(k)Pd#mnSF%Km7-gQ0iG9fI=gAbsIj^XIevw=+4^Bt+kt{lH!&2N|>rU z^~~LQRZJEJkdewrq)^-Ac@B!EQ#?GAijvrYLJR}?7=(6Z^K(tnx_v)ISYC2H<4(A+ zw*=A4Ct8jNs(7$T!q6puJ#f@UslE!L5&7WE*v4mG2UGxAp@^_iu<-Cb-=(VAC6S2>1y=2N=-<5~C-JxxvVS9*V|qVJQr1eW6Ub zv_Aek#cShLwKie~&&}Ses5dp=KZ-BpG#K`@5+B8p5LSB!{SC^Z>Nj@XrFYTr=R2g; ziOzFK(pp8nccG)7=Ah``^j1MEds>XK9-eL3e8G~(i%3BkW4WDh*k|ZR-}Ia9JT=;% zep@)?)5%oM%L%qCzAVIx_L)79;n;Pthf?I)>|1)08R7h!YO4(K18%TL0IX(0@yv8$ zu|22n5oWqyWO{8DArZ3@fRCy^5F`bB$4`LV$p2uikNMT((U->nhhPbb!nCiOrzA@s zHhlcBMC7x4WnDxjFcmKkc`?yI^{7}Z!$d}&l)Ad@B6&_{{jgvFL+0Ns;G_tZL(IPs zrj8tqkc%XsWPwC}7S8l;VX_dn=Nrw>Cit-8PZN?b&Kgj|mI-T#1h|lQ=Ks)@9jraj zQfN7oMQ!Z?KkdZx(K{<TXPY>Ny7zvUsC+mko%&hdddn*B$<^UP<1f`;q)BME z&!HkU-niO^Pe~Y;#vj{cgc~*A#01bs3Y>7_<6ofz1-boBp7V)fiCT0I!XuT%*q1Zn z2XsTdlb6lQOs4r=GzQhBJW8D&3V{3P!*N9W+Z+*H4{u z2zr_cKCJpTjl3Tye63HWA@PpOe~y1UcF>T^qEqi2GnD#Y_~XlFr6E~qVIs`M3`1N) zo7r-^u(Zw4NWQtI*HS^!R41>4AjeV^9e$OLl7Nr1Md7cr;(O&D!e8PfG0&rYSrh50JsH%kx-&{sIe zSUv&zxkb7oko4qK|+4m|KIBBKqPo(-P`fz%L3^ul=PbbVe!;pGMZfz*d z8I-1{;oapHK(+u+HxYUa-}T_WS452SI*UMeEc69lcgm@h-RzM>5YA`a6VI{x6xI>} zJAr?>NT3`Y(D?AO(Yd=4JxDF<8}XLFcL5nkOp)G-1lrjHBAA2u3@?QM-Z>UTa_olN z11Lhgbx}?4LH%D|bBi>YwY__+wpJGax|M#00 z0rWr>pg6NP{*(&pVtUo`)PceYJTZdhw=4uB@Hx;!^twHOR{VR6wPF$hc)nG>po@^_ z4WsWN=Zj87>({Na05!c&!Saj8=uY5UpfUMU@yWA0|DH%AmV%_AZsq$BvNFF$D)zr_ zef;Os1?z&-H-WPP7=Mzpb@g8{ldpUR{ECUvr=}}E!BCMu z0m=*XuaRL$A=Q?Jhq!41z-NaYZ%F)WYLq(3ixiFh%o6bKnFofkI&!w 
z(m788zf*6(KKA=JMRUMJBbY~i0`T<%fm9YIU`-f|)QzFyb!!h$%;}>Rrp5i6Y#9D@e4|KZ|9qmJw-HFhtn;9xj0qXbTipL149Er#c+W z0w5rEs|;WF|3hOOw*cpSx3RXv3%n{E0CkoBOV>E1eY zD66XYxcxl9p+xVY3eam@ISk)R{(^D0*~Qn@e}Hr;FTQPMlqb`6T-8$C0WSMrdT+O8UUe^)`O5?9RTyZ7W!{Zd2-l2u+m;+6$? znfcM_TmKWkD5u&QW8O8!)HlfYyhQb48`9Qk?q64w-AAJx7<3K2?^D=6Q9-z>W_<)L z5k#H@c!+sR&Rz+~X3nL@h9>Q{dopGO*W5(5f6t1EecnHg6!hR~*>3|10kczAg@AQ~ z8Dhl7l4Jo|muGuyMt*7ad}&mr=QO^S0A#VO*h%=eno%Z%1=XOsr4r!X2=twWra3X7-%-oK{y!f-E@o{zHwxjtc@>b{Ns5zyr^XWfpZH-;+qcFmDQI zFY_&Q2%z_ZdgWVM8b6TegI8n`VGBb;qXwQIXgdlK3zNqg%&|64CkQF1hDk_1Xk(>t zf!}%uTs6vt#8xxCX4QM8!(e!GN~`CP8TX#X%uEy?H?tvsN>5GM-^loWR^Yg~Gy}GE||9#O+^JB_WVaKm)Z)|(lrDps+ z@5}j3(MI)Ad>}C(q8rHLr3_^e_sxNfQCR=PRZVO=aeaniB`X}ohSGQVVwK}9DS8=k zInYRJ^ttu)Lldy!K48`VmwZ~W8*ZlRqVlyZ4FbB}?=nJP9o)X*(3RN4ItR4fNgj02`U73N zvK&TJQ@KsBUye`RNgeKdMT{b5S0Plo=xIc|`wu5B1eq+N-bVfruPdelF@@{Z`^8F2 zOCgxe!kMwvZIh{WU{+8pLLwBp(5|ZqZnh$l@+zPP-ngFT! zA3%IeK#zSY()Q}Tg~&} zGr`3W3$5N`_@KhXQ=0BbIfOLi1v#gOrFh~YZIW=;2Itvc(Ciat#3Ln}9s_cD>8FfC z`LaEEYn8>OYRiT^4XshAFu}(k&w3Z>v`b;MUHck&&q~YY6T?KBH(i=PM!x;k;whxp zOB?tFda98~}!6PaLO|akgjs&pWE3)o6#=jkH#7%QYbAzd67T2}eRE2a`t+;&8 zoxmm>yS{9K>DXha>?2btS;St^f0)eq6PAQ+=7p)gh|RN;7DIh-f_rX&Cuk|F za+-$2Q}?1`YU;utpiEBg=SGI$e7`di2Z1W7(I5ei(HWO%=^dIlzv#KJsjoWt3p))Z zx(^+*9Tb9ML&{Bmere1(+mo4W?aGgP`Lb(&Te>5lreE@B_V%uI=v{@b_cF^*O{t)2 zWA=rc5Hy~Ovu$}^X9WX_f^HYewromdADeS+4&3uwq7WYgyl+1~R>X~C z+6jm{S-*USLh-Cu>b&ZsHK6hv*eW?o{1hvW+I~BX546ZFR`*nBU)rY40+}6~y`^&{ zuR_t<&3I?s+O_oT#;P1*X(ifBL)U7SR(N&FnpSc(QFt&W9d!Wa>`_Mxn+B-&himtt z(T(Yumi5x>+oi_qv_-2MK7gPmf8|`mwR!X@+V}AAC8s4@&#oNmqOLlnE6I_sb|vC_ zmyN1`l`$VYvyZ}knN9K*+(p@W`ky#1i9ayFz+`}({j7KI3k)fwXR@9=Uw3vX4|&|( zFbx{PeiVMj@iup$vz+!4A>MQxACjR`Gv2pKT12z&ucwH*@r-$*hY20JB)3lYNWbG( zdxX95Mk(Gw_b9vGz2oo4YZa@deE9M-#kF^TT@|>IW2n7KCkOEZJJ4gYaw?z&Y$X_9 z@%3k`?976P%Gd_vV7R=vbINM;rsjHocQ}mQ-d>#_!f_f9UviUN#_SO5g7&VrT0SYM z!ffpt$YSv9{t9gW$OYBB$J%9mmbQH}Gmn!wdSevun2esr&SOta3&m#}9j;9rm+|?j zs9_eNuP0KNOK{=`M<;U=)G0@%d#L6jiinjnd~9w;Zljjr=eJ!0ptH2KI;gFLQd5ok zbEU8xehN@7npGYBNW8>pFW@gUz`#;dqoYpu8Lsg8Lcg9FH-}{@iy+V$?Y~%M1~=F6 zfO3QRX`;JmQo9Uh(cdd;mG?(qwJG!N3P0um|8npz8|mf_;-0RX!Dnm1ovYp3RM1Fl zlFJpvbzPU<@4QPw}h?rvH0on5G}y;J4GCU@mi_J_B2wU+zi#1C6YyzXw+jhJnn zIG}g^0&FT>t|ZYd_Eh&Q>e=ptn_O@=_gQFMbW5F ztT2|E{Cs?2CiK~`E6*0{;04-S@&Hsh>|NRLWmkKqfe@4%?nv1_GdM+ry4Xc294?!K z2}t`>{N9| zBuX55F!_Z$ohxqqB-uv1-z8Z<4gO2sO1O^)C5O2a=3WZHci$|>S6^;%UvkqBXwz?k=KE$HwYJLM3v zgm+Uz&yI4iy~~<$52#qcw{AEiMR-}j@1jeZAUgOB%&aRLAD6tw4x{^wYVL+cJ zGL!q*?PXU~$8p z*aO@6)hxj%Dg>pV19@r^W4U^^bClyPhuZdD{ng&j9vcn|$9@4Vi8iG~aCg5ykWM1z zS0^a7>?+mW1Lf`wO&7)9*5x!aTWECJxY4^H=L#LfS6a^d;2t!JjV6DO=8u$|&HRv4 zJz2=FuW@=@GnlejS7ZAthBOf$UcRd;d*dIS%a9@#q_zaK6yNlqcI&wW4qQfV>lU)| zoL0k8ZKPF$N~ouAqpNh95GkMGkTQ0Bvyk=*r_=^Yl}I~|LzhtpH7`kc~FN| zeLhL?rR$T}&?rvduYn?gpKo|gxj9=)4M%pKNwPe=)i!{3>2VbLS)gL8DAXf2O~&Bx zfDxozNGWo1=FV-#mKhqU@iu)X_w?=W@cO%RxdQBVg8JHZU*xuQYoKzO+&a=Wakzd% zHuD7{VMNEvTDz!)Vy1y_!U06^-B1;k&jG&lxhfsN$#oioKJGRIN2MJ-M}-n+gHaZM zk5{s%8!7NCq2e*sF=LH6^EQ2&vr$L#+-34&bnW*mX40G-_4k0BEF5rVz-%3ooZ^YH zR9A=@JptjuQpOHIE_#s0O4ozr+S>2gHhtVa^yskgcx3}_TNAZ8&uIhBqOm=Pt0=Gg zU3L%`j2MZStGQ$;#89PH0p%)EL8~?x>6#kf_M(-?pLW z?!K{VBj)*18GmNqTtL=pbS|z@%4XDi(BI&v3Xcjm^uSl~3I1g2cG`^l`p)JKe7<2L zJjt@zS>7<}ySpMzI^O-P;nDXOLKU?&BRQfTK@YLtxR_@NNK1=7{bBDmG1nO{9)7YW zAdX-e3F!ZoQo5RJ#d&nx?dkC_JQxA;ctS;I4BNKnFyFzT&2FF!{ru!= z!wGLm@c^59O8uuPtM68_&l=IkcT;O~v>ult6|%cXj9$U^0?9|YM79X~mp&qU*S!O# z(b`Q zzE3$$I>|&oI52A}TSs_*8Z~{DF?RoAHj1^ zbrzXps-~ej!eQ{bB%lqn4Cp(`kV1B|>iyAZ^phZcpIHLb0He%IMlZV$nuY#z5t8*X zB5&0%^(b^j)>Yd)(=*?Oldm(4dyq$R+a5YP7XhFTkVO*e;BwJ>E+Tf(5b68Vptp2v 
z$ez-)WHW`M$oBRa?pJcv@PnwkcFxVrUL{{0@H?G z#2sZq`vvbkQ>$F7Jez@MnKe=TP|2}tHc|K6Sex}j z@7KocUHT4TA-{e38jx>V#qttFZ%Y{b%z=7u6LMprn6BdK%r?8o*YxTv!tDW*aw+{B zXkrGHaZ653{aFF+OQjE6pB+H72F2BOK#7L!F6J;HkdjRq!7Jas%aKzMzxa_k$&=Yt zPkd)-YVeG=c6UEW&QT?Eevr?sEd!dvukW-v9c#`F*Uc#^@?5qEB{wKZ= z!n<0DAf%AIkt~Fo1>DtDGH%XYttZee#o6U%?6aFeK8QHy)=y$V|3bp?6TpE;uZ1t3 zHNVsH3%>E|CXB$>_TdXfEgxPsRS{k;3SMqD+jd`n&@X(MT! z`e0h-A^v$7TKcgD+cGAj=xRXQ%A|_VMF1tK$|4_e zf7sWc)L#ivMOauxKF}=tJsf1X9 zm}1(Tdr4(jR-U-UFD8D>+%%@588^@%C>pd}+G47%Z-1-1jV|Sr(WmxV_5kRm?!|H6 z&Gr4f)BtFJ$a+dl^Q)y$0@jer&n4?8p_$*cb?B5E$S~xC3XU4~`zH3~)u|HQhzeR~ zPpkG7DrD-1zd8?DdG_aR6xcsG*Xq+az~A~{@BhK_^6g~~UcK%KPT!|CnoK+UZ-ClA znu;1NiS-Vcbf2#9Wobgs>Y|IOZA6{4r%R3s(2?OpzQe5h$w4vG=v06oH{=Qh2iW|s zC6762$@7wkP-h z2fXc@Idpp<;W)-4A&D7m7rCJ6WqU0zW$3Ek`T5QT z_f)r1|ISR3x=%U-^yF+l6D%Gb(ZQ+~sO{6|iGq%h<&N1woATXLMlBxYnvO9ZsEqLz z>AQ7FTi(53I9Hi&`{a}=9fsk!QLf|vqI3MyN>14yoLGj$O8K(DI>8(UqX6{z;c!w5 z18RKyx$L{aoJQ@JL3I!CU&6WEm)pBG>7ssj2<6*FM~ zj!HdhpMJPfLWhts@Y+qmth3$ojDY!pGsi`u8<<-X0-?*PQUOr!)7nkk1*Ejpd^^m?BV~Sc>rA5(Z z3W5>LdHGqs#e4MEfb~ z;B+WpA!N`imLfckLPZ|Gr_wWw$s+m2s~~$oM)>-BUV(hpxvK=o0`Xo01trXTSG1~- z0l}3q|0Ubbs)EqaIZV+#>uv>VxO+#b6$vOBaiB~hEAQ#E@TT}P+(|C|)<9#)S&8jz zGzaB@_;Fkuc5Wr2Cp_z9ra!{O=IM$Y@jZu(rzTMs$)b_>SI+4anOQKQ$5LW*3ijDB zrd-$9MRjhh1ymg|-iz7(OcO=?=Ioihf+s@12)zCA{3Gk3a_sfCNB}qEOJ}o0K08L%XYbq1THzpL0X+ zQ{bIx`smEd{54MMJJw{&f}RlEMQEs;m}<+RWwwN+BkYOW+PXSqVbtmhS^G}qgxo+W{>w$H|;<#RP>hWvreSRZ#x_TBuLJ_fg&ST1q zl7~Xoro)5lt!*x%&!|M$+s=$)PmPcAONH0dO3fViIv=dGzkm;(S~ija3LMj(5EO(2 z>Db6$K;V&hD7}UCeg2{R+!jf&=3?d0Y5l%^L3`zx>7grhhgiSrRwuZx9}PF{9Qk7J z$|knomj&8yfCS6fS8Kb;ecg4gfcT+{A~d z5}ak({QAcjs9Th2+mvbf?9%2wR|rxs{0`SB3Q8sD^|1LeWD_VXlE4 zQK_ZGEunI*nzMrDgWKqqdhhQmiv?F|Kl}T=?DTbt2vz0Bmq&o;Z=1!QoOXw^CnROc z$Ug_I?5;I59iL&SP~xEZTSvYZz4-W7p51P_Gxsr*LcM=_N3pd0E{%bcTr=yEu!Ij< zNa;Mtp2AbzPBoul^-=DqYMX)w7b`(W-UhX8C)K|;O6ye=OF>|Ah(;+E{2O_6h~<{h zkXI+Ch4vli;?uNP@+}k37~5ZgEXJMp8L|htX9=b;^%L%i{C>o;O9Xn-g%6OW7}Elv z<$w%}9mEvpo({R2HuX&|AtKw4NK8ASjN%nlj?K-SNVv+v5${~QDUxrZ^|*y;6V>vW zYiWbrPydq<(SxPPtZ&A$h)1DN;6K~?>wi>wITP&lAr(G^L@>5bq;_4O2Pm7TQz#&zD`YS$ zWXt)z_8YRM*%S@Tq&-Fauot=6xFcLnl`&dh%)X%vb*W;R*(WlP!EFUYo`Gy5d%#11o!Z}r@ z1n3a`Ut5dJZ+&h`_m#BQe&)6+bbMceVrsN7&hyrl+qpj=*H*hi)3CpMtA9i9qUL(h zE-=SV+KYbC_drlm6{ZR&j;{9&IE>y;0S-G`g{of- zF9y4;0*-Wu-jFikshk+rHT&u3-c=c|n?4@s`u!Zb^sH*(%PV_Ad=KeocbCIj6gWbm z1Oe*)itD{l5NtKiX&FV?EjrkZ8y(*144t^`k$!D;Vej zYMZ@#Y)xvNrMCI{H%XcpM<$nnCJES`Tw#KAv@9Gwax&_UoVby`{%|D*}H)bF?T zq4@Ia*Kt}nZ%Y__C()!ZG_xA1{+{f7J?_F3ojFW(*ShmTf&ZzuR>g^I!d>*Mw|kZ- z<<$-BF5Bl;H!V_`9Fl^B)}kKhe{qbsj7f{Jn{HoC7qn8=_t3tS@IUtx_V>no;-ClI z+t4#~dFe2$vGbhi>I5(6ps;u$hk9BDxubL0mcmJ`uhz23o`PyuY29ypCgvHR z={lX32`v}$m{nhK>_t1$#1h>fmJF@eo@<{b?eaPEVeYK8afoRF0#)PPw>F;3(3XOU zAA;Myzhg(7j8gN*RChZ=p3^|p_7{NbAzwRs|2bpgJ^)8DMdHB+j{?mkyKC=ezG7cqPpZ ztOqd9-44uf*bDd?!($(N#PM;A1Gw>$CSY^ovY%V8X?fD6lVb`pLsqaN15lfQvYN3)NNT^NZ#@ty03EUp)^}w2H2ej_ z>baKp*J<~!2TE*QgVeON++0ScBfS0W{qI&zTG=YCL_!AhC!=K{MF}kR|2pAzf3t1` z>>~rhmR4-GvljD{SEk}F)#m)!m$g%&`%WX5xA&6W+BHb1^UE5aVCpM7_as`Rd|AtI z@smGyn(R;c+5G93?iA0f+Y}7o3`{qPSNCwQ*o7HWd_-cL zni|cVjzWPfaB&46Uy|L13^tnnFtADyo;Rq(Val}54gccb~hYQsid(Uv;Kp}N`T?*<%%GSD!sc- z7yg7vjRYCZ;SnC2%uR~oN(9OKvum!X(YxQF!9k_1M#(JaPfJY200~LN*vd%V?f<|@ z@9B%a{u4Hd@4{|B(@N3e+f=-wtJb5BwD%qNlB|J`qo%)%-2DR=03E`qAT>T0_*RSb zLIda-JHH&HC$V(LlUQ!zWzUZ~%>YZFe{9{u(!+2S*1!i5t~PF((&kG8jgs9Nsl*g4 z$*ANb>xpN@mmIQ)Q9*jrKdSyfxj&>zCLES+7==<8k0}xD99;6gtvEz@=4yHeX#YEz zKhnehGbRgoW#ycwR)DjxLksBtf&HYjNC5eaW>OvQUr6J8@)y#GiGTj?a_;%*ZpXSL zVO$mya1s~}JtL`n@fTX02-CvLUqq9A|CkJ17wUc8`S9=K&qxCgorK6VAw{3iz+EOz 
z&$jccu#HG?T9?M0iRl%ZReOoXwKpAPglIMUo=|1&SSYR9Pp;KU$5=KTfDnkGH709r zmSfZq?uUasHxQSTo{Hm z<S49>z_6BjRQyx`NQ8#?7=&s6|hhn#*DpQih!L&9r}^jqEsWb zMIT-@yZa8+c94#sArUzo_H*0Xn&xj}&u+f8;xuy#!lGbT_4y1$6G>Cjg$Q978`Z|{ z8{4M#wZ5iGx0h>;pT1VGUTIk!XwghQvgZZ6=G;Q=_CE(A>Uz_T5JI19Z%_xKx;AWI z{aR%-jY1?GXpE~mP$1o3{yFeaLI5#FwKbb5-rR+KbvMhube0dEqh1BwuJj*XMOQXb zSGo;25StBJ38GCwlgM}B7X9$1-yu?T#}uJtOhwS`-1)RX={DQjZBiT615HH_iZX}u zg&0*;FK(+uRdMd4A{VDQ;EcpMc~&P17WRi2Uc-nl?}D)?%hQod-9=Hvf0{=vk>OS1 z^*tL#iB)>AL@EQSEgwsSwV6q#6OXT0XOPtwX%{Dr!M@&J!4iNzeFqnI9S_y1oTD? z1t*%UcS@w`u@vsgk}}moK|{QXY@HykN&mq|9~m0!=G41F+L;*yt+_{}bF!nLW5i%P zr=l4=S&d)oZ=(P~E-IFgOaA=7i~3$h&Y$uAs;be$O7@M%3MW({}I?Iz2QPqmzo6rd?2joB@_-UPJYUoB@Oxw zsrng^ZWYDq#Ba7QQi?ONzNJQEi(fQtm7G-f#8~dqQN`=4%hHlY!ed5HtTsNyjt*8R z#-9Dnv@oFpn*WmhU0K1fW|UZDgQV!ax2$;goF(xQG{jbr)rze>7517_wq^aKskwOO zVB9y+1jDp+3~MlJD?oys-1_W=|}hE@)|BqcV+V_oH#XKZ&#!Y?RfID4toF1 z<>p=mo*eI%Kg;DU{PQ~Ma^06qs~C=&%QL`;cjJ+?WLR06H9(hYcJ01x9NzIvL?$>9 z{PmqMu-&7!=O!;#=DRW&O8_y&_@>;4SimobC{5+YLq3Ue%v;#!ih8{C;P3h&!A5yF z*Is>T$say~ufSt{QWgr+U*re(CYcq$cJw;o4SGb^P>JQ|A01^cKl_p&%-ITjES(Rv z9)QTJ=^83bov;Y6Y)CbCT%^Fb+s%XH30YOY`wcF^Js!J4@6U9Qe4myeh(M=>%sWe!W5cS0Cy*I$9UKjs6>o*k# zYz7NV?5HLxY(H5q1v^W=HyOQnGARH3bzyU&^1=xwO z&Hs)G2PAiDUcKvNygUm(uYl3yJT?hgM7bCc(H-A<3z8mIX+mIM{F~N@0Vd22*l-`9 zq`Q;-*G^*JfG6{HCX`=hPy9i$gjT~O5?Cim9v5aZ4j9!9gMVlJ>e+$U1w?TLB}J(0 z#=*ELa99b+eCUM)morqm*nh?M0*%p8V5jo3_raLD|Bi_mMbUf}Cq@mO5kW}+pa@8N zsKNR@0~31>3*?3dg}n;@{WU?9c1}#_80f4NR!yclu|rp*5CyH zuEfdY5h|QC0<_PK4-Gw?4ktp~s_|dA@EqU^6x}4t*xvA#T_PSW`x?F?&lP@{%%h;@Y*%=f6&1iS0Z@h znpuIe9_c~jNBY>f)f%i>9Wn?9Gnf6Dl1g15Xr_1KFEM*2Aq@^?BLjo8RG$MtVE(~| zjgyF(Lr+KNEW9lQv=J6Z8CU0nX|Y|jshJ9_$*v7o#U!q~y-c=*c})@QXam$G@``^J z*jE(AV`s)mGxUy7Cnh=L<^2Ce-dl%7-G%L<3Me2bC5?0`Jt6|qlG2EPbPUoW9Yee* zl0!?EgaRTU(lLMnLxXgKfOJU?FvM8{@3+5ie`oJ~uJgzF>s*)D5HhpYZ^e`Mb3b`= zw=9};fCS}uhI}CMy+q>>6CJh97i|8-(U4I74rsvmv~eHzT~o_V_EDG{ic#<9_b;N~ zDh_t|@8Ldo?#_E(G^-i&AC(4RbshqVLU-r8$Ipn-HHM(_XDR>uczkMBH(e^SE2y5K z6&=A9*PW<)X`Iv6EuTLY^7!2tLp&*5h6ZisU?u~N2UWu$-GP{xI9eFiS&w4>UHA83 zkeTYj+O6V1Az&Cel36a)duk^zh4GJT4(DkpK}P72eV+Y`uJwZ0hpByP62_o;oqv~a zAdJ)DRA%UUT|yZp#9-OKvQD!3v~kbI*3Jb~Zaaj%1<5L~F-_dytl{?a&M3BV768tG z3eP|-0E-m=e8)!iW>Ha*2}4KLYt_fWo+v)f%DK^w+jcanzF27K`;(HW8I1A4GA2FrdYRcQsx{k*h;7@W~NY;jkH zN*z|3w$#!51g$^0it=(Z;>S~)u#D4_I@D#qKS104q{dqC_>GRHCzG!Tcp})7URj?H ztdXi;r>(?PESkZj&8>mAd!_2*CQmV48? 
zSTYV(R1ASorf|xW~%h#n9eE% zcP{0{r9+peh-5@SI;@{g`6)krh{{a-eZ+X{PWZ>LRks1Do4u>OHR))HLmR6HHm6sk&?*QeFi^`D|pKE(8~C?xEWcDE(rml&dp;9rkDDd)(tZc}R4c zb8(uvG=2@&rO2O`V(j|lIjebJ5awkiGTy+~(d zChNmz0;Vvbs<6mdZSA}IY1ms41ZG*G8yBaMhMgTEt5rO`POZsn(lR7ALWJVmNy>sj zoCE#AG{euspg*t@^Zh)n`u(?ocl;#eg3 z_$ea@t52rttnR)bT^m|CuqlpkxqZjDZ@u8AX;9bO+i^lQAHDNVtX!&mnd3?5-*age zzYp{Ik^N0n6h%g*fxBja>l*QDbmOv!V{!BV`{Ky|Sbcw9s=pv*zQrVMiA8XJ-u;xD z2#D(4509*_Vp;g$Cw$OF&{yIXfvP92Gp03wKGEA)saTqVvA;FHkM1qEaB)ydo%qf* zXCB?N`m%X9=CB+;%gP?FHmEYli;w56E8%@@_4|NjOx2#DZwHaS-e)VkJqeIg+7MB$ zHLZ)-%h2hT~KOH)5J5_EGu*s&e1H!sP?O!`1GSa6m4Q_`?0-2)71) zG~|D-&M=H>{e3&CvM%MkeLUt5%KA>+&SFeXD%9V{;Pv^x zIedopT>d`2-h2u_d@BI|1)PXlk_^<&e=i0SlY^|Ox-wBq9@@A;#dlSRg66U`VysM< zQk*pQBNWm@U%Z5Tc}~C7SC+M6R`L5r0z548MS9qB!HXEjU^!(U%Rhqian_%&lNh~Z z4ONH>NnMKxtYB_orK;fX4jl02jc|^*1I)2}{?6E)=3^|9D|YzuvA9N!UkF)}ni)0c zO|l-Pd-;y{LO^c=+%aQYHzlOW>9GH2#S;bllXt-m^oz>wnV{3wP>9f3*MI4h6i^DT z8tj|fJe~gdFC9c^FI&!S06`nEC3?T6akIuOTQM6xlo{|RsxQl``O_Y_hXB+Z*tY!0 zUY~%;^Uq5vbHRxz_@j7cC%2b!uTFj`rRV588{ywMJM14<$N*d;mb9X3XVe!oq|Il` zQN6a(>^!}>!?SE($z#5Xl)4&44L{@EPPFwFf0QewJ-d;j`{c8?=XNl|DU^N(3X$O} zoHzXH)X$+rK)G0sy-V9q>j`&k1VgFac*#1Efp(5DU5hnxEK>xu>ZEhG zU%SV{+%BHa6yaeBC=dVVbWmF~T;Q!hZ|ZW)^EHj{G;g*a#GL713w0b(Z-3zOwi*ZZ zg1DjbCPjwkDrCsAft4e|w}#~_azzt`iP*eUM9sfo0OtJ|!<_zWw}g<1`i!mXN6|&J z!kl{%wD4RX$E1o4TL`Ncn1 zMmxrZ0KnZK18-a{=k8zyJBcFVWUGD;UBPbD%2s8drsCl}EhM7_vxIkH3Hb{T``5}= zG{LyG`Nun6x9{mdGns#tYy6y-g}jRI&Vi_U-q+Gu<|LsA4xXJE@_O!wtZ{GI9Oe&J@JKs^ zl`JO4wwbAE4P2pgm*yNhh;-0Bg#?*7=o`xH;n|W)ufO@ivih^#-!3M~QeA`~YN9N% zMrYL+kc=Hi-%d~XN8=U|6O*Quyw&fG`Rz0t)33w6ljDJC0G)QE051V<4ibPS-`W#l zicesY++>}&#g=12RL;bob4S|JmlNa>0sR{^*anpCwg1~FFsIje5OOIFhncS^t?y&#yZV~eIUDXH_U*

6*yoTH zSrHF&mRTYxg;Vu~ncaJ)^SJ2cI^ra(>zSjE)jUXzZjQ zDr{ZZX)55{u7dJ~aEt4W335un5Bp{>@<|Q@?a2*{x#CU8$0V(LZsp>S1BhDIhxV)N zur{#oF}&VdZWxwtqlS%pSc4dUGmt0NiPYPN66+Br&-^k#km(6Xo_`?qy7KrsfyK)e z4L2iyYEaFRIhns}Zo7NOqD`#V!gXxmRs><0#{kl&l z9u7SQB`;>?EO?l*SEpDA;5V0v+LrlP3~W9(p5&87g8wMM1nvqyHVA#PE_^5iMJ}EH zjEm!Z#`JE$f62PQx_`urvj$>8dZV!gCZOi|-zw*SeGiadtVv;_&Ld2ketL<7i;I@~ z|M@`dOn?2q^D(4H8$uccOjAwL{`@95Gh$tkq2%MJDhI2tY_QLgDg586iAXwx7ro)o zG>EA?QGgQOX@Kvz<$Qi48^f^(CRS2=JwOf--5ig&3Z(Ravjte&i?cyu z7yA{8I4Q8H()ajGKyJZ`L*(yIf69*PzjEF9ycm#`76n?5BLYfCKx=Y!4?lxyQD*8q z!(i|UH@ml(`0ocuQay!`wtCXIVTxam=&)45sP^UsJsf>kc$KSDX7%rJVf^>Fu(R>2 z34#&DSAP04{J$Ad<3@nww{*fM94wmfW?}` zW+vYN`F7%6Yqv^|Lsned6Hu3G^COxEFBeBX^wlZ%N)W~!#v)Da4sV*-dTmulZo>W5 z(F4VF(^shEbY!Jk5lKTX-%|!6R9@F(#4n{|(j83fifv(LOU+6=T+G&C3si!Mc(VfL z|7B;h5@J1TEdOZ3ryO#RNLz*N$w3!-<*?0HDyAM~!*0HLu};6N9`?R6A_|6nwd)w- z1Dy02!uzY`r6$&+=r{^T?*4ou_N4G0On6Fiqx{b6dyZ4pURGTU^(gwI6{}*z5%T>R zNWEnO-*!Y|wtsmur`H?shZ~e`zZcA5V5Wvifl#G3O1N8wl!`rNr;NaBsX24V|5a+( z;3^;=AEf5_OL2vNIEjRS9ogs%hSJea5BQRFRJ+Rc;$KDq|H0!1tho{Ajy$WV_dn!j z={m;X54zv1zWP8`6DY;_7dIiqT&xDNl5y;ZwE1Qa`tfeEkt@&)89=_gnGf&usV+w_ zMxg9I&GhN)W4XQJ)3h`60>qrWX(^VfeRQ?RNZSOy@BczQlO6rh9HwCsP3(;;j{mEhP=S-63CUolrDdlc zUR8fiAFAOXN9_;l)ON;qRPnmsH!+N9cJ;%%TdBgCC-(2B41@ApkM!bTj~-g-3ajOhi9g@2GG8N1L|qa z;5s^Kt1?!+O|THi?t!B0+GYxzzG91j$tBz`qN^1~ zmj6xUEM4|p=B`D68$h5WL`d#Ev?^!R)05q}?7ai3^BZazlKm0e&wb`utX8M76`xj5xcRdZ|CwjF|dGfNTOEm<5p;jq(Elk zo7U};hNxn+jr{H(bb;|T(0H#F!T$ru37(Li5)3tYMI(u4I ztomyy99!tbmUWK>7!#3r@SZD+Kmg;x1mCzhUX7abcXiym!G+KLw|I7`0jFZ_Hd(;-24l&Y28fN zV|8G0Z`MsFs!GvMt=R*#W0CcW>mjL2{eZ(pRS4AHzQ0FoK@pkEvi#50*~P^=Z`z-I z`O42SVDRzARHq9({qUmE$jITWzwKhRMEv*cGzJlK8SFdc!CG8xp0p~8YsjfOcYxJp zHh#)@mG>kD?ya{HCfFV%Lf((RWA-2(4x5~;pyfrNvZ8#w0U@>jE8w*IdN0oh`@xo{ z&Kid?Ww3=62kS*T>!Uh=1^c#5^~tK`3GUO^jqPq3F0{qHO{-9~zDc+JizWTF+Q)0g zUK5BM;6d1}r)D<(jfwK_Y*c1lcr%^p8KrCzDW{a!2vugy32&p?8RMa5Yu?3^j*D(p zuC<@-s7G4bjK&A^Iweo5tH;1=Iob+TItlVt3{Nih|ERaM@9OoM%(i!s1=@UogKjw? zvf*i}XyUM1zz6EM<-JA`y`52thj2h|eO7n~#4iiANAIl1^vOfrp%P91S6us6y%+L! 
z;EVif*BcXkIJ!y_c4}Hp5moeW`XyXis`fpwlyV*SQG!Jg^o16*P zCBBF0>p~kF4a582IaPb7`G2eCv-OuCWqiHe?{#97;Lb6vJ_xfF;Mnc=K1NHlnK9AE zFT01>Z)D9mzX?2zj~aF36$b#a8My=U{kP?XL}tw}3=yOHsugLJcfkV;(B$^|=n(g~ z2!Jv99BDbWByh+>cy(dOGrPdu^NUB>ui})hvT>jd=Q*0>b!-L=4KkY)-(%1uGH>p}9lk<6Lq;u9?W2ULEK# z_X*~gH>4J$L0S)wJC%;Fz`m+j}Oo3`=$~C9Ncg6}F5^dQVL4M@>d}0>hmVrM1;rUDw_Gg@7 zi-~&gq$>@cW7OGIe?a+tIXK5MsHx^sAh6m z*Ub;$RWp?@bQWykG_jv>nHcmF)GHf(M%k@^g*Frg*pPR&xz|lZT(%JmuiREi2uiKm z|2|V7XguOK=@1Qtu<@*9i%<1i6>>w_%f#__@L-K$;x#F4U zF?zz0&X!ItXUjoib6r=g=B(s>f8QXh?ISE+%Z`=htYgZGzG7=RGWcl2 zF7k%DIM%>c-Z zGVD}b2jwmyOqOIFpHg~~I+<>i+PwCA1a`C*z8yFDv$BkDzfvN?_13<<6c$d}reyeqw z9DC}2*a}UYd2DlX{`w{40Rt(V$S!~~E)$Hm4n@6EM3#QzSxmeHETsuv#)%mLW)M=d z5}&md+s__Bq=_@l^>pnAGb@a$|J}RnC;N`-nSBhL^G}oMz0va8+J36QIaTPq187^k zNI(TTg|k3oJaEssa_5b+6eXuKPvmY(SoFI-3o|z?jn6J=Fz-hv3^)v;GrMXyhvv^p zxOWEd8vF{C?feTu>R&ep51x5txMuSBo6HXS>P=34yRT|4`^qqc$G?M8Cn|E8ZrP2S zM`iC5bLTgYYFAX{p)W1<-xHC0Ropi5L>+QU%Ii-98{WoiuJBpQ{-~hji4{-=3QlPL zH_7BIIn1npv)}U$Xi>imJxw+_qHHvU9LrE;nZemyE&k-mm3VtaxoduN`%hvj%ymby ze(MH3H7HZrsKbM!kN){Mhh%!WJi>72Zs}ZHH5xnVoPynPRCXWt@FOvMR#kAiz208Z zfpyd9e*n&#(yoYtWg(F-1v?Nw0Y!qOlC@jW*3=_}5}cWvd5_zfFI>mfiQ@AdzZV6H zq{@F|(StXE1zmzElF8`I{Dd0Bq&>O7S6_n|5!pNXt{+*Ak7wx48|f7&30bJ?mY+v@ zop%E&Wl%kzprCxVoAD5`dNS4}S$mWp`t$Gi{%rn7$KO2z{}41CbXYy0hMiCG>IhCmav?sx@Cl6$VkrSnq4eQUWihZ!kTL7%!CgSZ8|&F`|QeiDE8iAB#}Z_arl zm|N&M*1wiCNGAj^5%Y)dp9eQ(ll+A(bvOfG)HvCyIoH?HS}xRv{;5(+2F#efYk;Yn z)Mqr)*RI6l$HO|@XAQ~u=qcTcX~es&Ou0WMwY#wO7IXpYXXcya6`$(^$y0d3zHwna zj7tTjTwBSNJVx@&(VLlR zN3l4bxBZ*s;{(9ip$j+@jOMzIie|ft=jysVcheqTo^)cwKBb;Cgx7KJXYy0iIlNPb4fuRo zc-|R52k>@lCDmCis`; z^U{9a^w*VeYpnDl+-wDXVpqbaSY05`Aht17)01v$qCvZ$)0}uFX;|!M>9kkNa|4=Q zzPsz&H~W(~CW9O8RrYAnciNT#-@K#q%vaufUjWuM$7sB(9yIPtR?c*}^j=zk_agqj zaSTw7fqHPZhItV;PeF;Lz>ge1Bwy6MWk3cz9SyHDEPH|E{}d zfB+?Wqm6rU#69_(kgy8rUVr~+CQ4W>fc|n$ zk!|xCTZ36GOusTX(&H2aKZ8Y3lNVCzFn>FY?VIzX(2xrCVBdsqbNK0 zy_=qqce%13ekHKgq?ug8F0cMC&>nq^AHeIYMkjtT1_jOo9l$%}L{9OW8P!+|B z@~IYjdqTE~&Pk%Od$@7mf ziGG1rKQL1!``_tBM;v}6*BR%Q(>A>5HK4F@LT@ZP(3%+Oli(*tZkWw|8w4~LtKV7q zz%ws^IGO_2&xS(f+1secjd_cF{jtYXZj#TP`}L+S{LBZwDmt~h(Y@v=7-OSQod<2p zgDcO7ntinj=wkFho>YCfQu8Om>Nuo>vQX3b^dXOVT+hQA5=z4|ba4@95tK`DSJR8;K(Q-Lf)yefvi;X8q zx#Da#F*Sl009%`)BMoR|%9VZatH6evW&MY&|Na&VIP6_X@_UQuwZu|1o4*y0HNTCe zr#m(4%6T9&q_E4F8+-#F8O+TrjkfjJkZ$AnSaSNsbS|+o=KE7n7>4Qf$daJxR1)nv zD6Ymu9ekr#OsgMfoF(B@f}IoCw#}lXYIAb;NzTj3k z{Dji>ef+H2gNH%jKA#OeAjwYZ!c;b{&d_FGFVn<{jmX%4?8{DFuvt>xn7X}e_9RlP zA={n(UVLH~#dyB!z)v1C@(mUKiJuTFHp=+6F%RL&hUVY?2@{H5$6V?iFBpfwQM;psawxA1yf|Zv&q@ES=+N&!}|lC#hNBg z_z`g%*B=v9`!znIdy=c7{khSoUdb@w`IIh`u$%qj;GQ$jAC_BfR`&EyKWT$zb47`P zDaEX+q9%!O(%3NTjl`Z%p_ZzyNbH;wC@_|-i_T7B{y>V>DF*(^T;H)v!Wl!!Pp_CIF( zS2Zv1NJ|q5ahX+*l;@CyKn-_=f(C<|oG}p&-7WeTB{0K~jrG6+X|dWq%k;Gw1cy)XAc{i*3}TGp?!(3KDn{{exuNgtycEKQ(;mM-tC_V zu_w5`CIgU=t_@@3sf7jKsl5zmFL|xOSRy8Me@P4T??u*~Z%%tZM?SJ2$jALe+)5SX zX3)sA9tGEJk2t3QmzK97`4wnS`Fx7*s$^pg(?n9EBVVmS6y3_t75=RJnEcUHXRu+- zKl`&gd%~335)4B_K}t{bSS^fr-FHEM&Tb5DO>E3heL(E}IHGgg#v#u6*(!pkvBYho z;g!uV@o%UMs_n#vsC|!fEIzUTUYzIm^&0XhW*>{B;5YAEW-7~52J(^`F&?5fnSb?h ziOU}w8X14af{+(fajPTHI}Obpjp0nw7?=&<4BDPIu61OhPAqO0F}oD_#K3jP0lB+s zgO{b&Z|&zj8GimD!VVMW%rj4Vo@Os0&#Zz5JeLkXHyA)QBNNaLf$z;xk*DV)_%Uc9 zT?@s_xwh84cM%xhrXPc;?;jH{5dRh=pIq}?yt`ep2($HO(7F364J8~2X%E&y$;u&X zXyTn$lR~r^)Y8KmJk!j175eO6W*^3VL7-Q~Q4j*YVE$b*FBU3<1TY5Y;t{eY7hluu z+6HEM`$b&7T&jd?VA_uNR=&D#%>7z^H}Fn+L}>-IhwSe&EF-6@5dEa%NmwXow-Uq5 z^tsmanMk7M)cX%|zMifD?A?+gmLz!3ql{Jd!lAk80LPku*@9h}zHP_m*-`l#N{UG& z%)*1lRpsI@y%Z?=8JI|*^)qCw;-T@@9zt8q3J%m0z zn{oVm-u9<;@|XA}I@P89u;gg3nJ%)2=`tpF4a!fLXha 
zuMkw!sBrr=+G@g`oX9@~-}!z_q^ezeSA`i*y|-;}1eaXF9eHvyJ$-TvZ z)M2hR1SSXJH7{^hyIjRL55RuTgH$^KOoA44@M=!a0ikCXK%|<6bzZB`{|O*}0uX!8 z4%7Wh%de@>g%!>H%IOUd{y>#3Q^P&X8v|E+P$OZX@VkNfSTumahjEzL zv!ne%LCdn_kRyeDLH6A*Q@9w&eK-Pisx?pKv% zsa~5_vPH%Tq2-~RzBh$!s)dlqz73du9?855NXX{$UOf?p_yd!u+W9w81=w-GZ$?VB zZKLx7xGYB*{h@PrK~CyR`#13B=2-J9P6$2Dt2`B#;JGGhN@pms7GcFab9_`T}`GF)PoJ;|482XGfO zz{RrNJveJtTL(mumn}x^*CPjJuo=#}|K(~tIvotI>YG@8a#HU*^wQBo)b1F%sa*hz+u%JwKJP*u!O3R< zGo{s3fs$ff#j@=~A73cAZ!szU7zj>C!p^&b-YWx4Fy?&41K`eoo-eTKOzOl*0I=?) z^|ov84414U-+%Al?2X>7e-{mexG!_T{~s+Hh`-sE5Mx>>kJEyBb3Cvy@^{IS1Dis? z8n9p)%=du%VtDj#StUCV2I>H_Ux)x(^S@sL1qiRxoXZ;Kx+VbR0w$|^0+1V+tSSgm zVcNe$ev-*xy|f}ouO6Ha4`jlxm%)Ht6bYu{#!bG96QBg_ zLk_lTF%VhgRe$LM{3no80VaetE)}oQCOOC`CUC$&l^hMgxsQdIGsh8Afzg~~O_l3` zN)IK-e17T1bmN3TH}3m_r>4aDkD6g?93Y)p!a`MKOl83$kOc7TECCmX*GCOQRM!Og zH%WW(-_QR4?np`jwW=2Y6{~iuFZK5XWk-{C&$zfWziJM`{}H$50z4%Eovmiq=nHdM zpK6^Q;COb)QhEkBOD`^}JlqAG4M)>6lQZ}Z-vs4y%c(&1>`Vi$5eQ3v9F<$)ds(g+ z*RdrS9esC2qsdh6pLMp=Z&91}686%J^?^68V{@5uOA~D*QhA$3=Y@LQkiLFXS?;$! zfqCdl5|Z+_XSF3;;uo-W9o-kYecC8jbzQeJJVU{}x-Ta)vj~*-tbaPJcX9IY;TnGRfFR%{8}U-k zl3QKfZ@-+e?PR{w-tF_(N8)>}C8u+`#WUkiR{ci7@AnR2pNGA^P0ftI9CBoQoOi6t zffWLGTXUGW0v`%omqjwkBL6;()#W3O%wdF>M*#Vm%=#^=UHHyq$0MW*(OVX}wnsOK zsjUWL87ejBs&;jWU`)N34F;E4`~5X$7Ul)LmYZE@wblnr8)B0LN z9hB%Wf)Sr1s^H%0^J21Xnr%%F{r@`pZx7Hiq}w`dXEep_Fk-(AFP;hn@H%q*De4BhJ2WAxR-}lx2^pGH>d0pLqb!zlt!QYoA#)-Y?*o@|V zyC;=*+EwLHJnKL0dQ#B(-H~9si4oXN?^#^p%<-QO7bM3kEftR?jYqA*>Rgt3XTG%3 z86GYMa2ZTaf2vJvnnxmnxhM*)UkFexmgZBz4R+1TkK`VlTpZjHpyslX%LnnzU+PV5 zZ(K^PtK939bv+Jsnh~-nV)AhbFY))Vj-~0gHYi8mj9>Ln-$1n0w7=wsf-^ITHBjn( zni12M?Z;s3)<`5NT5&vxtGpEo{E6sqe!;0PJ=jmsPG!sH{A9{&~2=YYpCw`D=7HVcN}?HB+dAdxoT@%DHTWL zzlZ_mf7+{~+Q(*A^@A+DShs}O;2o1&(FFId?%_^fo< zeQ1*L(NFK2Cc7x=%^^FH=8cTB3^9YYSqE&2->gk2n*1&hTL25{zO5PnxLp*Xg0!FQ z``w9N4qzv8m7FzCEKV}m3ro)@RQZ?4(nq+%_pCpp|B?8Kh?^y1qs5-{NJn(SH0R9~ z4JrNl_GRT6;xvhL7h9*++-Z0BH&V^6T@y)Cl9krBWv_FM*zhkY2!U5@xYJ;$NkDT` zVrqrE)NxqJl^vd?J99x?mE>cIM4TjKUc6u0v9+;^`~bo&&cyF}OzF(ckdddRukh~@ zQ@^|Y;Wua2e74u4PBkQo)I)Mk0t*}2wBIH*UzOg|<@s2Cze(GLI>zZ0=HLbWFAko~ zj9VCsXLDflcJfEgzBsX%e#^VLgr>jvAU6XvnTsfhcfI$WA~!lDGvM)poZ5ifB?J#D zE8qHQdctqCety=)ct^=`>uY;EsA;%w@o~d~@z(JG3if6@H@wvL$;+v2YBB0{KV>QC zS2W(oGmIv%RlF7>P%^mCnssBtV{(es$JZ8LSwgay$000>91DfQI-0G*VgK-h(B~~B zWw1c}eiLl-s7Nj<>i)-*WOolugHLs9xaXKMq5?)?1~%*tD} zC0{Bj4NV|ya6{mmhrQWe{KfV&-aaZaIT;$kH_QhH87mMx_Nhz5jtqStnpC&ZR8Wb+ z@|G!`I?5=Ln9^*W_qM=SmjB2BsU;jdY+BpcgmE!lAGxt-y)Q8BO>Z!zaI#IwAE5NI zO=@Ivh%LaW(MrPMfmwvRWm@TMbXeCH+&dCT!3$gYYcZv-?y0$4rQ2WMi4pL)yAo{b zVNB2I=QL&%`-_uA#lTd*WSegE(8d8qAeStCzm=+`8=mFq9pGg3B158YT*jw(@ojD& z8S6c=8$imYHPzE6C_%d= z?q>-bQ}%u3)*|X*1|XjoIERFBFLnL{)oU?3*J3nfa^mGs7X9@b#g!tiQW0&$I z@|sqz|E(Bs-Qr`4g;XQ%i+!}lp8lReqg6TC=ixQMiQaIhJA6p*8F13q$G6Kd#6nU< z_>7RP(X*Ie!;n7FP?L$n`PkSa9z<6dLq5ZY4|7x@sbLFaTh)1I#->9F*}Mi}whpn@ zVmPm2q=;!2`>p5SwIEl1#BVU7qUW}l{=U^sM*efbvmj0x*J#Dc-`i8F>P^9OLhLY6 zX2B-t=Mmx}e5FztHEh7qxFL&dPwl_)H*stQM`7~h^T21q9>#BR*0l5FtpeeShgvr8 zWEO!T?1MFvQ|f?HeI@x9|!!Ap9qb^X4#T zZ%rBOyT)uVL+GtPjv4L`j7%mzWKrX|3n-rvTRqXtifNa|=yd#Q_#k3_n`27Bw23Nz zi)sbUGl&*{SFLB`BA(F60*SoALJ1sCpoJfhNI~gptC)WXv=IIo=CBq~nNzy>l@m<{ z{_hK^qFA{oeEQE+^R&lr+(xIUfTJqHjPMr~y=3{1nHO+AI$ve5IUfTFpM}Pa+0l4w zJ`?2)eo$cI0FK5BFn8fYSbKFVb8+xBkxnXb!@p=|KoKO=97>6H;3k0C-UoY?@ZBK) z?xnK7bWgz4G|a$Cl5dH9xiY(RF!PL{wYn@UyfOIa%0*Te^3ttE=Ol?+)mK2v>JWf@EUG8EC9R5FgMam-0jbdHRxz*mjGzsGm)e<4B?~t7nkKg|K<5?+sn@17YOQw0G%sDL~I(cM$pB@TDY9~GC zHzm}zA`JARwfD{`-M!soM=maHhQhgK z$0oLTD<+5%`equcoDUD1<7rWx%@AvQdoc!z-1+0F!>w4HNfCn&y-N1`GdZWJa#+T1 z-ekbph5TK*P8x$1zp!_Zl|2R7pv@OiW|dpEAe} 
z%t2F(gnl@WcjYPzfS+patyL)n^O$tB#^F@-7;H1t*;p1LCd*szS}NY`iO4g?Q)zu} zzB%&RLy2Bs*WOiz#URhBYe@EN6(?8`YA7HebLM{lEUcxwF8BsNF_#K&uePRir-&A{ zRkSh}0Q;xB%))&d_Gm&tOe%$^OU(}4pfj+m5SgnZ<_?Y+5ixc83b|}d#B)A-2mc8{ zb>zaRiT|6^i9&`F6WGHKsErGt(!9TpWcn<%Hff9Xr7C(I^!9$C_(V#xY>yUXUVx7? zhr-z*!coK45|46@T^Eb`M)XtEcMQ#q&6K}=8m7kfYY6f}IIZo!3~lv(!;lFttB9(BB%Yz5tb%V*s8s*P{&bO$&g-s zvVi6|SPP%YA$&zFEG}AlzMt5K`#CZXUq$~zXJL<*VC{_pbsBzy!J^2bRj(UesY-93 za?9Hv9lIWH2GbTQ~we{5|ip4l9%T!!_8tt}*(Js&VJ0qVIPufd@;Kw^`Ee7^dQqn^|GBDV$7=|@OUovFaVo03$1{^UZGg1S{LLG#F9&&s)*QEVC2+j;MG0~;xo+$QCzNW65Q z1Pfv9Lv~e3j#cj_pkD1~@pEg-Bzo4xS1f9S>J6&@mBtld3llqAbRm^xgHsl@~wLFRk2(zy8Nn?eCRa=8pS)b>Y zDe=0`Z6)LkauJ+!;*@+zx^`522$5Dpn$fgBqVP}6@awJmuB}^cOlEQPq-;wE9zvVa zQ*w2Zoippk`RM$4T8qI#hu#8lfYKMlD6SYMtjl?7r-RcL!kidYyOxND*1uA>9??I}^tR%a{(&oB*AeR`Q> zvn?UPlCVW+dTIIi@b&Y(7epA>b}C^|3`paAY2auoyI^sq=-K8s#@k9Rf1zK??@=2b0|zOpt7d&^ zFuqE{1uKVAduu#xZ7gk#d(@Y{-_PFWB~mIWE~f4H%TnQzgA7zGiemvj;gSMuJwjkc zIO&h2=Kxwdt$oG3{r;;oGTH8TPcy>x?QOX7zcUwFD<^UBJToPwXeW(Ip;bo^ zQ{Sr5 zKTBcsVx%;bF!|xT>in}x7Z1{lEyF953`5{A9+9(*z^r5XNiUKfznO&e z*9wqDWY+evOhobUbW!2CXm!+l=$2D50v3FER2pFa>=wN~^0I8Ex%?{Cb%OV1kMHhWy93IMv5Y5Hxt;y5cuL2gDviVou4-e zm|w~H=ZZ^9%=GG+Jee_e*qa)@sCK=r^DK8_st9o1tfzzlA5A_b*pY)ZNBygKM+n@@ zqRC3&;QFRV;)k);NM{4sXlAea=}-67vn5hyiz1%e;H54T*~bm8Px7Wd4cJw94EQ)I zGw1tP7whKr*M7cdf$hRGS>f%%#u?;wG|U#QqVP4DkHp|jGD*UiCcq+K+(!)81VL`; zA3%xsX74F9?QgXLG>!kavk84n>D1ZNO-6ni@P__sCrrBrKCoSD|3y7LmXt-}tK)zd zY+lqN33b=BW z3@S%2Qxk6N|Fnm+3(xI<(41M*VuKj7MGZQ5LfTXiBek3s0N(HauAxC}E|3H3|8yLH z*{|zAsDHULQqXQ%>=uv-><0EPUtgHf?U+*Yd-85f8X?!1Etpj3fQdSk2EOgVz1D*< z+ChfF6LNSU%Dmu-8O75}XTuh_2J8G^2m)llvk;igGlFR?fp6_3u31X}S&m6&K!HO4 zf7r?p_`v?6J2hoqRWss0voHUId0~Gn@pmc-wel}~Z8PEk>BGC`xL~Sc z#3Z;FcP8m66iR&Q0ZDD*4YkJYn2>LeuZF_yx^8pu2E~$7kB+8Hx326VcT>n1RA`AVh<*2E9N#kCA9{s$j zxBNj9>MFB?6PY5FE{pulZY!SJqx{-}Cq<{#H%cJi0C(i_y7 zEt1sH;SnKlvU`+%GpKO*yO~UD%g?Sm%@vRE!a{>ZLf~@Vq%oy~jMT~h;o@fOQJ zhZvVKpD92z3f{aL$z@t9A8~U**rUP$Av@t4G3vaxzTZt! z?oy*D!BTNdkV&-2G`0(`oQ3MVdPd1mQ1oM_a*|-PI?30e@CfzU?r|PY+0iI7gu<#d z23#Ls{J>dd=QqC@E)Rs%M+%%ZZpa;C+AcOohRrs2(7>9j>Km2Bzb|#5D!1YWjksAL zy&u^+$f);PL98;=?c!w3yhV%$a>v`>oIH8{y~1lQ<^?m@?U8yK?e-88IPAyHI;cy_ty_Bdv~C84`nAyb@O1g-|)S{&6&+8xX-6EH8#Q73HOLWja_N_WCd#DuJ7%xFewnsv7~?g!;Oc?AOC>18N2 z!-lgT$O;h2ZS-2-DW$DqC{B7jo-Ni{OZxV~IUg<8qh$LQC&lizUqk0mwKM$mw-KQE zu+PCPOGYGs)y(Rm^oBm9<0<8P3$eV&OLLDHi1$4xUoYn2n?RL#;6lDx9G28YxusU* z&0UW~8kb~-Rh4jf${&`OP8b=KZKw43hrr9mOUeP~3>6=E;+@?;wwHQD<2pFPq4vG~ zGQN&xW^;XSIhp%e?uQv7TrQwpfidgpxNWM8Hlmu)Hd&I%PO+3W(?-0+yVPu*Q`M+9 zYB8 zS#&8Hm9eCur!c!SU90NT&1=`8k}*(6Gn>`L913cK&0c5cAGKbnozlW5u)+r~`)Kh) ztyBWFJ9sPE)Pfc&@)N8dL_y+*=8a0*vPt>sVf4Zle_1C@poWrX8{r@$aXfVr*Viy6 z=5{iPageRQI*83$fAz`fxAyTZ?mtT5DifC|6hiq4{`5JqH*?xSH~R0uIO4K>eZ{D0 zr;+A=#wnJ}xTAaA(iyGx_}PBR?LnSwFL>nPGy4LkB+h??F_jpBgb|Bf^Xfi5y-*5cwm?B$ z1=h<|YZq|dmQ`&R0hz+(=v!TRu=V=6<=z&$?YnU#DbtG+aqmBK3zQ4HjB!<#V|P}H zN${_Av_Ql)e`GGlGO$wgzi_XE`z5;ud z;jiBqaJWJv5xHtM0^EiyPnI9`El#JQ8mg{8nNcojnnE!fdMmqr-x3cfZZz_uPiXe? 
z;y_@(>SzD_259#5X8I27U6d7p3h8dvCBNCn`+4ApW36jP2}Qj zFx_k=5N;ccO~nZ@pDdecIPFO?CFH-kbUtg{m?iRWYYh!?EdQSo{ud1XdwT?pNuS%t zyw85@jI2LtV-w1#+8n>n*9M{~gRs8F!#REL(rGd(HmTa8L3RnTs(AQxwSQeY%Azpm z-q7nOANg`lyGW=<)ibQ7hF$svBqcBU4(?JT9vhCu(za>!*uPi18Tou?(3=Gmpb9ynnle$ zTn?oPE~aYzoG?!ZtV!54?LHW3qudILg$Dbg>~2EpvrCN(ED?SoEJ>;pg_hG($%(^a zM>U^1S9WyqJRdt{!|2SZ=h;N+b`jUj+(6_Xt1$cm6sznKKHhH7z;Y_Pys^XNJQ|6} z_+@Hv#43ht74!VlN_IX2k^b(G5wFyj*54v;4vBQQo4719d(T^2)Iwyzx8jw)D`PX{<4=j?0{V?Xpu2yW3u?Yp1&HaKf{YTmDShj=S2c)m=* zCEFB+>37hr6)5nZY(4N~gG+9Qe$6Y~~JSKDAU4kfOI>g598Ua<)Q zTj%5!I|*)=OK(G{Bj=&zm?~TG?IgCAXTd35`QyyzSJqwJlE2$IL_$as8?SLNew4i! z9^vf#sAEyCDS*HvL(iyI+;0}+Ri@=bPf=W59(6F`d6c$nQ`@t<3(I-3aoP*!dZ{Wl zw3P|A>D&*3RuUGr5C(E?dQOHHBnP*(p5^4kRTdKif=9BpR0UNj)nJWRYqvcCl7F+C z@dq6EH`CwGQ<&VsgbGUuwE>oN@r2v=DRyVx8u@F<_SpHF>_ml`kFqn%`b3StXdAo8 z(A@>?L%q5uxZhMnN7j)Ff%VWZXUYX>Z5{y5kGUy2Q_ojLm80s)^q?I{obnY^fDR2B ztD>dfA%Eg4>d^wBueu`PxD0_qHYMtyON;2^-qt8YH^H9ttc*h@kyFHBnA#$9W~`f@ z!r$4`pvcKarCiI+XQrmXKj!>o$JN`Gyd~j7sDe|x*o776h=;Dd= ztsLPeK_BbH&*b=SiCguRvB|n3YoD>OAAYUZzJreLJZJuuYb;pF6wbiKdI|)6C&=>Z z2dnuJp#AiO9xGy6;?6U+i-=+UN?5{3g+;^Y$k>z0?J2%2zunU<^GepnI}EJZmA<-- zQH9~kFlsMwZ>-LLAU)%PRJInpQoM*z2kyYYc%C_l*WBPjzpUPxk`WDcbr!HUBT{tf zAEkTNl%HDYhSrr;d2#VQa0tnl#}x%m*XwVsT}&mr=xdceXuMhT4bO|JL>9EHvh87( z5#up%YI(UrK9E>w(&D8mu&yHNA?+%6p||Cg_Ku?iHHFVe6Y>+T7JKYpr7|E4i&@=)W>lE3lQL`+6l#D0a0C^3Uo^e<^I(o2?+(nV=rF}Q z)~uYuIu_RYr~7O2BC+=2)fh3WC;tB36b!>sXg8l76T{qZ%E9(kw%pnoVr4wJ zN>9`}jP9R?f2QoMa*kyuF}O>0D-^vuW!0&onk>E$U~`$u+*i(0J8(=(VRx1>%KYJ; z%r2%;USue}Z1Q@9TH;a;PmPUB@IDeU?(YC8-mYOhNu97?za%Q)UE@5TorM|hK76?E zFSpy*oqNupIN8h#JF_P0g4~*9{2sI5j5e$_8=-w$?lMRn({Iw;xvxj92a@5XGM1<1 zM!K9EyA*k4vh#9%cXQD#kF%IK2d>^^jy>K2!bt?ra`xgJPBlDuN(n#OY?AbJLfA~Y zH=NLSx9H;l-Cw4dN#-9E5c`DnGcxOmHjBZk@35;+zE`JNZm-MkzLhp4q7%3AQN#P1 z%6yA}nBdd%%+2ytcC&%BKnMhkhO3}d)c4_C2E=swsa$7g1rhi3pxhVbDkiBfVP`M7 zUFQv&#c^QuWX=oOt%SG?(vG^jXnw117y*$7rlQbz!9gmHrQ3mK_F)@i9VMlfy--in zY-P__-ZUI&+o@rd;B@a9>NQb&kZb*+vNfwHmY5r-GxAF(@(adwkOaJJViB#5L$6z- zalQ%b@U=CDYBJqo?y{AIqr@_tS0oNKQ@z8haPW2E_TjP{yF8c(LU8VuJB;r`X6ZLp z$j3FfV&WT{onpdc2z^az+UFt!<&zx~cG4$a_9AJ?V1f(uOmq2jZpEbCbv4_U3Yyb; zVqP~Rh)WwY`+NRdtivINHd2kgV~cRzMJ-}nQfr0kYMVYwRI-c-^pO@9d%iK&jxlU? 
z-0cpG8Hqv5xy`5Xrpg{(2!X)Pc?y|bh2P-`9$2YRRNNiW#H$fZ5| zB~GwGVtMGLC=Ane?sXL9^q~hPv04f{eAo}0w2Wvp@GZkW7m!cqxCEnRrL7(4dC_P6 zGZp&F6yBdB*Eg|_&VtY89kEdW>&;b#`|Y0Cb4_Q7Ej38nS73|Rx_?T&nyEMiy5`j`*rNs?+4*X zGq}XCj2AMsHO_Yd#BNF!-qDXo48<|(wj5qGFHM&=?mFl}UOj}&be20PO>-owrG}E8 zMV%;!XdHN~MD+(9%u=71+tZ)OSYSeW5J-W~=EQP1OrHPtljzg3=B;}e%Ge~SM z6n!JN4|mSPa6_{DrU-#;TVzgjX9xkdl}iXYkg4#!4rhOg?qA>0BOf1-AMP;M_gPO^ z_1YO9pReW?VC)d6+}!J0yVc|s4i&Rq*045w%?JhY03T$gl~2arJcP_!f2)*teTYhw z=O)t`#Fe(1Q!)Ytu+nRA7|&?R5pRX$iK1*zgW6LG2IDvT-bN$P&szjh5~8i8 zL~_JSx2r$-?d$v)rUBkGe6cNAr?yIykNEF?Fu<=aV)fOSojyNO+Q=vW5OgRK0DIBG zgygVM#&?=TM3l;Wwn@$cWI60mf0Df1sureZ3T>nq&wLVH5r|kf^KlH$vf6#vM4O#u zH1wn@!Yt!!j{p2kcIz;#WQb5#YtI=(GtUx@EOk4t1@R855!vrr;*uY>@>)l|VN$LgLJF_z=pZhs zet@zr_2Ae6;)g=8s$l{VZ{ra1?fcTZCqQsTy9uqXgDXci&>1bJ)Ht52a19y{PLnNw^-L9iyDg05d%KwQ8|;ir^JhNwVw zoW!(3rdY3t(Y}zDme*2(0STdKuC?}Cbfu=WymaZ@g5Ja%9F?4ng5OM{tEJ*7!+R*) zXb^&xA*UjAzV4)bNk=4YDt2^jrrrK1dGvXE->EIbxq9EhLvP&Az?vek%mVXyjp)lj ze{?%7)~Ly1ez9wO(Rh@fusrgd+-Z)PfJ<%I`ug=6j>Gv)SMTl(du?xnxl-PJ^Fma1 z@^a?hlq1sZF8c`WZnV$rQ&f1dr{z)!dm*K(9r1l()4j0^wjl0wh$+6ac9%o^s;UD_fq{cmzjP>Ti05DtYNFG%gV^wX>F<9JiBEhqm~p>hvY~P zW#}>p^twYMSh}O*t+B|8O4yTcda5r6V6K5LLKZgXndv~OjH$Rfd*fIa3b%Ibs&_%H zU%{+AS7))f0E;^f$$M&!?-S-1I*tVQ3qOQCkrVB5151e7-yQekuir{r4o4VkMTco| ztJ_A9&+{{+lHxhV+A^|8A)>ga+b=<>8{*=y+nd)5&3K+`a-MUcrgnJU8WH($Y*_WimjO(DAn8^iNyToIjuL`bddMwE9I;xAOb_29)o-b&^1h9KJlnv| z9TlsbLV^1-QwElSb#*pXz1z*2;m%h!iEWsW$J}>g582H%BzNeN5fSYsJ5X>tTr%Yd z^W;dpWGQS6`bV@|IC$C{D>x)Bvi{CmL2I-$#MQ~e-muWJ^;5a;WQz8{qa?;qM#h7i zbRrM#*@yBC$M}oTvR>*HEfa#sBqhDtPTfpw#DB4Tm)B2wahgDYxU;)xd6{SPsB506 zhV4skoye@3sWz*s$5O@Lc3R|VQ1aOcvYFy9(94Omfg~^GC;M$Ry>M1zFRkf>i|JH1 zXZ>1&e)Zs$b0mN_GuxZhlxPuD8El_DBQyl;IOQHW#>+$+P}dkVpd>r4`jv6IhN^8R z6o?ulrRXM%JtBMU)FmP$!3>c-)-b)7fiEM@e{H&=VCN#>rax8jt~(X#-E0$7le=m- zL+sctCx&Cs6P-M;+I-qEJC^FYienS1=^U6e()Jr5iwrsA#Ee>KA|qSkb^M}$$;C#M z`D^UM#z&aZ8p87O4bN?u$E6eqs2G7Fi@?*+eM080R1gRwB>tUNMvkmfy1jfU44`Rk zBdLpjdpnBv6-abo;c6mcCC(4Xvy% zh#GHR%!uHAz7B+dkgU!Q>1*`N*XFqmG!zH0X5`w8&QAAW_&2bB zPU*%Wm4pb9$L2ftDvM!;_#ECp8S)`Ig4)&&y41RFSMI z{Bj#y@vIBOf@YH(tUxnX8NX)?dfV7&zMk|sfj?uyB0I&nI*>-$Tg7E zzx;nnhu_Ts>Q(@dhy=*!F@r6w3`LV$c7nmJ+zv39?rc31^0Gm$MVNutV>Kv^1-R~M zdDwx5S*Z6FhP)2a=j{#_5?wYTA5$ZWkULUkboR}wu|P0`VYd67C;aIUYyx^jsd0I= zpOiCi4v@3QX=^WQY0`pivlCn5Y$vCM9=)aIb=CmqwVm|3ZH9gTE5cRIk$YO(OJXhaefDO zDi!Tva+c*WFZ6O{z`uFSpX~!5%Ra`blzvm#ULXeLr(f!d%t`D&{4yNt3p^oy9fEn* z0*@})75*y20^{lR>jWljK1X@75JcoS$u$Z;b8&A-tZIzCO#xr7o88rK6uuv4A9!!h_$#LnitkN|36R&1`Zw9R}S%)ydCZ8`1K0Uaw&;N+3a3A~tFR zB^@kRpAq9lxwDTcTcD^tBf+MN=fCK<_;a_;-?mHpS>63eQNg$HO_E8eQ0zdFnYYr- z3d-2e!1QX~nY=h1AMzk*x*T7(*KZ;3wGbYmQ)E_2c{=XriSPdoTspBIfzQrVn&hWS&LKUsQ5pM`Ug=Qpn$xI9M&kvw^p(>D{4#ZpLm* zIV?A>PDK!_hRhnRU#}M?4}6+`hW3+Z+h0nDCg}E>u5T<9I6If`&fvAYbq4n2|W9fLKV<)u}k8Ki`XTg4{+@xYv#> z${HDb2(^a1*#~NzQeAwa2cZ2?NK?ep`%)*jL&t+`aay`zn36QbY9fKMMBf@-Grc(;X3TfvqH61qpe0Wv?y#KXyv z(qw;JzI5Jh?ta=zB)6aRkg_|V+4^ISUg;;LVqli^-}^!?l;#?@%4exfA&9c0RWo(i zcc0#JGgveyd6|e;i%l`>%Z&ot0T;8SZ=-+_1%MPOFpJLUuOfNLo^)=a+K)XiY zZXOq)*p0}9<>_RKGTLOaSF%W_u#&!R88W|Zaje#RSX%wwd-fVaOde1xSHGlPo`%sh z!`=hmmBQmJ5>WOoG4;q5Z0^lBn`MQ4O@YLwFzUUXA)CV<}RtRn>9;bF4|90Lelmdganl6d-lP z2ZZ94pUv_Vs2cu|phrKXwD*%AyPb7%`-E`uf~7D3?JG)WDgO3+Xldh!30L|YJukW- z`b#+k+pBcgnRPg?QsXh(gV5uun~U@ox5I=qP4|P`Vw%%&sStz*C(OSpS5D+@D zoa8#360q&xtP|?6m``#gE^q}*5i56Hw;`QJ(!&pN{W%V8@2>qlkZ1#x)RbD+>v>YE|7UTm!p$iP8K683UK7r)g8S*)lmhS5rM ziN*AY>w*6qO^RRC&qzf=FotY&Di_oipElVo0s8pfYUen@=^xfIB zbF8j9Rfp8`HLZw54|^Dsd9b^-rovT zqEUa);}fMey-Uy)n%=Q<(y6lp!gZ#Tuh>q?pGWI{= z%;3@1jDhSkG#f7`G6h1?7>`ZX<@b{p%`D_?7@>Qfyb`xC?4b|~=!g^9N%5Fl)S{2< 
z+OfSC|Gy_Uol&+*#WB;D0NeVj(lxy1Q!bNz+WwUG$<4Kgp|pC9@EGbChFi&4#_^Zem~Ltl!*%4U(*9P;wn&a z)&3#Ivu0<8KJqKyWYOnz%1*^GADFBe4>5m~olvIE+s=JG#))0-pNY(rh}n19E^GXj zg5S*J^@iwyawWWCH)eaea`@rssUElfi1GE+@ZT%duW#9=qWB>e1%mYK1xvf6WT3+X zrbog)sj%YR!w+GfR1cPdXR;>ZwT9^Mb6O#ANJ>g4I;o%4JAc~eKTP%i?l05Rx+5}N_3IuU^R2}b Pz)wR(M>${Ve&GKA-OAjS literal 0 HcmV?d00001 diff --git a/docs/dli/sqlreference/en-us_image_0206797033.png b/docs/dli/sqlreference/en-us_image_0206797033.png new file mode 100644 index 0000000000000000000000000000000000000000..4ad6fc0014d75c162a52257901a10a15db9d5a20 GIT binary patch literal 73039 zcmeFac|6qp`#&nlZA(gvHA-m}DhgSmXcKLyNtBXeP-N`W6zMKY+f5}~?kKxrq`@df zB}}(0&0s1qg)x?l7-r7(evi=Sb5G}c{`j49e&_T@kIc;beZBVUbzRTb^SWMtTJN-& zE}y}Nsg@i^(K+!Y8!t-I0}*)sclyKbh$ev`VQZq1HaX{`fxIGk6c)(6?8{B4RSCCTE4F z2>;LicrN2hwQ5c1;*ETnF}0%9Ig-#Im#X>bKScP6kGu_DZ-AFuFRFV+YQ*wT>Vo72 zKbN8{QlcVUSzBA%JupzGA{t33ttc(s{KG zquQ3jrkEnx$MQ2LH^}6WuxTvU)KLw7Jha5&=avwBg-vPZ3ooCTB}4rbS6X%Pr>^XS zZ@dxA~v5_6W4LY8T~c$N~~@T$S( zryfy_KT9EAeR<@zNC?q)iRYSyllzuXBod-K8d_=&KNjsQF*o~ZbbI03h)r{A7pdmG zFd)Rv{HaEt7pvxd)fCZPsG8SyrcX!Vrw+WhL^Th)=;2NEDM=bF;!WW{*GO|pk|lmY z&P@0*`+D}Q_|vp>+x8)K)D2JK^|xO9 zpq|%u$YNHwmCN?C2TIjLiI(>Bc0~U)XUrv(Olcg_b#sw*NttI&Ru9UA5(_t1HpWwQ z`=mPEe_Cca%Lx81iV963#MKI~s&?7X(`_WPSQSsp7%}Ob;=thhkykJC0)1V0ymXc) zr|+9D)p>xPKB_}zXb7Enxl9Zj`()M4l|R)se71`uHt2jT#m0BHU+{+F*j$~&Ab($K zanT)okK<{VfWScB%uug)0ZoPtM2GdqzkHO$s@3FloBy;bVrM;`L+zdhE%=bP8-IL% zzx}a+M4y4myK2EwA3xcgp(U{6i+N0ocfH*m6=&{(=~Fu9PS&YyoR&uM@xA-#ov~R-GE-*C45t7?>2A+m?6+?Vx0OGmorrQ%QDN?2zum=h!TA2g zpvA%e9XH(gyQZTyAt99bB7=$Qe4AM>_44YbVqU1_RW)eOZQk85Lt*cE8R}g7$b~-Y zd70k(axkbqy|UB9W&3g0N@UEg{Am)Il&O&;j@|tC)$WbKets>6CHNhMqZQ&M8;qIr zBk3BSl9NL<**nVSQCAmklM_XI;wEf$u{HlhBPv*R58;j}EwfZTuWGhtJ@t)V9@B}| z+W$i9kdg7hKte8sSiih2-yun(HvPh3bYCO%!j|Q0Ft`Z%fM6Hi^~*9W&Y$s7;%-k+Yl`ZN|b4MnQ!moBxSkZ^M%=7&O6!pNix0YFjOy z%Tk@~^3Uil$eZK8PumUc8;XW%o}ND1tfEqdBxXX_J_n1}!N$+8XFHIy`d`xziObmS zFmX}zi}qf}@6Bx3AX!v7d8A|BEXpx=iCyiTgw1_(*g~X!jtn*B&hq9MbPgU}rkZ!G zaHZ&GWSx0Qh+#X4A^}TG63w75`}cZa(6*>b!UlHvn3fnKd<(qsN=ZZvdGp?e0h%s< z8g4dm!Gt#NM4Ick6&|mA?r1FnyVv$Z5?z zv;=wYtjR2RQ9M0j7V`H_Ff5|~e3~dGBSSs?WUrhge~_V^*wDA;Xm1>c?X?C5dpo?w z*dCV4AIM!WkV|!O$o5aT2D`vY+4CD`s;cJw<-JVXhToxJ^>n`YjM^5~4<~A3(|7Mh zBflo;XDRKah`mGTnN*Kw4)`5ckj4;^ZQrA+3R%dpV6l9Y5*B$l8!Q)`;p)Rk85cFd zlyGob1JkLrecD7Nu-su0RHY$%cyIkX)L=+l02YQkze@`b@mt{u#XerFvj7q10^_n@ zUG)4y6B8G&r+mBSBecwO`$aN}=O_zb!+(v_(pE>se^gIdqssoCu5dIyY2_pO$wN&++RoDgB5Ajz1Se?Cdg z7^dPw9%<7YJ^VpVEKn+t#|LWDk;qJ0e%DWu6CB1@wx<~{TX-7xtAy*esWH3A}67wj|8~Np;qL%J4aarNj9DW;FSfbFkQ*)Ybpc(^_E<|_> z>)AyghbrdXFza{Qyk0-?`VCNye-#ItTOq5;5PqJ&#octmXed1jR`%+o>1ai}p&s2U zjs~KW0)6HP6sKMPb|u>G3ows+kRtglG;tJ%cg2r!&|2(+9@;m=yz}G7v!%{ZSk=n> zKS5hLlAw-04J~gb>a`Noqs9ZQ15#GbCD3vbq&P&c@!xNR8A&W$yzVeeDmU1-Xf3y6 zwa1EuChLG`op;RZ$is6%U(anLu0dA;`hF~Yqf7dxz7w8gLIY^~UDquBAsJPz#FUaC zStzV@A=GQ(8=r{|aqNe0+Gk7=f_hETQDd#Tavt`LvgLA9k*X|$Vy^X3*N<0gim(N& z)+Dr~kI)@G+uhDhbmc>s*$?eR&!fF{1A2O!J&kWrpwA?N)2Aef{3&w?Rxc9S~0M;&qL{X3_XYyC-Iu)j6Ui zf3dDq2d!p~wLsQ!PPEC1Bqr(c!&{FRrbxUVaB|G_`g?Rt$Mu7W(Z8QM3sD|Nms=lo z;k?ic&J73%=no?}srG31q=WC|Qu*!KVg45F`1e_g?yTx&L=h({5f+-!z^B4-7nrCL zOBbZDtnSq%oUTqY-^QjOb2PB5V@=|7}Jkg1R5C$G_W3Hf}yzp29THga2+?&`!>A-i|cm_v<2J zKbx7qyYBzH(`NG^$ml{O;yR z^xfHNs(EuB?G;5ek<+w#DJ+Y1gvyhj1!&p1vaK{gj~pfSTfIYwxv6Ic`5@AJ@Za9A ziXb39Q|Uory%6Gprr8(?G>7x8#@Cz70vzIIk<>8;%HA^z^{qk8L$Fr7}ph)iyC+?rS!D-w+sFxXrwPU7J68Qf(az!};!(kbf)p z*s3K85uk-0@bGeLB#>R%>1lQOM5WvP)T{$8;=fAd{UtEh-A?3sCK z##m!hPibrvsiGBkzmBzMbkP4w$0ZwVf#D9PEQ(KKeSd33AXWJNi<0>XR;M03v7!~V z@W@U_yXI6z8h`2ex+kEhO9JauuimMz+xj7dxHmP3#y3mTL0zZ6rJ)Ay%EeclTu5`E 
zw}M{Y|0C1iPCqQ&tE=#$LxiWdv}FTpy{q}7%ZWa{(i#0FJ89`JGAa{&l#|+rzkFF{ zr_=7+h^HBK5YpupI;Z4tabDzPW>Im_yUw+=Zp}Lq*zNYBU18`j{l`XC4S3RHaqIx@ zoX)2m7;F6(+0jD#@JsC0eDi)$R7)c5WPKPG+Zbf5@IL&&qflZDqq&ts`sTj5taE82 zcjnuE&7?!3<%c=<$%W}vjM?85I_;WSE%ci2?YS%0EzZk4vp>fHwO6kI471(bwG7!G zuUsxQZl3YEy+UHmFsCd^ePmu(Z{X;#PNi1P^TZ1w4~zuWdwPXoic|E{eqP5Hg#unJF>Uwt&=~l*D%xdSf&yJ z;+f2^D8*l^yu6?l8E$}aBu6fXn;uA5V#EUsc{D4n^WqsEaS*>Kk%x;s6GXRb7)&`6 zl;P#J^rYmTpmX&@294FMqv`YKeKp18>js(Pc%FLot1397FC0cTCwQHseeON?O5g4e z6n^|5CwE#(iF=HUN2b$RlP!ehd84tTU5Dlk_s6JxYRMiP97-NdV42cJeSJpTM%M+~ zfBSZ|GRvX0ONH7}_WrBdJ&h?37=x)15k{Hx7d8>8SUN6v(I?LgajcWA)9|Q*in4vj z$Wh};dr7-TW7w&ID0j5?3^?(9_wH>)SJB2q2`t(EN^2@??h|4|H*C zr3np1m@<}JadmMO)lFjB?YIz_x<7%|!iyvxW&w6=fF)nlAGEO0r0>GX;Rbe5Dp?yz zjE6g14DdVhho;V4T#>6+u_`YUcR1%be>QA^*&yQj2%Qa4Chkd@D*)8YKX+<)@!M#J zbhnb^&id@gLnCK5_QyG=MxA9#V(sYzKEraZ73}WK+1el5ovJyH+28U4)(1LlYjsLy zI0!LsVwkPjWybncZmTGR;>+a(ki}?oHdfrc;j5CQksuj*mA~q5!K%NdibYS3`sTb` zoYySrW{SzY@s9ilbD>C`@UDSX4=%HIxQe9i`J%V!0Yh`1M!WlZ*T9g0g;e{{Olp+W zBJP(j91>MQWk0sWsY{8ST?$9YD=4LO?^RqicuKGSbf(K zlTHKYBn`S`*i}9dbrlS;E$dM*op4&DUK$%THI`C;U$#os`=V*8an;Q;jO{#{(UbLS z^O{E&gbeoPlh1c4jbydjz>x{LdGnUeMusIhXlWpKr03)QM;kl}M(thg$xnu7cE#hY zW>ny)b$8n{Ico=v%kO^Xh+*3fZVBgu2j2jW^EWL-Asos2FJhd`-eyKM#J5vssaml_GPqgJ z-kNzqfsBI#YJV_#{2JTe-)n0=KaX{RR77tavUr?8vFQ$d@e)MTN7yxl zm@T`P%wmy{-6n&Vz=`OcTdBfOE|8F+h|xVKF%Nlm~Z7yP8=W?* zan%&z>req5&FO5uk8lkTiLG{6bP!I1v8#Sh2&WGF4%EH&-ZMK?Ym}~pX|Xa~7sVy? zz$|~ToWPu=ss#s{l|%m=cH>v;2Dk*5s^XA05*I-gcT0G}y4nq!$zuC$;ry=q1||4w z7osP&H&y~7ohCxuezp`I&-}doaxMab2oY?+rY3AeR3idgY+Y6wCoZ!EPIW`c+?%RM zXHJG-bOEHCkb?DUP?JS`PQrI^5=L}%&5*|d!HB&f+;mHbuor<-vaN)WN+Ji)b3H7p zP?%8x9#7lmND>j-fmr<6XuE#`6W9J!mw$&0cki15d#EGQfPckbbzLQLwE2DVuUIE> z@4iwWahaR@1i`yiw}sup(PsQRNUO1LPL92>h%-1w|L(OsN)b|2Lx=a@(dF&?rn$uk z5%y0LfaKCLmX(&nsR9hg?=JXX(*>p>#L-1QTrE#2w+=Z^_85Zl8XVx)4rXi66=ERF z`y{s_7<2Jm`s0TW9xuFq>2;XXhjpQM5~NTNQcUJCatLLLPV*wf@6S=Cv+>eTs}Pn7 zR%t3(46m-c5#9B2ro&oQ>~xEURos|o@@EpZR?@i-UDBxcE&+2%!hU@jx~jOaOz>st1( zG~HcLpz%r}=$s*8`T1j#VG*lBaaTguqR_#YxqwJ2Pt;3nB^;n=B)O>|U*)6$K)akA zhbj>C{l;+k=GHykU5Em(1<1CW8ml)U8PS`I^O@)&i48DYYz`OfnW!~#MxyHjJ*jWJ}iYXpN$Apg+ZAc{HG;)_=QS0nG284t#p(@q+Qt!hRy?5{&vDI7!+fy z69F02XE0(PV;WIlhX319=;&j)Qlh9zUIw+g@#F`X{}&dJ$#V+(56(W$TSQS;DNUshx{l7~x?#_u9h(>k3U)+i?Nmg2JXVv@&KoUC z=&weewN##p?{Y(r)d0c;nDrQWlPG#-vuE1E3&g>fY$mOsxwA9fSW0OsA&Q_9{5|PX z2R$`jsyTtL%eMe;Y4H*wD%%dD>gEFQFKsAHh|nk;b~9M4lVfa3quR=H_!?&r^Z>`) z9n}ellFu~=c|TJOcAYuF(^dx}^=rHhYBGP$I>Z`wvxY;YxCJ{RP24~up10N5gKj6+ zK*%-Q8uSo95y*mbNoF^d2^3IO+5JU|e3#@Pn5zeEo~=g}%q@6E-|VIm)X5a{(DnRu zzQs5jK1uAOp=XX6@O-24vo5g^*f1VGiIipg9cnf%Mc1!+^$Da&OJO5ffn$cF$BO;{6D6$)c{TGQw0dAeEeZ8qWM0Ba zYo(KN_&KCkXTdY$J+rt-zi};EnVSx^t8nZb(g+z~MO@C0cSfJqfDZT~b@((2nrFEpzndg3g~@Wi zZM7XDLk@7T7e8d^w&$^o&}STVU^sOAQ7RGP9`qbV{@G`-=g@P}Pxdy>;8*rPI2;;2 zVOj>lNl@84nwsVOjV}w%=l_7Sc0hu`PBA?$c3n*d14E9FSiKP`{vzxVFAi8eLI|{k zxzOMrGISeskl$6U!F#ZIcUm5$p@qYSJT61gi;7c0pUFWcltQsGYD?^f7I#U*A%H3L z({}siV?fdyV69X=b5TQNUU@Wmn5$#vIq~s5TiEN)qr-#jb>qVmN;bcxiZtOlcmPK- zQ{SVT+vQ0zVmlSk9$C5#roqWVDU`BT4i40~j0im@iQj8>maw-+v_v99?6_?55Z_xh zhs_^iXK1JM;k=nh>t60hn7<3&M4Gt3!;H$yWAvC6;7e>oR~h0AjStT=hh#pr!{hC}?l&}&Lo@!T20uUEM^mzEU0onC&mK;NbY7#z zhiWub#MQ70g+vaBoR-2m47P3}I|X!(I^+U>FQbe6-RLq1kBR0yt_6n5L!-qjD4 z&(T`FuEt}O@+i9OwO@mgCjF<&45G5bdH{y5eF;>|IlB)6GM$Qh^rvg6l1@z7Y-)7T z!Nz&$NZ*8pTB_%vf4{U>Wl(I2Yh7Fbj+NM|4!AI*A`17;Ffe%8rYfyYXmNLpTq}Q$ z&4W2Mb!?8^kfB}>U6;h%dHF=i<@8IIVIRD|d-ZBpw_B|6DC+MVX+D|HGU&W$XG_0q zL&Nn0C}!L;loQ||&u+$Ms>lbalvNBqYv#m1*3_&aRSeP+Do#JC={0D{V)zC$VS@+q 
zuQX=C+~{doDvN4BJE%#*iWmWEl1!?z{KZ7z>sU&0PbNO%ZrF>nIA2V+c!Z&Q+058cg>xPd$6{IS`cbEbEw=0lB`#>~9^QCi zRf3YR?J6h1iVXoqtAei>JNGdXm=(J==PlsvA9XoLsb80uU!<17Bvo|8Dh{_!sWrs} zY$2^HUodm2rAw!=yEgXa$>xzrB5(Qie%n=yU9B$}x1BXILV^4FL2kXuRNlw$8+Zxr zBf_u77RhV^@6^$E3bRUW3Yjz-Ezh=xz-k_lPK&~2?sMp)b5Fe}Ny=n8-7llRtv9+b zVusI|Hx&IU!67Q}>L9Jv;6PDwCd+_&Ab6v}ZyhgcPnB!#ln=%~Jy1L?V5k>18h*cCC$`_YB-v%3ik2gP7(&RtW_V;YN6#5ObJ z26_##n}aN9m6hFJlZJx}D?i}$vcCdSY+ldfRitVf+Tt+lWT^*R{&k^*z-$6Y*-)|a z4!Go%&ejH-@@o^qQk(l3VNty1zR1j4Qeotv-t)(~EU8Yt(aW*T!*l8|#yvefLql?( zQo@L&3mWcD?Fzj{rnH;2B^-WN)J26$pK4f%<~6; zC%pUc7*7~BZR}igMRWtdGYvE-o$YAc%^T60RUNh9o0Pk|d7?Kw@_GM1C@J7Z+2}4-L<)p8IJ5$_2A+9mn7nVKGg?4 zc>QCy#wDool>-|qBE`!BohPu(Rc0&8y%i#0jO}vpGV%BuPjLy;tu+`arkG_h3axy6 zf`#bdai6wr?KCwkY`DZAJ8*Ebcku~LxzKfXohNh=O-2FR0ZZC*(~xi%;4eXj$)##3 zQ7cCpG(pe+KpD-;-o%?AOgO>&=bSissrXXjFU+WMDVXoBH^dRN!DJ=a2KD~8(HNXO z*pN3L7d^iu2MacWpxqvelX$em8Vtu@7=3rwoPR3Y2wOSv-MweQ=*;~u$d}eV+u17d zSGLKx6oDoxjc5@1UUumLKsW9A+vH}jUN&pQJVl`tIj~p1ym9OiAtHk<(2~N^;+}Ph z&xU6-&OFHBn}%9ors+y1M~rDdQ_S02Pd|vefeBhU)OUmrP=5fc&C42n!WU-hxD4ajh;ZIgBD58p)H#zw+hx| zEao(Fj8e)5^~lUDlt(o+`iLF4w-~c#Bh;_i1?y;+y8(YKBbCe958pi6?(qpeG)96# ziObiWgsZxOJM7v)TZc%R9ACh1lP>fJmMbHKSO=4iZ^oLe2Nl)JIH!$v?QR&3W!VuE z)l&5YTvbT|sl#u=Ds;}|@Q>g0Ty%c@pK_pQn$ZRg8)FfGELKmH>qf38Z}GdEH%4uC z{MiBmOpW~hJPT!AzSH&;>jLs4x-$Dfi}xK;Nk$e0L0*K9y^V4Pn>F!6+mUEuVU?*4 z zzfKz<3xBE2B!54%(KU*IzOM^51vZPcA3iwl0VOgH48=Y>4Dka3{KdXJUKc%wL*aaY;vl># zErGP>H_-JP=z{(0e?y>a42KkGn~j5I?N9iC=_Ro4E*)5P9wl*T!y+BWG6h?b69D>y z?V+RSwn*3yi+8^qHU!!BPlM(;f)jgWAQ9*Xj$GhyUO?dR!?ahGjId^Ih$punc#3}e zaXZq_?Pw|#1l#FdAbJE0uw*~e)yhE!J&)<>p1=7+CF1XXRCCXir&LkLm)_%X0&y_FP~_@pLV^XLU)aAsUuEs|66XVLkCa!M z(w^B$k@%)VEwti9a5u_GnI`sGiJFGvNdag@bCXvqzi?U{TN9)H`VIS1JmlS!4_qLwr;+l~!-kK8=rP@C&z-w{kN2}-Y)6$JU!o#BwHhNfK`gYcILlbJJf zE{JBYbmy_z#CNWvM>!`r=eSu>Vl5I&22{+xaH{XM&sd6tKu1OBlJ|6X;8n;YI z*iwK${LnCOfL!wemt3Vly%3QkVbB`4imtpQukFg~l!%WiBj@qfp?s)Qnqlqqvh{$o za>jWS>U?Y@n%2p7U`z%&{as=o#_6#FE*&#GkD1e$ragP;?OV35_o#G^5$oN_l*`8e z0XDdOrg$m*+3Uu|Ayd}_Fo8x>(OXole!bnYb2KEU~SzCZgC z8wjbF^Zf%$ZkHE3I4T>kZosYd<+%QNg7M{%ic`Nq)5)alPN(eA z3j1lV4(}`gY$`Wux zl{}$ohz|8bZG%Kw5W@va%It-_?{D)ZZ_JVn;hbm7fw)c5Vm;yQ`h z8{hDh*ik|Gr>Lfw!ZzYw#yQ*9B(3;cN*RZ_@r2>6Lo~NmFkmk@^@Th5PhCqcE<7;I zFRnt@$|b-doodjSmaq9W&h0OfU_1;xXu6J%6SDrC9VsoYYvvxn3Y$wM>a-h~mA_*U z^}>^C);BxbgQxrK)<5>B*In8Nws2oN8$EEBltj?ZS#HZg{|Ix4tUd*=ghH=v(oQ!6 zb`lE^zSMyCS3VHnILU)X!${j`r5OPk`0&1tuvr}{Gctnqo_-VLto-d`@?~Y?Yn}tS z<l)og_3h9d@o! zhC19jEmua-YaV`jPLfqVWA}~k&Bx8K^-qc^Hm~oM^MiCtNpOT`f@%yn!lNAsb!;E? 
z*_?>EdK{`TpZ&ss){(osV*TR0s^a0=f+fJo6mV#&JDOjj0f4|YkSDge zV1lvsNn!*sEGESK;&32^TwgFvSi}Z49>F9F>^&350w5y;n?evIJZZoL@+>Ba5rkn+ z5+j&uF=?p^bnXGPfaGUBVe>vB+|}oXqQT!khINQ@2JcwqjU5|`6~RoF7UBE)2zx2T zXJ+=Qc5eU3^9$21U#uuq2{XpTsv`TM%u?XUtiVsNLP3>oF$9v$OX!ap`7jQ$1kaYvf)zaLH5=t$qyu=F-bb56BCP^QGIsw-S%fg;1Enytif>Io zl6Jtd#O$V(Ad)6?_R-d%ajsae?e5Q@sf|b~v|^og=$Qdh=PAOjAlmgl4L$r?xg3r< zUSp3V%AH?1T?Eth`D%$^x-{fW>ruV)Sgb8lm?h{+%ODO)vDyr}VDn8T>LBvlgyc7= zQV~Wt@G{WHm&xVMlVbb-02LaxPYcDGw?M&*+wxDLnu%X>B%G<4ji0Ig=867h#iEnS zH_X#T0%Qa*Y|CuxJ%$X)88qzWs&K<_NYM7prhi%bi zifl=`%@jT@+6&gy55gPqO}5qrG?GZY3?^^4t#VzY3~13SfZK?cou?8 zy$RZ?K#Ll|BT*TG?7RsXg3al1;;O(y-bqYcLALbxLKbY{jk9?L8PXu*jV_5%Va2iv zpPx{&gW4X$NNp~YqTO11aWu}s$at^SHzRKU#}k8ntFi8TDn4_|V6zp}Fa#iNw&JFa zCSjMWI&KLHHCYhG@12+5QF-aUBS^&t4DBDkA{)zg5iqoHMGDc!QtAw*kZs)1N z#y%%8+g(|gyz21v!H!7oNa@410Z#P+IHmVA`p{-p7TzL>VgKEPw?4@a zb|>6w8`+uJ=|FcAeb}LFi*Oj{*^C+ukVykhV)L9_0&!HofM%Ao;_9}nsK}lE7MMzd zp;h!aTyV@m!;|$>oBZPYi_Uvo-1?wkj$n)wcwg$aa%JPtC6>PJ^q$i zM(5Rmym*LJKkJmpk^7!-I+$>DEXwmzDS7%_YA|4{QL^(`HUW_TRGAFvST=R`rzc~L zk5U-Q*m)|7#hkwbhbxxXh7f~|6DjQT5@j}TG7*aGef+LZ)4n$MCk#ll@4V1v-K6(v z_rxyKbZp{hIQ+cQdImO|Lul_|Seb9QY9vBb8N>$zgEsW05lh{`Ems3jmELn@2gI;3 zTTxbRCe$-Z!p$(A{0N7?8-8 z#`%ct<9Lj)+YEzogp3s_S2&QJbg1{AfK%?iE}etDbysUiPiRk)ucbDxxB%{GSy2<3MonV&QfQWhn48s5dLdYTl4`2wO#W{pf)&sr~c>{JuOofS_*%dWTNFXdgLKcQwpx7Y>NZ zMICah5gWq|20FU1F`R-MM`Xqd-H$;E+elg>=qSKeBe>*#&rr6z?S~ zF31{gbUs77OxYX2XnMg;!xj`xb$fHeBWN$Xe#QpSNk&}L&OC~bT)!pxR=Zw#N*+Xy zXYupk{zsq#Bm_`InkK(|>za^#Ng1r{NSg)Svomh9b_VF-E%swBk^*ht>$Ox3NoRBD zTWv?zm$h+e!AWP{SXiVVd*VBfuB~3JS0gVS*0pYdKxB%b1bIO@uT;3`N4!POHK@F45( zg#;6+6XvjBL5; z5c3N}soqtDz2j`@re8v&>LnB$4j8t8fCxgACJmTCgKm--L5R{MF@j7Vh^h!oVs!)h z31L|R5hYWae?)0KAjA;ZD#)yxG+=^xrxRiXS$31e{DLSupg=H`Rn^+{ONjJO&2PfxAQgE+%r79)NlR5wn3PE)g=mqz>NEFxsFNyJyXP0QNb=nG zo6IXp+an3aQh4H62)T6$oH%e5ZlesPrkNMzsKJdjL+FhNNC@Mdujm;Gn#Gm4F!~kb zYlUs|5`;SHz9{2IETBfF%oYezyLT#oA7W>|gk&t0F4|K36i^88W#R~KbkC>TXbe^$ zsK7`RNF0O2eiI^p6C%+x0>9~>uuT4k^iLosrjDi+50{l{=iGAoFymRmgRz_e&w*cj zBqMo0vhYf$6D}q(8#qHY^G@+zJ=4yKfJftgEb1=U-n(L63ZaWS4HmU}hMQW3-SN(Y z#-b%whVoC05d#Dm&Icjsz)rV6kiTq^hzVev z6tx}Z0qyga~U@^@d}0H$}h?fXrySkE!jR-TWOz)Cu3ckvTc?f`o{WvCDhiI}YlzH7Qe zNEpJGydd=O1%*BGy;r&vAPu%gDTtCd$g+UXdcU3Nv<2TA02Eby>gH8 zBWJdfD1FnhW$mbz)1=-Qc_eh(@HQ#*8$VSn4-iTc@?abAEm|~W(_zjn?pTdMgywtx2sib&oeV~ z4^GA}TIa%s ztYh_0^^rSlA_<*Nh*$kr5=umbkUn1UrgsVoPos-Q7eIUL>2KNJ8lTL~WIgVXr5-TS z^=}+tQtErV^U7yPe4B3NH6mw9g-G5w*1(MA;oGffPi!^vH2mUovp@1yhsduPOE7sK zVw@Q(vyZy*&zO%c?NK=wN;IBFbq|Y!(9I!3ijUXvi%k}gm{Sc zec)65Wz3k6mrS|u4e}+cRxd)01+B{f)KxuvDzAImH+Gb=WNKI`oB7xSxP1F{R|UUI zTy4y?HZ@9OvMWrBlQS7d{wTf$_}s^ES6O5cnmFnaI`m2?#5;wC2o_WNO!!_H(cdxs zO9*l1u6sk3(XL^{vf15u3(Vl)%LvskP4qkmWhN(*=*L*?%w61K;ez>o=`fBj3$jk= zR@D?E_D!PD53JAq#x+vzm!T7bJ=`o}l-3P1h*wUQ9Ke0n=zsgxfc$Xy0sHcU={4>D zG!6HLSykZX)U#>_F8{r_9CA*s%$wu+m-Z&Kp`O6mR+^3!By$LETJTu`;lTx_7-q$E zBb^dV=KQ*}m)AGDq(=?O38gR7HD=$2#EWtjLvd%rw&-c()sc;}xl`v1@bHm-)$&j3 zTcQP$kne>MjR1!u?J;!c%9t{n-KrcuHkFkaFH8GWv+VTZ5$%vY)&|9EGTH(-Y!2Rk zo`Aqe)j^8xA|%c^t>Oi# zp+h@$@u!2$#Nnin)QYGjTO~t?Szm06``!OkFcn&MFJj-=ClwP~Bg=&@(*} zRhIlXR`7yUZJ_9?gZvlAuAP`#GSFLp0^`F%?wSw;Gl72Bt*gwAjNZMqJG{SThENlS z5T+`XathG4SpO&9cyugs?gQwvv>* zDdbOFrO4)EY~}#vC4X%~*(KY6im^LJh^C@J845Z*z}lJ~WYv_Zva-=wHGPSXE$=I* z#$5Al2W~cXTj4u7eqggNFnpB6_!9tN1;aUVrG|v8v-MI(O=*U&eDRusbSzjO*@ib~ z5)cx~zaM4*M?`(=&86t=eu86_Kjr_iW?_!}FR>%t>x*{WKyJVj$gcBcJ`EQ`!VU!* zW!N=apKTIH?h6!z8(nUg|K=ikPlsUGXre`eiFGerXE<+iTm!qMAge5P>n!zcX#Pim z*>sB7j^-k|07wWzjEJ`u9lk4q+zTWa0i9@(pqUAn{Ell{yM&SK^@0rE@evXX7F8`0 zmtBlp9w`X?)HoENr6wE;6I3wvInknDSiIwNP7sxDe1rt!p1sub@-vYHeF9-m=W8K< 
z&bkQLpkQL?M2mjGq8p!cf*{X{5fX@aj!%ApoY3*fFW8%)jkic3+9`;_LS_Ws)#0;4 zO+Ene(n}RZ!+Z%jNtr$ivR8cp`rK!r&$(29O5mP8b`Di0a5a%+&czh;jJE*E0G6qoN&(MwRZd`aQ5NCDPqXOAmppkG&s~q8}Wg> z9eh;41X-sv=VDMSU4gurHsc@pE(`^(IuoH?j8qve3*@A7~OYBmWi}D}mO` z4z4-E?4hGKojJmdpxBwt1{18o96$J`KTJpn>NV+Vim_I`+z4jZ;W^R1NQRkjjn)BSe0*O#2BV^#xgXT|{*~u9BE4m@4K(h(EbIiQP}LgrO(aTTLV_hj z9gd*fCVpRD35D~B7pvt^O3~lYal1RH{OjJ3vZe?l*_rHq7YjY{yOaO7no<`PsN9e9 zndpInrP5U6zc+0CF|oB`!8%vGHyzPDCRvE$a_G)Yzx3@w9dVx9RcBcmlG+O-hX?(7gO1;}?fz;|wr?yY!?;T|pMm`0xlkket94 z1zGOnGJbIyIUckx*Z`Vf*9kH%$A?Ft;XW=yAeuTZL$Libk%ZtEw&i&CPeFG3`0xk} zI~r&C38XHJ%lL(9Ilcx3o9*Ml*aBUm@!|Oe=(zUOJwEX-X=+*1^?)JZ!aQg6`gL16 zU|B<_$O)rxc}vM6y6tP_J8z`j!;EKGkz!&VGH(2{;sr63_~-HMIKGiq2}VYxKMjqj z%TPs$g$5)3JVunczZh2S(gljH6bfN(>K#dt-1PczayZ`g7sIK89K-WFmuzwdnDZ%qnh?C)1T zUPmHk;X@zEUr2LME|&9XjWMWUT{5U~s`iVD*oswAHmq^_Y(N#YYpCjQ_0n}$DQg^R z_lKNzfyGC5G9aT84b~A3FBeOw2o#J!pg!15tU@2m!+D)f+LtRz4^d=~i6deAs#lp3s zdR+sj&Xktm3|Ry}DxH+_J6M@<{tQ2hU;{WP#CPc%(1fA{xT1UA!jsb@9JjY{Wtwzl zsnWg4Os7N@o&(dMERr7K)7hHRoRm3AYJ75}p;$%yK0aeWhB_RQR9m6ry1+flFKYUh z6Q&sBP9tY0#y;vOx0^mn+8*!F<@%j0a=82rWM4#zrA89)k0#%i9YXx)wW(84s@R$E z6RB4Q=HH7{Y26^2=9O3|tq5Q7$ZK|%K1wMm%?XH*{L_Ah4! zo6Yg2r)8sY&z@4BJ*7tQldt$roKLk;+~iadR`7mCkZ{hcsK^q$9)3-Vvay2y zcd;XDGITsJQOo-;5NYHs)cMpW$9k;H>LY2_%1PQ+h) zZ%uC~@z6nc^g7+e5;x{kfw|a6e`jG4tDPy~TsN3d$HVp8JAO5#rP}B7h#9LZ4X8+J zR2KQd@XUeo>Hs(POltY-ssQ7p%;%Mn+KByEdI2n*>6#@E;Dh4|;d>W|#8TobiVd$N z->gYL$e30cGkAAblxvlo4$dVVfBYaVW*A>IHCy7Sigs2_g(a;d84JBrWz>0rxV!Rl`k}0D6#@OlDRF+l%FoNeB`CvG2&-Poa9K;WDrE}O(F4etn-(JpX$W7R@mKOD> zCOfj;tSOZmrC-EKAD&hx-alV4);)~Kj;f9ik77MbXE(Ezz30()HS~2pta3O~Jjb%4 zIyZy^SC4t1?%#i~0{MhTq9XMqYZgtdtAnLZ65AIh)^Ks(z8$wT^6bOgR(I5?(GK6) z=VB0+Qn|$rA9iqH=G#i4Gr`9j8j;nO3DML|5RyD{s$p4Drn^sW%f7&jp~yHV@n7XZ z=!TOJN1B@pNosckJ<29(gT2bvJ1gL3nhoE+yQX4|Pv+tanrmAtqU_(1D>CIjO5+`P zwF7Y;sW_kfkb6eZ>g!ZfTARUe7M{}jvA_St;A=~`87rz* zpFfiH=?kNt|?ZDX(XbDe<5oobZh!MC`0Iuoqk)$$3I2{mf z!A2X=f4LT*Amw0y2(pJGfFtm-FR3JzGFjoZ|$%-5a`X~T*zX}(h7%%~LGK9NKh!NyAPa1c@ z0QaP&D$sKVe?Q<`hiSL*{`I1tRHo9sjNU4sX0?p!WEFzk(?I{zZDF#LqZfoNU-(7X za@aER4}w+t8bdx*XAPLB&GXgri;#~yW&*&Ga`Uk!|2Gt?>xV_xA4^?`e03DEvmYa> zj@=7@sxUaIn(KYo61~2r8f@xh6l&xHvV_H;*B0Mko<|^78(0W7Z3SMa4n!p*X8^j7 z-l;>pFo6lHmS!KYW&-KWfsjYy-^3sk$z2W;a(S1}q{w0~_@+I&DFgv)4MFt=xVo3~ z!B(U&BzteYbpe`^!Vco9F*j({{5m1ca0BH>>-r}axuKj02P@d)nN<@P$Cus)(YJ^B zw~}235cIMZO&oa*MP&$8&>ykJB0`A%2Da&3d_>ix%Wb4#QAnpdN+C~Qfr7Wcc5R)g zmY5Ro;KRmw@Xs`J(PX+vje%!s3RvwCc`4K>*$w@%>sgWzJ_h0saD(G(^J-NuSnu}hpg}>A5 zS5Fz6xHBN7z||`|9aU_Dn|QrIccjVqTH{afABaQ$)uEyn~`o ze$?=AaG{yoYi}2}fMxeymdlwo!emxW*>cvzlVG;Pm`ghaU-xGe^PO|ZC`Xgi8T<+- zXw&fR9{xzIF)=u`ywH7do}b99#*1LAG@PjQLCYvz2v0A3quBf>g?QCgC}ryhB*!cJ zpk|)m=b6Z(#I9F${DkIcjKRW05*=2$Zl6TyS~!kyJt!HX8?`}L5fn@P`WMun5_VUR zK(_gzDL2*O?yv44t*6t+pPhAQBla4)QvQZR;eXy!AC47|7<(ZTg_kd@N3+eIErsmU z*tKy6EuhnW6ppJ26L(krQPJoRQYWvY3tu%3mSc5&bSN>RNLb4fDxt6>FsVu{s?;17 zta@;zW&$#*uZ+a7nDA4!ZnD;m!Sr&nb%icy{sn^Hfyx%WQ!Gy7wsmSQ1t(TWH61s6@HA zgG>KQwH)NgDPm6sddZj&;Vm@4MITp$!x zzZF&LSHPy z&vE7rOGO`pnL9o^T<`RR{+^ou|CpMwj5(!fGL2`*x=RV*67^>;Pfh<%&uTlGC- zkTlwuCV8m5Old8EIfF@sEz=WJj3Y_J{=u8OiUWi1N47@s0)455wmf=k|4qv*p9ErI zQPDvMhK-`5xxrkjXq;j*dc%TP29kQKSorFoW}YQJn%f4c0l!hWrp$V}CH;|xj2zq< zGukpw$lUs#f$7HuN2>;?`uFBNWkGvw)Gvb}8&) z(fn0~TfF4)-rvZn6y2`R9rdUZbeu-OKc+Jrx_N7PFTQxNd2}na1Xi}zbkYV1rTX5U z2lq#SHCoTx>fN9bibRCx+=P9h>xp+y+N$RWp_JH|WEgDWb%A&iGb6#Fe8$r20p_4S>j1y|iW?|CS@nlg z|BKqX!3HIV8!9E8jC%S$l~;Oh*+ac|9?)g?&cHKzjl1I&>FHkat!3R`hx!a~B}E%p z^XZJ!L%7u3ZnD>x9z(;R5^`$I!Ss3FVNrHEyutz8_99;i;*2D0N3Hl=!-($ew-~tjHDHx~GxvLLsYR6<3dnL9N#w_2tw`oj%X@h;sPRHD_`P73wmi0;1BGcPo$=}9hH-CUoABf8Ot 
z0QTfaioxL-$I&`NVK?KRONE+OUH1A0dzfI@u0iGU%pytdw2Dg#l&lMrgWGkm@~J))J+A8=8h&By&r6e@ z%`2s!>ha`xMOxE(!|wMam6_}=+tAZiMSibOOvRDy`B9AOTHsR{E`2l#(>iM~$XPe5 zy7J;*$txYJZd}}5l%;#EnWdTE9m^dbpmCy_-F&L7m=n#VY~8UmQpeXV<%6mb6mG!u zmIQXhvBpwP*9HyUp&q-tmWV(@5FA}(4K^s_lL~P3t^2~R$l=~APCGf1p3n_~cFl;7 z4R!T#AFmsx&^f*IHiY2MM?0_@EeHMck`?FAQ!aYH?bRJeCz>RK5 z|8@7I!n;^~UpTt)^!|93W09bZ>n0Un9yC>rtL@=Wi}jTxToeOw_?54qMLvjNz%P#I zMpim6(d%=hOqQj&91e;a^%#=99OmNK#hFZjNtzJLy?DsxI@#X4!R;4rpDnUNb?>O7`15|q^BV1@m%&y`9O%5LA7pvDB8RMzNfMp|Ae<^IFWyxb--wh z-U08AYi?tbWM})LW|PrT3?F`BEdm+;@J2@^GnwZH!tpZ_zdRKnEpL#yFT-|4RE=|u zJl|HVudDdntvZvj`Yh9lkQZz?8@|gJTgu8_Zje`=Wa9Z)8f8Tfb541|J~a|47=rP$ zN5ZG`c?|2~SNCeTVFJNdch)3rH65xfJu5+BZp!N&_o;xcLbO|N}qzo_l`YL;; z!PPmSNPXTXl#8(m4Ng58Zam_>uYyx6Mb~9{->R4#yD>S0o+)o3Mb326=Qftt9mgoY zLN@-<5V0jHXXkEYSd^oInSS?I<(lLn7v7tJnt;|$`n76SxX0YG*SC%26jA)h8)x6% zt51i&i@{~F(^yVDZW=S)tjQ?BWSU^ibTDB`)uf|!*Dj%Jbug)>H;no~aEczxFg|Ci z_=;(EfE#C$#hlqNLC~!+6;JTZDxC9-Hu&t4q59e;R^Kd+JUP%qVscp*)cil6X5->- zbtAN+B2Bg*wr6>O8@y$8XhXulh{m(iz%cM)FVfR21zdvW7f}M1fIm_-UA!H5c>8PW zC50DV?0D?}d3YmC7J^XA%(+ndmznHo_a=%JH%(tGH|iva1(@DlY4TE|=l+R=Bhzs> zKLM9Go%6|wIH--459__tTd3&c(!$9YaVv~UYn`X)Y_;VQCpl3wS9v40J(=DSFfbRU z<`a=YBao~z*bM}fRdIXKw1a*o*N&k{{-ZDYMA3YWsG=o8~F##k3Us!yf!>e%m9t<;Dhkbd;@ zdjqWA0^0DBu&a=Z#%YZpt85bCmCKw=e@wuUKAr}PS=Q3i1G29&#=G$UecROGi6iL z5tk;mrnXscfPZ#6`NgC9UL~g?h=YECo%45WzypW&eBGW41ZZB@BXzNX5;#js1P`Cx z+&kLGV#QU|!yneCCmemaX^dZENhwZBO0#-O)*nzeF3r?GynFDX zyhCK~Ot*L#7QmBFl8!tQ}`WwcHdbiSXLp+tj;m7Y8iDZ0x){_i6T(8?fHNP zb)@I-55bco0~KS-sNUi6eRauo>Mg1CCoj!XxW=jk(mipIR(Ez=YD3f??~F6rd*qFS z6gF7VK5R=gp$Ep{Fr=0bWcv(OU1@Gmq1ueX*b9{ikPt#1UQwwh-2PL*BM=cX|H6le zlSqUv@Y`nErKVb-SeKlu__Fo%7Dbo zwQjuGQ`J|E3+X8|WyWVms=tEmQfhZu7(;Rem(Br>=4w2W~1 ziSy;aJs{x3)-kReM`SF{s5c!ZQhUbt__6o&4@(?uu(pL0lZ(caZXqz^BTl{s)yapD zYIy1uV7`%uXP?YMAzd3x!O?kA8}fN}%4at_0lmF_Rhadcp|CA0{EL=_|3w+|<=E7b z{0ySxvZ|A_0`_T8O*@#g{PUd4Mw58_SQN1f#OWwI;gd490o$;KDhKU}c5KO2R}JVo z`L8G4L!K=S(vCRV6_%Bz-NyJL{w!#+2QBvs;(4h84?!Mt-7B8gh6Wc6C+1Lzn-~Tt zwWfKy%S5h_AAv^sadX`ere75t#+nxH;zPDbo2&T7+*n*Ief$XUbiZp<#Eca}+fM=> z!$s&sDNssF`^%p9@OQMqPDTZJ%&P+()ongOF^|Qaw;Q!Yj4OhYGi2b;jNtZri0pY# zwgr7>;&PH=&heC~cL}ScP&?G$R%EEU-#h!K zt*^~%YQ+%%j@&Bv29AKl7PQ^wXjSUn2%O0op9Vzqj z8ik%l*xs6l;iqodZki*FN;)hi zehpwS+p8;=i3*~YLs2YCA$;U)Mw^-;1}bP!*>;h3mynHWJThV`E(5vonp99lNII0y z!B+gzq#9_3X&IEiUjwjJ)W=QHWc|5Qfp%h0Rj~g`Lr@>~R^XMjH$W+2Az9WM-$Okx zKO`jo$uNO zSxuV&r!<9zESCC?zx)dca5wX&0X_$zRijHS093WFaz19+zb~sUSj9(0m^-+^Ss5h! 
zB0bYlS`X0f|9YijajViFtF6IEe47AGOtRET3G!n8x3=o(SJW0@)W5YwC|DD=DV+lD z=Kt1KBK<5)0<-F$w#tgKSJptuga57Va7!i%V=njYB&hWHxv7Ycfh+MZWC)s+eyOG; zf+hhOXm1uYKuF}jv>aRtv@{q*pp)X?A-#XOMzl9=;caVt0m>xY*DjMaU6)03`LAr_ ztZ{;YtQeF;4caZ*gjGA)@C!h==sZv8@mI!6Lck+MMh<%#?3Uh_JMXF!x z-`gs*yh_BF16zi0vO4kE-OGTO|F3OnVs(dNqE+7@1okF{6?CDQ(vInXBD$vAVZ zxNm1BVZ3~stQZ7N{tFrKJn@h6B2i`Mfn^|FFhly~Un#1?-$C{vWSFrL5Mubgd-})v}=U)e4bA3gHu)gmSpqeek zo{IZn;mULcMF>CD?YIPuGz8FDPJsU=(Wt_o=YnI_Hrpw*5Xpe0%bZ&CEglydPU+XE z3y|JZST0#;!kRB;170qh^)yLHD0&7`>lVKCp$l;uVb4&k5$Zy|Eo2g-0q%;!b1(c9 zCb1vfi~T4bl)w-3b7m>wnrjBZ4QtY=kcK>Y!oZ>6c646QCk1^uS5c<1mFQRsj&d${F0^9|TDS+)Y19Y93@D z1I6ie=2oPx6!t1KkU>mB-Tu`wXp&m_0Fl2-S{1-BFg{z zI~wSB?30ka{*Lq#z1P2)bZu-*GJgRa zp*@ATj*=8WZekHf7YpH%T;OZ%6}QzFWSWf8SKTa>u9blMnQYl(ZDBwai0Rb_RX1J( zd}hljAtIyo5cn8-@3*}QK_oG_t;=8U2Bo+`>YC>elyTYI5I`NBe?Cxx2o4Hg(_%g< zOF_9}Ttx;BUjRXsuhm-#4F!_Uos(L;QVBrD0k1^$7kWXLzBV3~&`?^~I`737thJzQ zDAB?qRN}7z#?*WFeGD`dnB}dN2V=~=fE%fV(A`@q1^`&Eb@HX5p};Khw)ZqJTfx$0 zRfOCB^U}dA55{d({=INO%HKNA9YtInVTSO^Cd8YhNC!*wsFN29OZrKP+HkvdZ?Xvmn1QV@8l}XvuJg*?l`z`$Lv_&((%6p_vivsmBOv1g`vvg2Qy0e-;v$q=CJ?S?&>o2H|kd7PeGoECOM+xgq zw~WuCUD7qp24_Lt?vKgS1MJ$I+HnE#9(&wWvZamBdbYo(VlN?$e0OHThX>8V+|>`P z+0P#?Ow*Gi;V%PQ*>!UQrjO0DPF5R)BuZacpktK8Qr{zPbuT z3k(7a3_7YNTwtg8Q!aZ_PF@Zx+mlxlR8{BO_3GX<>`pa`Vsl+tko*V2W zEE^?z(d5x-c35X+w+6-C89``b@w-`te(5h-eo42uEL3imGg|E7C5nGJx9SUQZ&j_= zQ0%25nji(ZNcHpqV2WJj=BcrpsU8Ot(J9GVMk6 zkOn4O0y}fZ-9QdGIy2lWdwKUvQQcjiN%UNE&wWPc88{lI7@?WseT>X`2a2_zK}5B` z*}^&AL1)?J3x)5^J*Q7S8ZL&j4Ij%ot6#BJJ3pDl5?d&J8I$YeyIVWbT)VsJCMkAkX7itKhSrsl4*>}kc3Pk;0l-79~w_PmRCLzyo+8%O^T znkdvI{1s3qTnkhfO0*apoPQPxm1!ZN&y)1yxylQ<5aGjWLd7+ zr7;xEW6r^ohMxeoPvycEl^A`>hX4BZ+k!$KppLXOD&py1hu7H~pqKLFaqFJKnYwPW-BL_cPvsF*QqX?y3h3XD; zRo6`Wkt}BSqVzfEe-#|Sg#)FcV{qLUKTo+W%=-{H-Q~eyod^r3Mp;Dh!`fCHDPpX? zt!j3ge08sC6ou=VR_Yhw(LH&nYQ)BOyvuI^x0So4w6f(@!A#z?A1wiPZD=Z~%oJw8 zgp&$85B>X?$DsPqAc^MroaJx1SLrSpPxm}eEu+Ohg}its3%cloiYqULcfQ7R$-ZxWxB)!;09Cu;NOX;Zb>5p zPb_(#C9qfda|$orke+dmi?*KVD%zyvR25dZ`g5Q>o>7f*Q#0c5@to%IXPRs(*Y8?l zA@Oc#ck^&xNjsjERAv_8$Qjzom0VKatT)2{cPS9$1l_RdOR0=DxU~WAajwo1Qo(5I z_JlQ9TO+4RQE=WHP}8%)|FqmS=v)piIW=?AI`6s@5|Qikp+N(%ru$aAk*WTbmt2Gg z?FJMRHt=%U66m5dLcR!Xa-(a(D}rl0D}xG1A3=#Ex^uujcmBmsr6kWkz!2n=%6V=J zIjooh{-hDHkJ;fHi<_Td)SGLGczcW}$_e(1B@jxLrX4d6Z3_g^96udYfhFzF6fvR2 zaK6~j5p|GP#2tIs^%PMojIO*W=uZ4kAj7Zp;LHw5B$>4{**;-pjav(9s!8J&IiBc zGXSxDIqi~tYoVZ*!hV~C7b4Gcdm!^-gz7*qc=Qu@%eJjK43e>`%+0t(L$ATl5r`jv zz;5plvDH}^hNO=+*?tDvf~z1&TAhpA4)9*oKy^>QcY>Q{EQXS|DP0AcLLK7R4&iGL zY~(*WaV!?{qhDnGoj>>S@ACSlLj7gBm_%UO{C8a#NtWtWyfOsj4f`5ts**^q1@VQ2l7FT`5~=w|v9+l5m5FgLw=#=BTsUk2q=bPr!%GXbIFAlutq>@{ezYGlbMt0nJq!T$P~S zZh$AlCf}3xqi3n(KwEeFzgI;0fIP}<^hjihpoHyyY@u0B_*U_5ZW+HblJVkYKIvLO zHp6NIsm5iYDG22#6?9HV1<(bxdICj%qj_q5z(vn#S*?(>O^bt`iqm&`kL~a4-P6~? 
z)|g49zDw5BO%1QQy~yGEu_c}Fr9)!>)+|cXbf@i@s($^V!#%|k0b46{^~5}qxTc7L z3u07*!@>%jwWLsehYVO*Ob$nH_Qly~ajDxY55tRL&8)foo4#mLBWH&5wks%PKV0*L z!=Q{9-6G%R`kiM8NstK=Yo<|)gW|@vrp1aM!Nu#Sb$f@5#jRA7FB3gv7r{fe-_E@= zL>i0-O%3tcyi`$4JQO@?-{!_K-QGaVBJ1~O_2Psi&`Jb7%>_LDvI9`SYzovUPF}Ql z&EdO02yj|4d4Kj&YMk-T&b!e1r5*#%8*P&`KROiL4^0jA$AWfK;GzV)7QfA{gZE~$ zAkLC@BR-BM(v^3DV>sQC|GMQH1zwvSIC#VS^bZ1@H!;>u1n$0H$-W3^{aQCb2In7} z15NGs=KS8A-#6#?&G}!8t)y&t4@}A~MgfNTZ94!>2Tl6E6=qvF7v}lO~6?5M>_Hz#;JN)9u#n*me z*)zdCSOUJt}lqyZ%Mgpwm}-T8n$*!B-&h9 zate-(sG^+>21QivMvdX9nz6V^>si|Pi?1|g5ZH(49co&ME!>L2J3>yg;~$SWr|ycu zjZD9TvNje@l{@ApRaet5-@d|A;t8hl)CY7ic32Ol99TW20 z^H|2a1hj!R>wP|aCys_qJTtA@XSh5O#|lyV`tbSO=Mm#MQddA9{$X=K)m-xv+-jo)Ed z7$+?_q9p>xh@A7l;6YJcQfDEdn5Ll==kPh@d#W4j`%?^|z_&MV1FRpZ#(xWKafHAiDML z8f+lp!;IlV7&@8cmKzx^XX4y)21z9JO|H^U9>-}#D%(6rrYF@zu(s{$H@>+A+ZBW~ z1!+!czAdlV!~o55)m8LRbO+h3XgSF3&!Z-_)(Q+J9F6{}0 zm58iDJKt0tY*XODZn*O?^)RbsL9JivjM1@cAu^~dnh0+ogadKoPbp9oyfT{R1rW=> z3fOx*`Xt`n)14#)M;m-eS%7zLeCYDTS@87J^#0KS;va=Qc@J12UMZ|-eCSaJ-oWdI zs7>L0kvkesAQ?-cM3*0}XoVb~>dU({-1E@E@`lc8tjtQOSE?*^5A`c<+!Oql?hVCZ zNpQP0Q|+y8YepI5<Ed(^x~Zfq(=7DbeS4|W=igd6j= zq5I-PN0M{WRW=kM+IN99q_=UPKwH}8EY$lD*ZySdrijtRkZ|WY*;@hYGuTFLoowUl zr;%zCAN<`!BG2y3+vE;8?~+(ts_uk`of6*p1DD_4%D$CPg9WO6J^Qtr^t5tr?(6ET z6LLe5WpzCK4v(A63Cr>Z`ow3ybg+?EED;FbR5NT^$JET~w@v6Joa=>D9(b6=P)m`c-v-Hb^y&qxU|fr>u8L4DJXW^LG|1ov!SrnUP1|)m|}ba=3-} za6_HjLC|hlz&hc|wicK#rFy{5dxiHLC6S7{`x-){=3Y|KJ*tjw-Y26d&EZ*vCI{s- zdP2Oq>@0l+|V_hBP%$MkhaV{l_^Yns~&IJPvyU@YQc zNmC}<_+xE}XH-PTm+n`c^z1yRET5^0;#~{l(vnH(o8p~Q>m9CZ^bksoX8A7sc9%Pm z-a6-JiQO3jL2|9$r@V~Fk&>O+-a97l%^F7a1WR5>W?T1QxJDVBVUnn)^n)8T-J*Ct z0_M@ArYIxErWI-;d+~DVS=v&!kGI!}#ji_ZP1=py(NYGQRj3!H)}P;TuG6I!VRrq? zc!Wa}>`-tIsg793e|I24VI>_kQPFcFkVhBHX4o0ocTTqDO_CZMvkF@hxV1AKQF6VX z-Qeg25zJG&2yTUa%TUv=Fs-~1zbcoQp}V>SJ58!Xx6_HleCCS}8q}FEuJrYGqbCfw zrppSm#-lxh!xCQn?BNBJM<(;S71K}``kEu?=n6R_n4-zaB!WIi`iafM1_qC3~5qk6rc!=uLa7P3v;SA;4Iu{(z+BUzINLb}=Xi|2m*HHmyD<&fyCeKi(AX6%iLvACwoY3i^N{X;zHMyvl&?&*cGJd z--yq3rj>hFeY^UBj)9D9OQKW%s)6yDOGs5+g}2P(}kOhs2&A)oEJCUv;bsSi%| zH)p*M7T9nZ{;XI2t^Y2vfvr9PK?2P3NA8w5=nDK?JlYNsA~&M-QX zL9$_oQ|P7<^wV8M+}2jJrU|1qqM2rk;8MrQo%*8JbNiwvAcKr5eo|FnA~=pId;~XM zp1u}aO1v>0+^R>nUO5=Ky`8QhQxpmDgiEKBaA_k(n6=V)e43J$^t~`t<#pYC+wQ{;NjwEplUTUIDLBU z0l7<(Ux-y_7tT;0=sAbUg?_5oCR9n6ZMqSL^jwHL4IZ838W62pNu>36Cy*0Vo$T!J z@spR^zK*}s?78XBhEp#DM~B}_fLR5bjq+k}7hG)?Q58J>SsK<&qz(efflgX%4T_(2 zx@EI@mEKi{z2YAA!!(#v_s|r$`QX&h5TO*$(?FRtFW~s4cl6fdC&$wI#v(Q|mw3R> zx@NF!D~+A~jRq@$jcHuRZ;L9m-t1jgRO>sH4;N%n*)0oB$KZG*L1DmwxAcRS zv>{RqT%?LVlkk^EIbl$PWP3v2<366gN6+8eIN3@Y(hqj=mR1x+=&$~}%Vk}c*(=yr zoA`j16G*%owNec662vI)e%~{Fm_IUfuP?9>P90h4`^GxF62u>~cSobKOjtsb%dYCB zyg?v9(RQ_Qe2wZL$6Y;{sHHeIau zs%Kl_c;M;A2(X&HX0R&gxxy{CqP=08F9bJDFKd_cF;n*gX-y>H;BG|T{LdV=pc!E&mFa=p!?;5TR%?L^-tFgN6sdq6E6`Cd&#ch=*kok%TbXB zA8*nR8pD&|r)QwI5kwt}Au2&tEvtDNJwAcql6wOWChl^!uF7U^@4O!DwG(EOH?(cS z_@*{iZ}s~D)d-iOx~eSHr&-B|>>}dfh+G% z|9;t-o2$f*)5BpJ8WYAvI#`ahm$W7AI_X@7i$>aT$Y1wJ^g4K~Crdk>4PJlJJ+l7L zivdwCKdnE^w`3LixTfl1HPUzMU7_~w869q!lLl-hK3UKu<=2!ET5SCUN3_f?jPRssdl_C2gNHNUlo|U?u{i4tW@p8pRu@td+oEQ+9H`54&C?P z&uN&I_C1|$esmKU>D{s!*(oQMEDJ~ zNr`{J)C%$4@Ii;3m4=*73TGR!2;E%za1%*AK$X>Ll$6CuafR1Yu*)jrB9iH~Ag#4U z%kT)4ffoopi6)AjaBY0LO^zXVEKT-73)_3nru}YlhhcJs0Ng+J66!(|T&F0<&l+VQ zfy>K#mBizURQvPLy_c%{KFy}Vnv~F-vFrSVf+cpjhhVEKgVC+idxF_}+J<8h4GEPv z)@hHVX?sVnh%=k8P1Bhs#`bXwagVbKr(ZlcmT6iaGgF-8tPV3trZn86jE|4qg;^~l z6vJ6H9$`0w7|>3eFrB$(@85-Q)_2izmU!Lw;5vW(*gbYXIl!5M-yQc?Y7x!3n!yTA zs^TO(N_5L&Rj|4)z8N=RZ=+NSE_4Liosi(2l-rLW^PI{hLIxi*1Xm@&%<_PQfH<6+-aKd}wz^OBbTfyNTd 
diff --git a/docs/dli/sqlreference/en-us_image_0223994226.png b/docs/dli/sqlreference/en-us_image_0223994226.png
new file mode 100644
index 0000000000000000000000000000000000000000..c412d5f67e033d99fb36ef635b1daa6f3219ff50
Binary files /dev/null and b/docs/dli/sqlreference/en-us_image_0223994226.png differ
diff --git a/docs/dli/sqlreference/en-us_image_0223994227.png b/docs/dli/sqlreference/en-us_image_0223994227.png
new file mode 100644
index 0000000000000000000000000000000000000000..2171ee0684cbf2ea8941c3675fa408bbcddd6486
Binary files /dev/null and b/docs/dli/sqlreference/en-us_image_0223994227.png differ
diff --git a/docs/dli/sqlreference/public_sys-resources/caution_3.0-en-us.png b/docs/dli/sqlreference/public_sys-resources/caution_3.0-en-us.png
new file mode 100644
index 0000000000000000000000000000000000000000..60f60762138fe7d98fa081415cf26a4943897779
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/caution_3.0-en-us.png differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-arrowdn.gif b/docs/dli/sqlreference/public_sys-resources/icon-arrowdn.gif
new file mode 100644
index 0000000000000000000000000000000000000000..379428032932289326e52c2772bd90ba3d88b906
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-arrowdn.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-arrowrt.gif b/docs/dli/sqlreference/public_sys-resources/icon-arrowrt.gif
new file mode 100644
index 0000000000000000000000000000000000000000..6aaaa11c23e919c4798d7dcd2b1ae98dd35dd316
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-arrowrt.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-caution.gif b/docs/dli/sqlreference/public_sys-resources/icon-caution.gif
new file mode 100644
index 0000000000000000000000000000000000000000..079c79b263f4b48089f3b3256154c73e7699593f
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-caution.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-danger.gif b/docs/dli/sqlreference/public_sys-resources/icon-danger.gif
new file mode 100644
index 0000000000000000000000000000000000000000..079c79b263f4b48089f3b3256154c73e7699593f
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-danger.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-huawei.gif b/docs/dli/sqlreference/public_sys-resources/icon-huawei.gif
new file mode 100644
index 0000000000000000000000000000000000000000..a31d60f895af919d33855217ca02daf4093136d7
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-huawei.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-notice.gif b/docs/dli/sqlreference/public_sys-resources/icon-notice.gif
new file mode 100644
index 0000000000000000000000000000000000000000..409070650a49518feb1624c624d814c3bfa63d2e
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-notice.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/icon-tip.gif b/docs/dli/sqlreference/public_sys-resources/icon-tip.gif
new file mode 100644
index 0000000000000000000000000000000000000000..c47bae05c9fd17c02a851d96cfac67bf194498d8
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/icon-tip.gif differ
diff --git a/docs/dli/sqlreference/public_sys-resources/note_3.0-en-us.png b/docs/dli/sqlreference/public_sys-resources/note_3.0-en-us.png
new file mode 100644
index 0000000000000000000000000000000000000000..57a0e1f53377b28fd80ceb393b035f8d9b8b392a
Binary files /dev/null and b/docs/dli/sqlreference/public_sys-resources/note_3.0-en-us.png differ