From e00cefc755c4b53cc022fae53235a95317dafb51 Mon Sep 17 00:00:00 2001
From: "Dong, Qiu Jian"
Date: Tue, 4 Apr 2023 09:28:37 +0000
Subject: [PATCH] CCE UMN 20230213 version for new console

Reviewed-by: Eotvos, Oliver
Co-authored-by: Dong, Qiu Jian
Co-committed-by: Dong, Qiu Jian
---
 docs/cce/umn/.placeholder | 0 docs/cce/umn/ALL_META.TXT.json | 2828 ++++++++++------- docs/cce/umn/CLASS.TXT.json | 2742 +++++++++------- docs/cce/umn/cce_01_0002.html | 100 - docs/cce/umn/cce_01_0003.html | 21 - docs/cce/umn/cce_01_0004.html | 95 - docs/cce/umn/cce_01_0007.html | 159 - docs/cce/umn/cce_01_0008.html | 177 -- docs/cce/umn/cce_01_0009.html | 37 - docs/cce/umn/cce_01_0010.html | 38 - docs/cce/umn/cce_01_0011.html | 124 - docs/cce/umn/cce_01_0012.html | 127 - docs/cce/umn/cce_01_0013.html | 34 - docs/cce/umn/cce_01_0014.html | 711 ----- docs/cce/umn/cce_01_0016.html | 86 - docs/cce/umn/cce_01_0018.html | 217 -- docs/cce/umn/cce_01_0019.html | 11 - docs/cce/umn/cce_01_0020.html | 21 - docs/cce/umn/cce_01_0023.html | 28 - docs/cce/umn/cce_01_0025.html | 598 ---- docs/cce/umn/cce_01_0026.html | 21 - docs/cce/umn/cce_01_0027.html | 31 - docs/cce/umn/cce_01_0028.html | 233 -- docs/cce/umn/cce_01_0030.html | 17 - docs/cce/umn/cce_01_0031.html | 19 - docs/cce/umn/cce_01_0033.html | 76 - docs/cce/umn/cce_01_0035.html | 15 - docs/cce/umn/cce_01_0036.html | 18 - docs/cce/umn/cce_01_0042.html | 28 - docs/cce/umn/cce_01_0044.html | 18 - docs/cce/umn/cce_01_0045.html | 19 - docs/cce/umn/cce_01_0046.html | 29 - docs/cce/umn/cce_01_0047.html | 294 -- docs/cce/umn/cce_01_0048.html | 224 -- docs/cce/umn/cce_01_0051.html | 29 - docs/cce/umn/cce_01_0053.html | 231 -- docs/cce/umn/cce_01_0057.html | 144 - docs/cce/umn/cce_01_0059.html | 147 - docs/cce/umn/cce_01_0063.html | 25 - docs/cce/umn/cce_01_0064.html | 23 - docs/cce/umn/cce_01_0066.html | 25 - docs/cce/umn/cce_01_0068.html | 68 - docs/cce/umn/cce_01_0081.html | 144 - docs/cce/umn/cce_01_0083.html | 107 - docs/cce/umn/cce_01_0085.html | 49 - docs/cce/umn/cce_01_0105.html | 209 -- docs/cce/umn/cce_01_0107.html | 39 - docs/cce/umn/cce_01_0110.html | 14 - docs/cce/umn/cce_01_0111.html | 23 - docs/cce/umn/cce_01_0112.html | 50 - docs/cce/umn/cce_01_0113.html | 28 - docs/cce/umn/cce_01_0114.html | 91 - docs/cce/umn/cce_01_0120.html | 62 - docs/cce/umn/cce_01_0125.html | 23 - docs/cce/umn/cce_01_0127.html | 28 - docs/cce/umn/cce_01_0129.html | 173 - docs/cce/umn/cce_01_0130.html | 25 - docs/cce/umn/cce_01_0139.html | 186 -- docs/cce/umn/cce_01_0141.html | 37 - docs/cce/umn/cce_01_0142.html | 151 - docs/cce/umn/cce_01_0143.html | 21 - docs/cce/umn/cce_01_0144.html | 81 - docs/cce/umn/cce_01_0145.html | 49 - docs/cce/umn/cce_01_0146.html | 58 - docs/cce/umn/cce_01_0149.html | 15 - docs/cce/umn/cce_01_0150.html | 236 -- docs/cce/umn/cce_01_0151.html | 215 -- docs/cce/umn/cce_01_0152.html | 123 - docs/cce/umn/cce_01_0153.html | 145 - docs/cce/umn/cce_01_0154.html | 146 - docs/cce/umn/cce_01_0157.html | 135 - docs/cce/umn/cce_01_0160.html | 23 - docs/cce/umn/cce_01_0163.html | 74 - docs/cce/umn/cce_01_0164.html | 17 - docs/cce/umn/cce_01_0175.html | 17 - docs/cce/umn/cce_01_0178.html | 129 - docs/cce/umn/cce_01_0180.html | 284 -- docs/cce/umn/cce_01_0182.html | 100 - docs/cce/umn/cce_01_0183.html | 41 - docs/cce/umn/cce_01_0184.html | 19 - docs/cce/umn/cce_01_0185.html | 52 - docs/cce/umn/cce_01_0186.html | 20 - docs/cce/umn/cce_01_0187.html | 71 - docs/cce/umn/cce_01_0188.html | 92 - docs/cce/umn/cce_01_0189.html | 234 -- docs/cce/umn/cce_01_0191.html | 17 - 
docs/cce/umn/cce_01_0197.html | 200 -- docs/cce/umn/cce_01_0200.html | 58 - docs/cce/umn/cce_01_0205.html | 25 - docs/cce/umn/cce_01_0207.html | 15 - docs/cce/umn/cce_01_0208.html | 78 - docs/cce/umn/cce_01_0209.html | 207 -- docs/cce/umn/cce_01_0210.html | 60 - docs/cce/umn/cce_01_0211.html | 67 - docs/cce/umn/cce_01_0212.html | 20 - docs/cce/umn/cce_01_0213.html | 196 -- docs/cce/umn/cce_01_0214.html | 22 - docs/cce/umn/cce_01_0215.html | 25 - docs/cce/umn/cce_01_0216.html | 127 - docs/cce/umn/cce_01_0220.html | 54 - docs/cce/umn/cce_01_0222.html | 214 -- docs/cce/umn/cce_01_0225.html | 54 - docs/cce/umn/cce_01_0226.html | 54 - docs/cce/umn/cce_01_0227.html | 54 - docs/cce/umn/cce_01_0228.html | 54 - docs/cce/umn/cce_01_0229.html | 54 - docs/cce/umn/cce_01_0230.html | 25 - docs/cce/umn/cce_01_0231.html | 19 - docs/cce/umn/cce_01_0232.html | 111 - docs/cce/umn/cce_01_0233.html | 133 - docs/cce/umn/cce_01_0234.html | 128 - docs/cce/umn/cce_01_0247.html | 27 - docs/cce/umn/cce_01_0248.html | 19 - docs/cce/umn/cce_01_0251.html | 51 - docs/cce/umn/cce_01_0252.html | 595 ---- docs/cce/umn/cce_01_0254.html | 151 - docs/cce/umn/cce_01_0257.html | 207 -- docs/cce/umn/cce_01_0259.html | 141 - docs/cce/umn/cce_01_0262.html | 149 - docs/cce/umn/cce_01_0263.html | 52 - docs/cce/umn/cce_01_0265.html | 158 - docs/cce/umn/cce_01_0268.html | 152 - docs/cce/umn/cce_01_0269.html | 52 - docs/cce/umn/cce_01_0271.html | 58 - docs/cce/umn/cce_01_0273.html | 117 - docs/cce/umn/cce_01_0274.html | 77 - docs/cce/umn/cce_01_0276.html | 51 - docs/cce/umn/cce_01_0277.html | 51 - docs/cce/umn/cce_01_0278.html | 66 - docs/cce/umn/cce_01_0281.html | 105 - docs/cce/umn/cce_01_0284.html | 79 - docs/cce/umn/cce_01_0285.html | 26 - docs/cce/umn/cce_01_0286.html | 34 - docs/cce/umn/cce_01_0287.html | 127 - docs/cce/umn/cce_01_0288.html | 196 -- docs/cce/umn/cce_01_0291.html | 20 - docs/cce/umn/cce_01_0293.html | 22 - docs/cce/umn/cce_01_0296.html | 27 - docs/cce/umn/cce_01_0298.html | 170 - docs/cce/umn/cce_01_0300.html | 39 +- docs/cce/umn/cce_01_0301.html | 39 - docs/cce/umn/cce_01_0302.html | 191 -- docs/cce/umn/cce_01_0305.html | 22 - docs/cce/umn/cce_01_0306.html | 60 - docs/cce/umn/cce_01_0307.html | 235 -- docs/cce/umn/cce_01_0310.html | 14 - docs/cce/umn/cce_01_0311.html | 144 - docs/cce/umn/cce_01_0312.html | 78 - docs/cce/umn/cce_01_0313.html | 555 ---- docs/cce/umn/cce_01_0314.html | 176 - docs/cce/umn/cce_01_0316.html | 14 - docs/cce/umn/cce_01_0317.html | 135 - docs/cce/umn/cce_01_0318.html | 66 - docs/cce/umn/cce_01_0319.html | 283 -- docs/cce/umn/cce_01_0320.html | 168 - docs/cce/umn/cce_01_0321.html | 110 - docs/cce/umn/cce_01_0323.html | 19 - docs/cce/umn/cce_01_0324.html | 144 - docs/cce/umn/cce_01_0325.html | 66 - docs/cce/umn/cce_01_0326.html | 291 -- docs/cce/umn/cce_01_0327.html | 175 - docs/cce/umn/cce_01_0328.html | 96 - docs/cce/umn/cce_01_0330.html | 14 - docs/cce/umn/cce_01_0331.html | 58 - docs/cce/umn/cce_01_0332.html | 150 - docs/cce/umn/cce_01_0333.html | 78 - docs/cce/umn/cce_01_0336.html | 242 -- docs/cce/umn/cce_01_0337.html | 184 -- docs/cce/umn/cce_01_0338.html | 45 - docs/cce/umn/cce_01_0341.html | 45 - docs/cce/umn/cce_01_0342.html | 79 - docs/cce/umn/cce_01_0343.html | 645 ---- docs/cce/umn/cce_01_0344.html | 88 - docs/cce/umn/cce_01_0347.html | 19 - docs/cce/umn/cce_01_0348.html | 27 - docs/cce/umn/cce_01_0352.html | 82 - docs/cce/umn/cce_01_0363.html | 169 - docs/cce/umn/cce_01_0378.html | 284 -- docs/cce/umn/cce_01_0379.html | 395 --- docs/cce/umn/cce_01_0380.html | 209 -- 
docs/cce/umn/cce_01_0388.html | 64 - docs/cce/umn/cce_01_0393.html | 22 - docs/cce/umn/cce_01_0395.html | 14 - docs/cce/umn/cce_10_0002.html | 21 + docs/cce/umn/cce_10_0003.html | 131 + docs/cce/umn/cce_10_0004.html | 105 + .../{cce_01_0006.html => cce_10_0006.html} | 84 +- docs/cce/umn/cce_10_0007.html | 129 + docs/cce/umn/cce_10_0009.html | 37 + docs/cce/umn/cce_10_0010.html | 38 + docs/cce/umn/cce_10_0011.html | 118 + docs/cce/umn/cce_10_0012.html | 236 ++ docs/cce/umn/cce_10_0014.html | 788 +++++ .../{cce_01_0015.html => cce_10_0015.html} | 62 +- docs/cce/umn/cce_10_0016.html | 86 + docs/cce/umn/cce_10_0018.html | 203 ++ docs/cce/umn/cce_10_0019.html | 13 + docs/cce/umn/cce_10_0020.html | 27 + .../{cce_01_0024.html => cce_10_0024.html} | 6 +- docs/cce/umn/cce_10_0025.html | 596 ++++ docs/cce/umn/cce_10_0026.html | 21 + docs/cce/umn/cce_10_0028.html | 39 + docs/cce/umn/cce_10_0030.html | 15 + docs/cce/umn/cce_10_0031.html | 21 + docs/cce/umn/cce_10_0035.html | 15 + docs/cce/umn/cce_10_0036.html | 17 + docs/cce/umn/cce_10_0045.html | 19 + docs/cce/umn/cce_10_0046.html | 35 + docs/cce/umn/cce_10_0047.html | 178 ++ docs/cce/umn/cce_10_0048.html | 126 + docs/cce/umn/cce_10_0059.html | 171 + docs/cce/umn/cce_10_0063.html | 26 + docs/cce/umn/cce_10_0064.html | 27 + docs/cce/umn/cce_10_0066.html | 30 + docs/cce/umn/cce_10_0068.html | 23 + docs/cce/umn/cce_10_0081.html | 135 + docs/cce/umn/cce_10_0083.html | 106 + docs/cce/umn/cce_10_0091.html | 25 + .../{cce_01_0094.html => cce_10_0094.html} | 24 +- docs/cce/umn/cce_10_0105.html | 197 ++ docs/cce/umn/cce_10_0107.html | 49 + docs/cce/umn/cce_10_0110.html | 14 + docs/cce/umn/cce_10_0112.html | 108 + docs/cce/umn/cce_10_0113.html | 108 + docs/cce/umn/cce_10_0120.html | 62 + docs/cce/umn/cce_10_0127.html | 21 + docs/cce/umn/cce_10_0129.html | 188 ++ docs/cce/umn/cce_10_0130.html | 33 + docs/cce/umn/cce_10_0132.html | 525 +++ docs/cce/umn/cce_10_0139.html | 186 ++ .../{cce_01_0140.html => cce_10_0140.html} | 10 +- docs/cce/umn/cce_10_0141.html | 32 + docs/cce/umn/cce_10_0142.html | 136 + docs/cce/umn/cce_10_0146.html | 117 + docs/cce/umn/cce_10_0150.html | 133 + docs/cce/umn/cce_10_0151.html | 108 + docs/cce/umn/cce_10_0152.html | 107 + docs/cce/umn/cce_10_0153.html | 128 + docs/cce/umn/cce_10_0154.html | 98 + docs/cce/umn/cce_10_0163.html | 73 + docs/cce/umn/cce_10_0164.html | 23 + docs/cce/umn/cce_10_0175.html | 17 + docs/cce/umn/cce_10_0178.html | 218 ++ docs/cce/umn/cce_10_0180.html | 25 + docs/cce/umn/cce_10_0182.html | 94 + docs/cce/umn/cce_10_0183.html | 33 + docs/cce/umn/cce_10_0184.html | 20 + docs/cce/umn/cce_10_0185.html | 55 + docs/cce/umn/cce_10_0186.html | 22 + docs/cce/umn/cce_10_0187.html | 57 + docs/cce/umn/cce_10_0188.html | 96 + docs/cce/umn/cce_10_0189.html | 221 ++ docs/cce/umn/cce_10_0190.html | 136 + docs/cce/umn/cce_10_0191.html | 19 + docs/cce/umn/cce_10_0193.html | 628 ++++ docs/cce/umn/cce_10_0197.html | 180 ++ docs/cce/umn/cce_10_0198.html | 132 + docs/cce/umn/cce_10_0201.html | 151 + docs/cce/umn/cce_10_0205.html | 16 + docs/cce/umn/cce_10_0207.html | 17 + docs/cce/umn/cce_10_0208.html | 84 + docs/cce/umn/cce_10_0209.html | 208 ++ docs/cce/umn/cce_10_0210.html | 60 + docs/cce/umn/cce_10_0212.html | 23 + docs/cce/umn/cce_10_0213.html | 296 ++ docs/cce/umn/cce_10_0214.html | 20 + docs/cce/umn/cce_10_0215.html | 23 + docs/cce/umn/cce_10_0216.html | 76 + docs/cce/umn/cce_10_0222.html | 411 +++ docs/cce/umn/cce_10_0232.html | 397 +++ docs/cce/umn/cce_10_0245.html | 44 + docs/cce/umn/cce_10_0247.html | 29 + 
docs/cce/umn/cce_10_0248.html | 19 + .../{cce_01_0249.html => cce_10_0249.html} | 29 +- docs/cce/umn/cce_10_0251.html | 42 + docs/cce/umn/cce_10_0252.html | 863 +++++ docs/cce/umn/cce_10_0257.html | 207 ++ docs/cce/umn/cce_10_0262.html | 149 + docs/cce/umn/cce_10_0263.html | 52 + docs/cce/umn/cce_10_0268.html | 152 + docs/cce/umn/cce_10_0269.html | 52 + .../{cce_01_0275.html => cce_10_0275.html} | 38 +- docs/cce/umn/cce_10_0276.html | 39 + docs/cce/umn/cce_10_0277.html | 61 + docs/cce/umn/cce_10_0278.html | 65 + .../{cce_01_0279.html => cce_10_0279.html} | 56 +- .../{cce_01_0280.html => cce_10_0280.html} | 12 +- docs/cce/umn/cce_10_0281.html | 105 + .../{cce_01_0282.html => cce_10_0282.html} | 53 +- .../{cce_01_0283.html => cce_10_0283.html} | 63 +- docs/cce/umn/cce_10_0284.html | 72 + docs/cce/umn/cce_10_0285.html | 26 + docs/cce/umn/cce_10_0287.html | 124 + docs/cce/umn/cce_10_0288.html | 142 + .../{cce_01_0290.html => cce_10_0290.html} | 36 +- docs/cce/umn/cce_10_0291.html | 20 + docs/cce/umn/cce_10_0293.html | 20 + docs/cce/umn/cce_10_0296.html | 27 + docs/cce/umn/cce_10_0298.html | 40 + docs/cce/umn/cce_10_0300.html | 227 ++ docs/cce/umn/cce_10_0301.html | 141 + docs/cce/umn/cce_10_0302.html | 114 + docs/cce/umn/cce_10_0305.html | 22 + docs/cce/umn/cce_10_0306.html | 58 + docs/cce/umn/cce_10_0307.html | 234 ++ .../{cce_01_0309.html => cce_10_0309.html} | 14 +- docs/cce/umn/cce_10_0310.html | 14 + docs/cce/umn/cce_10_0312.html | 71 + docs/cce/umn/cce_10_0313.html | 539 ++++ docs/cce/umn/cce_10_0314.html | 174 + .../{cce_01_0315.html => cce_10_0315.html} | 16 +- docs/cce/umn/cce_10_0316.html | 14 + docs/cce/umn/cce_10_0318.html | 62 + docs/cce/umn/cce_10_0319.html | 279 ++ docs/cce/umn/cce_10_0320.html | 166 + docs/cce/umn/cce_10_0321.html | 108 + .../{cce_01_0322.html => cce_10_0322.html} | 16 +- docs/cce/umn/cce_10_0323.html | 19 + docs/cce/umn/cce_10_0325.html | 64 + docs/cce/umn/cce_10_0326.html | 289 ++ docs/cce/umn/cce_10_0327.html | 173 + docs/cce/umn/cce_10_0328.html | 94 + .../{cce_01_0329.html => cce_10_0329.html} | 14 +- docs/cce/umn/cce_10_0330.html | 14 + docs/cce/umn/cce_10_0332.html | 148 + docs/cce/umn/cce_10_0333.html | 76 + .../{cce_01_0334.html => cce_10_0334.html} | 50 +- docs/cce/umn/cce_10_0336.html | 242 ++ docs/cce/umn/cce_10_0337.html | 184 ++ docs/cce/umn/cce_10_0338.html | 45 + docs/cce/umn/cce_10_0341.html | 50 + docs/cce/umn/cce_10_0342.html | 79 + docs/cce/umn/cce_10_0343.html | 645 ++++ docs/cce/umn/cce_10_0345.html | 93 + docs/cce/umn/cce_10_0348.html | 25 + docs/cce/umn/cce_10_0349.html | 25 + docs/cce/umn/cce_10_0351.html | 51 + docs/cce/umn/cce_10_0352.html | 86 + docs/cce/umn/cce_10_0353.html | 35 + docs/cce/umn/cce_10_0354.html | 43 + docs/cce/umn/cce_10_0359.html | 23 + docs/cce/umn/cce_10_0360.html | 35 + docs/cce/umn/cce_10_0361.html | 185 ++ docs/cce/umn/cce_10_0363.html | 197 ++ docs/cce/umn/cce_10_0365.html | 237 ++ docs/cce/umn/cce_10_0367.html | 24 + docs/cce/umn/cce_10_0374.html | 28 + docs/cce/umn/cce_10_0377.html | 231 ++ docs/cce/umn/cce_10_0378.html | 299 ++ docs/cce/umn/cce_10_0379.html | 398 +++ docs/cce/umn/cce_10_0380.html | 211 ++ docs/cce/umn/cce_10_0381.html | 66 + docs/cce/umn/cce_10_0384.html | 760 +++++ docs/cce/umn/cce_10_0385.html | 376 +++ docs/cce/umn/cce_10_0386.html | 60 + docs/cce/umn/cce_10_0388.html | 63 + docs/cce/umn/cce_10_0393.html | 24 + docs/cce/umn/cce_10_0396.html | 72 + docs/cce/umn/cce_10_0397.html | 52 + docs/cce/umn/cce_10_0398.html | 53 + docs/cce/umn/cce_10_0399.html | 54 + docs/cce/umn/cce_10_0400.html | 
28 + docs/cce/umn/cce_10_0402.html | 82 + docs/cce/umn/cce_10_0403.html | 17 + docs/cce/umn/cce_10_0423.html | 19 + docs/cce/umn/cce_10_0430.html | 89 + docs/cce/umn/cce_10_0461.html | 94 + docs/cce/umn/cce_10_0462.html | 119 + docs/cce/umn/cce_10_0463.html | 129 + docs/cce/umn/cce_10_0465.html | 18 + docs/cce/umn/cce_10_0466.html | 107 + docs/cce/umn/cce_10_0467.html | 21 + docs/cce/umn/cce_10_0468.html | 22 + docs/cce/umn/cce_10_0469.html | 19 + docs/cce/umn/cce_10_0470.html | 35 + docs/cce/umn/cce_10_0471.html | 18 + docs/cce/umn/cce_10_0477.html | 23 + docs/cce/umn/cce_10_0477_0.html | 23 + docs/cce/umn/cce_10_0551.html | 19 + docs/cce/umn/cce_10_0553.html | 13 + docs/cce/umn/cce_10_0557.html | 16 + docs/cce/umn/cce_10_0602.html | 23 + docs/cce/umn/cce_bestpractice.html | 33 + docs/cce/umn/cce_bestpractice_00002.html | 74 + docs/cce/umn/cce_bestpractice_00004.html | 94 +- docs/cce/umn/cce_bestpractice_00006.html | 186 ++ docs/cce/umn/cce_bestpractice_00035.html | 37 + docs/cce/umn/cce_bestpractice_00162.html | 98 +- docs/cce/umn/cce_bestpractice_00190.html | 88 + docs/cce/umn/cce_bestpractice_00198.html | 48 + docs/cce/umn/cce_bestpractice_00199.html | 123 + docs/cce/umn/cce_bestpractice_00220.html | 99 + docs/cce/umn/cce_bestpractice_00226.html | 157 + docs/cce/umn/cce_bestpractice_00227.html | 73 + docs/cce/umn/cce_bestpractice_00228.html | 61 + docs/cce/umn/cce_bestpractice_00231.html | 147 + docs/cce/umn/cce_bestpractice_00237.html | 15 + docs/cce/umn/cce_bestpractice_00253_0.html | 171 + docs/cce/umn/cce_bestpractice_00254.html | 248 ++ docs/cce/umn/cce_bestpractice_00281_0.html | 230 ++ docs/cce/umn/cce_bestpractice_00282.html | 227 ++ docs/cce/umn/cce_bestpractice_00284.html | 247 ++ docs/cce/umn/cce_bestpractice_0050.html | 18 + docs/cce/umn/cce_bestpractice_0051.html | 27 + docs/cce/umn/cce_bestpractice_0052.html | 22 + docs/cce/umn/cce_bestpractice_0053.html | 25 + docs/cce/umn/cce_bestpractice_0090.html | 16 + docs/cce/umn/cce_bestpractice_0107.html | 381 +-- docs/cce/umn/cce_bestpractice_0306.html | 29 + docs/cce/umn/cce_bestpractice_0307.html | 78 + docs/cce/umn/cce_bestpractice_0308.html | 80 + docs/cce/umn/cce_bestpractice_0309.html | 25 + docs/cce/umn/cce_bestpractice_0310.html | 133 + docs/cce/umn/cce_bestpractice_0311.html | 67 + docs/cce/umn/cce_bestpractice_0312.html | 124 + docs/cce/umn/cce_bestpractice_0313.html | 20 + docs/cce/umn/cce_bestpractice_0314.html | 74 + docs/cce/umn/cce_bestpractice_0315.html | 25 + docs/cce/umn/cce_bestpractice_0317.html | 103 + docs/cce/umn/cce_bestpractice_0318.html | 41 + docs/cce/umn/cce_bestpractice_0319.html | 82 + docs/cce/umn/cce_bestpractice_0320.html | 103 + docs/cce/umn/cce_bestpractice_0322.html | 19 + docs/cce/umn/cce_bestpractice_0323.html | 19 + docs/cce/umn/cce_bestpractice_0324.html | 109 + docs/cce/umn/cce_bestpractice_0325.html | 50 + ...ce_01_0236.html => cce_bulletin_0000.html} | 4 +- docs/cce/umn/cce_bulletin_0003.html | 30 +- docs/cce/umn/cce_bulletin_0011.html | 18 + docs/cce/umn/cce_bulletin_0054.html | 2 +- docs/cce/umn/cce_bulletin_0068.html | 91 +- docs/cce/umn/cce_bulletin_0169.html | 4 +- docs/cce/umn/cce_bulletin_0301.html | 17 +- docs/cce/umn/cce_faq_00006.html | 180 -- docs/cce/umn/cce_faq_0083.html | 8 - docs/cce/umn/cce_qs_0001.html | 12 +- ...5.png => en-us_image_0000001082048529.png} | Bin docs/cce/umn/en-us_image_0000001088110417.png | Bin 237 -> 0 bytes docs/cce/umn/en-us_image_0000001113962636.png | Bin 0 -> 29269 bytes docs/cce/umn/en-us_image_0000001126243447.png | Bin 1833 -> 0 bytes 
docs/cce/umn/en-us_image_0000001144208440.png | Bin 18322 -> 0 bytes docs/cce/umn/en-us_image_0000001144342236.png | Bin 14662 -> 0 bytes docs/cce/umn/en-us_image_0000001144342238.png | Bin 26399 -> 0 bytes docs/cce/umn/en-us_image_0000001144578756.png | Bin 30362 -> 0 bytes docs/cce/umn/en-us_image_0000001144738550.png | Bin 37189 -> 0 bytes docs/cce/umn/en-us_image_0000001144779784.png | Bin 9715 -> 0 bytes ...1.png => en-us_image_0000001145545261.png} | Bin docs/cce/umn/en-us_image_0000001148989534.png | Bin 10819 -> 0 bytes docs/cce/umn/en-us_image_0000001150420952.png | Bin 203878 -> 0 bytes docs/cce/umn/en-us_image_0000001152953258.png | Bin 49530 -> 0 bytes docs/cce/umn/en-us_image_0000001160642447.png | Bin 0 -> 44914 bytes docs/cce/umn/en-us_image_0000001160731158.png | Bin 50328 -> 0 bytes docs/cce/umn/en-us_image_0000001172076961.png | Bin 45211 -> 0 bytes docs/cce/umn/en-us_image_0000001172392670.png | Bin 0 -> 74307 bytes docs/cce/umn/en-us_image_0000001176255102.png | Bin 385371 -> 0 bytes docs/cce/umn/en-us_image_0000001176818150.png | Bin 0 -> 29008 bytes docs/cce/umn/en-us_image_0000001178034114.png | Bin 16778 -> 0 bytes docs/cce/umn/en-us_image_0000001178034116.png | Bin 21046 -> 0 bytes docs/cce/umn/en-us_image_0000001178192666.png | Bin 5313 -> 0 bytes docs/cce/umn/en-us_image_0000001178352604.png | Bin 7289 -> 0 bytes docs/cce/umn/en-us_image_0000001190048341.png | Bin 22863 -> 0 bytes docs/cce/umn/en-us_image_0000001190168507.png | Bin 11868 -> 0 bytes docs/cce/umn/en-us_image_0000001190302085.png | Bin 20101 -> 0 bytes docs/cce/umn/en-us_image_0000001190302087.png | Bin 28934 -> 0 bytes docs/cce/umn/en-us_image_0000001190302089.png | Bin 10506 -> 0 bytes docs/cce/umn/en-us_image_0000001190302091.png | Bin 6316 -> 0 bytes docs/cce/umn/en-us_image_0000001190302095.png | Bin 12286 -> 0 bytes docs/cce/umn/en-us_image_0000001190302097.png | Bin 13099 -> 0 bytes docs/cce/umn/en-us_image_0000001190538605.png | Bin 6958 -> 0 bytes docs/cce/umn/en-us_image_0000001190658439.png | Bin 22986 -> 0 bytes docs/cce/umn/en-us_image_0000001192028618.png | Bin 0 -> 16572 bytes docs/cce/umn/en-us_image_0000001195057213.png | Bin 18564 -> 0 bytes docs/cce/umn/en-us_image_0000001198867835.png | Bin 38920 -> 0 bytes docs/cce/umn/en-us_image_0000001198980979.png | Bin 17223 -> 0 bytes ...7.png => en-us_image_0000001199021278.png} | Bin ...9.png => en-us_image_0000001199021298.png} | Bin ...1.png => en-us_image_0000001199021308.png} | Bin ...3.png => en-us_image_0000001199021320.png} | Bin ...1.png => en-us_image_0000001199021334.png} | Bin ...4.png => en-us_image_0000001199181228.png} | Bin ...4.png => en-us_image_0000001199181230.png} | Bin ...3.png => en-us_image_0000001199181232.png} | Bin ...7.png => en-us_image_0000001199181266.png} | Bin ...9.png => en-us_image_0000001199181298.png} | Bin ...0.png => en-us_image_0000001199181334.png} | Bin ...5.png => en-us_image_0000001199181336.png} | Bin ...1.png => en-us_image_0000001199181338.png} | Bin ...1.png => en-us_image_0000001199181340.png} | Bin ...7.png => en-us_image_0000001199341250.png} | Bin docs/cce/umn/en-us_image_0000001199341268.png | Bin 0 -> 643 bytes docs/cce/umn/en-us_image_0000001199341330.png | Bin 0 -> 43998 bytes ...0.png => en-us_image_0000001199501200.png} | Bin ...5.png => en-us_image_0000001199501230.png} | Bin docs/cce/umn/en-us_image_0000001199501262.png | Bin 0 -> 36081 bytes ...7.png => en-us_image_0000001199501276.png} | Bin ...5.png => en-us_image_0000001199501290.png} | Bin 
docs/cce/umn/en-us_image_0000001199757520.png | Bin 0 -> 1370 bytes ...8.png => en-us_image_0000001201381906.png} | Bin ...3.png => en-us_image_0000001201823500.png} | Bin docs/cce/umn/en-us_image_0000001202101148.png | Bin 0 -> 13154 bytes ...2.png => en-us_image_0000001202103502.png} | Bin docs/cce/umn/en-us_image_0000001203031716.png | Bin 0 -> 293 bytes ...8.png => en-us_image_0000001203385342.png} | Bin docs/cce/umn/en-us_image_0000001204449561.png | Bin 258070 -> 0 bytes docs/cce/umn/en-us_image_0000001205757902.png | Bin 0 -> 311 bytes docs/cce/umn/en-us_image_0000001206876656.png | Bin 0 -> 293 bytes ...2.png => en-us_image_0000001206959574.png} | Bin docs/cce/umn/en-us_image_0000001207511384.png | Bin 0 -> 48774 bytes docs/cce/umn/en-us_image_0000001217183707.png | Bin 0 -> 48558 bytes docs/cce/umn/en-us_image_0000001218074121.png | Bin 0 -> 31088 bytes docs/cce/umn/en-us_image_0000001221007635.png | Bin 373349 -> 0 bytes docs/cce/umn/en-us_image_0000001221376671.png | Bin 562 -> 0 bytes docs/cce/umn/en-us_image_0000001221501677.png | Bin 0 -> 391 bytes docs/cce/umn/en-us_image_0000001221820189.png | Bin 0 -> 439 bytes docs/cce/umn/en-us_image_0000001222591781.png | Bin 454 -> 0 bytes docs/cce/umn/en-us_image_0000001223152415.png | Bin 52084 -> 0 bytes docs/cce/umn/en-us_image_0000001223152417.png | Bin 18696 -> 0 bytes docs/cce/umn/en-us_image_0000001223393893.png | Bin 41282 -> 0 bytes docs/cce/umn/en-us_image_0000001225747980.png | Bin 0 -> 1024 bytes docs/cce/umn/en-us_image_0000001226818003.png | Bin 0 -> 37856 bytes docs/cce/umn/en-us_image_0000001227977765.png | Bin 13643 -> 0 bytes docs/cce/umn/en-us_image_0000001229793402.png | Bin 53133 -> 0 bytes docs/cce/umn/en-us_image_0000001229794946.png | Bin 88733 -> 0 bytes docs/cce/umn/en-us_image_0000001236263298.png | Bin 44583 -> 0 bytes docs/cce/umn/en-us_image_0000001236562704.png | Bin 0 -> 1170 bytes docs/cce/umn/en-us_image_0000001236582394.png | Bin 168421 -> 0 bytes docs/cce/umn/en-us_image_0000001236723668.png | Bin 0 -> 742 bytes docs/cce/umn/en-us_image_0000001238163131.png | Bin 366 -> 0 bytes docs/cce/umn/en-us_image_0000001238489436.png | Bin 0 -> 296895 bytes docs/cce/umn/en-us_image_0000001238830246.png | Bin 0 -> 104261 bytes docs/cce/umn/en-us_image_0000001238903330.png | Bin 0 -> 238070 bytes ...1.png => en-us_image_0000001243981115.png} | Bin ...0.png => en-us_image_0000001243981117.png} | Bin ...0.png => en-us_image_0000001243981141.png} | Bin ...3.png => en-us_image_0000001243981147.png} | Bin docs/cce/umn/en-us_image_0000001243981177.png | Bin 0 -> 40318 bytes ...1.png => en-us_image_0000001243981181.png} | Bin ...2.png => en-us_image_0000001243981203.png} | Bin ...9.png => en-us_image_0000001244101107.png} | Bin ...4.png => en-us_image_0000001244101121.png} | Bin ...1.png => en-us_image_0000001244101223.png} | Bin docs/cce/umn/en-us_image_0000001244141105.png | Bin 0 -> 68173 bytes ...2.png => en-us_image_0000001244141139.png} | Bin ...8.gif => en-us_image_0000001244141141.gif} | Bin ...3.png => en-us_image_0000001244141181.png} | Bin ...5.png => en-us_image_0000001244141191.png} | Bin ...5.png => en-us_image_0000001244141217.png} | Bin docs/cce/umn/en-us_image_0000001244261055.png | Bin 0 -> 6760 bytes ...2.png => en-us_image_0000001244261069.png} | Bin ...4.png => en-us_image_0000001244261071.png} | Bin ...6.png => en-us_image_0000001244261073.png} | Bin ...1.png => en-us_image_0000001244261103.png} | Bin ...2.png => en-us_image_0000001244261119.png} | Bin ...7.png => en-us_image_0000001244261161.png} 
| Bin docs/cce/umn/en-us_image_0000001244261167.png | Bin 0 -> 56901 bytes ...6.png => en-us_image_0000001244261169.png} | Bin ...8.png => en-us_image_0000001244261171.png} | Bin ...2.png => en-us_image_0000001244261173.png} | Bin docs/cce/umn/en-us_image_0000001244997085.png | Bin 0 -> 695 bytes docs/cce/umn/en-us_image_0000001247802971.png | Bin 0 -> 93092 bytes ...8.png => en-us_image_0000001248663503.png} | Bin ...0.png => en-us_image_0000001249023453.png} | Bin docs/cce/umn/en-us_image_0000001249073211.png | Bin 0 -> 43744 bytes docs/cce/umn/en-us_image_0000001249958645.png | Bin 0 -> 311 bytes docs/cce/umn/en-us_image_0000001251716033.png | Bin 0 -> 231 bytes docs/cce/umn/en-us_image_0000001256348238.jpg | Bin 0 -> 136985 bytes docs/cce/umn/en-us_image_0000001274316069.png | Bin 96604 -> 0 bytes docs/cce/umn/en-us_image_0000001274882416.png | Bin 0 -> 131867 bytes docs/cce/umn/en-us_image_0000001276433425.png | Bin 0 -> 178515 bytes docs/cce/umn/en-us_image_0000001280171657.png | Bin 135824 -> 0 bytes docs/cce/umn/en-us_image_0000001280181541.png | Bin 184445 -> 0 bytes docs/cce/umn/en-us_image_0000001280421317.png | Bin 9291 -> 0 bytes docs/cce/umn/en-us_image_0000001280466745.png | Bin 71933 -> 0 bytes docs/cce/umn/en-us_image_0000001283301301.png | Bin 0 -> 413584 bytes docs/cce/umn/en-us_image_0000001283343269.png | Bin 0 -> 69128 bytes docs/cce/umn/en-us_image_0000001290111529.png | Bin 0 -> 113720 bytes docs/cce/umn/en-us_image_0000001291567729.png | Bin 0 -> 258053 bytes docs/cce/umn/en-us_image_0000001325364477.png | Bin 0 -> 27034 bytes docs/cce/umn/en-us_image_0000001325377749.png | Bin 0 -> 122439 bytes ...8.png => en-us_image_0000001336475537.png} | Bin docs/cce/umn/en-us_image_0000001352539924.png | Bin 0 -> 69669 bytes docs/cce/umn/en-us_image_0000001360670117.png | Bin 0 -> 277 bytes docs/cce/umn/en-us_image_0000001378942548.png | Bin 0 -> 39577 bytes docs/cce/umn/en-us_image_0000001392259910.png | Bin 0 -> 43734 bytes docs/cce/umn/en-us_image_0000001392280374.png | Bin 0 -> 24280 bytes docs/cce/umn/en-us_image_0000001392318380.png | Bin 0 -> 29029 bytes docs/cce/umn/en-us_image_0000001397733101.png | Bin 0 -> 11394 bytes docs/cce/umn/en-us_image_0000001402494682.png | Bin 0 -> 80603 bytes ...6.png => en-us_image_0000001408895746.png} | Bin docs/cce/umn/en-us_image_0000001414561076.png | Bin 0 -> 1065 bytes docs/cce/umn/en-us_image_0000001460905374.png | Bin 0 -> 5102 bytes docs/cce/umn/en-us_image_0000001461224886.png | Bin 0 -> 16828 bytes ...8.png => en-us_image_0000001464878016.png} | Bin ...8.png => en-us_image_0000001465197524.png} | Bin docs/cce/umn/en-us_image_0000001480191270.png | Bin 0 -> 225679 bytes docs/cce/umn/en-us_image_0000001482541956.png | Bin 0 -> 70510 bytes docs/cce/umn/en-us_image_0000001482546084.png | Bin 0 -> 25122 bytes docs/cce/umn/en-us_image_0000001482701968.png | Bin 0 -> 41848 bytes docs/cce/umn/en-us_image_0000001482796460.png | Bin 0 -> 57113 bytes ...9.png => en-us_image_0000001515838557.png} | Bin ...9.png => en-us_image_0000001515917789.png} | Bin docs/cce/umn/en-us_image_0000001528627005.png | Bin 0 -> 1046 bytes docs/cce/umn/en-us_image_0000001531373685.png | Bin 0 -> 57904 bytes docs/cce/umn/en-us_image_0000001531533045.png | Bin 0 -> 295492 bytes docs/cce/umn/en-us_image_0000001531533921.png | Bin 0 -> 49030 bytes docs/cce/umn/en-us_image_0000001533181077.png | Bin 0 -> 92923 bytes docs/cce/umn/en-us_image_0000001533585325.png | Bin 0 -> 23066 bytes docs/cce/umn/en-us_image_0000001533586881.png | Bin 0 -> 231 bytes 
docs/cce/umn/en-us_image_0121749065.png | Bin 255 -> 0 bytes docs/cce/umn/en-us_image_0144042759.png | Bin 2961 -> 0 bytes docs/cce/umn/en-us_image_0144045351.png | Bin 302 -> 0 bytes docs/cce/umn/en-us_image_0165899095.png | Bin 4916 -> 0 bytes docs/cce/umn/en-us_image_0165899282.png | Bin 6934 -> 0 bytes docs/cce/umn/en-us_image_0181616313.png | Bin 36099 -> 0 bytes docs/cce/umn/en-us_image_0181616314.png | Bin 126744 -> 0 bytes docs/cce/umn/en-us_image_0183134473.png | Bin 250 -> 0 bytes docs/cce/umn/en-us_image_0183134479.png | Bin 250 -> 0 bytes docs/cce/umn/en-us_image_0183134608.png | Bin 250 -> 0 bytes docs/cce/umn/en-us_image_0183674977.png | Bin 571 -> 0 bytes docs/cce/umn/en-us_image_0195434915.png | Bin 302 -> 0 bytes docs/cce/umn/en-us_image_0198873490.png | Bin 253 -> 0 bytes docs/cce/umn/en-us_image_0198876479.png | Bin 253 -> 0 bytes docs/cce/umn/en-us_image_0214003838.png | Bin 673 -> 0 bytes docs/cce/umn/en-us_image_0220702939.png | Bin 393 -> 0 bytes docs/cce/umn/en-us_image_0220765374.png | Bin 12585 -> 0 bytes docs/cce/umn/en-us_image_0250508826.png | Bin 505 -> 0 bytes docs/cce/umn/en-us_image_0259714782.png | Bin 2363 -> 0 bytes docs/cce/umn/en-us_image_0259716601.png | Bin 1806 -> 0 bytes docs/cce/umn/en-us_image_0259814716.png | Bin 1806 -> 0 bytes docs/cce/umn/en-us_image_0259814717.png | Bin 2363 -> 0 bytes ...3152421.png => en-us_image_0261818822.png} | Bin ...8034110.png => en-us_image_0261818824.png} | Bin docs/cce/umn/en-us_image_0261818867.png | Bin 0 -> 20023 bytes ...8034108.png => en-us_image_0261818875.png} | Bin ...8192670.png => en-us_image_0261818885.png} | Bin ...3393899.png => en-us_image_0261818886.png} | Bin docs/cce/umn/en-us_image_0261818893.png | Bin 0 -> 158 bytes docs/cce/umn/en-us_image_0261818896.png | Bin 0 -> 158 bytes docs/cce/umn/en-us_image_0261818899.png | Bin 0 -> 158 bytes docs/cce/umn/en-us_image_0261820020.png | Bin 0 -> 9081 bytes docs/cce/umn/en-us_image_0268523694.png | Bin 0 -> 26982 bytes docs/cce/umn/en-us_image_0269288708.png | Bin 25385 -> 0 bytes docs/cce/umn/en-us_image_0273156799.png | Bin 1664 -> 0 bytes docs/cce/umn/en-us_image_0275445543.png | Bin 0 -> 1004 bytes docs/cce/umn/en-us_image_0275445566.png | Bin 0 -> 1004 bytes docs/cce/umn/en-us_image_0275452681.png | Bin 0 -> 1004 bytes docs/cce/umn/en-us_image_0278498565.png | Bin 0 -> 18002 bytes docs/cce/umn/en-us_image_0298565473.png | Bin 1743 -> 0 bytes docs/cce/umn/en-us_image_0300973777.png | Bin 571 -> 0 bytes .../umn/public_sys-resources/icon-arrowdn.gif | Bin 68 -> 1887 bytes .../umn/public_sys-resources/icon-arrowrt.gif | Bin 70 -> 1890 bytes 651 files changed, 28642 insertions(+), 22414 deletions(-) delete mode 100644 docs/cce/umn/.placeholder delete mode 100644 docs/cce/umn/cce_01_0002.html delete mode 100644 docs/cce/umn/cce_01_0003.html delete mode 100644 docs/cce/umn/cce_01_0004.html delete mode 100644 docs/cce/umn/cce_01_0007.html delete mode 100644 docs/cce/umn/cce_01_0008.html delete mode 100644 docs/cce/umn/cce_01_0009.html delete mode 100644 docs/cce/umn/cce_01_0010.html delete mode 100644 docs/cce/umn/cce_01_0011.html delete mode 100644 docs/cce/umn/cce_01_0012.html delete mode 100644 docs/cce/umn/cce_01_0013.html delete mode 100644 docs/cce/umn/cce_01_0014.html delete mode 100644 docs/cce/umn/cce_01_0016.html delete mode 100644 docs/cce/umn/cce_01_0018.html delete mode 100644 docs/cce/umn/cce_01_0019.html delete mode 100644 docs/cce/umn/cce_01_0020.html delete mode 100644 docs/cce/umn/cce_01_0023.html delete mode 100644 
docs/cce/umn/cce_01_0025.html delete mode 100644 docs/cce/umn/cce_01_0026.html delete mode 100644 docs/cce/umn/cce_01_0027.html delete mode 100644 docs/cce/umn/cce_01_0028.html delete mode 100644 docs/cce/umn/cce_01_0030.html delete mode 100644 docs/cce/umn/cce_01_0031.html delete mode 100644 docs/cce/umn/cce_01_0033.html delete mode 100644 docs/cce/umn/cce_01_0035.html delete mode 100644 docs/cce/umn/cce_01_0036.html delete mode 100644 docs/cce/umn/cce_01_0042.html delete mode 100644 docs/cce/umn/cce_01_0044.html delete mode 100644 docs/cce/umn/cce_01_0045.html delete mode 100644 docs/cce/umn/cce_01_0046.html delete mode 100644 docs/cce/umn/cce_01_0047.html delete mode 100644 docs/cce/umn/cce_01_0048.html delete mode 100644 docs/cce/umn/cce_01_0051.html delete mode 100644 docs/cce/umn/cce_01_0053.html delete mode 100644 docs/cce/umn/cce_01_0057.html delete mode 100644 docs/cce/umn/cce_01_0059.html delete mode 100644 docs/cce/umn/cce_01_0063.html delete mode 100644 docs/cce/umn/cce_01_0064.html delete mode 100644 docs/cce/umn/cce_01_0066.html delete mode 100644 docs/cce/umn/cce_01_0068.html delete mode 100644 docs/cce/umn/cce_01_0081.html delete mode 100644 docs/cce/umn/cce_01_0083.html delete mode 100644 docs/cce/umn/cce_01_0085.html delete mode 100644 docs/cce/umn/cce_01_0105.html delete mode 100644 docs/cce/umn/cce_01_0107.html delete mode 100644 docs/cce/umn/cce_01_0110.html delete mode 100644 docs/cce/umn/cce_01_0111.html delete mode 100644 docs/cce/umn/cce_01_0112.html delete mode 100644 docs/cce/umn/cce_01_0113.html delete mode 100644 docs/cce/umn/cce_01_0114.html delete mode 100644 docs/cce/umn/cce_01_0120.html delete mode 100644 docs/cce/umn/cce_01_0125.html delete mode 100644 docs/cce/umn/cce_01_0127.html delete mode 100644 docs/cce/umn/cce_01_0129.html delete mode 100644 docs/cce/umn/cce_01_0130.html delete mode 100644 docs/cce/umn/cce_01_0139.html delete mode 100644 docs/cce/umn/cce_01_0141.html delete mode 100644 docs/cce/umn/cce_01_0142.html delete mode 100644 docs/cce/umn/cce_01_0143.html delete mode 100644 docs/cce/umn/cce_01_0144.html delete mode 100644 docs/cce/umn/cce_01_0145.html delete mode 100644 docs/cce/umn/cce_01_0146.html delete mode 100644 docs/cce/umn/cce_01_0149.html delete mode 100644 docs/cce/umn/cce_01_0150.html delete mode 100644 docs/cce/umn/cce_01_0151.html delete mode 100644 docs/cce/umn/cce_01_0152.html delete mode 100644 docs/cce/umn/cce_01_0153.html delete mode 100644 docs/cce/umn/cce_01_0154.html delete mode 100644 docs/cce/umn/cce_01_0157.html delete mode 100644 docs/cce/umn/cce_01_0160.html delete mode 100644 docs/cce/umn/cce_01_0163.html delete mode 100644 docs/cce/umn/cce_01_0164.html delete mode 100644 docs/cce/umn/cce_01_0175.html delete mode 100644 docs/cce/umn/cce_01_0178.html delete mode 100644 docs/cce/umn/cce_01_0180.html delete mode 100644 docs/cce/umn/cce_01_0182.html delete mode 100644 docs/cce/umn/cce_01_0183.html delete mode 100644 docs/cce/umn/cce_01_0184.html delete mode 100644 docs/cce/umn/cce_01_0185.html delete mode 100644 docs/cce/umn/cce_01_0186.html delete mode 100644 docs/cce/umn/cce_01_0187.html delete mode 100644 docs/cce/umn/cce_01_0188.html delete mode 100644 docs/cce/umn/cce_01_0189.html delete mode 100644 docs/cce/umn/cce_01_0191.html delete mode 100644 docs/cce/umn/cce_01_0197.html delete mode 100644 docs/cce/umn/cce_01_0200.html delete mode 100644 docs/cce/umn/cce_01_0205.html delete mode 100644 docs/cce/umn/cce_01_0207.html delete mode 100644 docs/cce/umn/cce_01_0208.html delete mode 100644 
docs/cce/umn/cce_01_0209.html delete mode 100644 docs/cce/umn/cce_01_0210.html delete mode 100644 docs/cce/umn/cce_01_0211.html delete mode 100644 docs/cce/umn/cce_01_0212.html delete mode 100644 docs/cce/umn/cce_01_0213.html delete mode 100644 docs/cce/umn/cce_01_0214.html delete mode 100644 docs/cce/umn/cce_01_0215.html delete mode 100644 docs/cce/umn/cce_01_0216.html delete mode 100644 docs/cce/umn/cce_01_0220.html delete mode 100644 docs/cce/umn/cce_01_0222.html delete mode 100644 docs/cce/umn/cce_01_0225.html delete mode 100644 docs/cce/umn/cce_01_0226.html delete mode 100644 docs/cce/umn/cce_01_0227.html delete mode 100644 docs/cce/umn/cce_01_0228.html delete mode 100644 docs/cce/umn/cce_01_0229.html delete mode 100644 docs/cce/umn/cce_01_0230.html delete mode 100644 docs/cce/umn/cce_01_0231.html delete mode 100644 docs/cce/umn/cce_01_0232.html delete mode 100644 docs/cce/umn/cce_01_0233.html delete mode 100644 docs/cce/umn/cce_01_0234.html delete mode 100644 docs/cce/umn/cce_01_0247.html delete mode 100644 docs/cce/umn/cce_01_0248.html delete mode 100644 docs/cce/umn/cce_01_0251.html delete mode 100644 docs/cce/umn/cce_01_0252.html delete mode 100644 docs/cce/umn/cce_01_0254.html delete mode 100644 docs/cce/umn/cce_01_0257.html delete mode 100644 docs/cce/umn/cce_01_0259.html delete mode 100644 docs/cce/umn/cce_01_0262.html delete mode 100644 docs/cce/umn/cce_01_0263.html delete mode 100644 docs/cce/umn/cce_01_0265.html delete mode 100644 docs/cce/umn/cce_01_0268.html delete mode 100644 docs/cce/umn/cce_01_0269.html delete mode 100644 docs/cce/umn/cce_01_0271.html delete mode 100644 docs/cce/umn/cce_01_0273.html delete mode 100644 docs/cce/umn/cce_01_0274.html delete mode 100644 docs/cce/umn/cce_01_0276.html delete mode 100644 docs/cce/umn/cce_01_0277.html delete mode 100644 docs/cce/umn/cce_01_0278.html delete mode 100644 docs/cce/umn/cce_01_0281.html delete mode 100644 docs/cce/umn/cce_01_0284.html delete mode 100644 docs/cce/umn/cce_01_0285.html delete mode 100644 docs/cce/umn/cce_01_0286.html delete mode 100644 docs/cce/umn/cce_01_0287.html delete mode 100644 docs/cce/umn/cce_01_0288.html delete mode 100644 docs/cce/umn/cce_01_0291.html delete mode 100644 docs/cce/umn/cce_01_0293.html delete mode 100644 docs/cce/umn/cce_01_0296.html delete mode 100644 docs/cce/umn/cce_01_0298.html delete mode 100644 docs/cce/umn/cce_01_0301.html delete mode 100644 docs/cce/umn/cce_01_0302.html delete mode 100644 docs/cce/umn/cce_01_0305.html delete mode 100644 docs/cce/umn/cce_01_0306.html delete mode 100644 docs/cce/umn/cce_01_0307.html delete mode 100644 docs/cce/umn/cce_01_0310.html delete mode 100644 docs/cce/umn/cce_01_0311.html delete mode 100644 docs/cce/umn/cce_01_0312.html delete mode 100644 docs/cce/umn/cce_01_0313.html delete mode 100644 docs/cce/umn/cce_01_0314.html delete mode 100644 docs/cce/umn/cce_01_0316.html delete mode 100644 docs/cce/umn/cce_01_0317.html delete mode 100644 docs/cce/umn/cce_01_0318.html delete mode 100644 docs/cce/umn/cce_01_0319.html delete mode 100644 docs/cce/umn/cce_01_0320.html delete mode 100644 docs/cce/umn/cce_01_0321.html delete mode 100644 docs/cce/umn/cce_01_0323.html delete mode 100644 docs/cce/umn/cce_01_0324.html delete mode 100644 docs/cce/umn/cce_01_0325.html delete mode 100644 docs/cce/umn/cce_01_0326.html delete mode 100644 docs/cce/umn/cce_01_0327.html delete mode 100644 docs/cce/umn/cce_01_0328.html delete mode 100644 docs/cce/umn/cce_01_0330.html delete mode 100644 docs/cce/umn/cce_01_0331.html delete mode 100644 
docs/cce/umn/cce_01_0332.html delete mode 100644 docs/cce/umn/cce_01_0333.html delete mode 100644 docs/cce/umn/cce_01_0336.html delete mode 100644 docs/cce/umn/cce_01_0337.html delete mode 100644 docs/cce/umn/cce_01_0338.html delete mode 100644 docs/cce/umn/cce_01_0341.html delete mode 100644 docs/cce/umn/cce_01_0342.html delete mode 100644 docs/cce/umn/cce_01_0343.html delete mode 100644 docs/cce/umn/cce_01_0344.html delete mode 100644 docs/cce/umn/cce_01_0347.html delete mode 100644 docs/cce/umn/cce_01_0348.html delete mode 100644 docs/cce/umn/cce_01_0352.html delete mode 100644 docs/cce/umn/cce_01_0363.html delete mode 100644 docs/cce/umn/cce_01_0378.html delete mode 100644 docs/cce/umn/cce_01_0379.html delete mode 100644 docs/cce/umn/cce_01_0380.html delete mode 100644 docs/cce/umn/cce_01_0388.html delete mode 100644 docs/cce/umn/cce_01_0393.html delete mode 100644 docs/cce/umn/cce_01_0395.html create mode 100644 docs/cce/umn/cce_10_0002.html create mode 100644 docs/cce/umn/cce_10_0003.html create mode 100644 docs/cce/umn/cce_10_0004.html rename docs/cce/umn/{cce_01_0006.html => cce_10_0006.html} (63%) create mode 100644 docs/cce/umn/cce_10_0007.html create mode 100644 docs/cce/umn/cce_10_0009.html create mode 100644 docs/cce/umn/cce_10_0010.html create mode 100644 docs/cce/umn/cce_10_0011.html create mode 100644 docs/cce/umn/cce_10_0012.html create mode 100644 docs/cce/umn/cce_10_0014.html rename docs/cce/umn/{cce_01_0015.html => cce_10_0015.html} (51%) create mode 100644 docs/cce/umn/cce_10_0016.html create mode 100644 docs/cce/umn/cce_10_0018.html create mode 100644 docs/cce/umn/cce_10_0019.html create mode 100644 docs/cce/umn/cce_10_0020.html rename docs/cce/umn/{cce_01_0024.html => cce_10_0024.html} (54%) create mode 100644 docs/cce/umn/cce_10_0025.html create mode 100644 docs/cce/umn/cce_10_0026.html create mode 100644 docs/cce/umn/cce_10_0028.html create mode 100644 docs/cce/umn/cce_10_0030.html create mode 100644 docs/cce/umn/cce_10_0031.html create mode 100644 docs/cce/umn/cce_10_0035.html create mode 100644 docs/cce/umn/cce_10_0036.html create mode 100644 docs/cce/umn/cce_10_0045.html create mode 100644 docs/cce/umn/cce_10_0046.html create mode 100644 docs/cce/umn/cce_10_0047.html create mode 100644 docs/cce/umn/cce_10_0048.html create mode 100644 docs/cce/umn/cce_10_0059.html create mode 100644 docs/cce/umn/cce_10_0063.html create mode 100644 docs/cce/umn/cce_10_0064.html create mode 100644 docs/cce/umn/cce_10_0066.html create mode 100644 docs/cce/umn/cce_10_0068.html create mode 100644 docs/cce/umn/cce_10_0081.html create mode 100644 docs/cce/umn/cce_10_0083.html create mode 100644 docs/cce/umn/cce_10_0091.html rename docs/cce/umn/{cce_01_0094.html => cce_10_0094.html} (62%) create mode 100644 docs/cce/umn/cce_10_0105.html create mode 100644 docs/cce/umn/cce_10_0107.html create mode 100644 docs/cce/umn/cce_10_0110.html create mode 100644 docs/cce/umn/cce_10_0112.html create mode 100644 docs/cce/umn/cce_10_0113.html create mode 100644 docs/cce/umn/cce_10_0120.html create mode 100644 docs/cce/umn/cce_10_0127.html create mode 100644 docs/cce/umn/cce_10_0129.html create mode 100644 docs/cce/umn/cce_10_0130.html create mode 100644 docs/cce/umn/cce_10_0132.html create mode 100644 docs/cce/umn/cce_10_0139.html rename docs/cce/umn/{cce_01_0140.html => cce_10_0140.html} (50%) create mode 100644 docs/cce/umn/cce_10_0141.html create mode 100644 docs/cce/umn/cce_10_0142.html create mode 100644 docs/cce/umn/cce_10_0146.html create mode 100644 docs/cce/umn/cce_10_0150.html create mode 
100644 docs/cce/umn/cce_10_0151.html create mode 100644 docs/cce/umn/cce_10_0152.html create mode 100644 docs/cce/umn/cce_10_0153.html create mode 100644 docs/cce/umn/cce_10_0154.html create mode 100644 docs/cce/umn/cce_10_0163.html create mode 100644 docs/cce/umn/cce_10_0164.html create mode 100644 docs/cce/umn/cce_10_0175.html create mode 100644 docs/cce/umn/cce_10_0178.html create mode 100644 docs/cce/umn/cce_10_0180.html create mode 100644 docs/cce/umn/cce_10_0182.html create mode 100644 docs/cce/umn/cce_10_0183.html create mode 100644 docs/cce/umn/cce_10_0184.html create mode 100644 docs/cce/umn/cce_10_0185.html create mode 100644 docs/cce/umn/cce_10_0186.html create mode 100644 docs/cce/umn/cce_10_0187.html create mode 100644 docs/cce/umn/cce_10_0188.html create mode 100644 docs/cce/umn/cce_10_0189.html create mode 100644 docs/cce/umn/cce_10_0190.html create mode 100644 docs/cce/umn/cce_10_0191.html create mode 100644 docs/cce/umn/cce_10_0193.html create mode 100644 docs/cce/umn/cce_10_0197.html create mode 100644 docs/cce/umn/cce_10_0198.html create mode 100644 docs/cce/umn/cce_10_0201.html create mode 100644 docs/cce/umn/cce_10_0205.html create mode 100644 docs/cce/umn/cce_10_0207.html create mode 100644 docs/cce/umn/cce_10_0208.html create mode 100644 docs/cce/umn/cce_10_0209.html create mode 100644 docs/cce/umn/cce_10_0210.html create mode 100644 docs/cce/umn/cce_10_0212.html create mode 100644 docs/cce/umn/cce_10_0213.html create mode 100644 docs/cce/umn/cce_10_0214.html create mode 100644 docs/cce/umn/cce_10_0215.html create mode 100644 docs/cce/umn/cce_10_0216.html create mode 100644 docs/cce/umn/cce_10_0222.html create mode 100644 docs/cce/umn/cce_10_0232.html create mode 100644 docs/cce/umn/cce_10_0245.html create mode 100644 docs/cce/umn/cce_10_0247.html create mode 100644 docs/cce/umn/cce_10_0248.html rename docs/cce/umn/{cce_01_0249.html => cce_10_0249.html} (52%) create mode 100644 docs/cce/umn/cce_10_0251.html create mode 100644 docs/cce/umn/cce_10_0252.html create mode 100644 docs/cce/umn/cce_10_0257.html create mode 100644 docs/cce/umn/cce_10_0262.html create mode 100644 docs/cce/umn/cce_10_0263.html create mode 100644 docs/cce/umn/cce_10_0268.html create mode 100644 docs/cce/umn/cce_10_0269.html rename docs/cce/umn/{cce_01_0275.html => cce_10_0275.html} (61%) create mode 100644 docs/cce/umn/cce_10_0276.html create mode 100644 docs/cce/umn/cce_10_0277.html create mode 100644 docs/cce/umn/cce_10_0278.html rename docs/cce/umn/{cce_01_0279.html => cce_10_0279.html} (53%) rename docs/cce/umn/{cce_01_0280.html => cce_10_0280.html} (51%) create mode 100644 docs/cce/umn/cce_10_0281.html rename docs/cce/umn/{cce_01_0282.html => cce_10_0282.html} (60%) rename docs/cce/umn/{cce_01_0283.html => cce_10_0283.html} (63%) create mode 100644 docs/cce/umn/cce_10_0284.html create mode 100644 docs/cce/umn/cce_10_0285.html create mode 100644 docs/cce/umn/cce_10_0287.html create mode 100644 docs/cce/umn/cce_10_0288.html rename docs/cce/umn/{cce_01_0290.html => cce_10_0290.html} (57%) create mode 100644 docs/cce/umn/cce_10_0291.html create mode 100644 docs/cce/umn/cce_10_0293.html create mode 100644 docs/cce/umn/cce_10_0296.html create mode 100644 docs/cce/umn/cce_10_0298.html create mode 100644 docs/cce/umn/cce_10_0300.html create mode 100644 docs/cce/umn/cce_10_0301.html create mode 100644 docs/cce/umn/cce_10_0302.html create mode 100644 docs/cce/umn/cce_10_0305.html create mode 100644 docs/cce/umn/cce_10_0306.html create mode 100644 docs/cce/umn/cce_10_0307.html rename 
docs/cce/umn/{cce_01_0309.html => cce_10_0309.html} (50%) create mode 100644 docs/cce/umn/cce_10_0310.html create mode 100644 docs/cce/umn/cce_10_0312.html create mode 100644 docs/cce/umn/cce_10_0313.html create mode 100644 docs/cce/umn/cce_10_0314.html rename docs/cce/umn/{cce_01_0315.html => cce_10_0315.html} (52%) create mode 100644 docs/cce/umn/cce_10_0316.html create mode 100644 docs/cce/umn/cce_10_0318.html create mode 100644 docs/cce/umn/cce_10_0319.html create mode 100644 docs/cce/umn/cce_10_0320.html create mode 100644 docs/cce/umn/cce_10_0321.html rename docs/cce/umn/{cce_01_0322.html => cce_10_0322.html} (51%) create mode 100644 docs/cce/umn/cce_10_0323.html create mode 100644 docs/cce/umn/cce_10_0325.html create mode 100644 docs/cce/umn/cce_10_0326.html create mode 100644 docs/cce/umn/cce_10_0327.html create mode 100644 docs/cce/umn/cce_10_0328.html rename docs/cce/umn/{cce_01_0329.html => cce_10_0329.html} (53%) create mode 100644 docs/cce/umn/cce_10_0330.html create mode 100644 docs/cce/umn/cce_10_0332.html create mode 100644 docs/cce/umn/cce_10_0333.html rename docs/cce/umn/{cce_01_0334.html => cce_10_0334.html} (51%) create mode 100644 docs/cce/umn/cce_10_0336.html create mode 100644 docs/cce/umn/cce_10_0337.html create mode 100644 docs/cce/umn/cce_10_0338.html create mode 100644 docs/cce/umn/cce_10_0341.html create mode 100644 docs/cce/umn/cce_10_0342.html create mode 100644 docs/cce/umn/cce_10_0343.html create mode 100644 docs/cce/umn/cce_10_0345.html create mode 100644 docs/cce/umn/cce_10_0348.html create mode 100644 docs/cce/umn/cce_10_0349.html create mode 100644 docs/cce/umn/cce_10_0351.html create mode 100644 docs/cce/umn/cce_10_0352.html create mode 100644 docs/cce/umn/cce_10_0353.html create mode 100644 docs/cce/umn/cce_10_0354.html create mode 100644 docs/cce/umn/cce_10_0359.html create mode 100644 docs/cce/umn/cce_10_0360.html create mode 100644 docs/cce/umn/cce_10_0361.html create mode 100644 docs/cce/umn/cce_10_0363.html create mode 100644 docs/cce/umn/cce_10_0365.html create mode 100644 docs/cce/umn/cce_10_0367.html create mode 100644 docs/cce/umn/cce_10_0374.html create mode 100644 docs/cce/umn/cce_10_0377.html create mode 100644 docs/cce/umn/cce_10_0378.html create mode 100644 docs/cce/umn/cce_10_0379.html create mode 100644 docs/cce/umn/cce_10_0380.html create mode 100644 docs/cce/umn/cce_10_0381.html create mode 100644 docs/cce/umn/cce_10_0384.html create mode 100644 docs/cce/umn/cce_10_0385.html create mode 100644 docs/cce/umn/cce_10_0386.html create mode 100644 docs/cce/umn/cce_10_0388.html create mode 100644 docs/cce/umn/cce_10_0393.html create mode 100644 docs/cce/umn/cce_10_0396.html create mode 100644 docs/cce/umn/cce_10_0397.html create mode 100644 docs/cce/umn/cce_10_0398.html create mode 100644 docs/cce/umn/cce_10_0399.html create mode 100644 docs/cce/umn/cce_10_0400.html create mode 100644 docs/cce/umn/cce_10_0402.html create mode 100644 docs/cce/umn/cce_10_0403.html create mode 100644 docs/cce/umn/cce_10_0423.html create mode 100644 docs/cce/umn/cce_10_0430.html create mode 100644 docs/cce/umn/cce_10_0461.html create mode 100644 docs/cce/umn/cce_10_0462.html create mode 100644 docs/cce/umn/cce_10_0463.html create mode 100644 docs/cce/umn/cce_10_0465.html create mode 100644 docs/cce/umn/cce_10_0466.html create mode 100644 docs/cce/umn/cce_10_0467.html create mode 100644 docs/cce/umn/cce_10_0468.html create mode 100644 docs/cce/umn/cce_10_0469.html create mode 100644 docs/cce/umn/cce_10_0470.html create mode 100644 docs/cce/umn/cce_10_0471.html 
create mode 100644 docs/cce/umn/cce_10_0477.html create mode 100644 docs/cce/umn/cce_10_0477_0.html create mode 100644 docs/cce/umn/cce_10_0551.html create mode 100644 docs/cce/umn/cce_10_0553.html create mode 100644 docs/cce/umn/cce_10_0557.html create mode 100644 docs/cce/umn/cce_10_0602.html create mode 100644 docs/cce/umn/cce_bestpractice.html create mode 100644 docs/cce/umn/cce_bestpractice_00002.html create mode 100644 docs/cce/umn/cce_bestpractice_00006.html create mode 100644 docs/cce/umn/cce_bestpractice_00035.html create mode 100644 docs/cce/umn/cce_bestpractice_00190.html create mode 100644 docs/cce/umn/cce_bestpractice_00198.html create mode 100644 docs/cce/umn/cce_bestpractice_00199.html create mode 100644 docs/cce/umn/cce_bestpractice_00220.html create mode 100644 docs/cce/umn/cce_bestpractice_00226.html create mode 100644 docs/cce/umn/cce_bestpractice_00227.html create mode 100644 docs/cce/umn/cce_bestpractice_00228.html create mode 100644 docs/cce/umn/cce_bestpractice_00231.html create mode 100644 docs/cce/umn/cce_bestpractice_00237.html create mode 100644 docs/cce/umn/cce_bestpractice_00253_0.html create mode 100644 docs/cce/umn/cce_bestpractice_00254.html create mode 100644 docs/cce/umn/cce_bestpractice_00281_0.html create mode 100644 docs/cce/umn/cce_bestpractice_00282.html create mode 100644 docs/cce/umn/cce_bestpractice_00284.html create mode 100644 docs/cce/umn/cce_bestpractice_0050.html create mode 100644 docs/cce/umn/cce_bestpractice_0051.html create mode 100644 docs/cce/umn/cce_bestpractice_0052.html create mode 100644 docs/cce/umn/cce_bestpractice_0053.html create mode 100644 docs/cce/umn/cce_bestpractice_0090.html create mode 100644 docs/cce/umn/cce_bestpractice_0306.html create mode 100644 docs/cce/umn/cce_bestpractice_0307.html create mode 100644 docs/cce/umn/cce_bestpractice_0308.html create mode 100644 docs/cce/umn/cce_bestpractice_0309.html create mode 100644 docs/cce/umn/cce_bestpractice_0310.html create mode 100644 docs/cce/umn/cce_bestpractice_0311.html create mode 100644 docs/cce/umn/cce_bestpractice_0312.html create mode 100644 docs/cce/umn/cce_bestpractice_0313.html create mode 100644 docs/cce/umn/cce_bestpractice_0314.html create mode 100644 docs/cce/umn/cce_bestpractice_0315.html create mode 100644 docs/cce/umn/cce_bestpractice_0317.html create mode 100644 docs/cce/umn/cce_bestpractice_0318.html create mode 100644 docs/cce/umn/cce_bestpractice_0319.html create mode 100644 docs/cce/umn/cce_bestpractice_0320.html create mode 100644 docs/cce/umn/cce_bestpractice_0322.html create mode 100644 docs/cce/umn/cce_bestpractice_0323.html create mode 100644 docs/cce/umn/cce_bestpractice_0324.html create mode 100644 docs/cce/umn/cce_bestpractice_0325.html rename docs/cce/umn/{cce_01_0236.html => cce_bulletin_0000.html} (79%) create mode 100644 docs/cce/umn/cce_bulletin_0011.html delete mode 100644 docs/cce/umn/cce_faq_00006.html rename docs/cce/umn/{en-us_image_0000001223473845.png => en-us_image_0000001082048529.png} (100%) delete mode 100644 docs/cce/umn/en-us_image_0000001088110417.png create mode 100644 docs/cce/umn/en-us_image_0000001113962636.png delete mode 100644 docs/cce/umn/en-us_image_0000001126243447.png delete mode 100644 docs/cce/umn/en-us_image_0000001144208440.png delete mode 100644 docs/cce/umn/en-us_image_0000001144342236.png delete mode 100644 docs/cce/umn/en-us_image_0000001144342238.png delete mode 100644 docs/cce/umn/en-us_image_0000001144578756.png delete mode 100644 docs/cce/umn/en-us_image_0000001144738550.png delete mode 100644 
docs/cce/umn/en-us_image_0000001144779784.png rename docs/cce/umn/{en-us_image_0000001145535931.png => en-us_image_0000001145545261.png} (100%) delete mode 100644 docs/cce/umn/en-us_image_0000001148989534.png delete mode 100644 docs/cce/umn/en-us_image_0000001150420952.png delete mode 100644 docs/cce/umn/en-us_image_0000001152953258.png create mode 100644 docs/cce/umn/en-us_image_0000001160642447.png delete mode 100644 docs/cce/umn/en-us_image_0000001160731158.png delete mode 100644 docs/cce/umn/en-us_image_0000001172076961.png create mode 100644 docs/cce/umn/en-us_image_0000001172392670.png delete mode 100644 docs/cce/umn/en-us_image_0000001176255102.png create mode 100644 docs/cce/umn/en-us_image_0000001176818150.png delete mode 100644 docs/cce/umn/en-us_image_0000001178034114.png delete mode 100644 docs/cce/umn/en-us_image_0000001178034116.png delete mode 100644 docs/cce/umn/en-us_image_0000001178192666.png delete mode 100644 docs/cce/umn/en-us_image_0000001178352604.png delete mode 100644 docs/cce/umn/en-us_image_0000001190048341.png delete mode 100644 docs/cce/umn/en-us_image_0000001190168507.png delete mode 100644 docs/cce/umn/en-us_image_0000001190302085.png delete mode 100644 docs/cce/umn/en-us_image_0000001190302087.png delete mode 100644 docs/cce/umn/en-us_image_0000001190302089.png delete mode 100644 docs/cce/umn/en-us_image_0000001190302091.png delete mode 100644 docs/cce/umn/en-us_image_0000001190302095.png delete mode 100644 docs/cce/umn/en-us_image_0000001190302097.png delete mode 100644 docs/cce/umn/en-us_image_0000001190538605.png delete mode 100644 docs/cce/umn/en-us_image_0000001190658439.png create mode 100644 docs/cce/umn/en-us_image_0000001192028618.png delete mode 100644 docs/cce/umn/en-us_image_0000001195057213.png delete mode 100644 docs/cce/umn/en-us_image_0000001198867835.png delete mode 100644 docs/cce/umn/en-us_image_0000001198980979.png rename docs/cce/umn/{en-us_image_0000001180446397.png => en-us_image_0000001199021278.png} (100%) rename docs/cce/umn/{en-us_image_0000001098645539.png => en-us_image_0000001199021298.png} (100%) rename docs/cce/umn/{en-us_image_0186273271.png => en-us_image_0000001199021308.png} (100%) rename docs/cce/umn/{en-us_image_0000001243407853.png => en-us_image_0000001199021320.png} (100%) rename docs/cce/umn/{en-us_image_0000001093275701.png => en-us_image_0000001199021334.png} (100%) rename docs/cce/umn/{en-us_image_0000001190859184.png => en-us_image_0000001199181228.png} (100%) rename docs/cce/umn/{en-us_image_0000001192723194.png => en-us_image_0000001199181230.png} (100%) rename docs/cce/umn/{en-us_image_0000001409700093.png => en-us_image_0000001199181232.png} (100%) rename docs/cce/umn/{en-us_image_0000001168537057.png => en-us_image_0000001199181266.png} (100%) rename docs/cce/umn/{en-us_image_0000001190538599.png => en-us_image_0000001199181298.png} (100%) rename docs/cce/umn/{en-us_image_0000001159292060.png => en-us_image_0000001199181334.png} (100%) rename docs/cce/umn/{en-us_image_0000001231949185.png => en-us_image_0000001199181336.png} (100%) rename docs/cce/umn/{en-us_image_0000001116237931.png => en-us_image_0000001199181338.png} (100%) rename docs/cce/umn/{en-us_image_0295359661.png => en-us_image_0000001199181340.png} (100%) rename docs/cce/umn/{en-us_image_0144049227.png => en-us_image_0000001199341250.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001199341268.png create mode 100644 docs/cce/umn/en-us_image_0000001199341330.png rename docs/cce/umn/{en-us_image_0000001192723190.png => 
en-us_image_0000001199501200.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001163847995.png => en-us_image_0000001199501230.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001199501262.png
 rename docs/cce/umn/{en-us_image_0000001409860177.png => en-us_image_0000001199501276.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001199848585.png => en-us_image_0000001199501290.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001199757520.png
 rename docs/cce/umn/{en-us_image_0258503428.png => en-us_image_0000001201381906.png} (100%)
 rename docs/cce/umn/{en-us_image_0276664213.png => en-us_image_0000001201823500.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001202101148.png
 rename docs/cce/umn/{en-us_image_0276664792.png => en-us_image_0000001202103502.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001203031716.png
 rename docs/cce/umn/{en-us_image_0000001359820608.png => en-us_image_0000001203385342.png} (100%)
 delete mode 100644 docs/cce/umn/en-us_image_0000001204449561.png
 create mode 100644 docs/cce/umn/en-us_image_0000001205757902.png
 create mode 100644 docs/cce/umn/en-us_image_0000001206876656.png
 rename docs/cce/umn/{en-us_image_0249778542.png => en-us_image_0000001206959574.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001207511384.png
 create mode 100644 docs/cce/umn/en-us_image_0000001217183707.png
 create mode 100644 docs/cce/umn/en-us_image_0000001218074121.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001221007635.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001221376671.png
 create mode 100644 docs/cce/umn/en-us_image_0000001221501677.png
 create mode 100644 docs/cce/umn/en-us_image_0000001221820189.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001222591781.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001223152415.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001223152417.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001223393893.png
 create mode 100644 docs/cce/umn/en-us_image_0000001225747980.png
 create mode 100644 docs/cce/umn/en-us_image_0000001226818003.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001227977765.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001229793402.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001229794946.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001236263298.png
 create mode 100644 docs/cce/umn/en-us_image_0000001236562704.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001236582394.png
 create mode 100644 docs/cce/umn/en-us_image_0000001236723668.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001238163131.png
 create mode 100644 docs/cce/umn/en-us_image_0000001238489436.png
 create mode 100644 docs/cce/umn/en-us_image_0000001238830246.png
 create mode 100644 docs/cce/umn/en-us_image_0000001238903330.png
 rename docs/cce/umn/{en-us_image_0000001238003081.png => en-us_image_0000001243981115.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001117575950.png => en-us_image_0000001243981117.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001144779790.png => en-us_image_0000001243981141.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001098403383.png => en-us_image_0000001243981147.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001243981177.png
 rename docs/cce/umn/{en-us_image_0276664171.png => en-us_image_0000001243981181.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001144502022.png => en-us_image_0000001243981203.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001086743939.png => en-us_image_0000001244101107.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001134406294.png => en-us_image_0000001244101121.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001159118361.png => en-us_image_0000001244101223.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001244141105.png
 rename docs/cce/umn/{en-us_image_0000001144620002.png => en-us_image_0000001244141139.png} (100%)
 rename docs/cce/umn/{en-us_image_0144054048.gif => en-us_image_0000001244141141.gif} (100%)
 rename docs/cce/umn/{en-us_image_0000001163928763.png => en-us_image_0000001244141181.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001409580465.png => en-us_image_0000001244141191.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001198861255.png => en-us_image_0000001244141217.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001244261055.png
 rename docs/cce/umn/{en-us_image_0000001360140132.png => en-us_image_0000001244261069.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001142984374.png => en-us_image_0000001244261071.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001120226646.png => en-us_image_0000001244261073.png} (100%)
 rename docs/cce/umn/{en-us_image_0254985211.png => en-us_image_0000001244261103.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001144342232.png => en-us_image_0000001244261119.png} (100%)
 rename docs/cce/umn/{en-us_image_0254986677.png => en-us_image_0000001244261161.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001244261167.png
 rename docs/cce/umn/{en-us_image_0000001160748146.png => en-us_image_0000001244261169.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001159831938.png => en-us_image_0000001244261171.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001153101092.png => en-us_image_0000001244261173.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001244997085.png
 create mode 100644 docs/cce/umn/en-us_image_0000001247802971.png
 rename docs/cce/umn/{en-us_image_0276664178.png => en-us_image_0000001248663503.png} (100%)
 rename docs/cce/umn/{en-us_image_0276664570.png => en-us_image_0000001249023453.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001249073211.png
 create mode 100644 docs/cce/umn/en-us_image_0000001249958645.png
 create mode 100644 docs/cce/umn/en-us_image_0000001251716033.png
 create mode 100644 docs/cce/umn/en-us_image_0000001256348238.jpg
 delete mode 100644 docs/cce/umn/en-us_image_0000001274316069.png
 create mode 100644 docs/cce/umn/en-us_image_0000001274882416.png
 create mode 100644 docs/cce/umn/en-us_image_0000001276433425.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001280171657.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001280181541.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001280421317.png
 delete mode 100644 docs/cce/umn/en-us_image_0000001280466745.png
 create mode 100644 docs/cce/umn/en-us_image_0000001283301301.png
 create mode 100644 docs/cce/umn/en-us_image_0000001283343269.png
 create mode 100644 docs/cce/umn/en-us_image_0000001290111529.png
 create mode 100644 docs/cce/umn/en-us_image_0000001291567729.png
 create mode 100644 docs/cce/umn/en-us_image_0000001325364477.png
 create mode 100644 docs/cce/umn/en-us_image_0000001325377749.png
 rename docs/cce/umn/{en-us_image_0000001283755568.png => en-us_image_0000001336475537.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001352539924.png
 create mode 100644 docs/cce/umn/en-us_image_0000001360670117.png
 create mode 100644 docs/cce/umn/en-us_image_0000001378942548.png
 create mode 100644 docs/cce/umn/en-us_image_0000001392259910.png
 create mode 100644 docs/cce/umn/en-us_image_0000001392280374.png
 create mode 100644 docs/cce/umn/en-us_image_0000001392318380.png
 create mode 100644 docs/cce/umn/en-us_image_0000001397733101.png
 create mode 100644 docs/cce/umn/en-us_image_0000001402494682.png
 rename docs/cce/umn/{en-us_image_0165888686.png => en-us_image_0000001408895746.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001414561076.png
 create mode 100644 docs/cce/umn/en-us_image_0000001460905374.png
 create mode 100644 docs/cce/umn/en-us_image_0000001461224886.png
 rename docs/cce/umn/{en-us_image_0000001359980148.png => en-us_image_0000001464878016.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001360140128.png => en-us_image_0000001465197524.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001480191270.png
 create mode 100644 docs/cce/umn/en-us_image_0000001482541956.png
 create mode 100644 docs/cce/umn/en-us_image_0000001482546084.png
 create mode 100644 docs/cce/umn/en-us_image_0000001482701968.png
 create mode 100644 docs/cce/umn/en-us_image_0000001482796460.png
 rename docs/cce/umn/{en-us_image_0000001409700089.png => en-us_image_0000001515838557.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001409740389.png => en-us_image_0000001515917789.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0000001528627005.png
 create mode 100644 docs/cce/umn/en-us_image_0000001531373685.png
 create mode 100644 docs/cce/umn/en-us_image_0000001531533045.png
 create mode 100644 docs/cce/umn/en-us_image_0000001531533921.png
 create mode 100644 docs/cce/umn/en-us_image_0000001533181077.png
 create mode 100644 docs/cce/umn/en-us_image_0000001533585325.png
 create mode 100644 docs/cce/umn/en-us_image_0000001533586881.png
 delete mode 100644 docs/cce/umn/en-us_image_0121749065.png
 delete mode 100644 docs/cce/umn/en-us_image_0144042759.png
 delete mode 100644 docs/cce/umn/en-us_image_0144045351.png
 delete mode 100644 docs/cce/umn/en-us_image_0165899095.png
 delete mode 100644 docs/cce/umn/en-us_image_0165899282.png
 delete mode 100644 docs/cce/umn/en-us_image_0181616313.png
 delete mode 100644 docs/cce/umn/en-us_image_0181616314.png
 delete mode 100644 docs/cce/umn/en-us_image_0183134473.png
 delete mode 100644 docs/cce/umn/en-us_image_0183134479.png
 delete mode 100644 docs/cce/umn/en-us_image_0183134608.png
 delete mode 100644 docs/cce/umn/en-us_image_0183674977.png
 delete mode 100644 docs/cce/umn/en-us_image_0195434915.png
 delete mode 100644 docs/cce/umn/en-us_image_0198873490.png
 delete mode 100644 docs/cce/umn/en-us_image_0198876479.png
 delete mode 100644 docs/cce/umn/en-us_image_0214003838.png
 delete mode 100644 docs/cce/umn/en-us_image_0220702939.png
 delete mode 100644 docs/cce/umn/en-us_image_0220765374.png
 delete mode 100644 docs/cce/umn/en-us_image_0250508826.png
 delete mode 100644 docs/cce/umn/en-us_image_0259714782.png
 delete mode 100644 docs/cce/umn/en-us_image_0259716601.png
 delete mode 100644 docs/cce/umn/en-us_image_0259814716.png
 delete mode 100644 docs/cce/umn/en-us_image_0259814717.png
 rename docs/cce/umn/{en-us_image_0000001223152421.png => en-us_image_0261818822.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001178034110.png => en-us_image_0261818824.png} (100%)
 create mode 100644 docs/cce/umn/en-us_image_0261818867.png
 rename docs/cce/umn/{en-us_image_0000001178034108.png => en-us_image_0261818875.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001178192670.png => en-us_image_0261818885.png} (100%)
 rename docs/cce/umn/{en-us_image_0000001223393899.png => en-us_image_0261818886.png} (100%)
 create mode 100644
docs/cce/umn/en-us_image_0261818893.png create mode 100644 docs/cce/umn/en-us_image_0261818896.png create mode 100644 docs/cce/umn/en-us_image_0261818899.png create mode 100644 docs/cce/umn/en-us_image_0261820020.png create mode 100644 docs/cce/umn/en-us_image_0268523694.png delete mode 100644 docs/cce/umn/en-us_image_0269288708.png delete mode 100644 docs/cce/umn/en-us_image_0273156799.png create mode 100644 docs/cce/umn/en-us_image_0275445543.png create mode 100644 docs/cce/umn/en-us_image_0275445566.png create mode 100644 docs/cce/umn/en-us_image_0275452681.png create mode 100644 docs/cce/umn/en-us_image_0278498565.png delete mode 100644 docs/cce/umn/en-us_image_0298565473.png delete mode 100644 docs/cce/umn/en-us_image_0300973777.png diff --git a/docs/cce/umn/.placeholder b/docs/cce/umn/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/cce/umn/ALL_META.TXT.json b/docs/cce/umn/ALL_META.TXT.json index 45ca625e..bebc082e 100644 --- a/docs/cce/umn/ALL_META.TXT.json +++ b/docs/cce/umn/ALL_META.TXT.json @@ -20,7 +20,7 @@ "githuburl":"" }, { - "uri":"cce_01_0236.html", + "uri":"cce_bulletin_0000.html", "product_code":"cce", "code":"3", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -43,7 +43,7 @@ "uri":"cce_bulletin_0003.html", "product_code":"cce", "code":"5", - "des":"This section describes the Kubernetes version support mechanism of CCE.Version number: The format is x.y.z-r{n}, where x.y is the major version and z is the minor version", + "des":"This section explains versioning in CCE, and the policies for Kubernetes version support.Version number: The format is x.y.z, where x.y is the major version and z is the ", "doc_type":"usermanual2", "kw":"Kubernetes Version Support Mechanism,Product Bulletin,User Guide", "title":"Kubernetes Version Support Mechanism", @@ -63,7 +63,7 @@ "uri":"cce_bulletin_0301.html", "product_code":"cce", "code":"7", - "des":"CCE nodes in Hybrid clusters can run on EulerOS 2.2, EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches", + "des":"CCE nodes in Hybrid clusters can run on EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches and verifica", "doc_type":"usermanual2", "kw":"OS Patch Notes for Cluster Nodes,Product Bulletin,User Guide", "title":"OS Patch Notes for Cluster Nodes", @@ -80,9 +80,19 @@ "githuburl":"" }, { - "uri":"CVE-2021-4034.html", + "uri":"cce_bulletin_0011.html", "product_code":"cce", "code":"9", + "des":"High-risk vulnerabilities:CCE fixes vulnerabilities as soon as possible after the Kubernetes community detects them and releases fixing solutions. The fixing policies are", + "doc_type":"usermanual2", + "kw":"Vulnerability Fixing Policies,Security Vulnerability Responses,User Guide", + "title":"Vulnerability Fixing Policies", + "githuburl":"" + }, + { + "uri":"CVE-2021-4034.html", + "product_code":"cce", + "code":"10", "des":"Recently, a security research team disclosed a privilege escalation vulnerability (CVE-2021-4034, also dubbed PwnKit) in PolKit's pkexec. 
Unprivileged users can gain full", "doc_type":"usermanual2", "kw":"Linux Polkit Privilege Escalation Vulnerability (CVE-2021-4034),Security Vulnerability Responses,Use", @@ -92,17 +102,27 @@ { "uri":"cce_bulletin_0206.html", "product_code":"cce", - "code":"10", + "code":"11", "des":"The Linux Kernel SACK vulnerabilities have been fixed. This section describes the solution to these vulnerabilities.On June 18, 2019, Red Hat released a security notice, ", "doc_type":"usermanual2", "kw":"Notice on Fixing Linux Kernel SACK Vulnerabilities,Security Vulnerability Responses,User Guide", "title":"Notice on Fixing Linux Kernel SACK Vulnerabilities", "githuburl":"" }, + { + "uri":"cce_10_0477.html", + "product_code":"cce", + "code":"12", + "des":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "doc_type":"usermanual2", + "kw":"Service Account Token Security Improvement,Product Bulletin,User Guide", + "title":"Service Account Token Security Improvement", + "githuburl":"" + }, { "uri":"cce_01_9994.html", "product_code":"cce", - "code":"11", + "code":"13", "des":"CCE works closely with multiple cloud services to support computing, storage, networking, and monitoring functions. When you log in to the CCE console for the first time,", "doc_type":"usermanual2", "kw":"Obtaining Resource Permissions,User Guide", @@ -110,9 +130,9 @@ "githuburl":"" }, { - "uri":"cce_01_0027.html", + "uri":"cce_10_0091.html", "product_code":"cce", - "code":"12", + "code":"14", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Clusters", @@ -120,29 +140,109 @@ "githuburl":"" }, { - "uri":"cce_01_0002.html", + "uri":"cce_10_0002.html", "product_code":"cce", - "code":"13", - "des":"Kubernetes is a containerized application software system that can be easily deployed and managed. It facilitates container scheduling and orchestration.For application d", + "code":"15", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Cluster Overview,Clusters,User Guide", + "kw":"Cluster Overview", "title":"Cluster Overview", "githuburl":"" }, { - "uri":"cce_01_0342.html", + "uri":"cce_10_0430.html", "product_code":"cce", - "code":"14", + "code":"16", + "des":"Kubernetes allows you to easily deploy and manage containerized application and facilitates container scheduling and orchestration.For developers, Kubernetes is a cluster", + "doc_type":"usermanual2", + "kw":"Basic Cluster Information,Cluster Overview,User Guide", + "title":"Basic Cluster Information", + "githuburl":"" + }, + { + "uri":"cce_10_0342.html", + "product_code":"cce", + "code":"17", "des":"The following table lists the differences between CCE Turbo clusters and CCE clusters:The QingTian architecture consists of data plane (software-hardware synergy) and man", "doc_type":"usermanual2", - "kw":"CCE Turbo Clusters and CCE Clusters,Clusters,User Guide", + "kw":"CCE Turbo Clusters and CCE Clusters,Cluster Overview,User Guide", "title":"CCE Turbo Clusters and CCE Clusters", "githuburl":"" }, { - "uri":"cce_01_0298.html", + "uri":"cce_10_0349.html", "product_code":"cce", - "code":"15", + "code":"18", + "des":"kube-proxy is a key component of a Kubernetes cluster. It is responsible for load balancing and forwarding between a Service and its backend pod.CCE supports two forwardi", + "doc_type":"usermanual2", + "kw":"Comparing iptables and IPVS,Cluster Overview,User Guide", + "title":"Comparing iptables and IPVS", + "githuburl":"" + }, + { + "uri":"cce_10_0068.html", + "product_code":"cce", + "code":"19", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Release Notes", + "title":"Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0467.html", + "product_code":"cce", + "code":"20", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.25.Kubernetes 1", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.25 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.25 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0468.html", + "product_code":"cce", + "code":"21", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.23.Changes in C", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.23 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.23 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0469.html", + "product_code":"cce", + "code":"22", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.21 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.21 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0470.html", + "product_code":"cce", + "code":"23", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the updates in CCE Kubernetes 1.19.Kubernetes 1", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.19 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.19 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0471.html", + "product_code":"cce", + "code":"24", + "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", + "doc_type":"usermanual2", + "kw":"CCE Kubernetes 1.17 Release Notes,Release Notes,User Guide", + "title":"CCE Kubernetes 1.17 Release Notes", + "githuburl":"" + }, + { + "uri":"cce_10_0298.html", + "product_code":"cce", + "code":"25", "des":"CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and inte", "doc_type":"usermanual2", "kw":"Creating a CCE Turbo Cluster,Clusters,User Guide", @@ -150,9 +250,9 @@ "githuburl":"" }, { - "uri":"cce_01_0028.html", + "uri":"cce_10_0028.html", "product_code":"cce", - "code":"16", + "code":"26", "des":"On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.In CCE, you ", "doc_type":"usermanual2", "kw":"Creating a CCE Cluster,Clusters,User Guide", @@ -160,9 +260,9 @@ "githuburl":"" }, { - "uri":"cce_01_0140.html", + "uri":"cce_10_0140.html", "product_code":"cce", - "code":"17", + "code":"27", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Using kubectl to Run a Cluster", @@ -170,19 +270,29 @@ "githuburl":"" }, { - "uri":"cce_01_0107.html", + "uri":"cce_10_0107.html", "product_code":"cce", - "code":"18", + "code":"28", "des":"This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses thekubeconfig.js", "doc_type":"usermanual2", - "kw":"Public network access,Connecting to a Cluster Using kubectl,Using kubectl to Run a Cluster,User Guid", + "kw":"Connecting to a Cluster Using kubectl,Using kubectl to Run a Cluster,User Guide", "title":"Connecting to a Cluster Using kubectl", "githuburl":"" }, { - "uri":"cce_01_0139.html", + "uri":"cce_10_0367.html", "product_code":"cce", - "code":"19", + "code":"29", + "des":"A Subject Alternative Name (SAN) can be signed in to a cluster server certificate. A SAN is usually used by the client to verify the server validity in TLS handshakes. 
Sp", + "doc_type":"usermanual2", + "kw":"Customizing a Cluster Certificate SAN,Using kubectl to Run a Cluster,User Guide", + "title":"Customizing a Cluster Certificate SAN", + "githuburl":"" + }, + { + "uri":"cce_10_0139.html", + "product_code":"cce", + "code":"30", "des":"getThe get command displays one or many resources of a cluster.This command prints a table of the most important information about all resources, including cluster nodes,", "doc_type":"usermanual2", "kw":"Common kubectl Commands,Using kubectl to Run a Cluster,User Guide", @@ -190,29 +300,9 @@ "githuburl":"" }, { - "uri":"cce_01_0023.html", + "uri":"cce_10_0215.html", "product_code":"cce", - "code":"20", - "des":"Before running kubectl commands, you should have the kubectl development skills and understand the kubectl operations. For details, see Kubernetes API and kubectl CLI.Go ", - "doc_type":"usermanual2", - "kw":"kubectl,Affinity,Anti-affinity,Workload Access Mode,Advanced Workload Settings,Configuration Center,", - "title":"kubectl Usage Guide", - "githuburl":"" - }, - { - "uri":"cce_01_0157.html", - "product_code":"cce", - "code":"21", - "des":"The Cluster Auto Scaling feature allows CCE to automatically scale out a cluster (adding worker nodes to a cluster) according to custom policies when workloads cannot be ", - "doc_type":"usermanual2", - "kw":"scale out,Cooldown Period,Metric-based policy,Scheduled policy,Periodic policy,Setting Cluster Auto ", - "title":"Setting Cluster Auto Scaling", - "githuburl":"" - }, - { - "uri":"cce_01_0215.html", - "product_code":"cce", - "code":"22", + "code":"31", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Upgrading a Cluster", @@ -220,49 +310,49 @@ "githuburl":"" }, { - "uri":"cce_01_0197.html", + "uri":"cce_10_0197.html", "product_code":"cce", - "code":"23", + "code":"32", "des":"To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.After the latest Kub", "doc_type":"usermanual2", - "kw":"Overview,Upgrading a Cluster,User Guide", - "title":"Overview", + "kw":"Upgrade Overview,Upgrading a Cluster,User Guide", + "title":"Upgrade Overview", "githuburl":"" }, { - "uri":"cce_01_0302.html", + "uri":"cce_10_0302.html", "product_code":"cce", - "code":"24", - "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Overview.Upgraded clusters ca", + "code":"33", + "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. 
For details, see Upgrade Overview.Upgraded clu", "doc_type":"usermanual2", "kw":"Before You Start,Upgrading a Cluster,User Guide", "title":"Before You Start", "githuburl":"" }, { - "uri":"cce_01_0120.html", + "uri":"cce_10_0120.html", "product_code":"cce", - "code":"25", + "code":"34", "des":"You can upgrade your clusters to a newer Kubernetes version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgrade", "doc_type":"usermanual2", - "kw":"Performing Replace/Rolling Upgrade (v1.13 and Earlier),Upgrading a Cluster,User Guide", - "title":"Performing Replace/Rolling Upgrade (v1.13 and Earlier)", + "kw":"Performing Replace/Rolling Upgrade,Upgrading a Cluster,User Guide", + "title":"Performing Replace/Rolling Upgrade", "githuburl":"" }, { - "uri":"cce_01_0301.html", + "uri":"cce_10_0301.html", "product_code":"cce", - "code":"26", - "des":"On the CCE console, You can perform an in-place cluster upgrade to use new cluster features.Before the upgrade, learn about the target version to which each CCE cluster c", + "code":"35", + "des":"You can upgrade your clusters to a newer version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what w", "doc_type":"usermanual2", - "kw":"Performing In-place Upgrade (v1.15 and Later),Upgrading a Cluster,User Guide", - "title":"Performing In-place Upgrade (v1.15 and Later)", + "kw":"Performing In-place Upgrade,Upgrading a Cluster,User Guide", + "title":"Performing In-place Upgrade", "githuburl":"" }, { - "uri":"cce_01_0210.html", + "uri":"cce_10_0210.html", "product_code":"cce", - "code":"27", + "code":"36", "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", "doc_type":"usermanual2", "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", @@ -270,19 +360,9 @@ "githuburl":"" }, { - "uri":"cce_01_0068.html", + "uri":"cce_10_0031.html", "product_code":"cce", - "code":"28", - "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. To enable interoperability from one Kubernetes installation to the nex", - "doc_type":"usermanual2", - "kw":"CCE Kubernetes Release Notes,Upgrading a Cluster,User Guide", - "title":"CCE Kubernetes Release Notes", - "githuburl":"" - }, - { - "uri":"cce_01_0031.html", - "product_code":"cce", - "code":"29", + "code":"37", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Cluster", @@ -290,9 +370,19 @@ "githuburl":"" }, { - "uri":"cce_01_0212.html", + "uri":"cce_10_0213.html", "product_code":"cce", - "code":"30", + "code":"38", + "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your very requirements.This function is supported only in clusters of v1", + "doc_type":"usermanual2", + "kw":"Managing Cluster Components,Managing a Cluster,User Guide", + "title":"Managing Cluster Components", + "githuburl":"" + }, + { + "uri":"cce_10_0212.html", + "product_code":"cce", + "code":"39", "des":"This section describes how to delete a cluster.Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workl", "doc_type":"usermanual2", "kw":"Deleting a Cluster,Managing a Cluster,User Guide", @@ -300,69 +390,49 @@ "githuburl":"" }, { - "uri":"cce_01_0214.html", + "uri":"cce_10_0214.html", "product_code":"cce", - "code":"31", - "des":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster to save cluster management costs.After a cluster is hibernated, resources such a", + "code":"40", + "des":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or ", "doc_type":"usermanual2", "kw":"Hibernating and Waking Up a Cluster,Managing a Cluster,User Guide", "title":"Hibernating and Waking Up a Cluster", "githuburl":"" }, { - "uri":"cce_01_0213.html", + "uri":"cce_10_0602.html", "product_code":"cce", - "code":"32", - "des":"CCE clusters allow you to manage Kubernetes parameters, through which you can let core components work under your very requirements.This function is supported only in clu", + "code":"41", + "des":"If overload control is enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The c", "doc_type":"usermanual2", - "kw":"Configuring Kubernetes Parameters,Managing a Cluster,User Guide", - "title":"Configuring Kubernetes Parameters", + "kw":"Cluster Overload Control,Managing a Cluster,User Guide", + "title":"Cluster Overload Control", "githuburl":"" }, { - "uri":"cce_01_0175.html", + "uri":"cce_10_0175.html", "product_code":"cce", - "code":"33", - "des":"Before accessing cluster resources through open-source Kubernetes APIs, obtain the cluster's certificate.The downloaded certificate contains three files: client.key, clie", + "code":"42", + "des":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", "doc_type":"usermanual2", "kw":"Obtaining a Cluster Certificate,Clusters,User Guide", "title":"Obtaining a Cluster Certificate", "githuburl":"" }, { - "uri":"cce_01_0085.html", + "uri":"cce_10_0403.html", "product_code":"cce", - "code":"34", - "des":"This section describes how to control permissions on resources in a cluster, for example, allow user A to read and write application data in a namespace, and user B to on", + "code":"43", + "des":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", 
"doc_type":"usermanual2", - "kw":"Controlling Cluster Permissions,Clusters,User Guide", - "title":"Controlling Cluster Permissions", + "kw":"Changing Cluster Scale,Clusters,User Guide", + "title":"Changing Cluster Scale", "githuburl":"" }, { - "uri":"cce_01_0347.html", + "uri":"cce_10_0183.html", "product_code":"cce", - "code":"35", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Cluster Parameters", - "title":"Cluster Parameters", - "githuburl":"" - }, - { - "uri":"cce_01_0348.html", - "product_code":"cce", - "code":"36", - "des":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", - "doc_type":"usermanual2", - "kw":"Maximum Number of Pods That Can Be Created on a Node,Cluster Parameters,User Guide", - "title":"Maximum Number of Pods That Can Be Created on a Node", - "githuburl":"" - }, - { - "uri":"cce_01_0183.html", - "product_code":"cce", - "code":"37", + "code":"44", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"node labels", @@ -370,89 +440,109 @@ "githuburl":"" }, { - "uri":"cce_01_0180.html", + "uri":"cce_10_0180.html", "product_code":"cce", - "code":"38", - "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", + "code":"45", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Overview,Nodes,User Guide", - "title":"Overview", + "kw":"Node Overview", + "title":"Node Overview", "githuburl":"" }, { - "uri":"cce_01_0033.html", + "uri":"cce_10_0461.html", "product_code":"cce", - "code":"39", - "des":"A node is a virtual or physical machine that provides computing resources. Sufficient nodes must be available in your project to ensure that operations, such as creating ", + "code":"46", + "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", + "doc_type":"usermanual2", + "kw":"Precautions for Using a Node,Node Overview,User Guide", + "title":"Precautions for Using a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0462.html", + "product_code":"cce", + "code":"47", + "des":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. 
The kubelet interacts with a container runtime throu", + "doc_type":"usermanual2", + "kw":"Container Engine,Node Overview,User Guide", + "title":"Container Engine", + "githuburl":"" + }, + { + "uri":"cce_10_0463.html", + "product_code":"cce", + "code":"48", + "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", + "doc_type":"usermanual2", + "kw":"Kata Containers and Common Containers,Node Overview,User Guide", + "title":"Kata Containers and Common Containers", + "githuburl":"" + }, + { + "uri":"cce_10_0348.html", + "product_code":"cce", + "code":"49", + "des":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", + "doc_type":"usermanual2", + "kw":"Maximum Number of Pods That Can Be Created on a Node,Node Overview,User Guide", + "title":"Maximum Number of Pods That Can Be Created on a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0178.html", + "product_code":"cce", + "code":"50", + "des":"Some of the resources on the node need to run some necessary Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total num", + "doc_type":"usermanual2", + "kw":"node,Kubernetes,Formula for Calculating the Reserved Resources of a Node,Node Overview,User Guide", + "title":"Formula for Calculating the Reserved Resources of a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0341.html", + "product_code":"cce", + "code":"51", + "des":"This section describes how to allocate data disk space.When creating a node, you need to configure a data disk whose capacity is greater than or equal to 100GB for the no", + "doc_type":"usermanual2", + "kw":"Data Disk Space Allocation,Node Overview,User Guide", + "title":"Data Disk Space Allocation", + "githuburl":"" + }, + { + "uri":"cce_10_0363.html", + "product_code":"cce", + "code":"52", + "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has 2-core or higher CPU, 4 GB or larger mem", "doc_type":"usermanual2", "kw":"Creating a Node,Nodes,User Guide", "title":"Creating a Node", "githuburl":"" }, { - "uri":"cce_01_0363.html", + "uri":"cce_10_0198.html", "product_code":"cce", - "code":"40", - "des":"At least one CCE Turbo cluster is available. For details on how to create a cluster, see Creating a CCE Turbo Cluster.A key pair has been created for identity authenticat", + "code":"53", + "des":"In CCE, you can Creating a Node or add existing nodes (ECSs) into your cluster.While an ECS is being accepted into a cluster, the operating system of the ECS will be rese", "doc_type":"usermanual2", - "kw":"Creating a Node in a CCE Turbo Cluster,Nodes,User Guide", - "title":"Creating a Node in a CCE Turbo Cluster", + "kw":"Adding Nodes for Management,Nodes,User Guide", + "title":"Adding Nodes for Management", "githuburl":"" }, { - "uri":"cce_01_0338.html", + "uri":"cce_10_0338.html", "product_code":"cce", - "code":"41", - "des":"Removing a node from a cluster in CCE will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server (ECS) corresponding to t", + "code":"54", + "des":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. 
You ", "doc_type":"usermanual2", "kw":"Removing a Node,Nodes,User Guide", "title":"Removing a Node", "githuburl":"" }, { - "uri":"cce_01_0185.html", + "uri":"cce_10_0003.html", "product_code":"cce", - "code":"42", - "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", - "doc_type":"usermanual2", - "kw":"Logging In to a Node,Nodes,User Guide", - "title":"Logging In to a Node", - "githuburl":"" - }, - { - "uri":"cce_01_0004.html", - "product_code":"cce", - "code":"43", - "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", - "doc_type":"usermanual2", - "kw":"node labels,Inherent Label of a Node,Deleting a Node Label,Managing Node Labels,Nodes,User Guide", - "title":"Managing Node Labels", - "githuburl":"" - }, - { - "uri":"cce_01_0184.html", - "product_code":"cce", - "code":"44", - "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", - "doc_type":"usermanual2", - "kw":"Synchronizing Node Data,Nodes,User Guide", - "title":"Synchronizing Node Data", - "githuburl":"" - }, - { - "uri":"cce_01_0352.html", - "product_code":"cce", - "code":"45", - "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", - "doc_type":"usermanual2", - "kw":"Configuring Node Scheduling (Tainting),Nodes,User Guide", - "title":"Configuring Node Scheduling (Tainting)", - "githuburl":"" - }, - { - "uri":"cce_01_0003.html", - "product_code":"cce", - "code":"46", + "code":"55", "des":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", "doc_type":"usermanual2", "kw":"Resetting a Node,Nodes,User Guide", @@ -460,9 +550,49 @@ "githuburl":"" }, { - "uri":"cce_01_0186.html", + "uri":"cce_10_0185.html", "product_code":"cce", - "code":"47", + "code":"56", + "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", + "doc_type":"usermanual2", + "kw":"Logging In to a Node,Nodes,User Guide", + "title":"Logging In to a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0004.html", + "product_code":"cce", + "code":"57", + "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", + "doc_type":"usermanual2", + "kw":"node labels,Inherent Label of a Node,Managing Node Labels,Nodes,User Guide", + "title":"Managing Node Labels", + "githuburl":"" + }, + { + "uri":"cce_10_0352.html", + "product_code":"cce", + "code":"58", + "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "doc_type":"usermanual2", + "kw":"Managing Node Taints,Nodes,User Guide", + "title":"Managing Node Taints", + "githuburl":"" + }, + { + "uri":"cce_10_0184.html", + "product_code":"cce", + "code":"59", + "des":"Each node in a cluster is a cloud server or physical machine. 
After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", + "doc_type":"usermanual2", + "kw":"Synchronizing Data with Cloud Servers,Nodes,User Guide", + "title":"Synchronizing Data with Cloud Servers", + "githuburl":"" + }, + { + "uri":"cce_10_0186.html", + "product_code":"cce", + "code":"60", "des":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.After a CCE cluster is deleted", "doc_type":"usermanual2", "kw":"Deleting a Node,Nodes,User Guide", @@ -470,9 +600,9 @@ "githuburl":"" }, { - "uri":"cce_01_0036.html", + "uri":"cce_10_0036.html", "product_code":"cce", - "code":"48", + "code":"61", "des":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", "doc_type":"usermanual2", "kw":"Stopping a Node,Nodes,User Guide", @@ -480,9 +610,9 @@ "githuburl":"" }, { - "uri":"cce_01_0276.html", + "uri":"cce_10_0276.html", "product_code":"cce", - "code":"49", + "code":"62", "des":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.The o", "doc_type":"usermanual2", "kw":"Performing Rolling Upgrade for Nodes,Nodes,User Guide", @@ -490,49 +620,9 @@ "githuburl":"" }, { - "uri":"cce_01_0178.html", + "uri":"cce_10_0035.html", "product_code":"cce", - "code":"50", - "des":"Some of the resources on the node need to run some necessary Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total num", - "doc_type":"usermanual2", - "kw":"node,Kubernetes,Formula for Calculating the Reserved Resources of a Node,Nodes,User Guide", - "title":"Formula for Calculating the Reserved Resources of a Node", - "githuburl":"" - }, - { - "uri":"cce_01_0200.html", - "product_code":"cce", - "code":"51", - "des":"This section describes how to check whether there are available raw disks and Linux LVM disk partitions and how to create Linux LVM disk partitions.To improve the system ", - "doc_type":"usermanual2", - "kw":"direct-lvm,raw disk,Creating a Linux LVM Disk Partition for Docker,Nodes,User Guide", - "title":"Creating a Linux LVM Disk Partition for Docker", - "githuburl":"" - }, - { - "uri":"cce_01_0341.html", - "product_code":"cce", - "code":"52", - "des":"When creating a node, you need to configure data disks for the node.The data disk is divided into Kubernetes space and user space. The user space defines the space that i", - "doc_type":"usermanual2", - "kw":"Data Disk Space Allocation,Nodes,User Guide", - "title":"Data Disk Space Allocation", - "githuburl":"" - }, - { - "uri":"cce_01_0344.html", - "product_code":"cce", - "code":"53", - "des":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", - "doc_type":"usermanual2", - "kw":"Adding a Second Data Disk to a Node in a CCE Cluster,Nodes,User Guide", - "title":"Adding a Second Data Disk to a Node in a CCE Cluster", - "githuburl":"" - }, - { - "uri":"cce_01_0035.html", - "product_code":"cce", - "code":"54", + "code":"63", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node Pools", @@ -540,9 +630,9 @@ "githuburl":"" }, { - "uri":"cce_01_0081.html", + "uri":"cce_10_0081.html", "product_code":"cce", - "code":"55", + "code":"64", "des":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a clus", "doc_type":"usermanual2", "kw":"Deploying a Workload in a Specified Node Pool,Node Pool Overview,Node Pools,User Guide", @@ -550,19 +640,19 @@ "githuburl":"" }, { - "uri":"cce_01_0012.html", + "uri":"cce_10_0012.html", "product_code":"cce", - "code":"56", - "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.For details abou", + "code":"65", + "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.The autoscaler a", "doc_type":"usermanual2", "kw":"Creating a Node Pool,Node Pools,User Guide", "title":"Creating a Node Pool", "githuburl":"" }, { - "uri":"cce_01_0222.html", + "uri":"cce_10_0222.html", "product_code":"cce", - "code":"57", + "code":"66", "des":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", "doc_type":"usermanual2", "kw":"Managing a Node Pool,Node Pools,User Guide", @@ -570,19 +660,19 @@ "githuburl":"" }, { - "uri":"cce_01_0046.html", + "uri":"cce_10_0046.html", "product_code":"cce", - "code":"58", + "code":"67", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"scaling policies", + "kw":"Workloads", "title":"Workloads", "githuburl":"" }, { - "uri":"cce_01_0006.html", + "uri":"cce_10_0006.html", "product_code":"cce", - "code":"59", + "code":"68", "des":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "doc_type":"usermanual2", "kw":"Overview,Workloads,User Guide", @@ -590,89 +680,69 @@ "githuburl":"" }, { - "uri":"cce_01_0047.html", + "uri":"cce_10_0047.html", "product_code":"cce", - "code":"60", + "code":"69", "des":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "doc_type":"usermanual2", - "kw":"Create YAML,create a workload using kubectl,Creating a Deployment,Workloads,User Guide", + "kw":"create a workload using kubectl,Creating a Deployment,Workloads,User Guide", "title":"Creating a Deployment", "githuburl":"" }, { - "uri":"cce_01_0048.html", + "uri":"cce_10_0048.html", "product_code":"cce", - "code":"61", + "code":"70", "des":"StatefulSets are a type of workloads whose data or status is stored while they are running. 
For example, MySQL is a StatefulSet because it needs to store new data.A conta", "doc_type":"usermanual2", - "kw":"Create YAML,Using kubectl,Creating a StatefulSet,Workloads,User Guide", + "kw":"Using kubectl,Creating a StatefulSet,Workloads,User Guide", "title":"Creating a StatefulSet", "githuburl":"" }, { - "uri":"cce_01_0216.html", + "uri":"cce_10_0216.html", "product_code":"cce", - "code":"62", + "code":"71", "des":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "doc_type":"usermanual2", - "kw":"Creating a DaemonSet,Workloads,User Guide", + "kw":"create a workload using kubectl,Creating a DaemonSet,Workloads,User Guide", "title":"Creating a DaemonSet", "githuburl":"" }, { - "uri":"cce_01_0150.html", + "uri":"cce_10_0150.html", "product_code":"cce", - "code":"63", + "code":"72", "des":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. It is completed after it exits normally (exit 0).A j", "doc_type":"usermanual2", - "kw":"private container image,My Images,authenticated,Shared Images,Creating a Job,Workloads,User Guide", + "kw":"Creating a Job,Workloads,User Guide", "title":"Creating a Job", "githuburl":"" }, { - "uri":"cce_01_0151.html", + "uri":"cce_10_0151.html", "product_code":"cce", - "code":"64", + "code":"73", "des":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "doc_type":"usermanual2", - "kw":"time synchronization,private container image,Concurrency Policy,Forbid,Allow,Replace,Schedule,My Ima", + "kw":"time synchronization,Creating a Cron Job,Workloads,User Guide", "title":"Creating a Cron Job", "githuburl":"" }, { - "uri":"cce_01_0013.html", + "uri":"cce_10_0007.html", "product_code":"cce", - "code":"65", - "des":"A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates an application's container (or, in some cases, multip", + "code":"74", + "des":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "doc_type":"usermanual2", - "kw":"Deleting a Pod,Managing Pods,Workloads,User Guide", - "title":"Managing Pods", - "githuburl":"" - }, - { - "uri":"cce_01_0007.html", - "product_code":"cce", - "code":"66", - "des":"After a workload is created, you can scale, upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescription", - "doc_type":"usermanual2", - "kw":"Add Label,Managing Workloads and Jobs,Workloads,User Guide", + "kw":"Managing Workloads and Jobs,Workloads,User Guide", "title":"Managing Workloads and Jobs", "githuburl":"" }, { - "uri":"cce_01_0057.html", + "uri":"cce_10_0130.html", "product_code":"cce", - "code":"67", - "des":"After scaling policies are defined, pods can be automatically added or deleted based on resource changes, fixed time, and fixed periods. 
You do not need to manually adjus", - "doc_type":"usermanual2", - "kw":"scaling policies,Metric-based policy,Scheduled policy,Periodic policy,Scaling a Workload,Workloads,U", - "title":"Scaling a Workload", - "githuburl":"" - }, - { - "uri":"cce_01_0130.html", - "product_code":"cce", - "code":"68", + "code":"75", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Configuring a Container", @@ -680,9 +750,19 @@ "githuburl":"" }, { - "uri":"cce_01_0009.html", + "uri":"cce_10_0396.html", "product_code":"cce", - "code":"69", + "code":"76", + "des":"A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple ", + "doc_type":"usermanual2", + "kw":"Setting Basic Container Information,Configuring a Container,User Guide", + "title":"Setting Basic Container Information", + "githuburl":"" + }, + { + "uri":"cce_10_0009.html", + "product_code":"cce", + "code":"77", "des":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "doc_type":"usermanual2", "kw":"Using a Third-Party Image,Configuring a Container,User Guide", @@ -690,189 +770,159 @@ "githuburl":"" }, { - "uri":"cce_01_0163.html", + "uri":"cce_10_0163.html", "product_code":"cce", - "code":"70", - "des":"CCE allows you to set resource limits for added containers during workload creation. You can request and limit the CPU and memory quotas used by each pod in the workload.", + "code":"78", + "des":"CCE allows you to set resource limits for added containers during workload creation. You can apply for and limit the CPU and memory quotas used by each pod in a workload.", "doc_type":"usermanual2", "kw":"Setting Container Specifications,Configuring a Container,User Guide", "title":"Setting Container Specifications", "githuburl":"" }, { - "uri":"cce_01_0105.html", + "uri":"cce_10_0105.html", "product_code":"cce", - "code":"71", + "code":"79", "des":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", "doc_type":"usermanual2", - "kw":"Start Command,Post-Start,Pre-Stop,CLI,CLI,Setting Container Lifecycle Parameters,Configuring a Conta", + "kw":"Startup Command,Post-Start,Pre-Stop,Setting Container Lifecycle Parameters,Configuring a Container,U", "title":"Setting Container Lifecycle Parameters", "githuburl":"" }, { - "uri":"cce_01_0008.html", + "uri":"cce_10_0112.html", "product_code":"cce", - "code":"72", - "des":"When creating a workload or job, you can use an image to specify the processes running in the container.By default, the image runs the default command. To run a specific ", + "code":"80", + "des":"Health check regularly checks the health status of containers during container running. 
If the health check function is not configured, a pod cannot detect application ex", "doc_type":"usermanual2", - "kw":"Commands and parameters used to run a container,Setting the Startup Command,Setting Container Startu", - "title":"Setting Container Startup Commands", - "githuburl":"" - }, - { - "uri":"cce_01_0112.html", - "product_code":"cce", - "code":"73", - "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect service except", - "doc_type":"usermanual2", - "kw":"Health check,Health Check Methods,HTTP request,TCP port,CLI,Setting Health Check for a Container,Con", + "kw":"Health check,HTTP request,TCP port,CLI,Setting Health Check for a Container,Configuring a Container,", "title":"Setting Health Check for a Container", "githuburl":"" }, { - "uri":"cce_01_0113.html", + "uri":"cce_10_0113.html", "product_code":"cce", - "code":"74", + "code":"81", "des":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", "doc_type":"usermanual2", - "kw":"Manually add environment variables,import environment variables from a secret,import environment var", + "kw":"Setting an Environment Variable,Configuring a Container,User Guide", "title":"Setting an Environment Variable", "githuburl":"" }, { - "uri":"cce_01_0149.html", - "product_code":"cce", - "code":"75", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Affinity and Anti-Affinity Scheduling", - "title":"Affinity and Anti-Affinity Scheduling", - "githuburl":"" - }, - { - "uri":"cce_01_0051.html", - "product_code":"cce", - "code":"76", - "des":"CCE supports custom and simple scheduling policies. A custom scheduling policy allows you to customize node affinity, workload affinity, and workload anti-affinity to mee", - "doc_type":"usermanual2", - "kw":"Simple Scheduling Policies,Workload-AZ affinity,Workload-node affinity,Workload-workload affinity,Sc", - "title":"Scheduling Policy Overview", - "githuburl":"" - }, - { - "uri":"cce_01_0231.html", - "product_code":"cce", - "code":"77", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Custom Scheduling Policies", - "title":"Custom Scheduling Policies", - "githuburl":"" - }, - { - "uri":"cce_01_0232.html", - "product_code":"cce", - "code":"78", - "des":"This section uses Nginx as an example to describe how to configure node affinity.PrerequisitesA workload that uses the nginx container image has been deployed on a node.P", - "doc_type":"usermanual2", - "kw":"Node Affinity,Custom Scheduling Policies,User Guide", - "title":"Node Affinity", - "githuburl":"" - }, - { - "uri":"cce_01_0233.html", - "product_code":"cce", - "code":"79", - "des":"Workload affinity determines the pods as which the target workload will be deployed in the same topology domain.There are two types of pod affinity rules: Required (hard ", - "doc_type":"usermanual2", - "kw":"Workload Affinity,Custom Scheduling Policies,User Guide", - "title":"Workload Affinity", - "githuburl":"" - }, - { - "uri":"cce_01_0234.html", - "product_code":"cce", - "code":"80", - "des":"Workload anti-affinity determines the pods from which the target workload will be deployed in a different topology domain.There are two types of pod anti-affinity rules: ", - "doc_type":"usermanual2", - "kw":"Workload Anti-Affinity,Custom Scheduling Policies,User Guide", - "title":"Workload Anti-Affinity", - "githuburl":"" - }, - { - "uri":"cce_01_0230.html", - "product_code":"cce", - "code":"81", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Simple Scheduling Policies", - "title":"Simple Scheduling Policies", - "githuburl":"" - }, - { - "uri":"cce_01_0228.html", + "uri":"cce_10_0353.html", "product_code":"cce", "code":"82", - "des":"The created workload will be deployed in the selected AZ.This section uses an Nginx workload as an example to describe how to create a workload using kubectl.Prerequisite", + "des":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "doc_type":"usermanual2", - "kw":"Workload-AZ Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-AZ Affinity", + "kw":"Configuring an Image Pull Policy,Configuring a Container,User Guide", + "title":"Configuring an Image Pull Policy", "githuburl":"" }, { - "uri":"cce_01_0229.html", + "uri":"cce_10_0354.html", "product_code":"cce", "code":"83", - "des":"The created workload is not deployed on the selected AZ.This section uses Nginx as an example to describe how to create a workload using kubectl.PrerequisitesThe ECS wher", + "des":"When creating a workload, you can configure containers to use the same time zone as the node. 
You can enable time zone synchronization when creating a workload.The time z", "doc_type":"usermanual2", - "kw":"Workload-AZ Anti-Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-AZ Anti-Affinity", + "kw":"Configuring Time Zone Synchronization,Configuring a Container,User Guide", + "title":"Configuring Time Zone Synchronization", "githuburl":"" }, { - "uri":"cce_01_0225.html", + "uri":"cce_10_0397.html", "product_code":"cce", "code":"84", - "des":"If you select multiple nodes, the system automatically chooses one of them during workload deployment.This section uses an Nginx workload as an example to describe how to", + "des":"In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "doc_type":"usermanual2", - "kw":"Workload-Node Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Node Affinity", + "kw":"Configuring the Workload Upgrade Policy,Configuring a Container,User Guide", + "title":"Configuring the Workload Upgrade Policy", "githuburl":"" }, { - "uri":"cce_01_0226.html", + "uri":"cce_10_0232.html", "product_code":"cce", "code":"85", - "des":"If you select multiple nodes, the workload will not be deployed on these nodes.This section uses Nginx as an example to describe how to create a workload using kubectl.Pr", + "des":"A nodeSelector provides a very simple way to constrain pods to nodes with particular labels, as mentioned in Creating a DaemonSet. The affinity and anti-affinity feature ", "doc_type":"usermanual2", - "kw":"Workload-Node Anti-Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Node Anti-Affinity", + "kw":"Scheduling Policy (Affinity/Anti-affinity),Configuring a Container,User Guide", + "title":"Scheduling Policy (Affinity/Anti-affinity)", "githuburl":"" }, { - "uri":"cce_01_0220.html", + "uri":"cce_10_0345.html", "product_code":"cce", "code":"86", - "des":"The workload to be created will be deployed on the same node as the selected affinity workloads.This section uses Nginx as an example to describe how to create a workload", + "des":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The gpu-beta add-on has been installed. During the installation, select t", "doc_type":"usermanual2", - "kw":"Workload-Workload Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Workload Affinity", + "kw":"GPU Scheduling,Workloads,User Guide", + "title":"GPU Scheduling", "githuburl":"" }, { - "uri":"cce_01_0227.html", + "uri":"cce_10_0551.html", "product_code":"cce", "code":"87", - "des":"The workload to be created and the selected workloads will be deployed on different nodes.This section uses Nginx as an example to describe how to create a workload using", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Workload-Workload Anti-Affinity,Simple Scheduling Policies,User Guide", - "title":"Workload-Workload Anti-Affinity", + "kw":"CPU Core Binding", + "title":"CPU Core Binding", "githuburl":"" }, { - "uri":"cce_01_0020.html", + "uri":"cce_10_0351.html", "product_code":"cce", "code":"88", + "des":"By default, kubelet uses CFS quotas to enforce pod CPU limits. 
When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", + "doc_type":"usermanual2", + "kw":"Binding CPU Cores,CPU Core Binding,User Guide", + "title":"Binding CPU Cores", + "githuburl":"" + }, + { + "uri":"cce_10_0386.html", + "product_code":"cce", + "code":"89", + "des":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", + "doc_type":"usermanual2", + "kw":"Pod Labels and Annotations,Workloads,User Guide", + "title":"Pod Labels and Annotations", + "githuburl":"" + }, + { + "uri":"cce_10_0423.html", + "product_code":"cce", + "code":"90", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Volcano Scheduling", + "title":"Volcano Scheduling", + "githuburl":"" + }, + { + "uri":"cce_10_0384.html", + "product_code":"cce", + "code":"91", + "des":"Jobs can be classified into online jobs and offline jobs based on whether services are always online.Online job: Such jobs run for a long time, with regular traffic surge", + "doc_type":"usermanual2", + "kw":"Hybrid Deployment of Online and Offline Jobs,Volcano Scheduling,User Guide", + "title":"Hybrid Deployment of Online and Offline Jobs", + "githuburl":"" + }, + { + "uri":"cce_10_0288.html", + "product_code":"cce", + "code":"92", + "des":"When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. CCE provides a cust", + "doc_type":"usermanual2", + "kw":"Security Group Policies,Workloads,User Guide", + "title":"Security Group Policies", + "githuburl":"" + }, + { + "uri":"cce_10_0020.html", + "product_code":"cce", + "code":"93", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Networking", @@ -880,9 +930,9 @@ "githuburl":"" }, { - "uri":"cce_01_0010.html", + "uri":"cce_10_0010.html", "product_code":"cce", - "code":"89", + "code":"94", "des":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", "doc_type":"usermanual2", "kw":"Overview,Networking,User Guide", @@ -890,9 +940,9 @@ "githuburl":"" }, { - "uri":"cce_01_0280.html", + "uri":"cce_10_0280.html", "product_code":"cce", - "code":"90", + "code":"95", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Container Network Models", @@ -900,19 +950,19 @@ "githuburl":"" }, { - "uri":"cce_01_0281.html", + "uri":"cce_10_0281.html", "product_code":"cce", - "code":"91", - "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. 
In CCE, you can select the following network models for your cluster:Con", + "code":"96", + "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Tun", "doc_type":"usermanual2", "kw":"Overview,Container Network Models,User Guide", "title":"Overview", "githuburl":"" }, { - "uri":"cce_01_0282.html", + "uri":"cce_10_0282.html", "product_code":"cce", - "code":"92", + "code":"97", "des":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet pac", "doc_type":"usermanual2", "kw":"Container Tunnel Network,Container Network Models,User Guide", @@ -920,9 +970,9 @@ "githuburl":"" }, { - "uri":"cce_01_0283.html", + "uri":"cce_10_0283.html", "product_code":"cce", - "code":"93", + "code":"98", "des":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", "doc_type":"usermanual2", "kw":"VPC Network,Container Network Models,User Guide", @@ -930,9 +980,9 @@ "githuburl":"" }, { - "uri":"cce_01_0284.html", + "uri":"cce_10_0284.html", "product_code":"cce", - "code":"94", + "code":"99", "des":"Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", "doc_type":"usermanual2", "kw":"Cloud Native Network 2.0,Container Network Models,User Guide", @@ -940,9 +990,9 @@ "githuburl":"" }, { - "uri":"cce_01_0247.html", + "uri":"cce_10_0247.html", "product_code":"cce", - "code":"95", + "code":"100", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Services", @@ -950,19 +1000,19 @@ "githuburl":"" }, { - "uri":"cce_01_0249.html", + "uri":"cce_10_0249.html", "product_code":"cce", - "code":"96", + "code":"101", "des":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", "doc_type":"usermanual2", - "kw":"Overview,Services,User Guide", - "title":"Overview", + "kw":"Service Overview,Services,User Guide", + "title":"Service Overview", "githuburl":"" }, { - "uri":"cce_01_0011.html", + "uri":"cce_10_0011.html", "product_code":"cce", - "code":"97", + "code":"102", "des":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is Uploaded Charts for subsequent workload creation.When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}", - "doc_type":"usermanual2", - "kw":"Installing a chart,Updating a chart,Downloading a chart,Deleting a chart,Uploading a Chart,My Charts", - "title":"Uploading a Chart", - "githuburl":"" - }, - { - "uri":"cce_01_0146.html", - "product_code":"cce", - "code":"180", - "des":"In the workload list, if the status is Rollback successful, the workload is rolled back successfully.", - "doc_type":"usermanual2", - "kw":"Creating a Chart-based Workload,Cluster,Upgrading a Chart-based Workload,Rolling Back a Chart-based ", - "title":"Creating a Workload from a Chart", - "githuburl":"" - }, - { - "uri":"cce_01_0064.html", - "product_code":"cce", - "code":"181", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Add-ons", - "title":"Add-ons", - "githuburl":"" - }, - { - "uri":"cce_01_0277.html", - "product_code":"cce", - "code":"182", - "des":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", - "doc_type":"usermanual2", - "kw":"Overview,Add-ons,User Guide", - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_01_0129.html", - "product_code":"cce", - "code":"183", - "des":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", - "doc_type":"usermanual2", - "kw":"coredns add-on,DNS server,domain name resolution services,Kubernetes,coredns (System Resource Add-on", - "title":"coredns (System Resource Add-on, Mandatory)", - "githuburl":"" - }, - { - "uri":"cce_01_0127.html", - "product_code":"cce", - "code":"184", - "des":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use IaaS storage resources. By installing and upgrading storage-driver, you ca", - "doc_type":"usermanual2", - "kw":"storage-driver (System Resource Add-on, Mandatory),Add-ons,User Guide", - "title":"storage-driver (System Resource Add-on, Mandatory)", - "githuburl":"" - }, - { - "uri":"cce_01_0066.html", - "product_code":"cce", - "code":"185", - "des":"Everest is a cloud-native container storage system. 
Based on Container Storage Interface (CSI), clusters of Kubernetes v1.15 or later can interconnect with cloud storage ", - "doc_type":"usermanual2", - "kw":"everest (System Resource Add-on, Mandatory),Add-ons,User Guide", - "title":"everest (System Resource Add-on, Mandatory)", - "githuburl":"" - }, - { - "uri":"cce_01_0154.html", - "product_code":"cce", - "code":"186", - "des":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", - "doc_type":"usermanual2", - "kw":"Auto Scale-In,autoscaler,Add-ons,User Guide", - "title":"autoscaler", - "githuburl":"" - }, - { - "uri":"cce_01_0205.html", - "product_code":"cce", - "code":"187", - "des":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", - "doc_type":"usermanual2", - "kw":"metrics-server,Add-ons,User Guide", - "title":"metrics-server", - "githuburl":"" - }, - { - "uri":"cce_01_0141.html", - "product_code":"cce", - "code":"188", - "des":"gpu-beta is a device management add-on that supports GPUs in containers. It supports only NVIDIA Tesla drivers.This add-on is available only in certain regions.This add-o", - "doc_type":"usermanual2", - "kw":"gpu-beta,Add-ons,User Guide", - "title":"gpu-beta", - "githuburl":"" - }, - { - "uri":"cce_01_0207.html", - "product_code":"cce", - "code":"189", + "code":"150", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Auto Scaling", @@ -1890,9 +1500,9 @@ "githuburl":"" }, { - "uri":"cce_01_0279.html", + "uri":"cce_10_0279.html", "product_code":"cce", - "code":"190", + "code":"151", "des":"Auto scaling is a service that automatically and economically adjusts service resources based on your service requirements and configured policies.More and more applicati", "doc_type":"usermanual2", "kw":"Overview,Auto Scaling,User Guide", @@ -1900,9 +1510,9 @@ "githuburl":"" }, { - "uri":"cce_01_0293.html", + "uri":"cce_10_0293.html", "product_code":"cce", - "code":"191", + "code":"152", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Scaling a Workload", @@ -1910,19 +1520,19 @@ "githuburl":"" }, { - "uri":"cce_01_0290.html", + "uri":"cce_10_0290.html", "product_code":"cce", - "code":"192", - "des":"Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered,", + "code":"153", + "des":"HPA is a controller that controls horizontal pod scaling. 
HPA periodically checks the pod metrics, calculates the number of replicas required to meet the target values co", "doc_type":"usermanual2", "kw":"Workload Scaling Mechanisms,Scaling a Workload,User Guide", "title":"Workload Scaling Mechanisms", "githuburl":"" }, { - "uri":"cce_01_0208.html", + "uri":"cce_10_0208.html", "product_code":"cce", - "code":"193", + "code":"154", "des":"Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling t", "doc_type":"usermanual2", "kw":"Creating an HPA Policy for Workload Auto Scaling,Scaling a Workload,User Guide", @@ -1930,9 +1540,9 @@ "githuburl":"" }, { - "uri":"cce_01_0083.html", + "uri":"cce_10_0083.html", "product_code":"cce", - "code":"194", + "code":"155", "des":"After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.You can view the rules, status, and events of an HPA pol", "doc_type":"usermanual2", "kw":"Managing Workload Scaling Policies,Scaling a Workload,User Guide", @@ -1940,59 +1550,189 @@ "githuburl":"" }, { - "uri":"cce_01_0395.html", + "uri":"cce_10_0291.html", "product_code":"cce", - "code":"195", - "des":"CCE clusters of v1.15 or earlier support workload scaling based on AOM monitoring data. This function is no longer supported in CCE clusters of v1.17 or later.If you have", - "doc_type":"usermanual2", - "kw":"Switching from AOM to HPA for Auto Scaling,Scaling a Workload,User Guide", - "title":"Switching from AOM to HPA for Auto Scaling", - "githuburl":"" - }, - { - "uri":"cce_01_0291.html", - "product_code":"cce", - "code":"196", + "code":"156", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Scaling a Cluster/Node", - "title":"Scaling a Cluster/Node", + "kw":"Scaling a Node", + "title":"Scaling a Node", "githuburl":"" }, { - "uri":"cce_01_0296.html", + "uri":"cce_10_0296.html", "product_code":"cce", - "code":"197", + "code":"157", "des":"Kubernetes HPA is designed for pods. However, if the cluster resources are insufficient, you can only add nodes. Scaling of cluster nodes could be laborious. Now with clo", "doc_type":"usermanual2", - "kw":"Node Scaling Mechanisms,Scaling a Cluster/Node,User Guide", + "kw":"Node Scaling Mechanisms,Scaling a Node,User Guide", "title":"Node Scaling Mechanisms", "githuburl":"" }, { - "uri":"cce_01_0209.html", + "uri":"cce_10_0209.html", "product_code":"cce", - "code":"198", + "code":"158", "des":"CCE provides auto scaling through the autoscaler add-on. 
Nodes with different specifications can be automatically added across AZs on demand.If a node scaling policy and ", "doc_type":"usermanual2", - "kw":"Creating a Node Scaling Policy,Scaling a Cluster/Node,User Guide", + "kw":"Creating a Node Scaling Policy,Scaling a Node,User Guide", "title":"Creating a Node Scaling Policy", "githuburl":"" }, { - "uri":"cce_01_0063.html", + "uri":"cce_10_0063.html", "product_code":"cce", - "code":"199", + "code":"159", "des":"After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.You can view the associated node pool, rules, and scaling history of a ", "doc_type":"usermanual2", - "kw":"Managing Node Scaling Policies,Scaling a Cluster/Node,User Guide", + "kw":"Managing Node Scaling Policies,Scaling a Node,User Guide", "title":"Managing Node Scaling Policies", "githuburl":"" }, { - "uri":"cce_01_0164.html", + "uri":"cce_10_0300.html", "product_code":"cce", - "code":"200", + "code":"160", + "des":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "doc_type":"usermanual2", + "kw":"Using HPA and CA for Auto Scaling of Workloads and Nodes,Auto Scaling,User Guide", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0064.html", + "product_code":"cce", + "code":"161", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Add-ons", + "title":"Add-ons", + "githuburl":"" + }, + { + "uri":"cce_10_0277.html", + "product_code":"cce", + "code":"162", + "des":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", + "doc_type":"usermanual2", + "kw":"Overview,Add-ons,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0129.html", + "product_code":"cce", + "code":"163", + "des":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", + "doc_type":"usermanual2", + "kw":"coredns (System Resource Add-On, Mandatory),Add-ons,User Guide", + "title":"coredns (System Resource Add-On, Mandatory)", + "githuburl":"" + }, + { + "uri":"cce_10_0127.html", + "product_code":"cce", + "code":"164", + "des":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgradin", + "doc_type":"usermanual2", + "kw":"storage-driver (System Resource Add-On, Discarded),Add-ons,User Guide", + "title":"storage-driver (System Resource Add-On, Discarded)", + "githuburl":"" + }, + { + "uri":"cce_10_0066.html", + "product_code":"cce", + "code":"165", + "des":"Everest is a cloud native container storage system. 
Based on the Container Storage Interface (CSI), clusters of Kubernetes v1.15.6 or later obtain access to cloud storage", + "doc_type":"usermanual2", + "kw":"everest (System Resource Add-On, Mandatory),Add-ons,User Guide", + "title":"everest (System Resource Add-On, Mandatory)", + "githuburl":"" + }, + { + "uri":"cce_10_0132.html", + "product_code":"cce", + "code":"166", + "des":"node-problem-detector (npd for short) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon runnin", + "doc_type":"usermanual2", + "kw":"npd,Add-ons,User Guide", + "title":"npd", + "githuburl":"" + }, + { + "uri":"cce_10_0154.html", + "product_code":"cce", + "code":"167", + "des":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", + "doc_type":"usermanual2", + "kw":"autoscaler,Add-ons,User Guide", + "title":"autoscaler", + "githuburl":"" + }, + { + "uri":"cce_10_0205.html", + "product_code":"cce", + "code":"168", + "des":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", + "doc_type":"usermanual2", + "kw":"metrics-server,Add-ons,User Guide", + "title":"metrics-server", + "githuburl":"" + }, + { + "uri":"cce_10_0141.html", + "product_code":"cce", + "code":"169", + "des":"gpu-beta is a device management add-on that supports GPUs in containers. If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.The driver to be down", + "doc_type":"usermanual2", + "kw":"gpu-beta,Add-ons,User Guide", + "title":"gpu-beta", + "githuburl":"" + }, + { + "uri":"cce_10_0193.html", + "product_code":"cce", + "code":"170", + "des":"Volcano is a batch processing platform based on Kubernetes. It provides a series of features required by machine learning, deep learning, bioinformatics, genomics, and ot", + "doc_type":"usermanual2", + "kw":"volcano,Add-ons,User Guide", + "title":"volcano", + "githuburl":"" + }, + { + "uri":"cce_10_0019.html", + "product_code":"cce", + "code":"171", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Charts", + "title":"Charts", + "githuburl":"" + }, + { + "uri":"cce_10_0191.html", + "product_code":"cce", + "code":"172", + "des":"CCE provides a console for managing Helm charts, helping you easily deploy applications using the charts and manage applications on the console.Helm is a package manager ", + "doc_type":"usermanual2", + "kw":"Overview,Charts,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0146.html", + "product_code":"cce", + "code":"173", + "des":"On the CCE console, you can upload a Helm chart package, deploy it, and manage the deployed pods.The number of charts that can be uploaded by a single user is limited. Th", + "doc_type":"usermanual2", + "kw":"Deploying an Application from a Chart,Charts,User Guide", + "title":"Deploying an Application from a Chart", + "githuburl":"" + }, + { + "uri":"cce_10_0164.html", + "product_code":"cce", + "code":"174", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Permissions Management", @@ -2000,9 +1740,9 @@ "githuburl":"" }, { - "uri":"cce_01_0187.html", + "uri":"cce_10_0187.html", "product_code":"cce", - "code":"201", + "code":"175", "des":"CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Ma", "doc_type":"usermanual2", "kw":"Permissions Overview,Permissions Management,User Guide", @@ -2010,19 +1750,19 @@ "githuburl":"" }, { - "uri":"cce_01_0188.html", + "uri":"cce_10_0188.html", "product_code":"cce", - "code":"202", - "des":"CCE cluster permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permissions are ", + "code":"176", + "des":"CCE cluster-level permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permission", "doc_type":"usermanual2", "kw":"Cluster Permissions (IAM-based),Permissions Management,User Guide", "title":"Cluster Permissions (IAM-based)", "githuburl":"" }, { - "uri":"cce_01_0189.html", + "uri":"cce_10_0189.html", "product_code":"cce", - "code":"203", + "code":"177", "des":"You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kub", "doc_type":"usermanual2", "kw":"Namespace Permissions (Kubernetes RBAC-based),Permissions Management,User Guide", @@ -2030,19 +1770,69 @@ "githuburl":"" }, { - "uri":"cce_01_0275.html", + "uri":"cce_10_0245.html", "product_code":"cce", - "code":"204", - "des":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", + "code":"178", + "des":"The conventional distributed task scheduling mode is being replaced by Kubernetes. CCE allows you to easily deploy, manage, and scale containerized applications in the cl", "doc_type":"usermanual2", - "kw":"Pod Security Policies,Permissions Management,User Guide", - "title":"Pod Security Policies", + "kw":"Example: Designing and Configuring Permissions for Users in a Department,Permissions Management,User", + "title":"Example: Designing and Configuring Permissions for Users in a Department", "githuburl":"" }, { - "uri":"cce_01_0024.html", + "uri":"cce_10_0190.html", "product_code":"cce", - "code":"205", + "code":"179", + "des":"Some CCE permissions policies depend on the policies of other cloud services. To view or use other cloud resources on the CCE console, you need to enable the system polic", + "doc_type":"usermanual2", + "kw":"Permission Dependency of the CCE Console,Permissions Management,User Guide", + "title":"Permission Dependency of the CCE Console", + "githuburl":"" + }, + { + "uri":"cce_10_0465.html", + "product_code":"cce", + "code":"180", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Pod Security", + "title":"Pod Security", + "githuburl":"" + }, + { + "uri":"cce_10_0275.html", + "product_code":"cce", + "code":"181", + "des":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", + "doc_type":"usermanual2", + "kw":"Configuring a Pod Security Policy,Pod Security,User Guide", + "title":"Configuring a Pod Security Policy", + "githuburl":"" + }, + { + "uri":"cce_10_0466.html", + "product_code":"cce", + "code":"182", + "des":"Before using Pod Security Admission, you need to understand Kubernetes Pod Security Standards. These standards define different isolation levels for pods. They let you de", + "doc_type":"usermanual2", + "kw":"Configuring Pod Security Admission,Pod Security,User Guide", + "title":"Configuring Pod Security Admission", + "githuburl":"" + }, + { + "uri":"cce_10_0477_0.html", + "product_code":"cce", + "code":"183", + "des":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "doc_type":"usermanual2", + "kw":"Service Account Token Security Improvement,Permissions Management,User Guide", + "title":"Service Account Token Security Improvement", + "githuburl":"" + }, + { + "uri":"cce_10_0024.html", + "product_code":"cce", + "code":"184", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Cloud Trace Service (CTS)", @@ -2050,9 +1840,9 @@ "githuburl":"" }, { - "uri":"cce_01_0025.html", + "uri":"cce_10_0025.html", "product_code":"cce", - "code":"206", + "code":"185", "des":"Cloud Trace Service (CTS) records operations on cloud service resources, allowing users to query, audit, and backtrack the resource operation requests initiated from the ", "doc_type":"usermanual2", "kw":"CCE Operations Supported by CTS,Cloud Trace Service (CTS),User Guide", @@ -2060,9 +1850,9 @@ "githuburl":"" }, { - "uri":"cce_01_0026.html", + "uri":"cce_10_0026.html", "product_code":"cce", - "code":"207", + "code":"186", "des":"After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.Trace Sour", "doc_type":"usermanual2", "kw":"Querying CTS Logs,Cloud Trace Service (CTS),User Guide", @@ -2070,29 +1860,269 @@ "githuburl":"" }, { - "uri":"cce_faq_0083.html", + "uri":"cce_10_0305.html", + "product_code":"cce", + "code":"187", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Storage (FlexVolume)", + "title":"Storage (FlexVolume)", + "githuburl":"" + }, + { + "uri":"cce_10_0306.html", + "product_code":"cce", + "code":"188", + "des":"In container storage, you can use different types of volumes and mount them to containers in pods as many as you want.In CCE, container storage is backed both by Kubernet", + "doc_type":"usermanual2", + "kw":"FlexVolume Overview,Storage (FlexVolume),User Guide", + "title":"FlexVolume Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0343.html", + "product_code":"cce", + "code":"189", + "des":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "doc_type":"usermanual2", + "kw":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?,Storage", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "githuburl":"" + }, + { + "uri":"cce_10_0309.html", + "product_code":"cce", + "code":"190", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using EVS Disks as Storage Volumes", + "title":"Using EVS Disks as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0310.html", + "product_code":"cce", + "code":"191", + "des":"To achieve persistent storage, CCE allows you to mount the storage volumes created from Elastic Volume Service (EVS) disks to a path of a container. When the container is", + "doc_type":"usermanual2", + "kw":"Overview,Using EVS Disks as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0312.html", + "product_code":"cce", + "code":"192", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-evs-auto-example.yamlvi pvc-evs-auto-example.yamlExample YAML file for clu", + "doc_type":"usermanual2", + "kw":"(kubectl) Automatically Creating an EVS Disk,Using EVS Disks as Storage Volumes,User Guide", + "title":"(kubectl) Automatically Creating an EVS Disk", + "githuburl":"" + }, + { + "uri":"cce_10_0313.html", + "product_code":"cce", + "code":"193", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-evs-example.yaml pvc-evs-example.yamlClusters from v1.11.7 to v1.13Example ", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing EVS Disk,Using EVS Disks as Storage Volumes,User Guide", + "title":"(kubectl) Creating a PV from an Existing EVS Disk", + "githuburl":"" + }, + { + "uri":"cce_10_0314.html", + "product_code":"cce", + "code":"194", + "des":"After an EVS volume is created or imported to CCE, you can mount it to a workload.EVS disks cannot be attached across AZs. 
Before mounting a volume, you can run the kubec", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Pod Mounted with an EVS Volume,Using EVS Disks as Storage Volumes,User Guide", + "title":"(kubectl) Creating a Pod Mounted with an EVS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0329.html", + "product_code":"cce", + "code":"195", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using SFS Turbo File Systems as Storage Volumes", + "title":"Using SFS Turbo File Systems as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0330.html", + "product_code":"cce", + "code":"196", + "des":"CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable fo", + "doc_type":"usermanual2", + "kw":"Overview,Using SFS Turbo File Systems as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0332.html", + "product_code":"cce", + "code":"197", + "des":"CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). After the creation is successful, you can create a PersistentVolumeClaim (PVC) ", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing SFS Turbo File System,Using SFS Turbo File Systems as Stora", + "title":"(kubectl) Creating a PV from an Existing SFS Turbo File System", + "githuburl":"" + }, + { + "uri":"cce_10_0333.html", + "product_code":"cce", + "code":"198", + "des":"After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume,Using SFS Turbo File Systems as Sto", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0334.html", + "product_code":"cce", + "code":"199", + "des":"CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch efs", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume,Using SFS Turbo File Systems as St", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0322.html", + "product_code":"cce", + "code":"200", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using OBS Buckets as Storage Volumes", + "title":"Using OBS Buckets as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0323.html", + "product_code":"cce", + "code":"201", + "des":"CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. 
Object storage is commonly used in cloud w", + "doc_type":"usermanual2", + "kw":"Overview,Using OBS Buckets as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0325.html", + "product_code":"cce", + "code":"202", + "des":"During the use of OBS, expected OBS buckets can be automatically created and mounted as volumes. Currently, standard and infrequent access OBS buckets are supported, whic", + "doc_type":"usermanual2", + "kw":"(kubectl) Automatically Creating an OBS Volume,Using OBS Buckets as Storage Volumes,User Guide", + "title":"(kubectl) Automatically Creating an OBS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0326.html", + "product_code":"cce", + "code":"203", + "des":"CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). You can create a PersistentVolumeClaim (PVC) and bind it to the PV.The following configura", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing OBS Bucket,Using OBS Buckets as Storage Volumes,User Guide", + "title":"(kubectl) Creating a PV from an Existing OBS Bucket", + "githuburl":"" + }, + { + "uri":"cce_10_0327.html", + "product_code":"cce", + "code":"204", + "des":"After an OBS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Deployment Mounted with an OBS Volume,Using OBS Buckets as Storage Volumes,User", + "title":"(kubectl) Creating a Deployment Mounted with an OBS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0328.html", + "product_code":"cce", + "code":"205", + "des":"CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a StatefulSet Mounted with an OBS Volume,Using OBS Buckets as Storage Volumes,Use", + "title":"(kubectl) Creating a StatefulSet Mounted with an OBS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0315.html", + "product_code":"cce", + "code":"206", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Using SFS File Systems as Storage Volumes", + "title":"Using SFS File Systems as Storage Volumes", + "githuburl":"" + }, + { + "uri":"cce_10_0316.html", + "product_code":"cce", + "code":"207", + "des":"CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. 
SFS volumes are commonly used in ReadWr", + "doc_type":"usermanual2", + "kw":"Overview,Using SFS File Systems as Storage Volumes,User Guide", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"cce_10_0318.html", "product_code":"cce", "code":"208", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-sfs-auto-example.yamlvi pvc-sfs-auto-example.yamlExample YAML file:apiVers", + "doc_type":"usermanual2", + "kw":"(kubectl) Automatically Creating an SFS Volume,Using SFS File Systems as Storage Volumes,User Guide", + "title":"(kubectl) Automatically Creating an SFS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0319.html", + "product_code":"cce", + "code":"209", + "des":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-sfs-example.yaml pvc-sfs-example.yamlClusters from v1.11 to v1.13Example YA", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a PV from an Existing SFS File System,Using SFS File Systems as Storage Volumes,U", + "title":"(kubectl) Creating a PV from an Existing SFS File System", + "githuburl":"" + }, + { + "uri":"cce_10_0320.html", + "product_code":"cce", + "code":"210", + "des":"After an SFS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a Deployment Mounted with an SFS Volume,Using SFS File Systems as Storage Volumes", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Volume", + "githuburl":"" + }, + { + "uri":"cce_10_0321.html", + "product_code":"cce", + "code":"211", + "des":"CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "doc_type":"usermanual2", + "kw":"(kubectl) Creating a StatefulSet Mounted with an SFS Volume,Using SFS File Systems as Storage Volume", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Volume", + "githuburl":"" + }, + { + "uri":"cce_faq_0083.html", + "product_code":"cce", + "code":"212", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Reference", "title":"Reference", "githuburl":"" }, - { - "uri":"cce_faq_00006.html", - "product_code":"cce", - "code":"209", - "des":"Cloud Container Engine (CCE) provides highly scalable, high-performance, enterprise-class Kubernetes clusters and supports Docker containers. With CCE, you can easily dep", - "doc_type":"usermanual2", - "kw":"Checklist for Migrating Containerized Applications to the Cloud,Reference,User Guide", - "title":"Checklist for Migrating Containerized Applications to the Cloud", - "githuburl":"" - }, { "uri":"cce_01_0203.html", "product_code":"cce", - "code":"210", + "code":"213", "des":"When a node is added, EIP is set to Automatically assign. 
The node cannot be created, and a message indicating that EIPs are insufficient is displayed.Two methods are ava", "doc_type":"usermanual2", "kw":"How Do I Troubleshoot Insufficient EIPs When a Node Is Added?,Reference,User Guide", @@ -2102,7 +2132,7 @@ { "uri":"cce_01_0204.html", "product_code":"cce", - "code":"211", + "code":"214", "des":"Before using command line injection, write a script that can format data disks and save it to your OBS bucket. Then, inject a command line that will automatically execute", "doc_type":"usermanual2", "kw":"How Do I Format a Data Disk Using Command Line Injection?,Reference,User Guide", @@ -2112,7 +2142,7 @@ { "uri":"cce_01_0999.html", "product_code":"cce", - "code":"212", + "code":"215", "des":"After a cluster of v1.13.10 is created, you can use heapster only after rbac is enabled.kubectl delete clusterrole system:heapsterCopy the following file to a server on w", "doc_type":"usermanual2", "kw":"How Do I Use heapster in Clusters of v1.13.10?,Reference,User Guide", @@ -2122,7 +2152,7 @@ { "uri":"cce_faq_00096.html", "product_code":"cce", - "code":"213", + "code":"216", "des":"Currently, private CCE clusters use Device Mapper as the Docker storage driver.Device Mapper is developed based on the kernel framework and supports many advanced volume ", "doc_type":"usermanual2", "kw":"How Do I Change the Mode of the Docker Device Mapper?,Reference,User Guide", @@ -2132,7 +2162,7 @@ { "uri":"cce_faq_00120.html", "product_code":"cce", - "code":"214", + "code":"217", "des":"If the cluster status is available but some nodes in the cluster are unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Node Is ", "doc_type":"usermanual2", "kw":"monitrc,What Can I Do If My Cluster Status Is Available but the Node Status Is Unavailable?,Referenc", @@ -2142,7 +2172,7 @@ { "uri":"cce_faq_00039.html", "product_code":"cce", - "code":"215", + "code":"218", "des":"If the cluster is Unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Security Group Is ModifiedCheck Item 2: Whether the DHCP Fu", "doc_type":"usermanual2", "kw":"How Do I Rectify the Fault When the Cluster Status Is Unavailable?,Reference,User Guide", @@ -2152,7 +2182,7 @@ { "uri":"cce_faq_00099.html", "product_code":"cce", - "code":"216", + "code":"219", "des":"This section uses the Nginx workload as an example to describe how to set the workload access type to LoadBalancer (ELB).An ELB has been created.You have connected an Ela", "doc_type":"usermanual2", "kw":"ECS,kubectl,How Do I Use kubectl to Set the Workload Access Type to LoadBalancer (ELB)?,Reference,Us", @@ -2162,7 +2192,7 @@ { "uri":"cce_faq_00190.html", "product_code":"cce", - "code":"217", + "code":"220", "des":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).Before using this feature, write a script that can format data disks and save it to ", "doc_type":"usermanual2", "kw":"How Do I Add a Second Data Disk to a Node in a CCE Cluster?,Reference,User Guide", @@ -2172,7 +2202,7 @@ { "uri":"cce_faq_00029.html", "product_code":"cce", - "code":"218", + "code":"221", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Workload Abnormalities", @@ -2182,7 +2212,7 @@ { "uri":"cce_faq_00134.html", "product_code":"cce", - "code":"219", + "code":"222", "des":"If a workload is running improperly, you can view events to determine the cause.On the CCE console, choose Workloads > Deployments or StatefulSets in the navigation pane ", "doc_type":"usermanual2", "kw":"Fault Locating and Troubleshooting for Abnormal Workloads,Workload Abnormalities,User Guide", @@ -2192,7 +2222,7 @@ { "uri":"cce_faq_00098.html", "product_code":"cce", - "code":"220", + "code":"223", "des":"Viewing K8s Event InformationCheck Item 1: Checking Whether a Node Is Available in the ClusterCheck Item 2: Checking Whether Node Resources (CPU and Memory) Are Sufficien", "doc_type":"usermanual2", "kw":"workload,InstanceSchedulingFailed,Failed to Schedule an Instance,Workload Abnormalities,User Guide", @@ -2202,7 +2232,7 @@ { "uri":"cce_faq_00015.html", "product_code":"cce", - "code":"221", + "code":"224", "des":"If the workload details page displays an event indicating that image pulling fails, perform the following operations to locate the fault:Check Item 1: Checking Whether im", "doc_type":"usermanual2", "kw":"workload,Failed to Pull an Image,Workload Abnormalities,User Guide", @@ -2212,7 +2242,7 @@ { "uri":"cce_faq_00018.html", "product_code":"cce", - "code":"222", + "code":"225", "des":"On the details page of a workload, if an event is displayed indicating that the container fails to be restarted, perform the following operations to locate the fault:Rect", "doc_type":"usermanual2", "kw":"Failed to Restart a Container,Workload Abnormalities,User Guide", @@ -2222,7 +2252,7 @@ { "uri":"cce_faq_00209.html", "product_code":"cce", - "code":"223", + "code":"226", "des":"Pod actions are classified into the following two types:kube-controller-manager periodically checks the status of all nodes. If a node is in the NotReady state for a peri", "doc_type":"usermanual2", "kw":"What Should I Do If An Evicted Pod Exists?,Workload Abnormalities,User Guide", @@ -2232,7 +2262,7 @@ { "uri":"cce_faq_00140.html", "product_code":"cce", - "code":"224", + "code":"227", "des":"When a node is faulty, pods on the node are evicted to ensure workload availability. If the pods are not evicted when the node is faulty, perform the following steps:Use ", "doc_type":"usermanual2", "kw":"Instance Eviction Exception,Workload Abnormalities,User Guide", @@ -2242,7 +2272,7 @@ { "uri":"cce_faq_00210.html", "product_code":"cce", - "code":"225", + "code":"228", "des":"When a node is in the Unavailable state, CCE migrates container pods on the node and sets the pods running on the node to the Terminating state.After the node is restored", "doc_type":"usermanual2", "kw":"What Should I Do If Pods in the Terminating State Cannot Be Deleted?,Workload Abnormalities,User Gui", @@ -2252,7 +2282,7 @@ { "uri":"cce_faq_00012.html", "product_code":"cce", - "code":"226", + "code":"229", "des":"The metadata.enable field in the YAML file of the workload is false. 
As a result, the pod of the workload is deleted and the workload is in the stopped status.The workloa", "doc_type":"usermanual2", "kw":"What Should I Do If a Workload Is Stopped Caused by Pod Deletion?,Workload Abnormalities,User Guide", @@ -2262,7 +2292,7 @@ { "uri":"cce_faq_00005.html", "product_code":"cce", - "code":"227", + "code":"230", "des":"The pod remains in the creating state for a long time, and the sandbox-related errors are reported.Select a troubleshooting method for your cluster:Clusters of V1.13 or l", "doc_type":"usermanual2", "kw":"What Should I Do If Sandbox-Related Errors Are Reported When the Pod Remains in the Creating State?,", @@ -2272,7 +2302,7 @@ { "uri":"cce_faq_00199.html", "product_code":"cce", - "code":"228", + "code":"231", "des":"Workload pods in the cluster fail and are being redeployed constantly.After the following command is run, the command output shows that many pods are in the evicted state", "doc_type":"usermanual2", "kw":"What Should I Do If a Pod Is in the Evicted State?,Workload Abnormalities,User Guide", @@ -2282,7 +2312,7 @@ { "uri":"cce_faq_00002.html", "product_code":"cce", - "code":"229", + "code":"232", "des":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", "doc_type":"usermanual2", "kw":"What Should I Do If the OOM Killer Is Triggered When a Container Uses Memory Resources More Than Lim", @@ -2292,53 +2322,23 @@ { "uri":"cce_faq_00202.html", "product_code":"cce", - "code":"230", + "code":"233", "des":"A workload can be accessed from public networks through a load balancer. LoadBalancer provides higher reliability than EIP-based NodePort because an EIP is no longer boun", "doc_type":"usermanual2", "kw":"What Should I Do If a Service Released in a Workload Cannot Be Accessed from Public Networks?,Refere", "title":"What Should I Do If a Service Released in a Workload Cannot Be Accessed from Public Networks?", "githuburl":"" }, - { - "uri":"cce_bestpractice_00162.html", - "product_code":"cce", - "code":"231", - "des":"CCE uses high-performance container networking add-ons, which support the tunnel network and VPC network models.After a cluster is created, the network model cannot be ch", - "doc_type":"usermanual2", - "kw":"Selecting a Network Model When Creating a Cluster on CCE,Reference,User Guide", - "title":"Selecting a Network Model When Creating a Cluster on CCE", - "githuburl":"" - }, - { - "uri":"cce_bestpractice_00004.html", - "product_code":"cce", - "code":"232", - "des":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This secti", - "doc_type":"usermanual2", - "kw":"Planning CIDR Blocks for a CCE Cluster,Reference,User Guide", - "title":"Planning CIDR Blocks for a CCE Cluster", - "githuburl":"" - }, { "uri":"cce_faq_00266.html", "product_code":"cce", - "code":"233", + "code":"234", "des":"A VPC is similar to a private local area network (LAN) managed by a home gateway whose IP address is 192.168.0.0/16. 
A VPC is a private network built on the cloud and pro", "doc_type":"usermanual2", "kw":"VPC,cluster,nodes,What Is the Relationship Between Clusters, VPCs, and Subnets?,Reference,User Guide", "title":"What Is the Relationship Between Clusters, VPCs, and Subnets?", "githuburl":"" }, - { - "uri":"cce_bestpractice_0107.html", - "product_code":"cce", - "code":"234", - "des":"For clusters of v1.15.11-r1 and later, the CSI everest add-on has taken over all functions of the fuxi FlexVolume driver (the storage-driver add-on) for container storage", - "doc_type":"usermanual2", - "kw":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?,Referen", - "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", - "githuburl":"" - }, { "uri":"cce_faq_00265.html", "product_code":"cce", @@ -2350,11 +2350,451 @@ "githuburl":"" }, { - "uri":"cce_01_9999.html", + "uri":"cce_bestpractice.html", "product_code":"cce", "code":"236", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", + "kw":"Best Practice", + "title":"Best Practice", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00006.html", + "product_code":"cce", + "code":"237", + "des":"Security, efficiency, stability, and availability are common requirements on all cloud services. To meet these requirements, the system availability, data reliability, an", + "doc_type":"usermanual2", + "kw":"Checklist for Deploying Containerized Applications in the Cloud,Best Practice,User Guide", + "title":"Checklist for Deploying Containerized Applications in the Cloud", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00237.html", + "product_code":"cce", + "code":"238", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Migration", + "title":"Migration", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0306.html", + "product_code":"cce", + "code":"239", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Migrating On-premises Kubernetes Clusters to CCE", + "title":"Migrating On-premises Kubernetes Clusters to CCE", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0307.html", + "product_code":"cce", + "code":"240", + "des":"Containers are growing in popularity and Kubernetes simplifies containerized deployment. Many companies choose to build their own Kubernetes clusters. However, the O&M wo", + "doc_type":"usermanual2", + "kw":"Solution Overview,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Solution Overview", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0308.html", + "product_code":"cce", + "code":"241", + "des":"CCE allows you to customize cluster resources to meet various service requirements. 
Table 1 lists the key performance parameters of a cluster and provides the planned val", + "doc_type":"usermanual2", + "kw":"Planning Resources for the Target Cluster,Migrating On-premises Kubernetes Clusters to CCE,User Guid", + "title":"Planning Resources for the Target Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0309.html", + "product_code":"cce", + "code":"242", + "des":"If your migration does not involve resources outside a cluster listed in Table 1 or you do not need to use other services to update resources after the migration, skip th", + "doc_type":"usermanual2", + "kw":"Migrating Resources Outside a Cluster,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Migrating Resources Outside a Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0310.html", + "product_code":"cce", + "code":"243", + "des":"Velero is an open-source backup and migration tool for Kubernetes clusters. It integrates the persistent volume (PV) data backup capability of the Restic tool and can be ", + "doc_type":"usermanual2", + "kw":"Installing the Migration Tool,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Installing the Migration Tool", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0311.html", + "product_code":"cce", + "code":"244", + "des":"WordPress is used as an example to describe how to migrate an application from an on-premises Kubernetes cluster to a CCE cluster. The WordPress application consists of t", + "doc_type":"usermanual2", + "kw":"Migrating Resources in a Cluster,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Migrating Resources in a Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0312.html", + "product_code":"cce", + "code":"245", + "des":"The WordPress and MySQL images used in this example can be pulled from SWR. Therefore, the image pull failure (ErrImagePull) will not occur. If the application to be migr", + "doc_type":"usermanual2", + "kw":"Updating Resources Accordingly,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Updating Resources Accordingly", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0313.html", + "product_code":"cce", + "code":"246", + "des":"Cluster migration involves full migration of application data, which may cause intra-application adaptation problems. In this example, after the cluster is migrated, the ", + "doc_type":"usermanual2", + "kw":"Performing Additional Tasks,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Performing Additional Tasks", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0314.html", + "product_code":"cce", + "code":"247", + "des":"Both HostPath and Local volumes are local storage volumes. However, the Restic tool integrated in Velero cannot back up the PVs of the HostPath type and supports only the", + "doc_type":"usermanual2", + "kw":"Troubleshooting,Migrating On-premises Kubernetes Clusters to CCE,User Guide", + "title":"Troubleshooting", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0322.html", + "product_code":"cce", + "code":"248", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"DevOps", + "title":"DevOps", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0324.html", + "product_code":"cce", + "code":"249", + "des":"GitLab is an open-source version management system developed with Ruby on Rails for Git project repository management. It supports web-based access to public and private ", + "doc_type":"usermanual2", + "kw":"Interconnecting GitLab with SWR and CCE for CI/CD,DevOps,User Guide", + "title":"Interconnecting GitLab with SWR and CCE for CI/CD", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0323.html", + "product_code":"cce", + "code":"250", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Disaster Recovery", + "title":"Disaster Recovery", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00220.html", + "product_code":"cce", + "code":"251", + "des":"To achieve high availability for your CCE containers, you can do as follows:Deploy three master nodes for the cluster.When nodes are deployed across AZs, set custom sched", + "doc_type":"usermanual2", + "kw":"Implementing High Availability for Containers in CCE,Disaster Recovery,User Guide", + "title":"Implementing High Availability for Containers in CCE", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0315.html", + "product_code":"cce", + "code":"252", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Security", + "title":"Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0317.html", + "product_code":"cce", + "code":"253", + "des":"For security purposes, you are advised to configure a cluster as follows.Kubernetes releases a major version in about four months. CCE follows the same frequency as Kuber", + "doc_type":"usermanual2", + "kw":"Cluster Security,Security,User Guide", + "title":"Cluster Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0318.html", + "product_code":"cce", + "code":"254", + "des":"Do not bind an EIP to a node unless necessary to reduce the attack surface.If an EIP must be used, properly configure the firewall or security group rules to restrict acc", + "doc_type":"usermanual2", + "kw":"Node Security,Security,User Guide", + "title":"Node Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0319.html", + "product_code":"cce", + "code":"255", + "des":"The nodeSelector or nodeAffinity is used to limit the range of nodes to which applications can be scheduled, preventing the entire cluster from being threatened due to th", + "doc_type":"usermanual2", + "kw":"Container Security,Security,User Guide", + "title":"Container Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0320.html", + "product_code":"cce", + "code":"256", + "des":"Currently, CCE has configured static encryption for secret resources. The secrets created by users will be encrypted and stored in etcd of the CCE cluster. 
Secrets can be", + "doc_type":"usermanual2", + "kw":"Secret Security,Security,User Guide", + "title":"Secret Security", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0090.html", + "product_code":"cce", + "code":"257", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Auto Scaling", + "title":"Auto Scaling", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00282.html", + "product_code":"cce", + "code":"258", + "des":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "doc_type":"usermanual2", + "kw":"Using HPA and CA for Auto Scaling of Workloads and Nodes,Auto Scaling,User Guide", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0050.html", + "product_code":"cce", + "code":"259", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Cluster", + "title":"Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00254.html", + "product_code":"cce", + "code":"260", + "des":"When you have multiple CCE clusters, you may find it difficult to efficiently connect to all of them.This section describes how to configure access to multiple clusters b", + "doc_type":"usermanual2", + "kw":"Connecting to Multiple Clusters Using kubectl,Cluster,User Guide", + "title":"Connecting to Multiple Clusters Using kubectl", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00190.html", + "product_code":"cce", + "code":"261", + "des":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", + "doc_type":"usermanual2", + "kw":"Adding a Second Data Disk to a Node in a CCE Cluster,Cluster,User Guide", + "title":"Adding a Second Data Disk to a Node in a CCE Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0052.html", + "product_code":"cce", + "code":"262", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Networking", + "title":"Networking", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00004.html", + "product_code":"cce", + "code":"263", + "des":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This topic", + "doc_type":"usermanual2", + "kw":"Planning CIDR Blocks for a Cluster,Networking,User Guide", + "title":"Planning CIDR Blocks for a Cluster", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00162.html", + "product_code":"cce", + "code":"264", + "des":"CCE uses self-proprietary, high-performance container networking add-ons to support the tunnel network, Cloud Native Network 2.0, and VPC network models.After a cluster i", + "doc_type":"usermanual2", + "kw":"Selecting a Network Model,Networking,User Guide", + "title":"Selecting a Network Model", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00231.html", + "product_code":"cce", + "code":"265", + "des":"Session persistence is one of the most common while complex problems in load balancing.Session persistence is also called sticky sessions. After the sticky session functi", + "doc_type":"usermanual2", + "kw":"Implementing Sticky Session Through Load Balancing,Networking,User Guide", + "title":"Implementing Sticky Session Through Load Balancing", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00035.html", + "product_code":"cce", + "code":"266", + "des":"There may be different types of proxy servers between a client and a container server. How can a container obtain the real source IP address of the client? This section d", + "doc_type":"usermanual2", + "kw":"Obtaining the Client Source IP Address for a Container,Networking,User Guide", + "title":"Obtaining the Client Source IP Address for a Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0053.html", + "product_code":"cce", + "code":"267", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Storage", + "title":"Storage", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00198.html", + "product_code":"cce", + "code":"268", + "des":"A data disk is divided depending on the container storage Rootfs:Overlayfs: No independent thin pool is allocated. Image data is stored in the dockersys disk.# lsblk\nNAME", + "doc_type":"usermanual2", + "kw":"Expanding Node Disk Capacity,Storage,User Guide", + "title":"Expanding Node Disk Capacity", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00199.html", + "product_code":"cce", + "code":"269", + "des":"This section describes how to mount OBS buckets and OBS parallel file systems (preferred) of third-party tenants.The CCE cluster of a SaaS service provider needs to be mo", + "doc_type":"usermanual2", + "kw":"Mounting an Object Storage Bucket of a Third-Party Tenant,Storage,User Guide", + "title":"Mounting an Object Storage Bucket of a Third-Party Tenant", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00253_0.html", + "product_code":"cce", + "code":"270", + "des":"The minimum capacity of an SFS Turbo file system is 500 GB, and the SFS Turbo file system cannot be billed by usage. 
By default, the root directory of an SFS Turbo file s", + "doc_type":"usermanual2", + "kw":"Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System,Storage,User Guide", + "title":"Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0107.html", + "product_code":"cce", + "code":"271", + "des":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "doc_type":"usermanual2", + "kw":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?,Storage", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00281_0.html", + "product_code":"cce", + "code":"272", + "des":"When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The ", + "doc_type":"usermanual2", + "kw":"Custom Storage Classes,Storage,User Guide", + "title":"Custom Storage Classes", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00284.html", + "product_code":"cce", + "code":"273", + "des":"EVS disks cannot be attached across AZs. For example, EVS disks in AZ 1 cannot be attached to nodes in AZ 2.If the storage class csi-disk is used for StatefulSets, when a", + "doc_type":"usermanual2", + "kw":"Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology),St", + "title":"Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology)", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0051.html", + "product_code":"cce", + "code":"274", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Container", + "title":"Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00002.html", + "product_code":"cce", + "code":"275", + "des":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", + "doc_type":"usermanual2", + "kw":"Properly Allocating Container Computing Resources,Container,User Guide", + "title":"Properly Allocating Container Computing Resources", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00227.html", + "product_code":"cce", + "code":"276", + "des":"To access a Kubernetes cluster from a client, you can use the Kubernetes command line tool kubectl.Create a DaemonSet file.vi daemonSet.yamlAn example YAML file is provid", + "doc_type":"usermanual2", + "kw":"Modifying Kernel Parameters Using a Privileged Container,Container,User Guide", + "title":"Modifying Kernel Parameters Using a Privileged Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00228.html", + "product_code":"cce", + "code":"277", + "des":"Before containers running applications are started, one or some init containers are started first. 
If there are multiple init containers, they will be started in the defi", + "doc_type":"usermanual2", + "kw":"Initializing a Container,Container,User Guide", + "title":"Initializing a Container", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_00226.html", + "product_code":"cce", + "code":"278", + "des":"If DNS or other related settings are inappropriate, you can use hostAliases to overwrite the resolution of the host name at the pod level when adding entries to the /etc/", + "doc_type":"usermanual2", + "kw":"Using hostAliases to Configure /etc/hosts in a Pod,Container,User Guide", + "title":"Using hostAliases to Configure /etc/hosts in a Pod", + "githuburl":"" + }, + { + "uri":"cce_bestpractice_0325.html", + "product_code":"cce", + "code":"279", + "des":"Linux allows you to create a core dump file if an application crashes, which contains the data the application had in memory at the time of the crash. You can analyze the", + "doc_type":"usermanual2", + "kw":"Configuring Core Dumps,Container,User Guide", + "title":"Configuring Core Dumps", + "githuburl":"" + }, + { + "uri":"cce_01_9999.html", + "product_code":"cce", + "code":"280", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", "kw":"Migrating Data from CCE 1.0 to CCE 2.0", "title":"Migrating Data from CCE 1.0 to CCE 2.0", "githuburl":"" @@ -2362,7 +2802,7 @@ { "uri":"cce_01_9998.html", "product_code":"cce", - "code":"237", + "code":"281", "des":"CCE 2.0 inherits and modifies the features of CCE 1.0, and release new features.Modified features:Clusters in CCE 1.0 are equivalent to Hybrid clusters in CCE 2.0.CCE 2.0", "doc_type":"usermanual2", "kw":"Differences Between CCE 1.0 and CCE 2.0,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2372,7 +2812,7 @@ { "uri":"cce_01_9997.html", "product_code":"cce", - "code":"238", + "code":"282", "des":"Migrate the images stored in the image repository of CCE 1.0 to CCE 2.0.A VM is available. The VM is bound to a public IP address and can access the Internet. Docker (ear", "doc_type":"usermanual2", "kw":"Migrating Images,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2382,7 +2822,7 @@ { "uri":"cce_01_9996.html", "product_code":"cce", - "code":"239", + "code":"283", "des":"Create Hybrid clusters on the CCE 2.0 console. These new Hybrid clusters should have the same specifications with those created on CCE 1.0.To create clusters using APIs, ", "doc_type":"usermanual2", "kw":"Migrating Clusters,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2392,7 +2832,7 @@ { "uri":"cce_01_9995.html", "product_code":"cce", - "code":"240", + "code":"284", "des":"This section describes how to create a Deployment with the same specifications as that in CCE 1.0 on the CCE 2.0 console.It is advised to delete the applications on CCE 1", "doc_type":"usermanual2", "kw":"Migrating Applications,Migrating Data from CCE 1.0 to CCE 2.0,User Guide", @@ -2402,7 +2842,7 @@ { "uri":"cce_01_0300.html", "product_code":"cce", - "code":"241", + "code":"285", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Change History,User Guide", diff --git a/docs/cce/umn/CLASS.TXT.json b/docs/cce/umn/CLASS.TXT.json index bc280604..afd0e381 100644 --- a/docs/cce/umn/CLASS.TXT.json +++ b/docs/cce/umn/CLASS.TXT.json @@ -21,7 +21,7 @@ "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Product Bulletin", - "uri":"cce_01_0236.html", + "uri":"cce_bulletin_0000.html", "doc_type":"usermanual2", "p_code":"", "code":"3" @@ -36,7 +36,7 @@ "code":"4" }, { - "desc":"This section describes the Kubernetes version support mechanism of CCE.Version number: The format is x.y.z-r{n}, where x.y is the major version and z is the minor version", + "desc":"This section explains versioning in CCE, and the policies for Kubernetes version support.Version number: The format is x.y.z, where x.y is the major version and z is the ", "product_code":"cce", "title":"Kubernetes Version Support Mechanism", "uri":"cce_bulletin_0003.html", @@ -54,7 +54,7 @@ "code":"6" }, { - "desc":"CCE nodes in Hybrid clusters can run on EulerOS 2.2, EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches", + "desc":"CCE nodes in Hybrid clusters can run on EulerOS 2.5, EulerOS 2.9 and CentOS 7.7. The following table lists the supported patches for these OSs.The OS patches and verifica", "product_code":"cce", "title":"OS Patch Notes for Cluster Nodes", "uri":"cce_bulletin_0301.html", @@ -71,6 +71,15 @@ "p_code":"3", "code":"8" }, + { + "desc":"High-risk vulnerabilities:CCE fixes vulnerabilities as soon as possible after the Kubernetes community detects them and releases fixing solutions. The fixing policies are", + "product_code":"cce", + "title":"Vulnerability Fixing Policies", + "uri":"cce_bulletin_0011.html", + "doc_type":"usermanual2", + "p_code":"8", + "code":"9" + }, { "desc":"Recently, a security research team disclosed a privilege escalation vulnerability (CVE-2021-4034, also dubbed PwnKit) in PolKit's pkexec. Unprivileged users can gain full", "product_code":"cce", @@ -78,7 +87,7 @@ "uri":"CVE-2021-4034.html", "doc_type":"usermanual2", "p_code":"8", - "code":"9" + "code":"10" }, { "desc":"The Linux Kernel SACK vulnerabilities have been fixed. This section describes the solution to these vulnerabilities.On June 18, 2019, Red Hat released a security notice, ", @@ -87,7 +96,16 @@ "uri":"cce_bulletin_0206.html", "doc_type":"usermanual2", "p_code":"8", - "code":"10" + "code":"11" + }, + { + "desc":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "product_code":"cce", + "title":"Service Account Token Security Improvement", + "uri":"cce_10_0477.html", + "doc_type":"usermanual2", + "p_code":"3", + "code":"12" }, { "desc":"CCE works closely with multiple cloud services to support computing, storage, networking, and monitoring functions. 
When you log in to the CCE console for the first time,", @@ -96,1772 +114,1790 @@ "uri":"cce_01_9994.html", "doc_type":"usermanual2", "p_code":"", - "code":"11" + "code":"13" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Clusters", - "uri":"cce_01_0027.html", + "uri":"cce_10_0091.html", "doc_type":"usermanual2", "p_code":"", - "code":"12" + "code":"14" }, { - "desc":"Kubernetes is a containerized application software system that can be easily deployed and managed. It facilitates container scheduling and orchestration.For application d", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Cluster Overview", - "uri":"cce_01_0002.html", + "uri":"cce_10_0002.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"13" + "p_code":"14", + "code":"15" + }, + { + "desc":"Kubernetes allows you to easily deploy and manage containerized application and facilitates container scheduling and orchestration.For developers, Kubernetes is a cluster", + "product_code":"cce", + "title":"Basic Cluster Information", + "uri":"cce_10_0430.html", + "doc_type":"usermanual2", + "p_code":"15", + "code":"16" }, { "desc":"The following table lists the differences between CCE Turbo clusters and CCE clusters:The QingTian architecture consists of data plane (software-hardware synergy) and man", "product_code":"cce", "title":"CCE Turbo Clusters and CCE Clusters", - "uri":"cce_01_0342.html", + "uri":"cce_10_0342.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"14" + "p_code":"15", + "code":"17" + }, + { + "desc":"kube-proxy is a key component of a Kubernetes cluster. It is responsible for load balancing and forwarding between a Service and its backend pod.CCE supports two forwardi", + "product_code":"cce", + "title":"Comparing iptables and IPVS", + "uri":"cce_10_0349.html", + "doc_type":"usermanual2", + "p_code":"15", + "code":"18" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Release Notes", + "uri":"cce_10_0068.html", + "doc_type":"usermanual2", + "p_code":"15", + "code":"19" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.25.Kubernetes 1", + "product_code":"cce", + "title":"CCE Kubernetes 1.25 Release Notes", + "uri":"cce_10_0467.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"20" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the updates in CCE Kubernetes 1.23.Changes in C", + "product_code":"cce", + "title":"CCE Kubernetes 1.23 Release Notes", + "uri":"cce_10_0468.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"21" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1", + "product_code":"cce", + "title":"CCE Kubernetes 1.21 Release Notes", + "uri":"cce_10_0469.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"22" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.Kubernetes 1", + "product_code":"cce", + "title":"CCE Kubernetes 1.19 Release Notes", + "uri":"cce_10_0470.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"23" + }, + { + "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", + "product_code":"cce", + "title":"CCE Kubernetes 1.17 Release Notes", + "uri":"cce_10_0471.html", + "doc_type":"usermanual2", + "p_code":"19", + "code":"24" }, { "desc":"CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and inte", "product_code":"cce", "title":"Creating a CCE Turbo Cluster", - "uri":"cce_01_0298.html", + "uri":"cce_10_0298.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"15" + "p_code":"14", + "code":"25" }, { "desc":"On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.In CCE, you ", "product_code":"cce", "title":"Creating a CCE Cluster", - "uri":"cce_01_0028.html", + "uri":"cce_10_0028.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"16" + "p_code":"14", + "code":"26" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Using kubectl to Run a Cluster", - "uri":"cce_01_0140.html", + "uri":"cce_10_0140.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"17" + "p_code":"14", + "code":"27" }, { "desc":"This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses thekubeconfig.js", "product_code":"cce", "title":"Connecting to a Cluster Using kubectl", - "uri":"cce_01_0107.html", + "uri":"cce_10_0107.html", "doc_type":"usermanual2", - "p_code":"17", - "code":"18" + "p_code":"27", + "code":"28" + }, + { + "desc":"A Subject Alternative Name (SAN) can be signed in to a cluster server certificate. A SAN is usually used by the client to verify the server validity in TLS handshakes. 
Sp", + "product_code":"cce", + "title":"Customizing a Cluster Certificate SAN", + "uri":"cce_10_0367.html", + "doc_type":"usermanual2", + "p_code":"27", + "code":"29" }, { "desc":"getThe get command displays one or many resources of a cluster.This command prints a table of the most important information about all resources, including cluster nodes,", "product_code":"cce", "title":"Common kubectl Commands", - "uri":"cce_01_0139.html", + "uri":"cce_10_0139.html", "doc_type":"usermanual2", - "p_code":"17", - "code":"19" - }, - { - "desc":"Before running kubectl commands, you should have the kubectl development skills and understand the kubectl operations. For details, see Kubernetes API and kubectl CLI.Go ", - "product_code":"cce", - "title":"kubectl Usage Guide", - "uri":"cce_01_0023.html", - "doc_type":"usermanual2", - "p_code":"17", - "code":"20" - }, - { - "desc":"The Cluster Auto Scaling feature allows CCE to automatically scale out a cluster (adding worker nodes to a cluster) according to custom policies when workloads cannot be ", - "product_code":"cce", - "title":"Setting Cluster Auto Scaling", - "uri":"cce_01_0157.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"21" + "p_code":"27", + "code":"30" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Upgrading a Cluster", - "uri":"cce_01_0215.html", + "uri":"cce_10_0215.html", "doc_type":"usermanual2", - "p_code":"12", - "code":"22" + "p_code":"14", + "code":"31" }, { "desc":"To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.After the latest Kub", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0197.html", + "title":"Upgrade Overview", + "uri":"cce_10_0197.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"23" + "p_code":"31", + "code":"32" }, { - "desc":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Overview.Upgraded clusters ca", + "desc":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. 
For details, see Upgrade Overview.Upgraded clu", "product_code":"cce", "title":"Before You Start", - "uri":"cce_01_0302.html", + "uri":"cce_10_0302.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"24" + "p_code":"31", + "code":"33" }, { "desc":"You can upgrade your clusters to a newer Kubernetes version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgrade", "product_code":"cce", - "title":"Performing Replace/Rolling Upgrade (v1.13 and Earlier)", - "uri":"cce_01_0120.html", + "title":"Performing Replace/Rolling Upgrade", + "uri":"cce_10_0120.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"25" + "p_code":"31", + "code":"34" }, { - "desc":"On the CCE console, You can perform an in-place cluster upgrade to use new cluster features.Before the upgrade, learn about the target version to which each CCE cluster c", + "desc":"You can upgrade your clusters to a newer version on the CCE console.Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what w", "product_code":"cce", - "title":"Performing In-place Upgrade (v1.15 and Later)", - "uri":"cce_01_0301.html", + "title":"Performing In-place Upgrade", + "uri":"cce_10_0301.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"26" + "p_code":"31", + "code":"35" }, { "desc":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", "product_code":"cce", "title":"Migrating Services Across Clusters of Different Versions", - "uri":"cce_01_0210.html", + "uri":"cce_10_0210.html", "doc_type":"usermanual2", - "p_code":"22", - "code":"27" - }, - { - "desc":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. To enable interoperability from one Kubernetes installation to the nex", - "product_code":"cce", - "title":"CCE Kubernetes Release Notes", - "uri":"cce_01_0068.html", - "doc_type":"usermanual2", - "p_code":"22", - "code":"28" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Managing a Cluster", - "uri":"cce_01_0031.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"29" - }, - { - "desc":"This section describes how to delete a cluster.Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workl", - "product_code":"cce", - "title":"Deleting a Cluster", - "uri":"cce_01_0212.html", - "doc_type":"usermanual2", - "p_code":"29", - "code":"30" - }, - { - "desc":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster to save cluster management costs.After a cluster is hibernated, resources such a", - "product_code":"cce", - "title":"Hibernating and Waking Up a Cluster", - "uri":"cce_01_0214.html", - "doc_type":"usermanual2", - "p_code":"29", - "code":"31" - }, - { - "desc":"CCE clusters allow you to manage Kubernetes parameters, through which you can let core components work under your very requirements.This function is supported only in clu", - "product_code":"cce", - "title":"Configuring Kubernetes Parameters", - "uri":"cce_01_0213.html", - "doc_type":"usermanual2", - "p_code":"29", - "code":"32" - }, - { - "desc":"Before accessing cluster resources through open-source Kubernetes APIs, obtain the cluster's certificate.The downloaded certificate contains three files: client.key, clie", - "product_code":"cce", - "title":"Obtaining a Cluster Certificate", - "uri":"cce_01_0175.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"33" - }, - { - "desc":"This section describes how to control permissions on resources in a cluster, for example, allow user A to read and write application data in a namespace, and user B to on", - "product_code":"cce", - "title":"Controlling Cluster Permissions", - "uri":"cce_01_0085.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"34" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Cluster Parameters", - "uri":"cce_01_0347.html", - "doc_type":"usermanual2", - "p_code":"12", - "code":"35" - }, - { - "desc":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", - "product_code":"cce", - "title":"Maximum Number of Pods That Can Be Created on a Node", - "uri":"cce_01_0348.html", - "doc_type":"usermanual2", - "p_code":"35", + "p_code":"31", "code":"36" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Nodes", - "uri":"cce_01_0183.html", + "title":"Managing a Cluster", + "uri":"cce_10_0031.html", "doc_type":"usermanual2", - "p_code":"", + "p_code":"14", "code":"37" }, { - "desc":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. 
A node can be a virtual machine (VM) or a physical machine (P", + "desc":"CCE allows you to manage cluster parameters, through which you can let core components work under your very requirements.This function is supported only in clusters of v1", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0180.html", + "title":"Managing Cluster Components", + "uri":"cce_10_0213.html", "doc_type":"usermanual2", "p_code":"37", "code":"38" }, { - "desc":"A node is a virtual or physical machine that provides computing resources. Sufficient nodes must be available in your project to ensure that operations, such as creating ", + "desc":"This section describes how to delete a cluster.Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workl", "product_code":"cce", - "title":"Creating a Node", - "uri":"cce_01_0033.html", + "title":"Deleting a Cluster", + "uri":"cce_10_0212.html", "doc_type":"usermanual2", "p_code":"37", "code":"39" }, { - "desc":"At least one CCE Turbo cluster is available. For details on how to create a cluster, see Creating a CCE Turbo Cluster.A key pair has been created for identity authenticat", + "desc":"If you do not need to use a cluster temporarily, you are advised to hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or ", "product_code":"cce", - "title":"Creating a Node in a CCE Turbo Cluster", - "uri":"cce_01_0363.html", + "title":"Hibernating and Waking Up a Cluster", + "uri":"cce_10_0214.html", "doc_type":"usermanual2", "p_code":"37", "code":"40" }, { - "desc":"Removing a node from a cluster in CCE will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server (ECS) corresponding to t", + "desc":"If overload control is enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The c", "product_code":"cce", - "title":"Removing a Node", - "uri":"cce_01_0338.html", + "title":"Cluster Overload Control", + "uri":"cce_10_0602.html", "doc_type":"usermanual2", "p_code":"37", "code":"41" }, { - "desc":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", + "desc":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", "product_code":"cce", - "title":"Logging In to a Node", - "uri":"cce_01_0185.html", + "title":"Obtaining a Cluster Certificate", + "uri":"cce_10_0175.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"14", "code":"42" }, { - "desc":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", + "desc":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", "product_code":"cce", - "title":"Managing Node Labels", - "uri":"cce_01_0004.html", + "title":"Changing Cluster Scale", + "uri":"cce_10_0403.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"14", "code":"43" }, { - "desc":"Each node in a cluster is a cloud server or physical machine. 
After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Synchronizing Node Data", - "uri":"cce_01_0184.html", + "title":"Nodes", + "uri":"cce_10_0183.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"", "code":"44" }, { - "desc":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Configuring Node Scheduling (Tainting)", - "uri":"cce_01_0352.html", + "title":"Node Overview", + "uri":"cce_10_0180.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"44", "code":"45" }, { - "desc":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", + "desc":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", "product_code":"cce", - "title":"Resetting a Node", - "uri":"cce_01_0003.html", + "title":"Precautions for Using a Node", + "uri":"cce_10_0461.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"46" }, { - "desc":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.After a CCE cluster is deleted", + "desc":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. The kubelet interacts with a container runtime throu", "product_code":"cce", - "title":"Deleting a Node", - "uri":"cce_01_0186.html", + "title":"Container Engine", + "uri":"cce_10_0462.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"47" }, { - "desc":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", + "desc":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", "product_code":"cce", - "title":"Stopping a Node", - "uri":"cce_01_0036.html", + "title":"Kata Containers and Common Containers", + "uri":"cce_10_0463.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"48" }, { - "desc":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. 
Figure 1 shows the migration process.The o", + "desc":"The maximum number of pods that can be created on a node is determined by the following parameters:Number of container IP addresses that can be allocated on a node (alpha", "product_code":"cce", - "title":"Performing Rolling Upgrade for Nodes", - "uri":"cce_01_0276.html", + "title":"Maximum Number of Pods That Can Be Created on a Node", + "uri":"cce_10_0348.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"49" }, { "desc":"Some of the resources on the node need to run some necessary Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total num", "product_code":"cce", "title":"Formula for Calculating the Reserved Resources of a Node", - "uri":"cce_01_0178.html", + "uri":"cce_10_0178.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"50" }, { - "desc":"This section describes how to check whether there are available raw disks and Linux LVM disk partitions and how to create Linux LVM disk partitions.To improve the system ", + "desc":"This section describes how to allocate data disk space.When creating a node, you need to configure a data disk whose capacity is greater than or equal to 100GB for the no", "product_code":"cce", - "title":"Creating a Linux LVM Disk Partition for Docker", - "uri":"cce_01_0200.html", + "title":"Data Disk Space Allocation", + "uri":"cce_10_0341.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"45", "code":"51" }, { - "desc":"When creating a node, you need to configure data disks for the node.The data disk is divided into Kubernetes space and user space. The user space defines the space that i", + "desc":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has 2-core or higher CPU, 4 GB or larger mem", "product_code":"cce", - "title":"Data Disk Space Allocation", - "uri":"cce_01_0341.html", + "title":"Creating a Node", + "uri":"cce_10_0363.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"44", "code":"52" }, { - "desc":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", + "desc":"In CCE, you can Creating a Node or add existing nodes (ECSs) into your cluster.While an ECS is being accepted into a cluster, the operating system of the ECS will be rese", "product_code":"cce", - "title":"Adding a Second Data Disk to a Node in a CCE Cluster", - "uri":"cce_01_0344.html", + "title":"Adding Nodes for Management", + "uri":"cce_10_0198.html", "doc_type":"usermanual2", - "p_code":"37", + "p_code":"44", "code":"53" }, + { + "desc":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. 
You ", + "product_code":"cce", + "title":"Removing a Node", + "uri":"cce_10_0338.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"54" + }, + { + "desc":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", + "product_code":"cce", + "title":"Resetting a Node", + "uri":"cce_10_0003.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"55" + }, + { + "desc":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", + "product_code":"cce", + "title":"Logging In to a Node", + "uri":"cce_10_0185.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"56" + }, + { + "desc":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", + "product_code":"cce", + "title":"Managing Node Labels", + "uri":"cce_10_0004.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"57" + }, + { + "desc":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.A taint is a key-value pair associated with an effect. The following ef", + "product_code":"cce", + "title":"Managing Node Taints", + "uri":"cce_10_0352.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"58" + }, + { + "desc":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.Some inf", + "product_code":"cce", + "title":"Synchronizing Data with Cloud Servers", + "uri":"cce_10_0184.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"59" + }, + { + "desc":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.After a CCE cluster is deleted", + "product_code":"cce", + "title":"Deleting a Node", + "uri":"cce_10_0186.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"60" + }, + { + "desc":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", + "product_code":"cce", + "title":"Stopping a Node", + "uri":"cce_10_0036.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"61" + }, + { + "desc":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.The o", + "product_code":"cce", + "title":"Performing Rolling Upgrade for Nodes", + "uri":"cce_10_0276.html", + "doc_type":"usermanual2", + "p_code":"44", + "code":"62" + }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Node Pools", - "uri":"cce_01_0035.html", + "uri":"cce_10_0035.html", "doc_type":"usermanual2", "p_code":"", - "code":"54" + "code":"63" }, { "desc":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. 
A node pool contains one node or a group of nodes with identical configuration in a clus", "product_code":"cce", "title":"Node Pool Overview", - "uri":"cce_01_0081.html", + "uri":"cce_10_0081.html", "doc_type":"usermanual2", - "p_code":"54", - "code":"55" + "p_code":"63", + "code":"64" }, { - "desc":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.For details abou", + "desc":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.The autoscaler a", "product_code":"cce", "title":"Creating a Node Pool", - "uri":"cce_01_0012.html", + "uri":"cce_10_0012.html", "doc_type":"usermanual2", - "p_code":"54", - "code":"56" + "p_code":"63", + "code":"65" }, { "desc":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", "product_code":"cce", "title":"Managing a Node Pool", - "uri":"cce_01_0222.html", + "uri":"cce_10_0222.html", "doc_type":"usermanual2", - "p_code":"54", - "code":"57" + "p_code":"63", + "code":"66" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Workloads", - "uri":"cce_01_0046.html", + "uri":"cce_10_0046.html", "doc_type":"usermanual2", "p_code":"", - "code":"58" + "code":"67" }, { "desc":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "product_code":"cce", "title":"Overview", - "uri":"cce_01_0006.html", + "uri":"cce_10_0006.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"59" + "p_code":"67", + "code":"68" }, { "desc":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "product_code":"cce", "title":"Creating a Deployment", - "uri":"cce_01_0047.html", + "uri":"cce_10_0047.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"60" + "p_code":"67", + "code":"69" }, { "desc":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "product_code":"cce", "title":"Creating a StatefulSet", - "uri":"cce_01_0048.html", + "uri":"cce_10_0048.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"61" + "p_code":"67", + "code":"70" }, { "desc":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "product_code":"cce", "title":"Creating a DaemonSet", - "uri":"cce_01_0216.html", + "uri":"cce_10_0216.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"62" + "p_code":"67", + "code":"71" }, { "desc":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. 
It is completed after it exits normally (exit 0).A j", "product_code":"cce", "title":"Creating a Job", - "uri":"cce_01_0150.html", + "uri":"cce_10_0150.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"63" + "p_code":"67", + "code":"72" }, { "desc":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "product_code":"cce", "title":"Creating a Cron Job", - "uri":"cce_01_0151.html", + "uri":"cce_10_0151.html", "doc_type":"usermanual2", - "p_code":"58", - "code":"64" - }, - { - "desc":"A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates an application's container (or, in some cases, multip", - "product_code":"cce", - "title":"Managing Pods", - "uri":"cce_01_0013.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"65" - }, - { - "desc":"After a workload is created, you can scale, upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescription", - "product_code":"cce", - "title":"Managing Workloads and Jobs", - "uri":"cce_01_0007.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"66" - }, - { - "desc":"After scaling policies are defined, pods can be automatically added or deleted based on resource changes, fixed time, and fixed periods. You do not need to manually adjus", - "product_code":"cce", - "title":"Scaling a Workload", - "uri":"cce_01_0057.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"67" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Configuring a Container", - "uri":"cce_01_0130.html", - "doc_type":"usermanual2", - "p_code":"58", - "code":"68" - }, - { - "desc":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", - "product_code":"cce", - "title":"Using a Third-Party Image", - "uri":"cce_01_0009.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"69" - }, - { - "desc":"CCE allows you to set resource limits for added containers during workload creation. You can request and limit the CPU and memory quotas used by each pod in the workload.", - "product_code":"cce", - "title":"Setting Container Specifications", - "uri":"cce_01_0163.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"70" - }, - { - "desc":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", - "product_code":"cce", - "title":"Setting Container Lifecycle Parameters", - "uri":"cce_01_0105.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"71" - }, - { - "desc":"When creating a workload or job, you can use an image to specify the processes running in the container.By default, the image runs the default command. To run a specific ", - "product_code":"cce", - "title":"Setting Container Startup Commands", - "uri":"cce_01_0008.html", - "doc_type":"usermanual2", - "p_code":"68", - "code":"72" - }, - { - "desc":"Health check regularly checks the health status of containers during container running. 
If the health check function is not configured, a pod cannot detect service except", - "product_code":"cce", - "title":"Setting Health Check for a Container", - "uri":"cce_01_0112.html", - "doc_type":"usermanual2", - "p_code":"68", + "p_code":"67", "code":"73" }, { - "desc":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", + "desc":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "product_code":"cce", - "title":"Setting an Environment Variable", - "uri":"cce_01_0113.html", + "title":"Managing Workloads and Jobs", + "uri":"cce_10_0007.html", "doc_type":"usermanual2", - "p_code":"68", + "p_code":"67", "code":"74" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Affinity and Anti-Affinity Scheduling", - "uri":"cce_01_0149.html", + "title":"Configuring a Container", + "uri":"cce_10_0130.html", "doc_type":"usermanual2", - "p_code":"", + "p_code":"67", "code":"75" }, { - "desc":"CCE supports custom and simple scheduling policies. A custom scheduling policy allows you to customize node affinity, workload affinity, and workload anti-affinity to mee", + "desc":"A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple ", "product_code":"cce", - "title":"Scheduling Policy Overview", - "uri":"cce_01_0051.html", + "title":"Setting Basic Container Information", + "uri":"cce_10_0396.html", "doc_type":"usermanual2", "p_code":"75", "code":"76" }, { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "desc":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "product_code":"cce", - "title":"Custom Scheduling Policies", - "uri":"cce_01_0231.html", + "title":"Using a Third-Party Image", + "uri":"cce_10_0009.html", "doc_type":"usermanual2", "p_code":"75", "code":"77" }, { - "desc":"This section uses Nginx as an example to describe how to configure node affinity.PrerequisitesA workload that uses the nginx container image has been deployed on a node.P", + "desc":"CCE allows you to set resource limits for added containers during workload creation. You can apply for and limit the CPU and memory quotas used by each pod in a workload.", "product_code":"cce", - "title":"Node Affinity", - "uri":"cce_01_0232.html", + "title":"Setting Container Specifications", + "uri":"cce_10_0163.html", "doc_type":"usermanual2", - "p_code":"77", + "p_code":"75", "code":"78" }, { - "desc":"Workload affinity determines the pods as which the target workload will be deployed in the same topology domain.There are two types of pod affinity rules: Required (hard ", + "desc":"CCE provides callback functions for the lifecycle management of containerized applications. 
For example, if you want a container to perform a certain operation before sto", "product_code":"cce", - "title":"Workload Affinity", - "uri":"cce_01_0233.html", + "title":"Setting Container Lifecycle Parameters", + "uri":"cce_10_0105.html", "doc_type":"usermanual2", - "p_code":"77", + "p_code":"75", "code":"79" }, { - "desc":"Workload anti-affinity determines the pods from which the target workload will be deployed in a different topology domain.There are two types of pod anti-affinity rules: ", + "desc":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "product_code":"cce", - "title":"Workload Anti-Affinity", - "uri":"cce_01_0234.html", + "title":"Setting Health Check for a Container", + "uri":"cce_10_0112.html", "doc_type":"usermanual2", - "p_code":"77", + "p_code":"75", "code":"80" }, { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "desc":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", "product_code":"cce", - "title":"Simple Scheduling Policies", - "uri":"cce_01_0230.html", + "title":"Setting an Environment Variable", + "uri":"cce_10_0113.html", "doc_type":"usermanual2", "p_code":"75", "code":"81" }, { - "desc":"The created workload will be deployed in the selected AZ.This section uses an Nginx workload as an example to describe how to create a workload using kubectl.Prerequisite", + "desc":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "product_code":"cce", - "title":"Workload-AZ Affinity", - "uri":"cce_01_0228.html", + "title":"Configuring an Image Pull Policy", + "uri":"cce_10_0353.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"82" }, { - "desc":"The created workload is not deployed on the selected AZ.This section uses Nginx as an example to describe how to create a workload using kubectl.PrerequisitesThe ECS wher", + "desc":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "product_code":"cce", - "title":"Workload-AZ Anti-Affinity", - "uri":"cce_01_0229.html", + "title":"Configuring Time Zone Synchronization", + "uri":"cce_10_0354.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"83" }, { - "desc":"If you select multiple nodes, the system automatically chooses one of them during workload deployment.This section uses an Nginx workload as an example to describe how to", + "desc":"In actual applications, upgrade is a common operation. 
A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "product_code":"cce", - "title":"Workload-Node Affinity", - "uri":"cce_01_0225.html", + "title":"Configuring the Workload Upgrade Policy", + "uri":"cce_10_0397.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"84" }, { - "desc":"If you select multiple nodes, the workload will not be deployed on these nodes.This section uses Nginx as an example to describe how to create a workload using kubectl.Pr", + "desc":"A nodeSelector provides a very simple way to constrain pods to nodes with particular labels, as mentioned in Creating a DaemonSet. The affinity and anti-affinity feature ", "product_code":"cce", - "title":"Workload-Node Anti-Affinity", - "uri":"cce_01_0226.html", + "title":"Scheduling Policy (Affinity/Anti-affinity)", + "uri":"cce_10_0232.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"75", "code":"85" }, { - "desc":"The workload to be created will be deployed on the same node as the selected affinity workloads.This section uses Nginx as an example to describe how to create a workload", + "desc":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The gpu-beta add-on has been installed. During the installation, select t", "product_code":"cce", - "title":"Workload-Workload Affinity", - "uri":"cce_01_0220.html", + "title":"GPU Scheduling", + "uri":"cce_10_0345.html", "doc_type":"usermanual2", - "p_code":"81", + "p_code":"67", "code":"86" }, - { - "desc":"The workload to be created and the selected workloads will be deployed on different nodes.This section uses Nginx as an example to describe how to create a workload using", - "product_code":"cce", - "title":"Workload-Workload Anti-Affinity", - "uri":"cce_01_0227.html", - "doc_type":"usermanual2", - "p_code":"81", - "code":"87" - }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Networking", - "uri":"cce_01_0020.html", + "title":"CPU Core Binding", + "uri":"cce_10_0551.html", "doc_type":"usermanual2", - "p_code":"", + "p_code":"67", + "code":"87" + }, + { + "desc":"By default, kubelet uses CFS quotas to enforce pod CPU limits. When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", + "product_code":"cce", + "title":"Binding CPU Cores", + "uri":"cce_10_0351.html", + "doc_type":"usermanual2", + "p_code":"87", "code":"88" }, { - "desc":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", + "desc":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0010.html", + "title":"Pod Labels and Annotations", + "uri":"cce_10_0386.html", "doc_type":"usermanual2", - "p_code":"88", + "p_code":"67", "code":"89" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Container Network Models", - "uri":"cce_01_0280.html", + "title":"Volcano Scheduling", + "uri":"cce_10_0423.html", "doc_type":"usermanual2", - "p_code":"88", + "p_code":"67", "code":"90" }, { - "desc":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Con", + "desc":"Jobs can be classified into online jobs and offline jobs based on whether services are always online.Online job: Such jobs run for a long time, with regular traffic surge", "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0281.html", + "title":"Hybrid Deployment of Online and Offline Jobs", + "uri":"cce_10_0384.html", "doc_type":"usermanual2", "p_code":"90", "code":"91" }, { - "desc":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet pac", + "desc":"When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. CCE provides a cust", "product_code":"cce", - "title":"Container Tunnel Network", - "uri":"cce_01_0282.html", + "title":"Security Group Policies", + "uri":"cce_10_0288.html", "doc_type":"usermanual2", - "p_code":"90", + "p_code":"67", "code":"92" }, { - "desc":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"VPC Network", - "uri":"cce_01_0283.html", + "title":"Networking", + "uri":"cce_10_0020.html", "doc_type":"usermanual2", - "p_code":"90", + "p_code":"", "code":"93" }, { - "desc":"Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", + "desc":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", "product_code":"cce", - "title":"Cloud Native Network 2.0", - "uri":"cce_01_0284.html", + "title":"Overview", + "uri":"cce_10_0010.html", "doc_type":"usermanual2", - "p_code":"90", + "p_code":"93", "code":"94" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Services", - "uri":"cce_01_0247.html", + "title":"Container Network Models", + "uri":"cce_10_0280.html", "doc_type":"usermanual2", - "p_code":"88", + "p_code":"93", "code":"95" }, { - "desc":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", + "desc":"The container network assigns IP addresses to pods in a cluster and provides networking services. 
In CCE, you can select the following network models for your cluster:Tun", "product_code":"cce", "title":"Overview", - "uri":"cce_01_0249.html", + "uri":"cce_10_0281.html", "doc_type":"usermanual2", "p_code":"95", "code":"96" }, { - "desc":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is Uploaded Charts for subsequent workload creation.When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}", - "product_code":"cce", - "title":"Uploading a Chart", - "uri":"cce_01_0145.html", - "doc_type":"usermanual2", - "p_code":"176", - "code":"179" - }, - { - "desc":"In the workload list, if the status is Rollback successful, the workload is rolled back successfully.", - "product_code":"cce", - "title":"Creating a Workload from a Chart", - "uri":"cce_01_0146.html", - "doc_type":"usermanual2", - "p_code":"176", - "code":"180" - }, - { - "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "product_code":"cce", - "title":"Add-ons", - "uri":"cce_01_0064.html", - "doc_type":"usermanual2", - "p_code":"", - "code":"181" - }, - { - "desc":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", - "product_code":"cce", - "title":"Overview", - "uri":"cce_01_0277.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"182" - }, - { - "desc":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", - "product_code":"cce", - "title":"coredns (System Resource Add-on, Mandatory)", - "uri":"cce_01_0129.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"183" - }, - { - "desc":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use IaaS storage resources. By installing and upgrading storage-driver, you ca", - "product_code":"cce", - "title":"storage-driver (System Resource Add-on, Mandatory)", - "uri":"cce_01_0127.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"184" - }, - { - "desc":"Everest is a cloud-native container storage system. Based on Container Storage Interface (CSI), clusters of Kubernetes v1.15 or later can interconnect with cloud storage ", - "product_code":"cce", - "title":"everest (System Resource Add-on, Mandatory)", - "uri":"cce_01_0066.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"185" - }, - { - "desc":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", - "product_code":"cce", - "title":"autoscaler", - "uri":"cce_01_0154.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"186" - }, - { - "desc":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", - "product_code":"cce", - "title":"metrics-server", - "uri":"cce_01_0205.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"187" - }, - { - "desc":"gpu-beta is a device management add-on that supports GPUs in containers. 
It supports only NVIDIA Tesla drivers.This add-on is available only in certain regions.This add-o", - "product_code":"cce", - "title":"gpu-beta", - "uri":"cce_01_0141.html", - "doc_type":"usermanual2", - "p_code":"181", - "code":"188" + "p_code":"144", + "code":"149" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Auto Scaling", - "uri":"cce_01_0207.html", + "uri":"cce_10_0207.html", "doc_type":"usermanual2", "p_code":"", - "code":"189" + "code":"150" }, { "desc":"Auto scaling is a service that automatically and economically adjusts service resources based on your service requirements and configured policies.More and more applicati", "product_code":"cce", "title":"Overview", - "uri":"cce_01_0279.html", + "uri":"cce_10_0279.html", "doc_type":"usermanual2", - "p_code":"189", - "code":"190" + "p_code":"150", + "code":"151" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Scaling a Workload", - "uri":"cce_01_0293.html", + "uri":"cce_10_0293.html", "doc_type":"usermanual2", - "p_code":"189", - "code":"191" + "p_code":"150", + "code":"152" }, { - "desc":"Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered,", + "desc":"HPA is a controller that controls horizontal pod scaling. HPA periodically checks the pod metrics, calculates the number of replicas required to meet the target values co", "product_code":"cce", "title":"Workload Scaling Mechanisms", - "uri":"cce_01_0290.html", + "uri":"cce_10_0290.html", "doc_type":"usermanual2", - "p_code":"191", - "code":"192" + "p_code":"152", + "code":"153" }, { "desc":"Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling t", "product_code":"cce", "title":"Creating an HPA Policy for Workload Auto Scaling", - "uri":"cce_01_0208.html", + "uri":"cce_10_0208.html", "doc_type":"usermanual2", - "p_code":"191", - "code":"193" + "p_code":"152", + "code":"154" }, { "desc":"After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.You can view the rules, status, and events of an HPA pol", "product_code":"cce", "title":"Managing Workload Scaling Policies", - "uri":"cce_01_0083.html", + "uri":"cce_10_0083.html", "doc_type":"usermanual2", - "p_code":"191", - "code":"194" - }, - { - "desc":"CCE clusters of v1.15 or earlier support workload scaling based on AOM monitoring data. This function is no longer supported in CCE clusters of v1.17 or later.If you have", - "product_code":"cce", - "title":"Switching from AOM to HPA for Auto Scaling", - "uri":"cce_01_0395.html", - "doc_type":"usermanual2", - "p_code":"191", - "code":"195" + "p_code":"152", + "code":"155" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", - "title":"Scaling a Cluster/Node", - "uri":"cce_01_0291.html", + "title":"Scaling a Node", + "uri":"cce_10_0291.html", "doc_type":"usermanual2", - "p_code":"189", - "code":"196" + "p_code":"150", + "code":"156" }, { "desc":"Kubernetes HPA is designed for pods. However, if the cluster resources are insufficient, you can only add nodes. Scaling of cluster nodes could be laborious. Now with clo", "product_code":"cce", "title":"Node Scaling Mechanisms", - "uri":"cce_01_0296.html", + "uri":"cce_10_0296.html", "doc_type":"usermanual2", - "p_code":"196", - "code":"197" + "p_code":"156", + "code":"157" }, { "desc":"CCE provides auto scaling through the autoscaler add-on. Nodes with different specifications can be automatically added across AZs on demand.If a node scaling policy and ", "product_code":"cce", "title":"Creating a Node Scaling Policy", - "uri":"cce_01_0209.html", + "uri":"cce_10_0209.html", "doc_type":"usermanual2", - "p_code":"196", - "code":"198" + "p_code":"156", + "code":"158" }, { "desc":"After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.You can view the associated node pool, rules, and scaling history of a ", "product_code":"cce", "title":"Managing Node Scaling Policies", - "uri":"cce_01_0063.html", + "uri":"cce_10_0063.html", "doc_type":"usermanual2", - "p_code":"196", - "code":"199" + "p_code":"156", + "code":"159" + }, + { + "desc":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "product_code":"cce", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "uri":"cce_10_0300.html", + "doc_type":"usermanual2", + "p_code":"150", + "code":"160" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Add-ons", + "uri":"cce_10_0064.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"161" + }, + { + "desc":"CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0277.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"162" + }, + { + "desc":"The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.coredns i", + "product_code":"cce", + "title":"coredns (System Resource Add-On, Mandatory)", + "uri":"cce_10_0129.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"163" + }, + { + "desc":"storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use EVS, SFS, OBS, and SFS Turbo storage resources. By installing and upgradin", + "product_code":"cce", + "title":"storage-driver (System Resource Add-On, Discarded)", + "uri":"cce_10_0127.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"164" + }, + { + "desc":"Everest is a cloud native container storage system. 
Based on the Container Storage Interface (CSI), clusters of Kubernetes v1.15.6 or later obtain access to cloud storage", + "product_code":"cce", + "title":"everest (System Resource Add-On, Mandatory)", + "uri":"cce_10_0066.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"165" + }, + { + "desc":"node-problem-detector (npd for short) is an add-on that monitors abnormal events of cluster nodes and connects to a third-party monitoring platform. It is a daemon runnin", + "product_code":"cce", + "title":"npd", + "uri":"cce_10_0132.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"166" + }, + { + "desc":"Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.When the CPU or memory usage of a microservice is too h", + "product_code":"cce", + "title":"autoscaler", + "uri":"cce_10_0154.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"167" + }, + { + "desc":"From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly a", + "product_code":"cce", + "title":"metrics-server", + "uri":"cce_10_0205.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"168" + }, + { + "desc":"gpu-beta is a device management add-on that supports GPUs in containers. If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.The driver to be down", + "product_code":"cce", + "title":"gpu-beta", + "uri":"cce_10_0141.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"169" + }, + { + "desc":"Volcano is a batch processing platform based on Kubernetes. It provides a series of features required by machine learning, deep learning, bioinformatics, genomics, and ot", + "product_code":"cce", + "title":"volcano", + "uri":"cce_10_0193.html", + "doc_type":"usermanual2", + "p_code":"161", + "code":"170" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Charts", + "uri":"cce_10_0019.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"171" + }, + { + "desc":"CCE provides a console for managing Helm charts, helping you easily deploy applications using the charts and manage applications on the console.Helm is a package manager ", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0191.html", + "doc_type":"usermanual2", + "p_code":"171", + "code":"172" + }, + { + "desc":"On the CCE console, you can upload a Helm chart package, deploy it, and manage the deployed pods.The number of charts that can be uploaded by a single user is limited. Th", + "product_code":"cce", + "title":"Deploying an Application from a Chart", + "uri":"cce_10_0146.html", + "doc_type":"usermanual2", + "p_code":"171", + "code":"173" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Permissions Management", - "uri":"cce_01_0164.html", + "uri":"cce_10_0164.html", "doc_type":"usermanual2", "p_code":"", - "code":"200" + "code":"174" }, { "desc":"CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Ma", "product_code":"cce", "title":"Permissions Overview", - "uri":"cce_01_0187.html", + "uri":"cce_10_0187.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"201" + "p_code":"174", + "code":"175" }, { - "desc":"CCE cluster permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permissions are ", + "desc":"CCE cluster-level permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.Cluster permission", "product_code":"cce", "title":"Cluster Permissions (IAM-based)", - "uri":"cce_01_0188.html", + "uri":"cce_10_0188.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"202" + "p_code":"174", + "code":"176" }, { "desc":"You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kub", "product_code":"cce", "title":"Namespace Permissions (Kubernetes RBAC-based)", - "uri":"cce_01_0189.html", + "uri":"cce_10_0189.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"203" + "p_code":"174", + "code":"177" + }, + { + "desc":"The conventional distributed task scheduling mode is being replaced by Kubernetes. CCE allows you to easily deploy, manage, and scale containerized applications in the cl", + "product_code":"cce", + "title":"Example: Designing and Configuring Permissions for Users in a Department", + "uri":"cce_10_0245.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"178" + }, + { + "desc":"Some CCE permissions policies depend on the policies of other cloud services. To view or use other cloud resources on the CCE console, you need to enable the system polic", + "product_code":"cce", + "title":"Permission Dependency of the CCE Console", + "uri":"cce_10_0190.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"179" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Pod Security", + "uri":"cce_10_0465.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"180" }, { "desc":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", "product_code":"cce", - "title":"Pod Security Policies", - "uri":"cce_01_0275.html", + "title":"Configuring a Pod Security Policy", + "uri":"cce_10_0275.html", "doc_type":"usermanual2", - "p_code":"200", - "code":"204" + "p_code":"180", + "code":"181" + }, + { + "desc":"Before using Pod Security Admission, you need to understand Kubernetes Pod Security Standards. These standards define different isolation levels for pods. 
They let you de", + "product_code":"cce", + "title":"Configuring Pod Security Admission", + "uri":"cce_10_0466.html", + "doc_type":"usermanual2", + "p_code":"180", + "code":"182" + }, + { + "desc":"In clusters earlier than v1.21, a token is obtained by mounting the secret of the service account to a pod. Tokens obtained this way are permanent. This approach is no lo", + "product_code":"cce", + "title":"Service Account Token Security Improvement", + "uri":"cce_10_0477_0.html", + "doc_type":"usermanual2", + "p_code":"174", + "code":"183" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", "title":"Cloud Trace Service (CTS)", - "uri":"cce_01_0024.html", + "uri":"cce_10_0024.html", "doc_type":"usermanual2", "p_code":"", - "code":"205" + "code":"184" }, { "desc":"Cloud Trace Service (CTS) records operations on cloud service resources, allowing users to query, audit, and backtrack the resource operation requests initiated from the ", "product_code":"cce", "title":"CCE Operations Supported by CTS", - "uri":"cce_01_0025.html", + "uri":"cce_10_0025.html", "doc_type":"usermanual2", - "p_code":"205", - "code":"206" + "p_code":"184", + "code":"185" }, { "desc":"After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.Trace Sour", "product_code":"cce", "title":"Querying CTS Logs", - "uri":"cce_01_0026.html", + "uri":"cce_10_0026.html", "doc_type":"usermanual2", - "p_code":"205", + "p_code":"184", + "code":"186" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Storage (FlexVolume)", + "uri":"cce_10_0305.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"187" + }, + { + "desc":"In container storage, you can use different types of volumes and mount them to containers in pods as many as you want.In CCE, container storage is backed both by Kubernet", + "product_code":"cce", + "title":"FlexVolume Overview", + "uri":"cce_10_0306.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"188" + }, + { + "desc":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "product_code":"cce", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "uri":"cce_10_0343.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"189" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using EVS Disks as Storage Volumes", + "uri":"cce_10_0309.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"190" + }, + { + "desc":"To achieve persistent storage, CCE allows you to mount the storage volumes created from Elastic Volume Service (EVS) disks to a path of a container. 
When the container is", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0310.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"191" + }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-evs-auto-example.yamlvi pvc-evs-auto-example.yamlExample YAML file for clu", + "product_code":"cce", + "title":"(kubectl) Automatically Creating an EVS Disk", + "uri":"cce_10_0312.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"192" + }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-evs-example.yaml pvc-evs-example.yamlClusters from v1.11.7 to v1.13Example ", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing EVS Disk", + "uri":"cce_10_0313.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"193" + }, + { + "desc":"After an EVS volume is created or imported to CCE, you can mount it to a workload.EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubec", + "product_code":"cce", + "title":"(kubectl) Creating a Pod Mounted with an EVS Volume", + "uri":"cce_10_0314.html", + "doc_type":"usermanual2", + "p_code":"190", + "code":"194" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using SFS Turbo File Systems as Storage Volumes", + "uri":"cce_10_0329.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"195" + }, + { + "desc":"CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable fo", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0330.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"196" + }, + { + "desc":"CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). After the creation is successful, you can create a PersistentVolumeClaim (PVC) ", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing SFS Turbo File System", + "uri":"cce_10_0332.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"197" + }, + { + "desc":"After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13", + "product_code":"cce", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume", + "uri":"cce_10_0333.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"198" + }, + { + "desc":"CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch efs", + "product_code":"cce", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Turbo Volume", + "uri":"cce_10_0334.html", + "doc_type":"usermanual2", + "p_code":"195", + "code":"199" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using OBS Buckets as Storage Volumes", + "uri":"cce_10_0322.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"200" + }, + { + "desc":"CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. Object storage is commonly used in cloud w", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0323.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"201" + }, + { + "desc":"During the use of OBS, expected OBS buckets can be automatically created and mounted as volumes. Currently, standard and infrequent access OBS buckets are supported, whic", + "product_code":"cce", + "title":"(kubectl) Automatically Creating an OBS Volume", + "uri":"cce_10_0325.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"202" + }, + { + "desc":"CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). You can create a PersistentVolumeClaim (PVC) and bind it to the PV.The following configura", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing OBS Bucket", + "uri":"cce_10_0326.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"203" + }, + { + "desc":"After an OBS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "product_code":"cce", + "title":"(kubectl) Creating a Deployment Mounted with an OBS Volume", + "uri":"cce_10_0327.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"204" + }, + { + "desc":"CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "product_code":"cce", + "title":"(kubectl) Creating a StatefulSet Mounted with an OBS Volume", + "uri":"cce_10_0328.html", + "doc_type":"usermanual2", + "p_code":"200", + "code":"205" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Using SFS File Systems as Storage Volumes", + "uri":"cce_10_0315.html", + "doc_type":"usermanual2", + "p_code":"187", + "code":"206" + }, + { + "desc":"CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. 
SFS volumes are commonly used in ReadWr", + "product_code":"cce", + "title":"Overview", + "uri":"cce_10_0316.html", + "doc_type":"usermanual2", + "p_code":"206", "code":"207" }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pvc-sfs-auto-example.yamlvi pvc-sfs-auto-example.yamlExample YAML file:apiVers", + "product_code":"cce", + "title":"(kubectl) Automatically Creating an SFS Volume", + "uri":"cce_10_0318.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"208" + }, + { + "desc":"The following configuration example applies to clusters of Kubernetes 1.13 or earlier.touch pv-sfs-example.yaml pvc-sfs-example.yamlClusters from v1.11 to v1.13Example YA", + "product_code":"cce", + "title":"(kubectl) Creating a PV from an Existing SFS File System", + "uri":"cce_10_0319.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"209" + }, + { + "desc":"After an SFS volume is created or imported to CCE, you can mount the volume to a workload.The following configuration example applies to clusters of Kubernetes 1.13 or ea", + "product_code":"cce", + "title":"(kubectl) Creating a Deployment Mounted with an SFS Volume", + "uri":"cce_10_0320.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"210" + }, + { + "desc":"CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).The following configuration example applies to clusters of Kube", + "product_code":"cce", + "title":"(kubectl) Creating a StatefulSet Mounted with an SFS Volume", + "uri":"cce_10_0321.html", + "doc_type":"usermanual2", + "p_code":"206", + "code":"211" + }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", @@ -1869,16 +1905,7 @@ "uri":"cce_faq_0083.html", "doc_type":"usermanual2", "p_code":"", - "code":"208" - }, - { - "desc":"Cloud Container Engine (CCE) provides highly scalable, high-performance, enterprise-class Kubernetes clusters and supports Docker containers. With CCE, you can easily dep", - "product_code":"cce", - "title":"Checklist for Migrating Containerized Applications to the Cloud", - "uri":"cce_faq_00006.html", - "doc_type":"usermanual2", - "p_code":"208", - "code":"209" + "code":"212" }, { "desc":"When a node is added, EIP is set to Automatically assign. The node cannot be created, and a message indicating that EIPs are insufficient is displayed.Two methods are ava", @@ -1886,8 +1913,8 @@ "title":"How Do I Troubleshoot Insufficient EIPs When a Node Is Added?", "uri":"cce_01_0203.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"210" + "p_code":"212", + "code":"213" }, { "desc":"Before using command line injection, write a script that can format data disks and save it to your OBS bucket. 
Then, inject a command line that will automatically execute", @@ -1895,8 +1922,8 @@ "title":"How Do I Format a Data Disk Using Command Line Injection?", "uri":"cce_01_0204.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"211" + "p_code":"212", + "code":"214" }, { "desc":"After a cluster of v1.13.10 is created, you can use heapster only after rbac is enabled.kubectl delete clusterrole system:heapsterCopy the following file to a server on w", @@ -1904,8 +1931,8 @@ "title":"How Do I Use heapster in Clusters of v1.13.10?", "uri":"cce_01_0999.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"212" + "p_code":"212", + "code":"215" }, { "desc":"Currently, private CCE clusters use Device Mapper as the Docker storage driver.Device Mapper is developed based on the kernel framework and supports many advanced volume ", @@ -1913,8 +1940,8 @@ "title":"How Do I Change the Mode of the Docker Device Mapper?", "uri":"cce_faq_00096.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"213" + "p_code":"212", + "code":"216" }, { "desc":"If the cluster status is available but some nodes in the cluster are unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Node Is ", @@ -1922,8 +1949,8 @@ "title":"What Can I Do If My Cluster Status Is Available but the Node Status Is Unavailable?", "uri":"cce_faq_00120.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"214" + "p_code":"212", + "code":"217" }, { "desc":"If the cluster is Unavailable, perform the following operations to rectify the fault:Check Item 1: Whether the Security Group Is ModifiedCheck Item 2: Whether the DHCP Fu", @@ -1931,8 +1958,8 @@ "title":"How Do I Rectify the Fault When the Cluster Status Is Unavailable?", "uri":"cce_faq_00039.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"215" + "p_code":"212", + "code":"218" }, { "desc":"This section uses the Nginx workload as an example to describe how to set the workload access type to LoadBalancer (ELB).An ELB has been created.You have connected an Ela", @@ -1940,8 +1967,8 @@ "title":"How Do I Use kubectl to Set the Workload Access Type to LoadBalancer (ELB)?", "uri":"cce_faq_00099.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"216" + "p_code":"212", + "code":"219" }, { "desc":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).Before using this feature, write a script that can format data disks and save it to ", @@ -1949,8 +1976,8 @@ "title":"How Do I Add a Second Data Disk to a Node in a CCE Cluster?", "uri":"cce_faq_00190.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"217" + "p_code":"212", + "code":"220" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -1958,8 +1985,8 @@ "title":"Workload Abnormalities", "uri":"cce_faq_00029.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"218" + "p_code":"212", + "code":"221" }, { "desc":"If a workload is running improperly, you can view events to determine the cause.On the CCE console, choose Workloads > Deployments or StatefulSets in the navigation pane ", @@ -1967,8 +1994,8 @@ "title":"Fault Locating and Troubleshooting for Abnormal Workloads", "uri":"cce_faq_00134.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"219" + "p_code":"221", + "code":"222" }, { "desc":"Viewing K8s Event InformationCheck Item 1: Checking Whether a Node Is Available in the ClusterCheck Item 2: Checking Whether Node Resources (CPU and Memory) Are Sufficien", @@ -1976,8 +2003,8 @@ "title":"Failed to Schedule an Instance", "uri":"cce_faq_00098.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"220" + "p_code":"221", + "code":"223" }, { "desc":"If the workload details page displays an event indicating that image pulling fails, perform the following operations to locate the fault:Check Item 1: Checking Whether im", @@ -1985,8 +2012,8 @@ "title":"Failed to Pull an Image", "uri":"cce_faq_00015.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"221" + "p_code":"221", + "code":"224" }, { "desc":"On the details page of a workload, if an event is displayed indicating that the container fails to be restarted, perform the following operations to locate the fault:Rect", @@ -1994,8 +2021,8 @@ "title":"Failed to Restart a Container", "uri":"cce_faq_00018.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"222" + "p_code":"221", + "code":"225" }, { "desc":"Pod actions are classified into the following two types:kube-controller-manager periodically checks the status of all nodes. If a node is in the NotReady state for a peri", @@ -2003,8 +2030,8 @@ "title":"What Should I Do If An Evicted Pod Exists?", "uri":"cce_faq_00209.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"223" + "p_code":"221", + "code":"226" }, { "desc":"When a node is faulty, pods on the node are evicted to ensure workload availability. If the pods are not evicted when the node is faulty, perform the following steps:Use ", @@ -2012,8 +2039,8 @@ "title":"Instance Eviction Exception", "uri":"cce_faq_00140.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"224" + "p_code":"221", + "code":"227" }, { "desc":"When a node is in the Unavailable state, CCE migrates container pods on the node and sets the pods running on the node to the Terminating state.After the node is restored", @@ -2021,8 +2048,8 @@ "title":"What Should I Do If Pods in the Terminating State Cannot Be Deleted?", "uri":"cce_faq_00210.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"225" + "p_code":"221", + "code":"228" }, { "desc":"The metadata.enable field in the YAML file of the workload is false. 
As a result, the pod of the workload is deleted and the workload is in the stopped status.The workloa", @@ -2030,8 +2057,8 @@ "title":"What Should I Do If a Workload Is Stopped Caused by Pod Deletion?", "uri":"cce_faq_00012.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"226" + "p_code":"221", + "code":"229" }, { "desc":"The pod remains in the creating state for a long time, and the sandbox-related errors are reported.Select a troubleshooting method for your cluster:Clusters of V1.13 or l", @@ -2039,8 +2066,8 @@ "title":"What Should I Do If Sandbox-Related Errors Are Reported When the Pod Remains in the Creating State?", "uri":"cce_faq_00005.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"227" + "p_code":"221", + "code":"230" }, { "desc":"Workload pods in the cluster fail and are being redeployed constantly.After the following command is run, the command output shows that many pods are in the evicted state", @@ -2048,8 +2075,8 @@ "title":"What Should I Do If a Pod Is in the Evicted State?", "uri":"cce_faq_00199.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"228" + "p_code":"221", + "code":"231" }, { "desc":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", @@ -2057,8 +2084,8 @@ "title":"What Should I Do If the OOM Killer Is Triggered When a Container Uses Memory Resources More Than Limited?", "uri":"cce_faq_00002.html", "doc_type":"usermanual2", - "p_code":"218", - "code":"229" + "p_code":"221", + "code":"232" }, { "desc":"A workload can be accessed from public networks through a load balancer. LoadBalancer provides higher reliability than EIP-based NodePort because an EIP is no longer boun", @@ -2066,26 +2093,8 @@ "title":"What Should I Do If a Service Released in a Workload Cannot Be Accessed from Public Networks?", "uri":"cce_faq_00202.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"230" - }, - { - "desc":"CCE uses high-performance container networking add-ons, which support the tunnel network and VPC network models.After a cluster is created, the network model cannot be ch", - "product_code":"cce", - "title":"Selecting a Network Model When Creating a Cluster on CCE", - "uri":"cce_bestpractice_00162.html", - "doc_type":"usermanual2", - "p_code":"208", - "code":"231" - }, - { - "desc":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This secti", - "product_code":"cce", - "title":"Planning CIDR Blocks for a CCE Cluster", - "uri":"cce_bestpractice_00004.html", - "doc_type":"usermanual2", - "p_code":"208", - "code":"232" + "p_code":"212", + "code":"233" }, { "desc":"A VPC is similar to a private local area network (LAN) managed by a home gateway whose IP address is 192.168.0.0/16. 
A VPC is a private network built on the cloud and pro", @@ -2093,16 +2102,7 @@ "title":"What Is the Relationship Between Clusters, VPCs, and Subnets?", "uri":"cce_faq_00266.html", "doc_type":"usermanual2", - "p_code":"208", - "code":"233" - }, - { - "desc":"For clusters of v1.15.11-r1 and later, the CSI everest add-on has taken over all functions of the fuxi FlexVolume driver (the storage-driver add-on) for container storage", - "product_code":"cce", - "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", - "uri":"cce_bestpractice_0107.html", - "doc_type":"usermanual2", - "p_code":"208", + "p_code":"212", "code":"234" }, { @@ -2111,9 +2111,405 @@ "title":"How Do I Harden the VPC Security Group Rules for CCE Cluster Nodes?", "uri":"cce_faq_00265.html", "doc_type":"usermanual2", - "p_code":"208", + "p_code":"212", "code":"235" }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Best Practice", + "uri":"cce_bestpractice.html", + "doc_type":"usermanual2", + "p_code":"", + "code":"236" + }, + { + "desc":"Security, efficiency, stability, and availability are common requirements on all cloud services. To meet these requirements, the system availability, data reliability, an", + "product_code":"cce", + "title":"Checklist for Deploying Containerized Applications in the Cloud", + "uri":"cce_bestpractice_00006.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"237" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Migration", + "uri":"cce_bestpractice_00237.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"238" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Migrating On-premises Kubernetes Clusters to CCE", + "uri":"cce_bestpractice_0306.html", + "doc_type":"usermanual2", + "p_code":"238", + "code":"239" + }, + { + "desc":"Containers are growing in popularity and Kubernetes simplifies containerized deployment. Many companies choose to build their own Kubernetes clusters. However, the O&M wo", + "product_code":"cce", + "title":"Solution Overview", + "uri":"cce_bestpractice_0307.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"240" + }, + { + "desc":"CCE allows you to customize cluster resources to meet various service requirements. 
Table 1 lists the key performance parameters of a cluster and provides the planned val", + "product_code":"cce", + "title":"Planning Resources for the Target Cluster", + "uri":"cce_bestpractice_0308.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"241" + }, + { + "desc":"If your migration does not involve resources outside a cluster listed in Table 1 or you do not need to use other services to update resources after the migration, skip th", + "product_code":"cce", + "title":"Migrating Resources Outside a Cluster", + "uri":"cce_bestpractice_0309.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"242" + }, + { + "desc":"Velero is an open-source backup and migration tool for Kubernetes clusters. It integrates the persistent volume (PV) data backup capability of the Restic tool and can be ", + "product_code":"cce", + "title":"Installing the Migration Tool", + "uri":"cce_bestpractice_0310.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"243" + }, + { + "desc":"WordPress is used as an example to describe how to migrate an application from an on-premises Kubernetes cluster to a CCE cluster. The WordPress application consists of t", + "product_code":"cce", + "title":"Migrating Resources in a Cluster", + "uri":"cce_bestpractice_0311.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"244" + }, + { + "desc":"The WordPress and MySQL images used in this example can be pulled from SWR. Therefore, the image pull failure (ErrImagePull) will not occur. If the application to be migr", + "product_code":"cce", + "title":"Updating Resources Accordingly", + "uri":"cce_bestpractice_0312.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"245" + }, + { + "desc":"Cluster migration involves full migration of application data, which may cause intra-application adaptation problems. In this example, after the cluster is migrated, the ", + "product_code":"cce", + "title":"Performing Additional Tasks", + "uri":"cce_bestpractice_0313.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"246" + }, + { + "desc":"Both HostPath and Local volumes are local storage volumes. However, the Restic tool integrated in Velero cannot back up the PVs of the HostPath type and supports only the", + "product_code":"cce", + "title":"Troubleshooting", + "uri":"cce_bestpractice_0314.html", + "doc_type":"usermanual2", + "p_code":"239", + "code":"247" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"DevOps", + "uri":"cce_bestpractice_0322.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"248" + }, + { + "desc":"GitLab is an open-source version management system developed with Ruby on Rails for Git project repository management. It supports web-based access to public and private ", + "product_code":"cce", + "title":"Interconnecting GitLab with SWR and CCE for CI/CD", + "uri":"cce_bestpractice_0324.html", + "doc_type":"usermanual2", + "p_code":"248", + "code":"249" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Disaster Recovery", + "uri":"cce_bestpractice_0323.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"250" + }, + { + "desc":"To achieve high availability for your CCE containers, you can do as follows:Deploy three master nodes for the cluster.When nodes are deployed across AZs, set custom sched", + "product_code":"cce", + "title":"Implementing High Availability for Containers in CCE", + "uri":"cce_bestpractice_00220.html", + "doc_type":"usermanual2", + "p_code":"250", + "code":"251" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Security", + "uri":"cce_bestpractice_0315.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"252" + }, + { + "desc":"For security purposes, you are advised to configure a cluster as follows.Kubernetes releases a major version in about four months. CCE follows the same frequency as Kuber", + "product_code":"cce", + "title":"Cluster Security", + "uri":"cce_bestpractice_0317.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"253" + }, + { + "desc":"Do not bind an EIP to a node unless necessary to reduce the attack surface.If an EIP must be used, properly configure the firewall or security group rules to restrict acc", + "product_code":"cce", + "title":"Node Security", + "uri":"cce_bestpractice_0318.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"254" + }, + { + "desc":"The nodeSelector or nodeAffinity is used to limit the range of nodes to which applications can be scheduled, preventing the entire cluster from being threatened due to th", + "product_code":"cce", + "title":"Container Security", + "uri":"cce_bestpractice_0319.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"255" + }, + { + "desc":"Currently, CCE has configured static encryption for secret resources. The secrets created by users will be encrypted and stored in etcd of the CCE cluster. Secrets can be", + "product_code":"cce", + "title":"Secret Security", + "uri":"cce_bestpractice_0320.html", + "doc_type":"usermanual2", + "p_code":"252", + "code":"256" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Auto Scaling", + "uri":"cce_bestpractice_0090.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"257" + }, + { + "desc":"The best way to handle surging traffic is to automatically adjust the number of machines based on the traffic volume or resource usage, which is called scaling.In CCE, th", + "product_code":"cce", + "title":"Using HPA and CA for Auto Scaling of Workloads and Nodes", + "uri":"cce_bestpractice_00282.html", + "doc_type":"usermanual2", + "p_code":"257", + "code":"258" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Cluster", + "uri":"cce_bestpractice_0050.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"259" + }, + { + "desc":"When you have multiple CCE clusters, you may find it difficult to efficiently connect to all of them.This section describes how to configure access to multiple clusters b", + "product_code":"cce", + "title":"Connecting to Multiple Clusters Using kubectl", + "uri":"cce_bestpractice_00254.html", + "doc_type":"usermanual2", + "p_code":"259", + "code":"260" + }, + { + "desc":"You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).When creating a node in a cluster of v1.13.10 or later, if a data disk is not manage", + "product_code":"cce", + "title":"Adding a Second Data Disk to a Node in a CCE Cluster", + "uri":"cce_bestpractice_00190.html", + "doc_type":"usermanual2", + "p_code":"259", + "code":"261" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Networking", + "uri":"cce_bestpractice_0052.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"262" + }, + { + "desc":"Before creating a cluster on CCE, determine the number of VPCs, number of subnets, container CIDR blocks, and Services for access based on service requirements.This topic", + "product_code":"cce", + "title":"Planning CIDR Blocks for a Cluster", + "uri":"cce_bestpractice_00004.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"263" + }, + { + "desc":"CCE uses self-proprietary, high-performance container networking add-ons to support the tunnel network, Cloud Native Network 2.0, and VPC network models.After a cluster i", + "product_code":"cce", + "title":"Selecting a Network Model", + "uri":"cce_bestpractice_00162.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"264" + }, + { + "desc":"Session persistence is one of the most common while complex problems in load balancing.Session persistence is also called sticky sessions. After the sticky session functi", + "product_code":"cce", + "title":"Implementing Sticky Session Through Load Balancing", + "uri":"cce_bestpractice_00231.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"265" + }, + { + "desc":"There may be different types of proxy servers between a client and a container server. How can a container obtain the real source IP address of the client? This section d", + "product_code":"cce", + "title":"Obtaining the Client Source IP Address for a Container", + "uri":"cce_bestpractice_00035.html", + "doc_type":"usermanual2", + "p_code":"262", + "code":"266" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Storage", + "uri":"cce_bestpractice_0053.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"267" + }, + { + "desc":"A data disk is divided depending on the container storage Rootfs:Overlayfs: No independent thin pool is allocated. 
Image data is stored in the dockersys disk.# lsblk\nNAME", + "product_code":"cce", + "title":"Expanding Node Disk Capacity", + "uri":"cce_bestpractice_00198.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"268" + }, + { + "desc":"This section describes how to mount OBS buckets and OBS parallel file systems (preferred) of third-party tenants.The CCE cluster of a SaaS service provider needs to be mo", + "product_code":"cce", + "title":"Mounting an Object Storage Bucket of a Third-Party Tenant", + "uri":"cce_bestpractice_00199.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"269" + }, + { + "desc":"The minimum capacity of an SFS Turbo file system is 500 GB, and the SFS Turbo file system cannot be billed by usage. By default, the root directory of an SFS Turbo file s", + "product_code":"cce", + "title":"Dynamically Creating and Mounting Subdirectories of an SFS Turbo File System", + "uri":"cce_bestpractice_00253_0.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"270" + }, + { + "desc":"In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. Y", + "product_code":"cce", + "title":"How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?", + "uri":"cce_bestpractice_0107.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"271" + }, + { + "desc":"When using storage resources in CCE, the most common method is to specify storageClassName to define the type of storage resources to be created when creating a PVC. The ", + "product_code":"cce", + "title":"Custom Storage Classes", + "uri":"cce_bestpractice_00281_0.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"272" + }, + { + "desc":"EVS disks cannot be attached across AZs. For example, EVS disks in AZ 1 cannot be attached to nodes in AZ 2.If the storage class csi-disk is used for StatefulSets, when a", + "product_code":"cce", + "title":"Realizing Automatic Topology for EVS Disks When Nodes Are Deployed Across AZs (csi-disk-topology)", + "uri":"cce_bestpractice_00284.html", + "doc_type":"usermanual2", + "p_code":"267", + "code":"273" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"cce", + "title":"Container", + "uri":"cce_bestpractice_0051.html", + "doc_type":"usermanual2", + "p_code":"236", + "code":"274" + }, + { + "desc":"If a node has sufficient memory resources, a container on this node can use more memory resources than requested, but no more than limited. If the memory allocated to a c", + "product_code":"cce", + "title":"Properly Allocating Container Computing Resources", + "uri":"cce_bestpractice_00002.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"275" + }, + { + "desc":"To access a Kubernetes cluster from a client, you can use the Kubernetes command line tool kubectl.Create a DaemonSet file.vi daemonSet.yamlAn example YAML file is provid", + "product_code":"cce", + "title":"Modifying Kernel Parameters Using a Privileged Container", + "uri":"cce_bestpractice_00227.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"276" + }, + { + "desc":"Before containers running applications are started, one or some init containers are started first. 
If there are multiple init containers, they will be started in the defi", + "product_code":"cce", + "title":"Initializing a Container", + "uri":"cce_bestpractice_00228.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"277" + }, + { + "desc":"If DNS or other related settings are inappropriate, you can use hostAliases to overwrite the resolution of the host name at the pod level when adding entries to the /etc/", + "product_code":"cce", + "title":"Using hostAliases to Configure /etc/hosts in a Pod", + "uri":"cce_bestpractice_00226.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"278" + }, + { + "desc":"Linux allows you to create a core dump file if an application crashes, which contains the data the application had in memory at the time of the crash. You can analyze the", + "product_code":"cce", + "title":"Configuring Core Dumps", + "uri":"cce_bestpractice_0325.html", + "doc_type":"usermanual2", + "p_code":"274", + "code":"279" + }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "product_code":"cce", @@ -2121,7 +2517,7 @@ "uri":"cce_01_9999.html", "doc_type":"usermanual2", "p_code":"", - "code":"236" + "code":"280" }, { "desc":"CCE 2.0 inherits and modifies the features of CCE 1.0, and release new features.Modified features:Clusters in CCE 1.0 are equivalent to Hybrid clusters in CCE 2.0.CCE 2.0", @@ -2129,8 +2525,8 @@ "title":"Differences Between CCE 1.0 and CCE 2.0", "uri":"cce_01_9998.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"237" + "p_code":"280", + "code":"281" }, { "desc":"Migrate the images stored in the image repository of CCE 1.0 to CCE 2.0.A VM is available. The VM is bound to a public IP address and can access the Internet. Docker (ear", @@ -2138,8 +2534,8 @@ "title":"Migrating Images", "uri":"cce_01_9997.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"238" + "p_code":"280", + "code":"282" }, { "desc":"Create Hybrid clusters on the CCE 2.0 console. These new Hybrid clusters should have the same specifications with those created on CCE 1.0.To create clusters using APIs, ", @@ -2147,8 +2543,8 @@ "title":"Migrating Clusters", "uri":"cce_01_9996.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"239" + "p_code":"280", + "code":"283" }, { "desc":"This section describes how to create a Deployment with the same specifications as that in CCE 1.0 on the CCE 2.0 console.It is advised to delete the applications on CCE 1", @@ -2156,8 +2552,8 @@ "title":"Migrating Applications", "uri":"cce_01_9995.html", "doc_type":"usermanual2", - "p_code":"236", - "code":"240" + "p_code":"280", + "code":"284" }, { "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", @@ -2166,6 +2562,6 @@ "uri":"cce_01_0300.html", "doc_type":"usermanual2", "p_code":"", - "code":"241" + "code":"285" } ] \ No newline at end of file diff --git a/docs/cce/umn/cce_01_0002.html b/docs/cce/umn/cce_01_0002.html deleted file mode 100644 index 24eeb3db..00000000 --- a/docs/cce/umn/cce_01_0002.html +++ /dev/null @@ -1,100 +0,0 @@ - - -

Cluster Overview

-

Kubernetes is a containerized application software system that can be easily deployed and managed. It facilitates container scheduling and orchestration.

-

For application developers, Kubernetes can be regarded as a cluster operating system. Kubernetes provides functions such as service discovery, scaling, load balancing, self-healing, and even leader election, freeing developers from infrastructure-related configurations.

-

Using Kubernetes is like running a large number of servers as a single machine on which your applications run. Regardless of the number of servers in a Kubernetes cluster, the method for deploying applications is always the same.

-

Kubernetes Cluster Architecture

A Kubernetes cluster consists of master nodes (masters) and worker nodes (nodes). Applications are deployed on worker nodes, and you can specify the nodes for deployment.

-

The following figure shows the architecture of a Kubernetes cluster.

-
Figure 1 Kubernetes cluster architecture
-

Master node

-

A master node is the machine where the control plane components run, including API server, Scheduler, Controller manager, and etcd.

-
  • API server: functions as a transit station for components to communicate with each other, receives external requests, and writes information to etcd.
  • Controller manager: performs cluster-level functions, such as component replication, node tracing, and node fault fixing.
  • Scheduler: schedules containers to nodes based on various conditions (such as available resources and node affinity).
  • etcd: serves as a distributed data storage component that stores cluster configuration information.
-

In the production environment, multiple master nodes are deployed to ensure cluster high availability. For example, you can deploy three master nodes for your CCE cluster.

-

Worker node

-

A worker node is a compute node in a cluster, that is, a node running containerized applications. A worker node has the following components:

-
  • kubelet: communicates with the container runtime, interacts with the API server, and manages containers on the node.
  • kube-proxy: serves as an access proxy between application components, forwarding Service traffic to the backend pods on the node.
  • Container runtime: the software that runs containers, such as Docker. It pulls images and starts and stops containers on the node.
-
-
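If kubectl is already configured for the cluster (see Connecting to a Cluster Using kubectl), you can inspect the worker nodes and the control plane endpoint from the command line. A minimal sketch; the exact output columns depend on the cluster version:

# List worker nodes, their status, and the container runtime version
kubectl get nodes -o wide
# Show the control plane endpoint of the cluster
kubectl cluster-info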

Number of Master Nodes and Cluster Scale

When you create a cluster on CCE, the number of master nodes can be set to 1 or 3. Three master nodes can be deployed to create a cluster in HA mode.

-

The master node specifications determine the number of nodes that can be managed by a cluster. When creating a cluster, you can select the cluster management scale, for example, 50 or 200 nodes.

-
-

Cluster Network

From the perspective of the network, all nodes in a cluster are located in a VPC, and containers are running on the nodes. You need to configure node-node, node-container, and container-container communication.

-

A cluster network can be divided into three network types:

-
  • Node network: IP addresses are assigned to nodes in a cluster.
  • Container network: IP addresses are assigned to containers in a cluster for communication between them. Currently, multiple container network models are supported, and each model has its own working mechanism.
  • Service network: A service is a Kubernetes object used to access containers. Each Service has a fixed IP address.
-

When you create a cluster, select a proper CIDR block for each network to ensure that the CIDR blocks do not conflict with each other and each CIDR block has sufficient available IP addresses. After a cluster is created, the container network model cannot be modified. Plan the container network model properly before creating a cluster.

-

You are advised to learn about the cluster network and container network models before creating a cluster. For details, see Overview.

-
-

Cluster Security Groups

When a cluster is created, the following security groups are created to ensure cluster security:

-
  • Cluster name-cce-control-Random number: security group of the master node.
    Observe the following principles when configuring security groups:
    • The source IP addresses defined in the security group rules must be permitted.
    • 4789 (required only for clusters using the container tunnel network model): used for network access between containers.
    • 5443 and 5444: ports to which kube-apiserver of the master node listens. These ports must permit requests from VPC and container CIDR blocks.
    • 9443: used by the network add-on of a worker node to access the master node.
    • 8445: used by the storage add-on of a worker node to access the master node.
    -
    -
  • Cluster name-cce-node-Random number: security group of a worker node.
    Observe the following principles when configuring security groups:
    • The source IP addresses defined in the security group rules must be permitted.
    • 4789 (required only for clusters using the container tunnel network model): used for network access between containers.
    • 10250: used by the master node to access the kubelet component of a worker node (for example, to run the kubectl exec {pod} command).
    • 30000-32767: external access ports (NodePort) of a node. These ports need to be specified when you create a Service and must permit requests from VPC, container, and ELB CIDR blocks.
    -
    -
-

After a cluster is created, you can view the created security group on the VPC console.

-

Do not delete the security groups and related rules automatically configured during cluster creation. Otherwise, the cluster will exhibit unexpected behavior.

-
-
-

Cluster Lifecycle

-
Table 1 Cluster status

  • Creating: A cluster is being created and is requesting cloud resources.
  • Normal: A cluster is running properly.
  • Scaling-out: A node is being added to a cluster.
  • Scaling-in: A node is being deleted from a cluster.
  • Hibernating: A cluster is hibernating.
  • Awaking: A cluster is being woken up.
  • Upgrading: A cluster is being upgraded.
  • Unavailable: A cluster is unavailable.
  • Deleting: A cluster is being deleted.
-
-
Figure 2 Cluster status transition
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0003.html b/docs/cce/umn/cce_01_0003.html deleted file mode 100644 index 3c5ffd78..00000000 --- a/docs/cce/umn/cce_01_0003.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

Resetting a Node

-

Scenario

You can reset a node to modify the node configuration, such as the node OS and login mode.

-

Resetting a node will reinstall the node OS and the Kubernetes software on the node. If a node is unavailable because you modify the node configuration, you can reset the node to rectify the fault.

-
-

Notes and Constraints

  • The cluster version must be v1.13 or later.
-
-

Notes

  • Only worker nodes can be reset. If the node is still unavailable after the resetting, delete the node and create a new one.
  • Resetting a node will reinstall the node OS and interrupt workload services running on the node. Therefore, perform this operation during off-peak hours.
  • Data in the system disk and Docker data disks will be cleared. Back up important data before resetting the node.
  • When an extra data disk is mounted to a node, data in this disk will be cleared if the disk has not been unmounted before the node reset. To prevent data loss, back up data in advance and mount the data disk again after the node reset is complete.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • Ensure that there is sufficient EVS disk quota before resetting the node.
  • While the node is being reset, the backend sets the node to the unschedulable state.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes. In the same row as the node you will reset, choose More > Reset.
  2. In the dialog box displayed, enter RESET and reconfigure the key pair for login.

    Figure 1 Resetting the selected node
    -

  3. Click Yes and wait until the node is reset.

    After the node is reset, pods on it are automatically migrated to other available nodes.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0004.html b/docs/cce/umn/cce_01_0004.html deleted file mode 100644 index 92e2ae8c..00000000 --- a/docs/cce/umn/cce_01_0004.html +++ /dev/null @@ -1,95 +0,0 @@ - - -

Managing Node Labels

-

Node Label Usage Scenario

Node labels are mainly used in the following scenarios:

-
  • Node management: Node labels are used to classify nodes.
  • Affinity and anti-affinity between a workload and node:
    • Some workloads are CPU-intensive, others are memory- or I/O-intensive, and running them on the same nodes may affect other workloads. In this case, you are advised to add different labels to nodes. When deploying a workload, you can select nodes with specified labels for affinity deployment to ensure the normal operation of the system. Otherwise, node anti-affinity deployment can be used.
    • A system can be divided into multiple modules. Each module consists of multiple microservices. To ensure the efficiency of subsequent O&M, you can add a module label to each node so that each module can be deployed on the corresponding node, does not interfere with other modules, and can be easily developed and maintained on its node.
    -
-
-

Inherent Label of a Node

After a node is created, some fixed labels exist and cannot be deleted. For details about these labels, see Table 1.

- -
Table 1 Inherent label of a node

  • topology.kubernetes.io/region (old: failure-domain.beta.kubernetes.io/region): Region where the node is located
  • topology.kubernetes.io/zone (old: failure-domain.beta.kubernetes.io/zone): AZ where the node is located
  • node.kubernetes.io/baremetal (old: failure-domain.beta.kubernetes.io/is-baremetal): Whether the node is a bare metal node. false indicates that the node is not a bare metal node.
  • node.kubernetes.io/instance-type: Node specifications
  • kubernetes.io/arch: Node processor architecture
  • kubernetes.io/hostname: Node name
  • kubernetes.io/os: OS type
  • node.kubernetes.io/subnetid: ID of the subnet where the node is located
  • os.architecture: Node processor architecture. For example, amd64 indicates an AMD64 processor.
  • os.name: Node OS name
  • os.version: Node OS kernel version
-
-
-

Adding a Node Label

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the same row as the node for which you will add labels, choose Operation > More > Manage Labels.
  3. In the dialog box displayed, click Add Label below the label list, enter the key and value of the label to be added, and click OK.

    As shown in the figure, the key is deploy_qa and the value is true, indicating that the node is used to deploy the QA (test) environment.

    -

  4. After the label is added, click Manage Labels. Then, you will see the label that you have added.
-
-

Deleting a Node Label

Only labels added by users can be deleted. Labels that are fixed on the node cannot be deleted.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the same row as the node for which you will delete labels, choose Operation > More > Manage Labels.
  3. Click Delete, and then click OK to delete the label.

    Label updated successfully is displayed.

    -

-
-

Searching for a Node by Label

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the upper right corner of the node list, click Search by Label.
  3. Enter a Kubernetes label to find the target node.
-
-
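The console operations in this section map to standard Kubernetes node labels and can also be performed with kubectl once it is connected to the cluster. A minimal sketch; the node name 192.168.0.100 and the deploy_qa label are illustrative:

# Add a label to a node
kubectl label node 192.168.0.100 deploy_qa=true
# Delete a user-added label (note the trailing hyphen)
kubectl label node 192.168.0.100 deploy_qa-
# Search for nodes by label
kubectl get nodes -l deploy_qa=true
# View all labels on the nodes
kubectl get nodes --show-labels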
-
- -
- diff --git a/docs/cce/umn/cce_01_0007.html b/docs/cce/umn/cce_01_0007.html deleted file mode 100644 index 22d1f7ee..00000000 --- a/docs/cce/umn/cce_01_0007.html +++ /dev/null @@ -1,159 +0,0 @@ - - -

Managing Workloads and Jobs

-

Scenario

After a workload is created, you can scale, upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.
Table 1 Workload/Job management

  • Logging: You can view logs of Deployments, StatefulSets, DaemonSets, and jobs.
  • Upgrade: You can replace images or image tags to quickly upgrade Deployments, StatefulSets, and DaemonSets without interrupting services.
  • Editing a YAML file: You can modify and download the YAML files of Deployments, StatefulSets, DaemonSets, and pods on the CCE console. YAML files of jobs and cron jobs can only be viewed, copied, and downloaded.
  • Scaling: A workload can be automatically resized according to scaling policies, freeing you from manually adjusting resources for fluctuating service traffic. This saves you both resources and labor.
  • Monitoring: You can view the CPU and memory usage of Deployments, DaemonSets, and pods on the CCE console to determine the resource specifications you may need.
  • Rollback: Only Deployments can be rolled back.
  • Pausing: Only Deployments can be paused.
  • Resuming: Only Deployments can be resumed.
  • Labeling: Labels are key-value pairs and can be attached to workloads for affinity and anti-affinity scheduling.
  • Deletion: You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered.
  • Access settings: You can determine how your workloads can be accessed. For details, see Overview.
  • Scheduling policies: CCE supports custom and simple scheduling policies. Custom scheduling policies allow you to customize node affinity, workload affinity, and workload anti-affinity. Simple scheduling policies allow easy and convenient scheduling.
  • Event: CCE provides event names, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time by workload or pod.
-
-
-
-

Viewing Logs

You can view logs of Deployments, StatefulSets, DaemonSets, and jobs. This section uses a Deployment as an example to describe how to view logs.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload you will view, click Logs.

    In the displayed Logs window, view the logs generated in the last 5 minutes, 30 minutes, or 1 hour.

    -

-
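If kubectl is connected to the cluster, logs can also be viewed from the command line. A minimal sketch; the Deployment name nginx and the pod name placeholder are illustrative:

# View logs of a specific pod generated in the last 5 minutes
kubectl logs --since=5m <pod-name>
# Stream logs of the Deployment (kubectl picks one of its pods)
kubectl logs -f deployment/nginx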
-

Upgrading a Workload

You can replace images or image tags to quickly upgrade Deployments, StatefulSets, and DaemonSets without interrupting services.

-

This section uses a Deployment as an example to describe how to upgrade a workload.

-

Before replacing an image or image version, upload the new image to the SWR service.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments, and click Upgrade for the Deployment to be upgraded.

    • Workloads cannot be upgraded in batches.
    • Before performing an in-place StatefulSet upgrade, you must manually delete old pods. Otherwise, the upgrade status is always displayed as Upgrading.
    -
    -

  2. Upgrade the Deployment.

    • Image Name: To replace the Deployment image, click Replace Image and select a new image.
    • Image Version: To replace the Deployment image version, select a new version from the Image Version drop-down list.
    • Container Name: To change the container name, click the edit icon next to Container Name and enter a new name.
    • Privileged Container: After this function is enabled, the container can access all devices on the host.
    • Container Resources: You can set the CPU, memory and GPU quotas.
    • Advanced Settings:
      • Lifecycle: Commands for starting and running containers can be set. -
      • Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
        • Liveness Probe: used to restart the unhealthy container.
        • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
        -
      • Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
        On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
        • Added manually: Set Variable Name and Variable Value/Reference.
        • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
        • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

          To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

          -
          -
        -
        -
      • Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local disks and cloud storage volumes are supported. For details, see Storage (CSI).

        You can add data storage volumes only when creating a StatefulSet.

        -
        -
      • Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

        Enter the user ID to set container permissions and prevent systems and other containers from being affected.

        -
      • Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
      -
    -

  3. Click Submit.
-
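The same upgrade can be triggered with kubectl by updating the image of the Deployment. A minimal sketch; the Deployment and container are both assumed to be named nginx, and the target tag is chosen for illustration:

# Replace the image of the nginx container in the nginx Deployment
kubectl set image deployment/nginx nginx=nginx:1.25
# Watch the rolling update until it completes
kubectl rollout status deployment/nginx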
-

Editing a YAML file

You can modify and download the YAML files of Deployments, StatefulSets, DaemonSets, and pods on the CCE console. YAML files of jobs and cron jobs can only be viewed, copied, and downloaded. This section uses a Deployment as an example to describe how to edit the YAML file.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload you will edit, choose Operation > More > Edit YAML. In the Edit YAML window, edit the YAML file of the current workload.
  3. Click Edit and then OK to save the changes.
  4. (Optional) In the Edit YAML window, click Download to download the YAML file.
-
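With kubectl, the same YAML can be edited in place or exported and re-applied. A minimal sketch; the Deployment name nginx and the file name are illustrative:

# Edit the live object in the default editor
kubectl edit deployment nginx
# Download the YAML, modify it locally, and apply the changes
kubectl get deployment nginx -o yaml > nginx-deployment.yaml
kubectl apply -f nginx-deployment.yaml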
-

Scaling a Workload

A workload can be automatically resized according to custom scaling policies, freeing you from manually adjusting resources for fluctuating service traffic. This saves you both resources and labor. This section uses a Deployment as an example to describe how to scale a workload.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload for which you will add a scaling policy, choose Operation > More > Scaling.
  3. On the Scaling tab page, add or edit scaling policies. Scaling policies are classified as auto and manual scaling policies.

    For details, see Scaling a Workload.

    -

-
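For reference, manual and automatic scaling can also be configured with kubectl. A minimal sketch; the nginx Deployment and the thresholds are illustrative, and the autoscaling example assumes that cluster metrics are available:

# Manually scale the Deployment to 3 pods
kubectl scale deployment nginx --replicas=3
# Create an HPA that keeps average CPU usage around 80%
kubectl autoscale deployment nginx --min=2 --max=5 --cpu-percent=80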
-

Monitoring a Workload

You can view the CPU and memory usage of Deployments, DaemonSets, and pods on the CCE console to determine the resource specifications you may need. This section uses a Deployment as an example to describe how to monitor a workload.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. Click the name of the Deployment to be monitored. On the displayed Deployment details page, click the Monitoring tab to view CPU usage and memory usage of the Deployment.
  3. Click the Pods tab. Click the expand icon next to a pod to be monitored and click Monitoring.
  4. Check CPU usage and memory usage of the pod.

    • CPU usage

      The horizontal axis indicates time while the vertical axis indicates the CPU usage. The green line indicates the CPU usage while the red line indicates the CPU usage limit.

      -

      It takes some time to calculate CPU usage. Therefore, when CPU and memory usage are displayed for the first time, CPU usage is displayed about one minute later than memory usage.

      -

      CPU and memory usage are displayed only for pods in the running state.

      -
      -
    • Memory usage

      The horizontal axis indicates time while the vertical axis indicates the memory usage. The green line indicates the memory usage while the red line indicates the memory usage limit.

      -

      Memory usage is displayed only for a running pod.

      -
      -
    -

-
-
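If the metrics API is available in the cluster (for example, through the metrics-server add-on), resource usage can also be checked with kubectl. A minimal sketch:

# Show CPU and memory usage of pods in the current namespace
kubectl top pod
# Show CPU and memory usage of nodes
kubectl top node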

Rolling Back a Workload (Available Only for Deployments)

CCE records the release history of all Deployments. You can roll back a Deployment to a specified version.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the Deployment you will roll back, choose Operation > More > Roll Back.
  3. In the Roll Back to This Version drop-down list, select the version to which you will roll back the Deployment. Then, click OK.
-
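kubectl offers the equivalent rollback operations. A minimal sketch; the Deployment name and revision number are chosen for illustration:

# List the recorded revisions of the Deployment
kubectl rollout history deployment/nginx
# Roll back to a specific revision
kubectl rollout undo deployment/nginx --to-revision=2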
-

Pausing a Workload (Available Only for Deployments)

You can pause Deployments. After a Deployment is paused, the upgrade command can be successfully issued but will not be applied to the pods.

-

If you are performing a rolling upgrade, the rolling upgrade stops after the pause command is issued. In this case, the new and old pods coexist.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the Deployment you will pause, choose Operation > More > Pause.
  3. In the displayed Pause Workload dialog box, click OK.
  4. Click OK.

    Deployments in the paused state cannot be rolled back.

    -
    -

-
-

Resuming a Workload (Available Only for Deployments)

You can resume paused Deployments. After a Deployment is resumed, it can be upgraded or rolled back. Its pods will inherit the latest updates of the Deployment. If they are inconsistent, the pods are upgraded automatically according to the latest information of the Deployment.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the Deployment you will resume, choose Operation > More > Resume.
  3. In the displayed Resume Workload dialog box, click OK.
-
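Pausing and resuming a Deployment map to the kubectl rollout commands. A minimal sketch with an illustrative Deployment name:

# Pause the rollout: spec changes are recorded but not applied to pods
kubectl rollout pause deployment/nginx
# Resume the rollout: the accumulated changes are applied
kubectl rollout resume deployment/nginx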
-

Managing Labels

Labels are key-value pairs and can be attached to workloads. Workload labels are often used for affinity and anti-affinity scheduling. You can add labels to multiple workloads or a specified workload.

-

You can manage the labels of Deployments, StatefulSets, and DaemonSets based on service requirements. This section uses Deployments as an example to describe how to manage labels.

-

In the following figure, three labels (release, env, and role) are defined for workload APP 1, APP 2, and APP 3. The values of these labels vary with workload.

-
  • Label of APP 1: [release:alpha;env:development;role:frontend]
  • Label of APP 2: [release:beta;env:testing;role:frontend]
  • Label of APP 3: [release:alpha;env:production;role:backend]
-

If you set key to role and value to frontend when using workload scheduling or another function, APP 1 and APP 2 will be selected.

-
Figure 1 Label example
-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. Click the name of the workload whose labels will be managed.
  3. On the workload details page, click Manage Label. In the displayed dialog box, click Add Label. Enter the label key and value, and click OK.

    A key-value pair must contain 1 to 63 characters starting and ending with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.

    -
    -

-
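Workload labels can also be managed and queried with kubectl. A minimal sketch based on the role=frontend example above; the Deployment name app-1 is illustrative:

# Add or update a label on a Deployment
kubectl label deployment app-1 role=frontend --overwrite
# List Deployments that carry the label
kubectl get deployments -l role=frontend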
-

Deleting a Workload/Job

You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered. Exercise caution when you perform this operation. This section uses a Deployment as an example to describe how to delete a workload.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments.
  2. In the same row as the workload you will delete, choose Operation > More > Delete.

    Read the system prompts carefully. A workload cannot be recovered after it is deleted. Exercise caution when performing this operation.

    -

  3. Click Yes.

    • If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page.
    • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
    -
    -

-
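For reference, the kubectl equivalent of deleting a workload or job; the names are illustrative, and the deletion is irreversible:

# Delete a Deployment and its pods
kubectl delete deployment nginx
# Delete a job
kubectl delete job test-job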
-

Events

On the workload details page, click the Events or Pods tab to view the events, event types, number of occurrences, Kubernetes events, first occurrence time, and last occurrence time.

Event data will be retained for one hour and then automatically deleted.

-
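Events can also be queried with kubectl. A minimal sketch; the pod name is a placeholder:

# List recent events in the current namespace, oldest first
kubectl get events --sort-by=.metadata.creationTimestamp
# Show the events recorded for a specific pod
kubectl describe pod <pod-name>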
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0008.html b/docs/cce/umn/cce_01_0008.html deleted file mode 100644 index 7f7433c8..00000000 --- a/docs/cce/umn/cce_01_0008.html +++ /dev/null @@ -1,177 +0,0 @@ - - -

Setting Container Startup Commands

-

Scenario

When creating a workload or job, you can use an image to specify the processes running in the container.

-

By default, the image runs its default command. To run a specific command or override the default settings defined in the image, configure the following:

-
  • Working directory: working directory of the command.

    If the working directory is not specified in the image or on the console, the default value is /.

    -
  • Command: command that controls the running of an image.
  • Args: parameters transferred to the running command.
-

After a container is started, do not modify configurations in the container. If configurations in the container are modified (for example, passwords, certificates, and environment variables of a containerized application are added to the container), the configurations will be lost after the container restarts and container services will become abnormal. An example scenario of container restart is pod rescheduling due to node anomalies.

-

Configurations must be imported to a container as arguments. Otherwise, configurations will be lost after the container restarts.

-
-
-

Commands and Arguments Used to Run a Container

A Docker image has metadata that stores image information. If lifecycle commands and arguments are not set, CCE runs the default commands and arguments, that is, Docker instructions ENTRYPOINT and CMD, provided during image creation.

-

If the commands and arguments used to run a container are set during application creation, the default commands ENTRYPOINT and CMD are overwritten during image build. The rules are as follows:

- -
Table 1 Commands and parameters used to run a container

Image Entrypoint    Image CMD       Command to Run a Container    Args to Run a Container    Command Executed
[touch]             [/root/test]    Not set                       Not set                    [touch /root/test]
[touch]             [/root/test]    [mkdir]                       Not set                    [mkdir]
[touch]             [/root/test]    Not set                       [/opt/test]                [touch /opt/test]
[touch]             [/root/test]    [mkdir]                       [/opt/test]                [mkdir /opt/test]
-
-
-

Setting the Startup Command

  1. Log in to the CCE console. Expand Lifecycle when adding a container during workload or job creation.
  2. Enter the running command and parameters, as shown in Table 2.

    • The current startup command is provided as a string array and corresponds to the Entrypoint startup command of Docker. The format is as follows: ["executable", "param1", "param2",..]. For details about how to start Kubernetes containers, click here.
    • The lifecycle of a container is the same as that of the startup command. That is, the lifecycle of the container ends after the command is executed.
    -
    Table 2 Container startup command

    • Command: Enter an executable command, for example, /run/server. If there are multiple commands, separate them with spaces. If a command contains a space, enclose it in quotation marks ("").
      NOTE: If there are multiple commands, you are advised to run /bin/sh or another shell command, with the remaining commands passed as its arguments.
    • Args: Enter the argument that controls the container running command, for example, --port=8080. If there are multiple arguments, separate them with line breaks.
    -
    -

    The following uses Nginx as an example to describe three typical application scenarios of the container startup command:

    -
    Example code:
    nginx -c nginx.conf
    -
    -
    • Scenario 1: Both the command and arguments are set.
      Figure 1 Setting the startup command and parameters
      -

      Example YAML file:

      -
                command:
      -            - nginx
      -          args:
      -            - '-c'
      -            - nginx.conf
      -
    • Scenario 2: Only the command is set.
      Figure 2 Setting the startup command
      -

      A command must be enclosed in double quotes. If no double quotes are added, the command is split into multiple commands based on space characters.

      -
      -

      Example YAML file:

      -
                command:
      -            - nginx -c nginx.conf
      -          args:
      -
    • Scenario 3: Only arguments are set.
      Figure 3 Setting startup arguments
      -

      If the container startup command is not added to the system path, run the /bin/sh command to execute the container startup command. The container startup command must be enclosed in double quotes.

      -
      -

      Example YAML file:

      -
                command:
      -            - /bin/sh
      -          args:
      -            - '-c'
      -            - '"nginx -c nginx.conf"'
      -
    -

  3. Check or modify the YAML file.

    • When creating a workload, in the Configure Advanced Settings step, click YAML on the right.
      Figure 4 Checking or editing a YAML file
      -
    • After the workload is created, go to the workload list. In the same row as the workload, choose More > Edit YAML.
    • After the workload is created, go to the workload details page. On the displayed page, click Edit YAML in the upper right corner.
    -

-
-

Example YAML for Setting Container Startup Commands

This section uses Nginx as an example to describe how to set container startup commands using kubectl.

-

Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl. See Using kubectl to create a Deployment or Using kubectl to create a StatefulSet. For more details on how to set container startup commands, see official Kubernetes documentation.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        command:
-        - sleep
-        - '3600'                        #Startup command
-        imagePullPolicy: Always
-        lifecycle:
-          postStart:
-            exec:
-              command:
-              - /bin/bash
-              - install.sh                  #Post-start command
-          preStop:
-            exec:
-              command:
-              - /bin/bash
-              - uninstall.sh                 #Pre-stop command
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0009.html b/docs/cce/umn/cce_01_0009.html deleted file mode 100644 index 7905014f..00000000 --- a/docs/cce/umn/cce_01_0009.html +++ /dev/null @@ -1,37 +0,0 @@ - - -

Using a Third-Party Image

-

Scenario

CCE allows you to create workloads using images pulled from third-party image repositories.

-

Generally, a third-party image repository can be accessed only after authentication (using your account and password). CCE uses the secret-based authentication to pull images. Therefore, you need to create a secret for an image repository before pulling images from the repository.

-
-

Prerequisites

The node where the workload is running is accessible from public networks. You can access public networks through LoadBalancer.

-
-

Using the Console

  1. Create a secret for accessing a third-party image repository.

    In the navigation pane, choose Configuration Center > Secret, and click Create Secret. Type must be set to kubernetes.io/dockerconfigjson. For details, see Creating a Secret.

    -

    Enter the user name and password used to access the third-party image repository.

    -

  2. Create a workload. For details, see Creating a Deployment or Creating a StatefulSet. If the workload will be created from a third-party image, set the image parameters as follows:

    1. Set Secret Authentication to Yes.
    2. Select the secret created in step 1.
    3. Enter the image address.
    -

  3. Click Create.
-
-

Using kubectl

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create a secret of the dockercfg type using kubectl.

    kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
    -

    In the preceding commands, myregistrykey indicates the secret name, and other parameters are described as follows:

    -
    • DOCKER_REGISTRY_SERVER: address of a third-party image repository, for example, www.3rdregistry.com or 10.10.10.10:443
    • DOCKER_USER: account used for logging in to a third-party image repository
    • DOCKER_PASSWORD: password used for logging in to a third-party image repository
    • DOCKER_EMAIL: email of a third-party image repository
    -

  3. Use a third-party image to create a workload.

    A dockercfg secret is used for authentication when you obtain a private image. The following is an example of using the myregistrykey for authentication.
    apiVersion: v1
    -kind: Pod
    -metadata:
    -  name: foo
    -  namespace: default
    -spec:
    -  containers:
    -    - name: foo
    -      image: www.3rdregistry.com/janedoe/awesomeapp:v1
    -  imagePullSecrets:
    -    - name: myregistrykey              #Use the created secret.
    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0010.html b/docs/cce/umn/cce_01_0010.html deleted file mode 100644 index 94e68226..00000000 --- a/docs/cce/umn/cce_01_0010.html +++ /dev/null @@ -1,38 +0,0 @@ - - -

Overview

-

You can learn about a cluster network from the following two aspects:

-
  • What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are running on the nodes. Nodes and containers need to communicate with each other. For details about the cluster network types and their functions, see Cluster Network Structure.
  • How is pod access implemented in a cluster? Accessing a pod or container is a process of accessing services of a user. Kubernetes provides Service and Ingress to address pod access issues. This section summarizes common network access scenarios. You can select the proper scenario based on site requirements. For details about the network access scenarios, see Access Scenarios.
-

Cluster Network Structure

All nodes in the cluster are located in a VPC and use the VPC network. The container network is managed by dedicated network add-ons.

-

-
  • Node Network

    A node network assigns IP addresses to hosts (nodes in the figure above) in a cluster. You need to select a VPC subnet as the node network of the CCE cluster. The number of available IP addresses in a subnet determines the maximum number of nodes (including master nodes and worker nodes) that can be created in a cluster. This quantity is also affected by the container network. For details, see the container network model.

    -
  • Container Network

    A container network assigns IP addresses to containers in a cluster. CCE inherits the IP-Per-Pod-Per-Network network model of Kubernetes. That is, each pod has an independent IP address on a network plane and all containers in a pod share the same network namespace. All pods in a cluster exist in a directly connected flat network. They can access each other through their IP addresses without using NAT. Kubernetes only provides a network mechanism for pods, but does not directly configure pod networks. The configuration of pod networks is implemented by specific container network add-ons. The container network add-ons are responsible for configuring networks for pods and managing container IP addresses.

    -

    Currently, CCE supports the following container network models:

    -
    • Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
    • VPC network: The VPC network uses VPC routing to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. The maximum number of nodes allowed in a cluster depends on the route quota in a VPC network. Each node is assigned a CIDR block of a fixed size. This networking model is free from tunnel encapsulation overhead and outperforms the container tunnel network model. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in the cluster can be directly accessed from outside the cluster.
    • Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.
    -

    The performance, networking scale, and application scenarios of a container network vary according to the container network model. For details about the functions and features of different container network models, see Overview.

    -
  • Service Network

    Service is also a Kubernetes object. Each Service has a fixed IP address. When creating a cluster on CCE, you can specify the Service CIDR block. The Service CIDR block cannot overlap with the node or container CIDR block. The Service CIDR block can be used only within a cluster.

    -
-
-

Service

A Service is used for pod access. With a fixed IP address, a Service forwards access traffic to pods and performs load balancing for these pods.

-
Figure 1 Accessing pods through a Service
-

You can configure the following types of Services:

-
  • ClusterIP: used to make the Service only reachable from within a cluster.
  • NodePort: used for access from outside a cluster. A NodePort Service is accessed through the port on the node.
  • LoadBalancer: used for access from outside a cluster. It is an extension of NodePort, to which a load balancer routes, and external systems only need to access the load balancer.
  • ENI LoadBalancer: used for access from outside the cluster. An ENI LoadBalancer Service directs traffic from a load balancer directly to backend pods, reducing latency and avoiding performance loss for containerized applications.
-

For details about the Service, see Overview.

-
-

Ingress

Services forward requests using layer-4 TCP and UDP protocols. Ingresses forward requests using layer-7 HTTP and HTTPS protocols. Domain names and paths can be used to achieve finer granularities.

-
Figure 2 Ingress and Service
-

For details about the ingress, see Overview.

-
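For reference, a minimal ingress sketch in plain Kubernetes YAML, assuming a cluster version that supports the networking.k8s.io/v1 API. The host, Service name, and port are illustrative, and CCE-specific annotations (for example, the load balancer settings required by CCE ingresses) are omitted here:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-ingress
spec:
  rules:
  - host: www.example.com          # Domain name used to match requests
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx            # Service that receives the matched traffic
            port:
              number: 8080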
-

Access Scenarios

Workload access scenarios can be categorized as follows:

-
  • Intra-cluster access: A ClusterIP Service is used for workloads in the same cluster to access each other.
  • Access from outside a cluster: A Service (NodePort or LoadBalancer type) or an ingress is recommended for a workload outside a cluster to access workloads in the cluster.
    • Access through the internet requires an EIP to be bound to the node or load balancer.
    • Access through an intranet uses only the intranet IP address of the node or load balancer. If workloads are located in different VPCs, a peering connection is required to enable communication between different VPCs.
    -
  • External access initiated by a workload:
    • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
    • Accessing a public network: You need to assign an EIP to the node where the workload runs, or configure SNAT rules through the NAT gateway.
    -
-
Figure 3 Network access diagram
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0011.html b/docs/cce/umn/cce_01_0011.html deleted file mode 100644 index e5d9c508..00000000 --- a/docs/cce/umn/cce_01_0011.html +++ /dev/null @@ -1,124 +0,0 @@ - - -

Intra-Cluster Access (ClusterIP)

-

Scenario

ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.

-

The cluster-internal domain name format is <Service name>.<Namespace of the workload>.svc.cluster.local:<Port>, for example, nginx.default.svc.cluster.local:80.

-

Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

-
Figure 1 Intra-cluster access (ClusterIP)
-
-

Adding a Service When Creating a Workload

You can set the access type (Service) when creating a workload on the CCE console.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select ClusterIP.
    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload listens. The Nginx application listens on port 80.
      • Access Port: a port mapped to the container port at the cluster-internal IP address. The workload can be accessed at <cluster-internal IP address>:<access port>. The port number range is 1–65535.
      -
    -

  2. After the configuration, click OK and then Next: Configure Advanced Settings. On the page displayed, click Create.
  3. Click View Deployment Details or View StatefulSet Details. On the Services tab page, obtain the access address, for example, 10.247.74.100:8080.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments. On the workload list, click the name of the workload for which you will create a Service.
  2. On the Services tab page, click Add Service.
  3. On the Create Service page, select ClusterIP from the Access Type drop-down list.
  4. Set intra-cluster access parameters.

    • Service Name: Service name, which can be the same as the workload name.
    • Cluster Name: name of the cluster where the workload runs. The value is inherited from the workload creation page and cannot be changed.
    • Namespace: namespace where the workload is located. The value is inherited from the workload creation page and cannot be changed.
    • Workload: workload for which you want to add a Service.
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload listens. The Nginx application listens on port 80.
      • Access Port: port mapped to the container port at the cluster-internal IP address. The workload can be accessed at <cluster-internal IP address>:<access port>. The port number range is 1–65535.
      -
    -

  5. Click Create. The ClusterIP Service will be added for the workload.
-
-

Setting the Access Type Using kubectl

You can run kubectl commands to set the access type (Service). This section uses a Nginx workload as an example to describe how to implement intra-cluster access using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-clusterip-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-clusterip-svc.yaml are merely example file names.

    vi nginx-deployment.yaml

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx:latest
            name: nginx
          imagePullSecrets:
          - name: default-secret

    vi nginx-clusterip-svc.yaml

    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: nginx
      name: nginx-clusterip
    spec:
      ports:
      - name: service0
        port: 8080                # Port for accessing a Service.
        protocol: TCP             # Protocol used for accessing a Service. The value can be TCP or UDP.
        targetPort: 80            # Port used by a Service to access the target container. This port is closely related to the applications running in a container. In this example, the Nginx image uses port 80 by default.
      selector:                   # Label selector. A Service selects a pod based on the label and forwards the requests for accessing the Service to the pod. In this example, select the pod with the app:nginx label.
        app: nginx
      type: ClusterIP             # Type of a Service. ClusterIP indicates that a Service is only reachable from within the cluster.

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get po

    -

    If information similar to the following is displayed, the workload is running.

    NAME                     READY     STATUS             RESTARTS   AGE
    nginx-2601814895-znhbr   1/1       Running            0          15s

  4. Create a Service.

    kubectl create -f nginx-clusterip-svc.yaml

    -

    If information similar to the following is displayed, the Service is being created.

    -
    service "nginx-clusterip" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the Service has been created, and a cluster-internal IP address has been assigned to the Service.

    # kubectl get svc
    NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
    kubernetes        ClusterIP   10.247.0.1     <none>        443/TCP    4d6h
    nginx-clusterip   ClusterIP   10.247.74.52   <none>        8080/TCP   14m

  5. Access a Service.

    A Service can be accessed from containers or nodes in a cluster.

    -

    Create a pod, access the pod, and run the curl command to access <IP address>:<port> or the domain name of the Service, as shown in the following example.

    -

    The domain name suffix can be omitted. In the same namespace, you can directly use nginx-clusterip:8080 for access. In other namespaces, you can use nginx-clusterip.default:8080 for access.

    # kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
    If you don't see a command prompt, try pressing enter.
    / # curl 10.247.74.52:8080
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    <style>
        body {
            width: 35em;
            margin: 0 auto;
            font-family: Tahoma, Verdana, Arial, sans-serif;
        }
    </style>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and
    working. Further configuration is required.</p>

    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>

    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>
    / # curl nginx-clusterip.default.svc.cluster.local:8080
    ...
    <h1>Welcome to nginx!</h1>
    ...
    / # curl nginx-clusterip.default:8080
    ...
    <h1>Welcome to nginx!</h1>
    ...
    / # curl nginx-clusterip:8080
    ...
    <h1>Welcome to nginx!</h1>
    ...

-
-
-

Creating a Node Pool

-

Scenario

This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.

-
-

Notes and Constraints

  • For details about how to add a node pool to a CCE Turbo cluster, see Procedure - for CCE Turbo Clusters.
  • The autoscaler add-on needs to be installed for node auto scaling. For details about the add-on installation and parameter configuration, see autoscaler.
-
-

Procedure

To create a node pool in a cluster, perform the following steps:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the page, click Create Node Pool.
  3. Set node pool parameters.

    • Current Region: geographic location of the node pool to be created.

      To minimize network latency and resource access time, select the region nearest to your node pool. Cloud resources are region-specific and cannot be used across regions over internal networks.

      -
    • Name: name of the new node pool. By default, the name is in the format of Cluster name-nodepool-Random number. You can also use a custom name.
    • Node Type: Currently, only VM nodes are supported.
    • Nodes: number of nodes to be created for this node pool. The value cannot exceed the maximum number of nodes that can be managed by the cluster.
    • Auto Scaling:
      • By default, this parameter is disabled.
      • After you enable autoscaler, nodes in the node pool will be automatically created or deleted based on cluster loads.
        • Maximum Nodes and Minimum Nodes: You can set the maximum and minimum number of nodes to ensure that the number of nodes to be scaled is within a proper range.
        • Priority: Set this parameter based on service requirements. A larger value indicates a higher priority. For example, if this parameter is set to 1 and 4 respectively for node pools A and B, B has a higher priority than A. If the priorities of multiple node pools are set to the same value, for example, 2, the node pools are not prioritized and the system performs scaling based on the minimum resource waste principle.

          CCE selects a node pool for auto scaling based on the following policies:

          -
          1. CCE uses algorithms to determine whether a node pool meets the conditions to allow scheduling of a pod in pending state, including whether the node resources are greater than requested by the pod, and whether the nodeSelect, nodeAffinity, and taints meet the conditions. In addition, the node pools that fail to be scaled (due to insufficient resources or other reasons) and are still in the 15-minute cool-down interval are filtered.
          2. If multiple node pools meet the scaling requirements, the system checks the priority of each node pool and selects the node pool with the highest priority for scaling. The value ranges from 0 to 100 and the default priority is 0. The value 100 indicates the highest priority, and the value 0 indicates the lowest priority.
          3. If multiple node pools have the same priority or no priority is configured for them, the system selects the node pool that will consume the least resources based on the configured VM specification.
          4. If the VM specifications of multiple node pools are the same but the node pools are deployed in different AZs, the system randomly selects a node pool to trigger scaling.
          -
          -
        • Scale-In Cooling Interval: Set this parameter in the unit of minute or hour. This parameter indicates the interval between the previous scale-out action and the next scale-in action.

          Scale-in cooling intervals can be configured in the node pool settings and the autoscaler add-on settings.

          -

          Scale-in cooling interval configured in a node pool

          -

          This interval indicates the period during which nodes added to the current node pool after a scale-out operation cannot be deleted. This interval takes effect at the node pool level.

          -

          Scale-in cooling interval configured in the autoscaler add-on

          -

          The interval after a scale-out indicates the period during which the entire cluster cannot be scaled in after the autoscaler add-on triggers scale-out (due to the unschedulable pods, metrics, and scaling policies). This interval takes effect at the cluster level.

          -

          The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

          -

          The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

          -
        -

        You are advised not to store important data on nodes in a node pool because after auto scaling, data cannot be restored as nodes may be deleted.

        -
        -

        If Autoscaler is enabled, install the autoscaler add-on to use the auto scaling feature.

        -
      -
    • AZ: An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      Set an AZ based on your requirements. After a node pool is created, AZ cannot be modified. Exercise caution when selecting an AZ for the node pool.

      -

      To enhance workload reliability, you are advised to select Random AZ, allowing nodes to be randomly and evenly distributed among different AZs.

      -

      In a CCE Turbo cluster, an AZ is randomly selected from available AZs, and all nodes are created in the selected AZ.

      -
      -
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created.

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
      -
    • VPC: The value is the same as that of the cluster and cannot be changed.

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks.

      You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      -

      Ensure that the DNS server in the subnet can resolve the OBS domain name. Otherwise, nodes cannot be created.

      -

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 40 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
      Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

      If the data disk is uninstalled or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • LVM: If this option is selected, CCE data disks are managed by the Logical Volume Manager (LVM). On this condition, you can adjust the disk space allocation for different resources. This option is selected for the first disk by default and cannot be unselected. You can choose to enable or disable LVM for new data disks.
        • This option is selected by default, indicating that LVM management is enabled.
        • You can deselect the check box to disable LVM management.
          • Disk space of the data disks managed by LVM will be allocated according to the ratio you set.
          • When creating a node in a cluster of v1.13.10 or later, if LVM is not selected for a data disk, follow instructions in Adding a Second Data Disk to a Node in a CCE Cluster to fill in the pre-installation script and format the data disk. Otherwise, the data disk will still be managed by LVM.
          • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
          -
          -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        This function is supported only for clusters of v1.13.10 or later in certain regions, and is not displayed for clusters of v1.13.10 or earlier.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
        -
      • Add Data Disk: Currently, a maximum of two data disks can be attached to a node. After the node is created, you can go to the ECS console to attach more data disks. This function is available only to clusters of certain versions.
      • Data disk space allocation: Click to specify the resource ratio for Kubernetes Space and User Space. Disk space of the data disks managed by LVM will be allocated according to the ratio you set. This function is available only to clusters of certain versions.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.

          The Docker space cannot be less than 10%, and the space size cannot be less than 60 GB. The kubelet space cannot be less than 10%.

          -

          The Docker space size is determined by your service requirements. For details, see Data Disk Space Allocation.

          -
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.

          Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

          -
          -
        -
      -
      If the cluster version is v1.13.10-r0 or later and the node specification is Disk-intensive, the following options are displayed for data disks:
      • EVS: Parameters are the same as those when the node type is not Disk-intensive. For details, see Data Disk above.
      • Local disk: Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.
        Local disk parameters are as follows:
        • Disk Mode: If the node type is disk-intensive, the supported disk mode is HDD.
        • Read/Write Mode: When multiple local disks exist, you can set the read/write mode. The serial and sequential modes are supported. Sequential indicates that data is read and written in linear mode. When a disk is used up, the next disk is used. Serial indicates that data is read and written in striping mode, allowing multiple local disks to be read and written at the same time.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.
        -
        -
      -
      • The ratio of disk space allocated to the Kubernetes space and user space must be equal to 100% in total. You can click to refresh the data after you have modified the ratio.
      • By default, disks run in the direct-lvm mode. If data disks are removed, the loop-lvm mode will be used and this will impair system stability.
      -
      -
      -
    • Login Mode:
      • Key pair: Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

        -

        When creating a node using a key pair, IAM users can select only the key pairs that they created themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and the key pair is not displayed in the drop-down list on the CCE console.

        -
        -
      -
    -

  4. Advanced ECS Settings (optional): Click to show advanced ECS settings.

    • ECS Group: An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.
      • Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.
      -

      Select an existing ECS group, or click Create ECS Group to create one. After the ECS group is created, click the refresh button.

      -
    • Resource Tags: By adding tags to resources, you can classify resources.

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag. A maximum of 5 tags can be added.

      -
    • Agency: An agency is created by a tenant administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources. To authorize an ECS or BMS to call cloud services, select Cloud service as the agency type, click Select, and then select ECS BMS.
    • Pre-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The script is usually used to format data disks.

      -
    • Post-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed after Kubernetes software is installed and will not affect the installation. The script is usually used to modify Docker parameters.

      -
    • Subnet IP Address: Select Automatically assign IP address (recommended) or Manually assigning IP addresses.
    -

  5. Advanced Kubernetes Settings (optional): Click to show advanced Kubernetes settings.

    • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

      This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

      -
    • Taints: This field is left blank by default. Taints allow nodes to repel a set of pods. You can add a maximum of 10 taints for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      -
      • If taints are used, you must configure tolerations in the YAML files of pods (a minimal toleration example is provided at the end of this procedure). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
      • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
      -
      -
    • K8S Labels: Labels are key/value pairs that are attached to objects, such as pods. Labels are used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. For more information, see Labels and Selectors.
    • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
    -

  6. Click Next: Confirm to confirm the configured service parameters and specifications.
  7. Click Submit.

    It takes about 6 to 10 minutes to create a node pool. You can click Back to Node Pool List to perform other operations on the node pool or click Go to Node Pool Events to view the node pool details. If the status of the node pool is Normal, the node pool is successfully created.

    -

-
-
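If you configure taints on a node pool, pods must declare matching tolerations before they can be scheduled onto its nodes. The following is a minimal sketch, assuming a hypothetical taint with key app-pool, value gpu, and effect NoSchedule; the pod name is also illustrative. Adjust the key, value, and effect to match the taint you actually configured, then create the pod with kubectl create -f <file name>.

    apiVersion: v1
    kind: Pod
    metadata:
      name: toleration-demo          # Hypothetical pod name used only for illustration.
    spec:
      containers:
      - name: nginx
        image: nginx:latest
      imagePullSecrets:
      - name: default-secret
      tolerations:
      - key: "app-pool"              # Must match the taint key configured for the node pool.
        operator: "Equal"
        value: "gpu"                 # Must match the taint value.
        effect: "NoSchedule"         # Must match the taint effect: NoSchedule, PreferNoSchedule, or NoExecute.

If the node pool also carries a K8S label, you can combine the toleration with a nodeSelector on that label so that pods both tolerate the taint and are steered to that node pool.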

Procedure - for CCE Turbo Clusters

  1. Log in to the CCE console.
  2. Click the cluster name to open its details page, choose Nodes on the left, and click the Node Pool tab on the right.
  3. In the upper right corner of the page, click Create Node Pool.
  4. Configure computing parameters.

    • AZ: An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      Set an AZ based on your requirements. After a node pool is created, AZ cannot be modified. Exercise caution when selecting an AZ for the node pool.

      -

      To enhance workload reliability, you are advised to select Random AZ, allowing nodes to be randomly and evenly distributed among different AZs.

      -
    • Container Runtime: runc or kata.

      For details about common containers and secure containers, see Secure Containers and Common Containers.

      -
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created. In certain regions, only a list of OSs is displayed, and the Public image and Private image options are unavailable.
      • Public image: Select an OS for the node.
      • Private image (OBT): If no private image is available, click Creating a Private Image to create one. This function is available only for clusters of v1.15 or later.
      -

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
      -
    • Login Mode:
      • Key pair: Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

        -

        When creating a node using a key pair, IAM users can select only the key pairs that they created themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and the key pair is not displayed in the drop-down list on the CCE console.

        -
        -
      -
    -

  5. Configure storage parameters.

    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 50 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The data disk space size is determined by your service requirements. For details, see Data Disk Space Allocation.

      If the cluster version is v1.13.10-r0 or later and the node type is Disk-intensive, data disks can be EVS disks or local disks.

      -

      If the data disk is uninstalled or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • Data disk space allocation: Click Expand and select Allocate Disk Space to customize the data disk space usage.

        You can customize the resource proportion for the container runtime and kubelet in the data disk. By default, 90% of the space is allocated to containers, and the remaining space is allocated to the kubelet component.

        -

        You can also define the maximum space that can be occupied by a single container. The default value is 10 GB.

        -
      • Adding data disks: The node must have at least one data disk, and data disks can be added. Click Add Data Disk. Click Expand to attach the new data disk to the specified directory.

        Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

        -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
      -
      -
    -

  6. Configure networking parameters.

    • VPC: The value is the same as that of the cluster and cannot be changed.

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks.

      You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      -

      Ensure that the DNS server in the subnet can resolve the OBS domain name. Otherwise, nodes cannot be created.

      -

      This parameter is displayed only for clusters of v1.13.10-r0 and later.

      -
    -

  7. Configure advanced settings.

    • Kubernetes Label: Kubernetes provides labels for you to run kubectl commands to filter node resources by label.
    • Resource Tags: Resource tags can be added to classify resources.

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag. A maximum of 5 tags can be added.

      -
    • Taints: Taints allow a node to repel a set of pods and work with tolerations to ensure that pods are not scheduled onto inappropriate nodes. For details, see Configuring Node Scheduling (Tainting).
    • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

      This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

      -
    • Pre-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. It is commonly used to format data disks.

      -
    • Post-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed after Kubernetes software is installed and will not affect the installation. It is commonly used to modify Docker parameters.

      -
    • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
    -

  8. Click Next: Confirm.
  9. Click Submit.
-
-

Viewing Node Pools in a Cluster

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the node pool list, select a cluster. All node pools in the cluster will be displayed. You can view the node type, node specifications, autoscaler status, and OS of each node pool.

    • A default node pool DefaultPool is automatically created in each cluster. The default node pool cannot be edited, deleted, or migrated. All nodes created during and after cluster creation are displayed in the default node pool.
    • To display a list of nodes in DefaultPool, click the Nodes subcard in the DefaultPool card.
    -
    -

  3. To filter node pools by autoscaler status, select the autoscaler status in the upper right corner of the node pool list.
  4. In the node pool list, click a node pool name. On the node pool details page, view the basic information, advanced ECS settings, advanced Kubernetes settings, and node list of the node pool.
-
-
-

Managing Pods

-

Scenario

A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates an application's container (or, in some cases, multiple containers), storage resources, a unique network identity (IP address), as well as options that govern how the container(s) should run. A pod represents a single instance of an application in Kubernetes, which might consist of either a single container or a small number of containers that are tightly coupled and that share resources.

-

Pods in a Kubernetes cluster can be used in either of the following ways:

-
  • Pods that run a single container. The "one-container-per-pod" model is the most common Kubernetes use case. In this case, a pod functions as a wrapper around a single container, and Kubernetes manages the pods rather than the containers directly.
  • Pods that run multiple containers that need to work together. A pod might encapsulate an application composed of multiple co-located containers that are tightly coupled and need to share resources (a minimal example manifest is sketched at the end of this section). The possible scenarios are as follows:
    • Content management systems, file and data loaders, local cache managers, etc;
    • Log and checkpoint backup, compression, rotation, snapshotting, etc;
    • Data change watchers, log tailers, logging and monitoring adapters, event publishers, etc;
    • Proxies, bridges, adapters, etc;
    • Controllers, managers, configurators, and updaters
    -
-

You can easily manage pods on CCE, such as editing YAML files and monitoring pods.

-
-
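The following is a minimal sketch of the multi-container pattern described above, assuming a hypothetical web container plus a log-tailing sidecar that share an emptyDir volume; the pod name, sidecar image, command, and paths are illustrative only and not taken from this document.

    apiVersion: v1
    kind: Pod
    metadata:
      name: web-with-log-sidecar     # Hypothetical pod name used only for illustration.
    spec:
      volumes:
      - name: shared-logs            # Shared storage that both containers mount.
        emptyDir: {}
      containers:
      - name: web
        image: nginx:latest
        volumeMounts:
        - name: shared-logs
          mountPath: /var/log/nginx  # Nginx writes its access log into the shared volume.
      - name: log-tailer             # Hypothetical sidecar that reads the logs written by the web container.
        image: busybox:latest
        command: ["sh", "-c", "tail -n+1 -F /logs/access.log"]
        volumeMounts:
        - name: shared-logs
          mountPath: /logs

Because both containers mount the same volume, the sidecar can read the log files that the web container writes, which is the typical division of labor in the log and monitoring scenarios listed above.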

Editing a YAML File

To edit and download the YAML file of a pod online, do as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Pods.
  2. Click Edit YAML at the same row as the target pod. In the Edit YAML dialog box displayed, modify the YAML file of the pod.
  3. Click Edit and then OK to save the changes.

    If a pod is created by another workload, its YAML file cannot be modified individually on the Pods page.

    -
    -

  4. (Optional) In the Edit YAML window, click Download to download the YAML file.
-
-

Monitoring Pods

On the CCE console, you can view the CPU and memory usage, upstream and downstream rates, and disk read/write rates of a workload pod to determine the required resource specifications.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Pods.
  2. Click Monitoring at the same row as the target pod to view the CPU and memory usage, upstream and downstream rates, and disk read/write rates of the pod.

    You cannot view the monitoring data of a pod that is not running.

    -
    -

-
-

Deleting a Pod

If a pod is no longer needed, you can delete it. Deleted pods cannot be recovered. Exercise caution when performing this operation.

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Pods.
  2. Click Delete at the same row as the pod to be deleted.

    Read the system prompts carefully. A pod cannot be restored after it is deleted. Exercise caution when performing this operation.

    -

  3. Click Yes to delete the pod.

    • If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page.
    • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
    -
    -

-

LoadBalancer

-

Scenario

A workload can be accessed from public networks through a load balancer, which is more secure and reliable than EIP-based access.

-

The LoadBalancer access address is in the format of <IP address of public network load balancer>:<access port>, for example, 10.117.117.117:80.

-

In this access mode, requests are transmitted through an ELB load balancer to a node and then forwarded to the destination pod through the Service.

-
Figure 1 LoadBalancer
-
-

Notes and Constraints

  • LoadBalancer Services allow workloads to be accessed from public networks through ELB. This access mode has the following restrictions:
    • It is recommended that automatically created load balancers not be used by other resources. Otherwise, these load balancers cannot be completely deleted, causing residual resources.
    • Do not change the listener name for the load balancer in clusters of v1.15 and earlier. Otherwise, the load balancer cannot be accessed.
    -
  • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
  • If the service affinity is set to the node level (that is, externalTrafficPolicy is set to Local), the cluster may fail to access the Service by using the ELB address. For details, see Why a Cluster Fails to Access Services by Using the ELB Address.
  • CCE Turbo clusters support only cluster-level service affinity.
  • Dedicated ELB load balancers can be used only in clusters of v1.17 and later.
  • The specifications of dedicated load balancers must use TCP/UDP (network load balancing) and support private networks. If the Service needs to support HTTP, the specifications of dedicated load balancers must use HTTP (application load balancing) in addition to TCP/UDP (network load balancing).
  • If you create a LoadBalancer Service on the CCE console, a random node port is automatically generated. If you use kubectl to create a LoadBalancer Service, a random node port is generated unless you specify one.
  • In a CCE cluster, if the cluster-level affinity is configured for a LoadBalancer Service, requests are distributed to the node ports of each node using SNAT when entering the cluster. The number of node ports cannot exceed the number of available node ports on the node. If the Service affinity is at the node level (local), there is no such constraint. In a CCE Turbo cluster, this constraint applies to shared ELB load balancers, but not dedicated ones. You are advised to use dedicated ELB load balancers in CCE Turbo clusters.
  • When the cluster service forwarding (proxy) mode is IPVS, the node IP cannot be configured as the external IP of the Service. Otherwise, the node is unavailable.
  • Dedicated load balancers are available only in certain regions.
-
-

Adding a Service When Creating a Workload

You can set the Service when creating a workload on the CCE console. An Nginx workload is used as an example.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select LoadBalancer (ELB).
    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Service Affinity:
      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      -
    -

    ELB Configuration

    -
    • Elastic Load Balancer: A load balancer automatically distributes Internet access traffic to multiple nodes where the workload is located.
      • Shared: Shared load balancers provide domain name- and URL-based route balancing. Resources are shared among load balancers, and the performance of a load balancer is affected by other load balancers.
      • Dedicated: Resources are exclusively used by a load balancer, and the performance of a load balancer is not affected by other load balancers. IPv6 is supported.
        • AZ: Dedicated load balancers can be deployed across AZs to provide higher reliability.
        • Subnet: subnet where the backend server of the load balancer is located.

          Load balancers occupy different numbers of subnet IP addresses based on their specifications. Therefore, you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer subnet CIDR block.

          -
        • Specifications: Specifications determine the types of listeners that can be added to a load balancer. Select specifications that best fit your needs.
        -
      -

      You can create public network or private network load balancers.

      -
      • Public network: You can select an existing public network load balancer or have the system automatically create a new one.
      • Private network: You can select an existing private network load balancer or have the system automatically create a new private network load balancer.
      -
      The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).
      • Enterprise Project: Select an enterprise project in which the load balancer is created.
      • Specifications: This field is displayed only when you select Public network and Automatically created for Elastic Load Balancer. You can click to modify the name, specifications, billing mode, and bandwidth of the load balancer.
      • Algorithm Type: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash.
        • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
        • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is also considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing capability. This algorithm is often used for persistent connections, such as database connections.
        • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This enables requests from different clients to be distributed in load balancing mode and ensures that requests from the same client are forwarded to the same server. This algorithm applies to TCP connections without cookies.
        -
        -
      • Sticky Session: This function is disabled by default. You can select Based on source IP address. Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.
      • Health Check: This function is enabled by default. When it is enabled, the load balancer checks the health of backend servers. By default, the Service ports (node port and container port of the Service) are used for health checks. You can also specify another port for health checks. After the port is specified, a Service port (name: cce-healthz; protocol: TCP) will be added for the Service.
      -
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port defined in the container image and on which the workload listens. The Nginx application listens on port 80.
      • Access Port: port mapped to the container port at the load balancer's IP address. The workload can be accessed at <Load balancer's IP address>:<Access port>. The port number range is 1–65535.
      -
    -

  2. After the configuration is complete, click OK.
  3. On the workload creation page, click Next: Configure Advanced Settings. On the page displayed, click Create.
  4. After the workload is successfully created, choose Workloads > Deployments or Workloads > StatefulSets on the CCE console. Click the name of the workload to view its details. On the workload details page, click the Services tab and obtain the access address.

    -

  5. Click the access address.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network.
  2. On the Services tab page, click Create Service.

    The parameters are the same as those in Adding a Service When Creating a Workload.

    -

  3. Click Create.
-
-

Using kubectl to Create a Service (Using an Existing Load Balancer)

You can set the access type when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-elb-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-elb-svc.yaml are merely example file names.

    -

    vi nginx-deployment.yaml

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx
            name: nginx
          imagePullSecrets:
          - name: default-secret

    -

    vi nginx-elb-svc.yaml

    -

    Before enabling sticky session, ensure that the following conditions are met:

    -
    • The workload protocol is TCP.
    • Anti-affinity has been configured between pods of the workload. That is, all pods of the workload are deployed on different nodes. For details, see Workload-Node Anti-Affinity.
    -
    -
    apiVersion: v1
    kind: Service
    metadata:
      annotations:
        kubernetes.io/elb.id: 3c7caa5a-a641-4bff-801a-feace27424b6          # Load balancer ID. Replace it with the actual value.
        kubernetes.io/elb.class: performance                               # Load balancer type
      name: nginx
    spec:
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx
      type: LoadBalancer
    Table 1 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.class

    -

    No

    -

    String

    -

    Select a proper load balancer type as required.

    -

    The value can be:

    -
    • union: shared load balancer
    • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
    -

    Default value: union

    -

    kubernetes.io/elb.session-affinity-mode

    -

    No

    -

    String

    -

    Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.

    -
    • Disabling sticky session: Do not set this parameter.
    • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
    -

    kubernetes.io/elb.session-affinity-option

    -

    No

    -

    Table 2 Object

    -

    This parameter specifies the sticky session timeout.

    -

    kubernetes.io/elb.id

    -

    Yes

    -

    String

    -

    This parameter indicates the ID of a load balancer. The value can contain 1 to 100 characters.

    -

    Mandatory when an existing load balancer is to be associated.

    -

    Obtaining the load balancer ID:

    -

    On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

    -

    kubernetes.io/elb.subnet-id

    -

    -

    -

    String

    -

    This parameter indicates the ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

    -
    • Mandatory when a cluster of v1.11.7-r0 or earlier is to be automatically created.
    • Optional for clusters later than v1.11.7-r0.
    -

    kubernetes.io/elb.lb-algorithm

    -

    No

    -

    String

    -

    This parameter indicates the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

    -

    Options:

    -
    • ROUND_ROBIN: weighted round robin algorithm
    • LEAST_CONNECTIONS: weighted least connections algorithm
    • SOURCE_IP: source IP hash algorithm
    -

    When the value is SOURCE_IP, the weights of backend servers in the server group are invalid.

    -

    kubernetes.io/elb.health-check-flag

    -

    No

    -

    String

    -

    Whether to enable the ELB health check.

    -

    Enabled by default.

    -
    • Enabling health check: Leave this parameter blank or set it to on.
    • Disabling health check: Set this parameter to off.
    -

    kubernetes.io/elb.health-check-option

    -

    No

    -

    Table 3 Object

    -

    ELB health check configuration items.

    -

    port

    -

    Yes

    -

    Integer

    -

    Access port that is registered on the load balancer and mapped to the cluster-internal IP address.

    -

    targetPort

    -

    Yes

    -

    String

    -

    Container port set on the CCE console.

    -
    -
    Table 2 Data structure of the elb.session-affinity-option field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    persistence_timeout

    -

    Yes

    -

    String

    -

    Sticky session timeout, in minutes. This parameter is valid only when elb.session-affinity-mode is set to SOURCE_IP.

    -

    Value range: 1 to 60. Default value: 60

    -
    -
    Table 3 Data structure description of the elb.health-check-option field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    delay

    -

    No

    -

    String

    -

    Initial waiting time (in seconds) for starting the health check.

    -

    Value range: 1 to 50. Default value: 5

    -

    timeout

    -

    No

    -

    String

    -

    Health check timeout, in seconds.

    -

    Value range: 1 to 50. Default value: 10

    -

    max_retries

    -

    No

    -

    String

    -

    Maximum number of health check retries.

    -

    Value range: 1 to 10. Default value: 3

    -

    protocol

    -

    No

    -

    String

    -

    Health check protocol.

    -

    Default value: protocol of the associated Service

    -

    Value options: TCP, UDP_CONNECT, or HTTP

    -

    path

    -

    No

    -

    String

    -

    Health check URL. This parameter needs to be configured when the protocol is HTTP.

    -

    Default value: /

    -

    The value contains 1 to 10,000 characters.

    -
    -
    -

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get pod

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    nginx-2601814895-c1xwh   1/1       Running            0          6s

  4. Create a Service.

    kubectl create -f nginx-elb-svc.yaml

    -

    If information similar to the following is displayed, the Service has been created.

    -
    service "nginx" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the access type has been set successfully, and the workload is accessible.

    -
    NAME         TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)        AGE
    kubernetes   ClusterIP      10.247.0.1       <none>         443/TCP        3d
    nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s

  5. Enter the URL in the address box of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

    Nginx is accessible. (A combined example that uses the sticky session and health check annotations from Table 1 to Table 3 is provided at the end of this section.)

    -
    Figure 2 Accessing Nginx through the LoadBalancer Service
    -

-
-
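The annotations described in Table 1 to Table 3 can be combined in a single manifest. The following is a minimal sketch, assuming the same placeholder load balancer ID as above, that enables source IP-based sticky sessions with a 30-minute timeout, customizes the health check, and sets node-level Service affinity (externalTrafficPolicy set to Local). Treat every value as a placeholder to adapt; the sticky session prerequisites listed above (TCP workload and pod anti-affinity) still apply.

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      annotations:
        kubernetes.io/elb.id: 3c7caa5a-a641-4bff-801a-feace27424b6   # Existing load balancer ID. Replace it with the actual value.
        kubernetes.io/elb.class: union                               # Shared load balancer.
        kubernetes.io/elb.session-affinity-mode: SOURCE_IP           # Enable sticky sessions based on the source IP address.
        kubernetes.io/elb.session-affinity-option: '{"persistence_timeout": "30"}'   # Sticky session timeout in minutes (1 to 60).
        kubernetes.io/elb.health-check-flag: 'on'                    # Health check enabled (quoted so the value stays a string).
        kubernetes.io/elb.health-check-option: '{"delay": "5", "timeout": "10", "max_retries": "3", "protocol": "TCP"}'
    spec:
      externalTrafficPolicy: Local    # Node-level Service affinity; preserves the client source IP (see Notes and Constraints).
      selector:
        app: nginx
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      type: LoadBalancer

As with the earlier example, create the Service with kubectl create -f <file name> and verify it with kubectl get svc.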

Using kubectl to Create a Service (Automatically Creating a Load Balancer)

You can add a Service when creating a workload using kubectl. This section uses an Nginx workload as an example to describe how to add a LoadBalancer Service using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-elb-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-elb-svc.yaml are merely example file names.

    -

    vi nginx-deployment.yaml

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx
            name: nginx
          imagePullSecrets:
          - name: default-secret

    -

    vi nginx-elb-svc.yaml

    -

    Before enabling sticky session, ensure that the following conditions are met:

    -
    • The workload protocol is TCP.
    • Anti-affinity has been configured between pods of the workload. That is, all pods of the workload are deployed on different nodes. For details, see Workload-Node Anti-Affinity.
    -
    -
    Example of a Service using a shared, public network load balancer:
    apiVersion: v1
    kind: Service
    metadata:
      annotations:
        kubernetes.io/elb.class: union
        kubernetes.io/elb.autocreate: 
            '{
                "type": "public",
                "bandwidth_name": "cce-bandwidth-1551163379627",
                "bandwidth_chargemode":"traffic",
                "bandwidth_size": 5,
                "bandwidth_sharetype": "PER",
                "eip_type": "5_bgp",
                "name": "james"
            }'
      labels:
        app: nginx
      name: nginx
    spec:
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      selector:
        app: nginx
      type: LoadBalancer

    Example of a Service using a dedicated, public network load balancer:

    -
    apiVersion: v1
    -kind: Service
    -metadata:
    -  name: nginx
    -  labels:
    -    app: nginx
    -  namespace: default
    -  annotations:
    -    kubernetes.io/elb.class: performance
    -    kubernetes.io/elb.autocreate: 
    -        '{
    -            "type": "public",
    -            "bandwidth_name": "cce-bandwidth-1626694478577",
    -            "bandwidth_chargemode": "traffic",
    -            "bandwidth_size": 5,
    -            "bandwidth_sharetype": "PER",
    -            "eip_type": "5_bgp",
    -            "available_zone": [
    -                "eu-de-01"
    -            ],
    -            "l4_flavor_name": "L4_flavor.elb.s1.small"
    -        }'
    -spec:
    -  selector:
    -    app: nginx
    -  ports:
    -  - name: cce-service-0
    -    targetPort: 80
    -    nodePort: 0
    -    port: 80
    -    protocol: TCP
    -  type: LoadBalancer
    - -
    Table 4 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.class

    -

    No

    -

    String

    -

    Select a proper load balancer type as required.

    -

    The value can be:

    -
    • union: shared load balancer
    • performance: dedicated load balancer, which can be used only in clusters of v1.17 and later.
    -

    Default value: union

    -

    kubernetes.io/elb.subnet-id

    -

    -

    -

    String

    -

    This parameter indicates the ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

    -
    • Mandatory when a load balancer is to be automatically created for a cluster of v1.11.7-r0 or earlier.
    • Optional for clusters later than v1.11.7-r0.
    -

    kubernetes.io/elb.enterpriseID

    -

    No

    -

    String

    -

    Clusters of v1.15 and later versions support this field. In clusters earlier than v1.15, load balancers are created in the default project by default.

    -

    This parameter indicates the ID of the enterprise project in which the ELB load balancer will be created.

    -

    If this parameter is not specified or is set to 0, resources will be bound to the default enterprise project.

    -

    How to obtain:

    -

    Log in to the management console and choose Enterprise > Project Management on the top menu bar. In the list displayed, click the name of the target enterprise project, and copy the ID on the enterprise project details page.

    -

    kubernetes.io/elb.session-affinity-option

    -

    No

    -

    Table 2 Object

    -

    Sticky session timeout.

    -

    kubernetes.io/elb.autocreate

    -

    Yes

    -

    elb.autocreate object

    -

    Whether to automatically create a load balancer associated with the Service.

    -

    Example:

    -
    • Automatically created public network load balancer:

      {"type":"public","bandwidth_name":"cce-bandwidth-1551163379627","bandwidth_chargemode":"traffic","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}

      -
    • Automatically created private network load balancer:

      {"type":"inner","name":"A-location-d-test"}

      -
    -

    kubernetes.io/elb.lb-algorithm

    -

    No

    -

    String

    -

    This parameter indicates the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

    -

    Options:

    -
    • ROUND_ROBIN: weighted round robin algorithm
    • LEAST_CONNECTIONS: weighted least connections algorithm
    • SOURCE_IP: source IP hash algorithm
    -

    When the value is SOURCE_IP, the weights of backend servers in the server group do not take effect.

    -

    kubernetes.io/elb.health-check-flag

    -

    No

    -

    String

    -

    Whether to enable the ELB health check.

    -

    Disabled by default.

    -
    • Enabling health check: Leave this parameter blank or set it to on.
    • Disabling health check: Set this parameter to off.
    -

    kubernetes.io/elb.health-check-option

    -

    No

    -

    Table 3 Object

    -

    ELB health check configuration items.

    -

    kubernetes.io/elb.session-affinity-mode

    -

    No

    -

    String

    -

    Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.

    -
    • Disabling sticky session: Do not set this parameter.
    • Enabling sticky session: Set this parameter to SOURCE_IP, indicating that the sticky session is based on the source IP address.
    -

    kubernetes.io/elb.session-affinity-option

    -

    No

    -

    Table 2 Object

    -

    Sticky session timeout.

    -

    kubernetes.io/hws-hostNetwork

    -

    No

    -

    String

    -

    This parameter indicates whether the workload Services use the host network. Setting this parameter to true will enable the load balancer to forward requests to the host network.

    -

    The host network is not used by default. The value can be true or false.

    -

    externalTrafficPolicy

    -

    No

    -

    String

    -

    If sticky session is enabled, add this parameter so that requests are transferred to a fixed node. If a LoadBalancer Service with this parameter set to Local is created, a client can access the target backend only if the client runs on the same node as the backend.

    -
    -
    - -
    Table 5 Data structure of the elb.autocreate field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    name

    -

    No

    -

    String

    -

    Name of the automatically created load balancer.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    Default name: cce-lb+service.UID

    -

    type

    -

    No

    -

    String

    -

    Network type of the load balancer.

    -
    • public: public network load balancer
    • inner: private network load balancer
    -

    Default value: inner

    -

    bandwidth_name

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth name. The default value is cce-bandwidth-******.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    bandwidth_chargemode

    -

    No

    -

    String

    -

    Bandwidth billing mode.

    -
    • traffic: billed by traffic
    -

    bandwidth_size

    -

    Yes for public network load balancers

    -

    Integer

    -

    Bandwidth size, in Mbit/s. The value ranges from 1 to 2,000. Set this parameter based on the bandwidth range allowed in your region.

    -

    bandwidth_sharetype

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth sharing mode.

    -
    • PER: dedicated bandwidth
    -

    eip_type

    -

    Yes for public network load balancers

    -

    String

    -

    EIP type, which may vary depending on sites. For details, see the type parameter specified when creating an EIP.

    -
    • 5_bgp: dynamic BGP
    • 5_gray: dedicated load balancer
    -

    available_zone

    -

    Yes

    -

    Array of strings

    -

    AZ where the load balancer is located.

    -

    This parameter is available only for dedicated load balancers.

    -

    l4_flavor_name

    -

    Yes

    -

    String

    -

    Flavor name of the layer-4 load balancer.

    -

    This parameter is available only for dedicated load balancers.

    -

    l7_flavor_name

    -

    No

    -

    String

    -

    Flavor name of the layer-7 load balancer.

    -

    This parameter is available only for dedicated load balancers.

    -

    elb_virsubnet_ids

    -

    No

    -

    Array of strings

    -

    Subnet where the backend server of the load balancer is located. If this parameter is left blank, the default cluster subnet is used.

    -

    Load balancers occupy different numbers of subnet IP addresses depending on their specifications. Therefore, you are advised not to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.

    -

    This parameter is available only for dedicated load balancers.

    -

    Example:

    -
    "elb_virsubnet_ids": [
    -   "14567f27-8ae4-42b8-ae47-9f847a4690dd"
    - ]
    -
    -
    -
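    In addition to the two public network examples above, the kubernetes.io/elb.autocreate annotation can also create a private network load balancer. The sketch below simply combines the shared load balancer example with the private network ("inner") annotation value shown in Table 4; treat it as an illustration rather than a verified configuration.

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      labels:
        app: nginx
      annotations:
        kubernetes.io/elb.class: union
        # "inner" creates a private network load balancer; the name value mirrors the example in Table 4.
        kubernetes.io/elb.autocreate: '{"type": "inner", "name": "A-location-d-test"}'
    spec:
      selector:
        app: nginx
      ports:
      - name: service0
        port: 80
        protocol: TCP
        targetPort: 80
      type: LoadBalancer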

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get po

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    -nginx-2601814895-c1xwh   1/1       Running            0          6s
    -

  4. Create a Service.

    kubectl create -f nginx-elb-svc.yaml

    -

    If information similar to the following is displayed, the Service has been created.

    -
    service "nginx" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the access type has been set successfully, and the workload is accessible.

    -
    NAME         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
    -kubernetes   ClusterIP      10.247.0.1       <none>        443/TCP        3d
    -nginx        LoadBalancer   10.247.130.196   10.78.42.242   80:31540/TCP   51s
    -

  5. Enter the URL in the address bar of the browser, for example, 10.78.42.242:80. 10.78.42.242 indicates the IP address of the load balancer, and 80 indicates the access port displayed on the CCE console.

    Nginx is accessible.

    -
    Figure 3 Accessing Nginx through the LoadBalancer Service
    -

-
-

Why a Cluster Fails to Access Services by Using the ELB Address

If the service affinity of a LoadBalancer Service is set to the node level, that is, the value of externalTrafficPolicy is Local, the ELB address may fail to be accessed from the cluster (specifically, nodes or containers).

-

This is because when the LoadBalancer Service is created, kube-proxy adds the ELB access address (external-ip) to iptables or IPVS. When the ELB address is accessed from within the cluster, the request is not forwarded by the ELB load balancer. Instead, kube-proxy forwards it directly. The exact behavior depends on the container network model and service forwarding mode you use.

-

The following methods can be used to solve this problem:

-
  • (Recommended) In the cluster, use the ClusterIP Service or service domain name for access (see the example after this list).
  • Set externalTrafficPolicy of the Service to Cluster, which means cluster-level service affinity. Note that this affects source address persistence.
    apiVersion: v1 
    -kind: Service
    -metadata: 
    -  annotations:   
    -    kubernetes.io/elb.class: union
    -    kubernetes.io/elb.autocreate: '{"type":"public","bandwidth_name":"cce-bandwidth","bandwidth_chargemode":"traffic","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'
    -  labels: 
    -    app: nginx 
    -  name: nginx 
    -spec: 
    -  externalTrafficPolicy: Cluster
    -  ports: 
    -  - name: service0 
    -    port: 80
    -    protocol: TCP 
    -    targetPort: 80
    -  selector: 
    -    app: nginx 
    -  type: LoadBalancer
    -
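    For the first (recommended) option, clients inside the cluster can access the Service through its cluster-internal DNS name instead of the ELB address. The name below assumes the nginx Service in the default namespace.

    # Run from inside any pod in the cluster.
    curl http://nginx.default.svc.cluster.local:80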
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0016.html b/docs/cce/umn/cce_01_0016.html deleted file mode 100644 index 737f296c..00000000 --- a/docs/cce/umn/cce_01_0016.html +++ /dev/null @@ -1,86 +0,0 @@ - - -

Using a Secret

-

The following secrets are used by the CCE system. Do not perform any operations on them.

-
  • Do not modify or delete secrets in the kube-system namespace.
  • Do not modify or delete default-secret or paas.elb in any namespace. default-secret is used to pull private images from SWR, and paas.elb is used to connect Services in the namespace to the ELB service.
-
- -

The following example shows how to use a secret.

-
apiVersion: v1
-kind: Secret
-metadata:
-  name: mysecret
-type: Opaque
-data:
-  username: ******  # The value must be Base64-encoded.
-  password: ******  # The value must be Base64-encoded.
-
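The Base64-encoded values can be produced with the base64 tool on any Linux host. The value admin below is only a placeholder for your real user name; encode the password the same way.

echo -n 'admin' | base64              # prints YWRtaW4=; paste the output as the username value
echo -n '<your-password>' | base64    # encode the password value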

When a secret is used in a pod, the pod and secret must be in the same cluster and namespace.

-
-

Configuring the Data Volume of a Pod

A secret can be used as a file in a pod. As shown in the following example, the username and password of the mysecret secret are saved in the /etc/foo directory as files.
apiVersion: v1
-kind: Pod
-metadata:
-  name: mypod
-spec:
-  containers:
-  - name: mypod
-    image: redis
-    volumeMounts:
-    - name: foo
-      mountPath: "/etc/foo"
-      readOnly: true
-  volumes:
-  - name: foo
-    secret:
-      secretName: mysecret
-
-
In addition, you can specify the directory and permission used to access a secret. In the following example, the username is stored in the /etc/foo/my-group/my-username file in the container.
apiVersion: v1
-kind: Pod
-metadata:
-  name: mypod
-spec:
-  containers:
-  - name: mypod
-    image: redis
-    volumeMounts:
-    - name: foo
-      mountPath: "/etc/foo"
-  volumes:
-  - name: foo
-    secret:
-      secretName: mysecret
-      items:
-      - key: username
-        path: my-group/my-username
-        mode: 511
-
-
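A quick way to check the mount once the pod is running (assuming the pod and secret defined above):

kubectl exec mypod -- ls /etc/foo/my-group
# Expected output: my-username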

To mount a secret to a data volume, you can also perform operations on the CCE console. When creating a workload, set advanced settings for the container, choose Data Storage > Local Volume, click Add Local Volume, and select Secret. For details, see Secret.

-
-

Setting Environment Variables of a Pod

A secret can be used as environment variables of a pod. As shown in the following example, the username and password of the mysecret secret are defined as environment variables of the pod.
apiVersion: v1
-kind: Pod
-metadata:
-  name: secret-env-pod
-spec:
-  containers:
-  - name: mycontainer
-    image: redis
-    env:
-      - name: SECRET_USERNAME
-        valueFrom:
-          secretKeyRef:
-            name: mysecret
-            key: username
-      - name: SECRET_PASSWORD
-        valueFrom:
-          secretKeyRef:
-            name: mysecret
-            key: password
-  restartPolicy: Never
-
-
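To confirm that the variables are injected, a command along the following lines can be run against the pod above (assuming it is still running):

# Lists the environment variables populated from the secret.
kubectl exec secret-env-pod -- env | grep SECRET_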
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0018.html b/docs/cce/umn/cce_01_0018.html deleted file mode 100644 index a5f57dc6..00000000 --- a/docs/cce/umn/cce_01_0018.html +++ /dev/null @@ -1,217 +0,0 @@ - - -

Container Logs

-

Scenario

CCE allows you to configure policies for periodically collecting, managing, and analyzing workload logs to prevent logs from becoming oversized.

-

CCE works with AOM to collect workload logs. When a node is created, the ICAgent (the DaemonSet named icagent in the kube-system namespace of the cluster) of AOM is installed by default. After the ICAgent collects workload logs and reports them to AOM, you can view workload logs on the CCE or AOM console.

-
  • By default, the ICAgent collects the standard outputs of containers. You do not need to perform any configuration.
  • You can also configure the path for storing container logs when creating a workload so that the ICAgent collects logs from this path.
    You can select either of the following modes for container logs:
    • HostPath: The host path is mounted to the specified container path (mount path). In the node host path, you can view the container logs output into the mount path.
    • EmptyDir: The temporary path of the node is mounted to the specified path (mount path). Log data that exists in the temporary path but is not reported by the collector to AOM will disappear after the pod is deleted.
    -
    -
-
-

Precautions

The ICAgent only collects *.log, *.trace, and *.out text log files.

-
-

Setting the Path for Storing Container Logs

  1. When creating a workload on the CCE console, add a container and expand Log Policies.
  2. In the Log Policies area, click Add Log Policy. Configure parameters in the log policy. The following uses Nginx as an example.

    Figure 1 Adding a log policy
    -

  3. Set Storage Type to Host Path or Container Path.

    -

    Table 1 Configuring log policies

    Parameter

    -

    Description

    -

    Storage Type

    -
    • Host Path: In HostPath mode, the host path is mounted to the specified container path (mount path). In the node host path, you can view the container logs output into the mount path.
    • Container Path: In EmptyDir mode, the temporary path of the node is mounted to the specified path (mount path). Log data that exists in the temporary path but is not reported by the collector to AOM will disappear after the pod is deleted.
    -

    Add Container Path

    -

    *Host Path

    -

    Enter the host path, for example, /var/paas/sys/log/nginx.

    -

    Container Path

    -
    Container path (for example, /tmp) to which the storage resources will be mounted.
    NOTICE:
    • Do not mount storage to a system directory such as / or /var/run; otherwise, container errors may occur. You are advised to mount storage to an empty directory. If the directory is not empty, ensure that it contains no files that affect container startup. Otherwise, such files will be replaced, causing the container to fail to start and the workload to fail to be created.
    • When storage is mounted to a high-risk directory, you are advised to start the container with an account that has the minimum permissions; otherwise, high-risk files on the host may be damaged.
    • AOM collects only the first 20 log files that have been modified recently. It collects files from 2 levels of subdirectories by default.
    • AOM only collects .log, .trace, and .out text log files in mounting paths.
    • For details about how to set permissions for mount points in a container, see Configure a Security Context for a Pod or Container.
    -
    -
    -

    Extended Host Path

    -

    This parameter is mandatory only if Storage Type is set to Host Path.

    -

    Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

    -

    A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

    -
    • None: No extended path is configured.
    • PodUID: ID of a pod.
    • PodName: name of a pod.
    • PodUID/ContainerName: ID of a pod or name of a container.
    • PodName/ContainerName: name of a pod or container.
    -

    Log Dumping

    -

    Log dump refers to rolling log files on a local host.

    -
    • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
    • Disabled: AOM does not dump log files.
    -
    NOTE:
    • Log file rolling of AOM is implemented in the copytruncate mode. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
    • Currently, mainstream log components such as Log4j and Logback support log file rolling. If your log files already support rolling, skip the configuration. Otherwise, conflicts may occur.
    • You are advised to configure log file rolling for your own services to flexibly control the size and number of rolled files.
    -
    -

    Multi-line Log

    -

    Some programs (for example, Java programs) generate log records that span multiple lines. By default, the log collection system collects logs line by line. To display such a record as a single log message, enable the multi-line log function and use the log time or regular pattern mode. When a line matches the preset time format or regular expression, it is treated as the start of a new log message, and the message continues until the next line that matches.

    -

    Split Mode

    -
    • Log Time: Enter a time wildcard. For example, if the time in the log is 2017-01-01 23:59:59, the wildcard is YYYY-MM-DD hh:mm:ss.
    • Regular Pattern: Enter a regular expression.
    -
    -
    -

  4. Click OK.
-
-

Using kubectl to Set the Container Log Storage Path

You can set the container log storage path by defining a YAML file.

-

In the following example, an emptyDir volume is mounted to /var/log/nginx as a temporary path so that the ICAgent collects logs from /var/log/nginx. The policy field is customized by CCE and allows the ICAgent to identify and collect logs.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: testlog
-  namespace: default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: testlog
-  template:
-    metadata:
-      labels:
-        app: testlog
-    spec:
-      containers:
-        - image: 'nginx:alpine'
-          name: container-0
-          resources:
-            requests:
-              cpu: 250m
-              memory: 512Mi
-            limits:
-              cpu: 250m
-              memory: 512Mi
-          volumeMounts:
-            - name: vol-log
-              mountPath: /var/log/nginx
-              policy:
-                logs:
-                  rotate: ''
-      volumes:
-        - emptyDir: {}
-          name: vol-log
-      imagePullSecrets:
-        - name: default-secret
-

The following shows how to use the HostPath mode. Compared with the EmptyDir mode, the volume type is changed to hostPath, and the path on the host needs to be configured for this hostPath volume. In the following example, /tmp/log on the host is mounted to /var/log/nginx. In this way, the ICAgent can collect logs from /var/log/nginx without deleting them from /tmp/log.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: testlog
-  namespace: default
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: testlog
-  template:
-    metadata:
-      labels:
-        app: testlog
-    spec:
-      containers:
-        - image: 'nginx:alpine'
-          name: container-0
-          resources:
-            requests:
-              cpu: 250m
-              memory: 512Mi
-            limits:
-              cpu: 250m
-              memory: 512Mi
-          volumeMounts:
-            - name: vol-log
-              mountPath: /var/log/nginx
-              readOnly: false
-              extendPathMode: PodUID
-              policy:
-                logs:
-                  rotate: Hourly
-                  annotations:
-                    format: ''
-      volumes:
-        - hostPath:
-            path: /tmp/log
-          name: vol-log
-      imagePullSecrets:
-        - name: default-secret
- -
Table 2 Parameter description

Parameter

-

Explanation

-

Description

-

extendPathMode

-

Extended host path

-

Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

-

A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

-
  • None: No extended path is configured.
  • PodUID: ID of a pod.
  • PodName: name of a pod.
  • PodUID/ContainerName: ID of a pod or name of a container.
  • PodName/ContainerName: name of a pod or container.
-

policy.logs.rotate

-

Log dumping

-

Log dump refers to rolling log files on a local host.

-
  • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
  • Disabled: AOM does not dump log files.
-
NOTE:
  • Log file rolling of AOM is implemented in the copytruncate mode. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
  • Currently, mainstream log components such as Log4j and Logback support log file rolling. If your log files already support rolling, skip the configuration. Otherwise, conflicts may occur.
  • You are advised to configure log file rolling for your own services to flexibly control the size and number of rolled files.
-
-

policy.logs.annotations.format

-

Multi-line log matching

-

Some programs (for example, Java programs) generate log records that span multiple lines. By default, the log collection system collects logs line by line. To display such a record as a single log message, enable the multi-line log function and use the log time or regular pattern mode. When a line matches the preset time format or regular expression, it is treated as the start of a new log message, and the message continues until the next line that matches.

-

The format is as follows:

-
{
-    "multi": {
-        "mode": "time",
-        "value": "YYYY-MM-DD hh:mm:ss"
-    }
-}
-

multi indicates the multi-line mode.

-
  • time: log time. Enter a time wildcard. For example, if the time in the log is 2017-01-01 23:59:59, the wildcard is YYYY-MM-DD hh:mm:ss.
  • regular: regular pattern. Enter a regular expression.
-
-
-
-
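For the regular pattern mode, the value field holds a regular expression instead of a time wildcard. The expression below is only an illustration matching lines that start with a date; adapt it to your own log format.

{
    "multi": {
        "mode": "regular",
        "value": "^\\d{4}-\\d{2}-\\d{2}"
    }
}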

Viewing Logs

After a log collection path is configured and the workload is created, the ICAgent collects log files from the configured path. The collection takes about 1 minute.

-

After the log collection is complete, go to the workload details page and click Logs in the upper right corner to view logs.

-

You can also view logs on the AOM console.

-

You can also run the kubectl logs command to view the standard output of a container.

-
# View logs of a specified pod.
-kubectl logs <pod_name>
-kubectl logs -f <pod_name>    # Similar to tail -f
-
-# View logs of a specified container in a specified pod.
-kubectl logs <pod_name> -c <container_name>
-
-# Specify a namespace.
-kubectl logs <pod_name> -c <container_name> -n <namespace>    # One-off query
-kubectl logs -f <pod_name> -n <namespace>    # Real-time query in tail -f mode
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0019.html b/docs/cce/umn/cce_01_0019.html deleted file mode 100644 index 6567cb8c..00000000 --- a/docs/cce/umn/cce_01_0019.html +++ /dev/null @@ -1,11 +0,0 @@ - - -

Charts (Helm)

-
-
- -
- diff --git a/docs/cce/umn/cce_01_0020.html b/docs/cce/umn/cce_01_0020.html deleted file mode 100644 index 76ddc328..00000000 --- a/docs/cce/umn/cce_01_0020.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

Networking

-
- - diff --git a/docs/cce/umn/cce_01_0023.html b/docs/cce/umn/cce_01_0023.html deleted file mode 100644 index 686278bc..00000000 --- a/docs/cce/umn/cce_01_0023.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

kubectl Usage Guide

- -
- -
- diff --git a/docs/cce/umn/cce_01_0025.html b/docs/cce/umn/cce_01_0025.html deleted file mode 100644 index ce0be9fe..00000000 --- a/docs/cce/umn/cce_01_0025.html +++ /dev/null @@ -1,598 +0,0 @@ - - -

CCE Operations Supported by CTS

-

CTS is available only in certain regions.

-
- -
Table 1 CCE operations supported by CTS

Operation

-

Resource Type

-

Event Name

-

Creating an agency

-

Cluster

-

createUserAgencies

-

Creating a cluster

-

Cluster

-

createCluster

-

Updating the description of a cluster

-

Cluster

-

updateCluster

-

Upgrading a cluster

-

Cluster

-

clusterUpgrade

-

Deleting a cluster

-

Cluster

-

claimCluster/deleteCluster

-

Downloading a cluster certificate

-

Cluster

-

getClusterCertByUID

-

Binding and unbinding an EIP

-

Cluster

-

operateMasterEIP

-

Waking up a cluster and resetting node management (V2)

-

Cluster

-

operateCluster

-

Hibernating a cluster (V3)

-

Cluster

-

hibernateCluster

-

Waking up a cluster (V3)

-

Cluster

-

awakeCluster

-

Changing the specifications of a cluster

-

Cluster

-

resizeCluster

-

Modifying configurations of a cluster

-

Cluster

-

updateConfiguration

-

Creating a node pool

-

Node pool

-

createNodePool

-

Updating a node pool

-

Node pool

-

updateNodePool

-

Deleting a node pool

-

Node pool

-

claimNodePool

-

Migrating a node pool

-

Node pool

-

migrateNodepool

-

Modifying node pool configurations

-

Node pool

-

updateConfiguration

-

Creating a node

-

Node

-

createNode

-

Deleting all the nodes from a specified cluster

-

Node

-

deleteAllHosts

-

Deleting a single node

-

Node

-

deleteOneHost/claimOneHost

-

Updating the description of a node

-

Node

-

updateNode

-

Creating an add-on instance

-

Add-on instance

-

createAddonInstance

-

Deleting an add-on instance

-

Add-on instance

-

deleteAddonInstance

-

Uploading a chart

-

Chart

-

uploadChart

-

Updating a chart

-

Chart

-

updateChart

-

Deleting a chart

-

Chart

-

deleteChart

-

Creating a release

-

Release

-

createRelease

-

Upgrading a release

-

Release

-

updateRelease

-

Deleting a release

-

Release

-

deleteRelease

-

Creating a ConfigMap

-

Kubernetes resource

-

createConfigmaps

-

Creating a DaemonSet

-

Kubernetes resource

-

createDaemonsets

-

Creating a Deployment

-

Kubernetes resource

-

createDeployments

-

Creating an event

-

Kubernetes resource

-

createEvents

-

Creating an Ingress

-

Kubernetes resource

-

createIngresses

-

Creating a job

-

Kubernetes resource

-

createJobs

-

Creating a namespace

-

Kubernetes resource

-

createNamespaces

-

Creating a node

-

Kubernetes resource

-

createNodes

-

Creating a PersistentVolumeClaim

-

Kubernetes resource

-

createPersistentvolumeclaims

-

Creating a pod

-

Kubernetes resource

-

createPods

-

Creating a replica set

-

Kubernetes resource

-

createReplicasets

-

Creating a resource quota

-

Kubernetes resource

-

createResourcequotas

-

Creating a secret

-

Kubernetes resource

-

createSecrets

-

Creating a service

-

Kubernetes resource

-

createServices

-

Creating a StatefulSet

-

Kubernetes resource

-

createStatefulsets

-

Creating a volume

-

Kubernetes resource

-

createVolumes

-

Deleting a ConfigMap

-

Kubernetes resource

-

deleteConfigmaps

-

Deleting a DaemonSet

-

Kubernetes resource

-

deleteDaemonsets

-

Deleting a Deployment

-

Kubernetes resource

-

deleteDeployments

-

Deleting an event

-

Kubernetes resource

-

deleteEvents

-

Deleting an Ingress

-

Kubernetes resource

-

deleteIngresses

-

Deleting a job

-

Kubernetes resource

-

deleteJobs

-

Deleting a namespace

-

Kubernetes resource

-

deleteNamespaces

-

Deleting a node

-

Kubernetes resource

-

deleteNodes

-

Deleting a Pod

-

Kubernetes resource

-

deletePods

-

Deleting a replica set

-

Kubernetes resource

-

deleteReplicasets

-

Deleting a resource quota

-

Kubernetes resource

-

deleteResourcequotas

-

Deleting a secret

-

Kubernetes resource

-

deleteSecrets

-

Deleting a service

-

Kubernetes resource

-

deleteServices

-

Deleting a StatefulSet

-

Kubernetes resource

-

deleteStatefulsets

-

Deleting volumes

-

Kubernetes resource

-

deleteVolumes

-

Replacing a specified ConfigMap

-

Kubernetes resource

-

updateConfigmaps

-

Replacing a specified DaemonSet

-

Kubernetes resource

-

updateDaemonsets

-

Replacing a specified Deployment

-

Kubernetes resource

-

updateDeployments

-

Replacing a specified event

-

Kubernetes resource

-

updateEvents

-

Replacing a specified ingress

-

Kubernetes resource

-

updateIngresses

-

Replacing a specified job

-

Kubernetes resource

-

updateJobs

-

Replacing a specified namespace

-

Kubernetes resource

-

updateNamespaces

-

Replacing a specified node

-

Kubernetes resource

-

updateNodes

-

Replacing a specified PersistentVolumeClaim

-

Kubernetes resource

-

updatePersistentvolumeclaims

-

Replacing a specified pod

-

Kubernetes resource

-

updatePods

-

Replacing a specified replica set

-

Kubernetes resource

-

updateReplicasets

-

Replacing a specified resource quota

-

Kubernetes resource

-

updateResourcequotas

-

Replacing a specified secret

-

Kubernetes resource

-

updateSecrets

-

Replacing a specified service

-

Kubernetes resource

-

updateServices

-

Replacing a specified StatefulSet

-

Kubernetes resource

-

updateStatefulsets

-

Replacing the specified status

-

Kubernetes resource

-

updateStatus

-

Uploading a chart

-

Kubernetes resource

-

uploadChart

-

Updating a component template

-

Kubernetes resource

-

updateChart

-

Deleting a chart

-

Kubernetes resource

-

deleteChart

-

Creating a template application

-

Kubernetes resource

-

createRelease

-

Updating a template application

-

Kubernetes resource

-

updateRelease

-

Deleting a template application

-

Kubernetes resource

-

deleteRelease

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0026.html b/docs/cce/umn/cce_01_0026.html deleted file mode 100644 index 9b0b379a..00000000 --- a/docs/cce/umn/cce_01_0026.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

Querying CTS Logs

-

Scenario

After you enable CTS, the system starts recording operations on CCE resources. Operation records of the last 7 days can be viewed on the CTS management console.

-
-

Procedure

  1. Log in to the management console.
  2. Click in the upper left corner and select a region.
  3. Choose Service List from the main menu. Choose Management & Deployment > Cloud Trace Service.
  4. In the navigation pane of the CTS console, choose Cloud Trace Service > Trace List.
  5. On the Trace List page, query operation records based on the search criteria. Currently, the trace list supports trace query based on the combination of the following search criteria:

    • Trace Source, Resource Type, and Search By

      Select the search criteria from the drop-down lists. Select CCE from the Trace Source drop-down list.

      -

      If you select Trace name from the Search By drop-down list, specify the trace name.

      -

      If you select Resource ID from the Search By drop-down list, select or enter a specific resource ID.

      -

      If you select Resource name from the Search By drop-down list, select or enter a specific resource name.

      -
    • Operator: Select a specific operator (at user level rather than account level).
    • Trace Status: Set this parameter to any of the following values: All trace statuses, normal, warning, and incident.
    • Time range: You can query traces generated during any time range in the last seven days.
    -

  6. Click on the left of a trace to expand its details, as shown below.

    Figure 1 Expanding trace details
    -

  7. Click View Trace in the Operation column. The trace details are displayed.

    Figure 2 Viewing event details
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0027.html b/docs/cce/umn/cce_01_0027.html deleted file mode 100644 index 5c0cd79e..00000000 --- a/docs/cce/umn/cce_01_0027.html +++ /dev/null @@ -1,31 +0,0 @@ - - -

Clusters

-
- - diff --git a/docs/cce/umn/cce_01_0028.html b/docs/cce/umn/cce_01_0028.html deleted file mode 100644 index 161b419f..00000000 --- a/docs/cce/umn/cce_01_0028.html +++ /dev/null @@ -1,233 +0,0 @@ - - -

Creating a CCE Cluster

-

On the CCE console, you can easily create Kubernetes clusters. Kubernetes can manage container clusters at scale. A cluster manages a group of node resources.

-

In CCE, you can create a CCE cluster to manage VMs as nodes. By using high-performance network models, hybrid clusters provide a multi-scenario, secure, and stable runtime environment for containers.

-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • You can create a maximum of 50 clusters in a single region.
  • After a cluster is created, the following items cannot be changed:
    • Number of master nodes in the cluster.
    • AZ of a master node.
    • Network configuration of the cluster, such as the VPC, subnet, container CIDR block, Service CIDR block, and kube-proxy (forwarding) settings.
    • Network model. For example, change the tunnel network to the VPC network.
    -
-
-

Procedure

  1. Log in to the CCE console. On the Dashboard page, click Create Cluster. Alternatively, choose Resource Management > Clusters in the navigation pane and click Create next to CCE Cluster.
  2. Set cluster parameters by referring to Table 1. Pay attention to the parameters marked with an asterisk (*).

    -

    Table 1 Parameters for creating a cluster

    Parameter

    -

    Description

    -

    Region

    -

    Select a region near you to ensure the lowest latency possible.

    -

    *Cluster Name

    -

    Name of the new cluster, which cannot be changed after the cluster is created.

    -

    A cluster name contains 4 to 128 characters starting with a letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    Version

    -

    Kubernetes community baseline version. The latest version is recommended.

    -

    If a Beta version is available, you can use it for trial. However, it is not recommended for commercial use.

    -

    Management Scale

    -

    Maximum number of worker nodes that can be managed by the master nodes of the current cluster. You can select 50 nodes, 200 nodes, or 1,000 nodes for your cluster, or 2,000 nodes if you are creating a cluster of v1.15.11 or later.

    -

    If you select 1000 nodes, the master nodes of the cluster can manage a maximum of 1000 worker nodes. The configuration fee varies depending on the specifications of master nodes for different management scales.

    -

    Number of master nodes

    -

    3: Three master nodes will be created to make the cluster highly available. If a master node is faulty, the cluster can still be available without affecting service functions. Click Change. In the dialog box displayed, you can configure the following parameters:

    -

    Disaster recovery level

    -
    • AZ: Master nodes are deployed in different AZs for disaster recovery.
    • Fault domain: Master nodes are deployed in different failure domains in the same AZ for disaster recovery. This option is displayed only when the environment supports failure domains.
    • Host computer: Master nodes are deployed on different hosts in the same AZ for disaster recovery.
    • Customize: You can select different locations to deploy different master nodes. In the fault domain mode, master nodes must be in the same AZ.
    -

    1: Only one master node is created in the cluster, which cannot ensure SLA for the cluster. Single-master clusters (non-HA clusters) are not recommended for commercial scenarios. Click Change. In the AZ Settings dialog box, select an AZ for the master node.

    -
    NOTE:
    • You are advised to create multiple master nodes to improve the cluster DR capability in commercial scenarios.
    • The multi-master mode cannot be changed after the cluster is created. A single-master cluster cannot be upgraded to a multi-master cluster. For a single-master cluster, if a master node is faulty, services will be affected.
    • To ensure reliability, the multi-master mode is enabled by default for a cluster with 1,000 or more nodes.
    -
    -

    *VPC

    -

    VPC where the cluster is located. The value cannot be changed after the cluster is created.

    -

    A VPC provides a secure and logically isolated network environment.

    -

    If no VPC is available, click Create a VPC to create a VPC. After the VPC is created, click the refresh icon.

    -

    *Subnet

    -

    Subnet where the node VM runs. The value cannot be changed after the cluster is created.

    -

    A subnet provides dedicated network resources that are logically isolated from other networks for network security.

    -

    If no subnet is available, click Create Subnet to create a subnet. After the subnet is created, click the refresh icon. For details about the relationship between VPCs, subnets, and clusters, see Cluster Overview.

    -

    During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

    -

    The selected subnet cannot be changed after the cluster is created.

    -

    Network Model

    -

    After a cluster is created, the network model cannot be changed. Exercise caution when selecting a network model. For details about how to select a network model, see Overview.

    -

    VPC network

    -

    In this network model, each node occupies one VPC route. The number of VPC routes supported by the current region and the number of container IP addresses that can be allocated to each node (that is, the maximum number of pods that can be created) are displayed on the console.

    -
    • The container network uses VPC routes to integrate with the underlying network. This network model is applicable to performance-intensive scenarios. However, each node occupies one VPC route, and the maximum number of nodes allowed in a cluster depends on the VPC route quota.
    • Each node is assigned a CIDR block of a fixed size. VPC networks are free from packet encapsulation overheads and outperform container tunnel networks. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in the cluster can be directly accessed from outside the cluster.
      NOTE:
      • In the VPC network model, extended CIDR blocks and network policies are not supported.
      • When creating multiple clusters using the VPC network model in one VPC, select a CIDR block for each cluster that does not overlap with the VPC address or other container CIDR blocks.
      -
      -
    -

    Tunnel network

    -
    • The container network is an overlay tunnel network on top of a VPC network and uses the VXLAN technology. This network model is applicable when there are no high performance requirements.
    • VXLAN encapsulates Ethernet packets as UDP packets for tunnel transmission. Though at some cost of performance, the tunnel encapsulation enables higher interoperability and compatibility with advanced features (such as network policy-based isolation), meeting the requirements of most applications.
    -

    Container Network Segment

    -

    An IP address range that can be allocated to container pods. After the cluster is created, the value cannot be changed.

    -
    • If Automatically select is deselected, enter a CIDR block manually. If the CIDR block you specify conflicts with a subnet CIDR block, the system prompts you to select another CIDR block. The recommended CIDR blocks are 10.0.0.0/8-18, 172.16.0.0/16-18, and 192.168.0.0/16-18.

      If different clusters share a container CIDR block, an IP address conflict will occur and access to applications may fail.

      -
    • If Automatically select is selected, the system automatically assigns a CIDR block that does not conflict with any subnet CIDR block.
    -

    The mask of the container CIDR block must be appropriate. It determines the number of available nodes in a cluster. If the mask value is too small, the cluster will soon fall short of nodes. After the mask is set, the estimated maximum number of containers supported by the current CIDR block will be displayed.

    -

    Service Network Segment

    -

    An IP address range that can be allocated to Kubernetes Services. After the cluster is created, the value cannot be changed. The Service CIDR block cannot conflict with the created route. If they conflict, select another CIDR block.

    -
    • Default: The default CIDR block 10.247.0.0/16 will be used.
    • Custom: Manually set a CIDR block and mask based on service requirements. The mask determines the maximum number of Service IP addresses available in the cluster.
    -

    Authorization Mode

    -

    RBAC is selected by default and cannot be deselected.

    -

    After RBAC is enabled, IAM users access resources in the cluster according to fine-grained permissions policies. For details, see Namespace Permissions (Kubernetes RBAC-based).

    -

    Authentication Mode

    -

    The authentication mechanism controls user permission on resources in a cluster.

    -

    The X.509-based authentication mode is enabled by default. X.509 is a commonly used certificate format.

    -

    If you want to perform permission control on the cluster, select Enhanced authentication. The cluster will identify users based on the header of the request for authentication.

    -

    You need to upload your own CA certificate, client certificate, and client certificate private key (for details about how to create a certificate, see Certificates), and select I have confirmed that the uploaded certificates are valid.

    -
    CAUTION:
    • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
    • The validity period of the client certificate must be longer than five years.
    • The uploaded CA certificate is used for both the authentication proxy and the kube-apiserver aggregation layer configuration. If the certificate is invalid, the cluster cannot be created.
    -
    -

    Cluster Description

    -

    Optional. Enter the description of the new container cluster.

    -

    Advanced Settings

    -

    Click Advanced Settings to expand the details page. The following functions are supported (unsupported functions in current AZs are hidden):

    -

    Service Forwarding Mode

    -
    • iptables: Traditional kube-proxy uses iptables rules to implement Service load balancing. In this mode, too many iptables rules will be generated when many Services are deployed. In addition, non-incremental updates will cause a latency and even obvious performance issues in the case of heavy service traffic.
    • ipvs: optimized kube-proxy mode to achieve higher throughput and faster speed, ideal for large-sized clusters. This mode supports incremental updates and can keep connections uninterrupted during Service updates.

      In this mode, when the ingress and Service use the same ELB instance, the ingress cannot be accessed from the nodes and containers in the cluster.

      -
    -
    NOTE:
    • ipvs provides better scalability and performance for large clusters.
    • Compared with iptables, ipvs supports more complex load balancing algorithms such as least load first (LLF) and weighted least connections (WLC).
    • ipvs supports server health checking and connection retries.
    -
    -

    CPU Policy

    -

    This parameter is displayed only for clusters of v1.13.10-r0 and later.

    -
    • On: Exclusive CPU cores can be allocated to workload pods. Select On if your workload is sensitive to latency in CPU cache and scheduling.
    • Off: Exclusive CPU cores will not be allocated to workload pods. Select Off if you want a large pool of shareable CPU cores.
    -

    For details about CPU management policies, see Feature Highlight: CPU Manager.

    -

    After CPU Policy is enabled, workloads cannot be started or created on nodes after the node specifications are changed.

    -

    Open EIP

    -

    An independent public IP address that is reachable from public networks. Select an EIP that has not been bound to any node. A cluster's EIP is preset in the cluster's certificate. Do not delete the EIP after the cluster has been created. Otherwise, two-way authentication will fail.

    -
    • Do not configure: The cluster's master node will not have an EIP.
    • Configure now: If no EIP is available for selection, create one.
    -
    -
    -

  3. Click Next: Create Node and set the following parameters.

    • Create Node
      • Create now: Create a node when creating a cluster. Currently, only VM nodes are supported. If a node fails to be created, the cluster will be rolled back.
      • Create later: No node will be created. Only an empty cluster will be created.
      -
    • Current Region: geographic location of the nodes to be created.
    • AZ: Set this parameter based on the site requirements. An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      You are advised to deploy worker nodes in different AZs after the cluster is created to make your workloads more reliable. When creating a cluster, you can deploy nodes only in one AZ.

      -
    • Node Type
      • VM node: A VM node will be created in the cluster.
      -
    • Node Name: Enter a node name. A node name contains 1 to 56 characters starting with a lowercase letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created.

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 40 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
      Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

      If the data disk is detached or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • LVM: If this option is selected, CCE data disks are managed by the Logical Volume Manager (LVM). In this case, you can adjust the disk space allocation for different resources. This option is selected for the first disk by default and cannot be deselected. You can choose to enable or disable LVM for new data disks.
        • This option is selected by default, indicating that LVM management is enabled.
        • You can deselect the check box to disable LVM management.
          • Disk space of the data disks managed by LVM will be allocated according to the ratio you set.
          • When creating a node in a cluster of v1.13.10 or later, if LVM is not selected for a data disk, follow instructions in Adding a Second Data Disk to a Node in a CCE Cluster to fill in the pre-installation script and format the data disk. Otherwise, the data disk will still be managed by LVM.
          • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
          -
          -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        This function is supported only for clusters of v1.13.10 or later in certain regions, and is not displayed for clusters of v1.13.10 or earlier.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
        -
      • Add Data Disk: Currently, a maximum of two data disks can be attached to a node. After the node is created, you can go to the ECS console to attach more data disks. This function is available only to clusters of certain versions.
      • Data disk space allocation: Click to specify the resource ratio for Kubernetes Space and User Space. Disk space of the data disks managed by LVM will be allocated according to the ratio you set. This function is available only to clusters of certain versions.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.

          The Docker space cannot be less than 10%, and the space size cannot be less than 60 GB. The kubelet space cannot be less than 10%.

          -

          The Docker space size is determined by your service requirements. For details, see Data Disk Space Allocation.

          -
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.

          Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

          -
          -
        -
      -
      If the cluster version is v1.13.10-r0 or later and the node specification is Disk-intensive, the following options are displayed for data disks:
      • EVS: Parameters are the same as those when the node type is not Disk-intensive. For details, see Data Disk above.
      • Local disk: Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.
        Local disk parameters are as follows:
        • Disk Mode: If the node type is disk-intensive, the supported disk mode is HDD.
        • Read/Write Mode: When multiple local disks exist, you can set the read/write mode. The serial and sequential modes are supported. Sequential indicates that data is read and written in linear mode. When a disk is used up, the next disk is used. Serial indicates that data is read and written in striping mode, allowing multiple local disks to be read and written at the same time.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.
        -
        -
      -
      • The ratio of disk space allocated to the Kubernetes space and user space must be equal to 100% in total. You can click to refresh the data after you have modified the ratio.
      • By default, disks run in the direct-lvm mode. If data disks are removed, the loop-lvm mode will be used and this will impair system stability.
      -
      -
      -
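
      For the case noted above where a data disk is not managed by LVM, the following is a minimal, hedged sketch of a pre-installation script that formats and mounts such a disk. The device name (/dev/vdb), file system (ext4), and mount point (/data) are illustrative assumptions, not CCE defaults; use the values required by your environment and the linked instructions.

      # Hedged example: format and mount a data disk that LVM does not manage.
      # /dev/vdb, ext4, and /data are assumptions, not CCE defaults.
      mkfs.ext4 /dev/vdb
      mkdir -p /data
      mount /dev/vdb /data
      echo "/dev/vdb /data ext4 defaults 0 0" >> /etc/fstab   # keep the mount across reboots
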
    • VPC: A VPC where the current cluster is located. This parameter cannot be changed and is displayed only for clusters of v1.13.10-r0 or later.
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks. You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

      -

      -
    • EIP: an independent public IP address. If the nodes to be created require public network access, select Automatically assign or Use existing.
      An EIP bound to the node allows public network access. EIP bandwidth can be modified at any time. An ECS without a bound EIP cannot access the Internet or be accessed by public networks.
      • Do not use: A node without an EIP cannot be accessed from public networks. It can be used only as a cloud server for deploying services or clusters on a private network.
      • Automatically assign: An EIP with specified configurations is automatically assigned to each node. If the number of EIPs is smaller than the number of nodes, the EIPs are randomly bound to the nodes.

        Configure the EIP specifications, billing factor, bandwidth type, and bandwidth size as required. When creating an ECS, ensure that the elastic IP address quota is sufficient.

        -
      • Use existing: Existing EIPs are assigned to the nodes to be created.
      -

      By default, VPC's SNAT feature is disabled for CCE. If SNAT is enabled, you do not need to use EIPs to access public networks. For details about SNAT, see Custom Policies.

      -
      -
      -
    • Login Mode:
      • Key pair: Select the key pair used to log in to the node. You can select a shared key.

        A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

        -

        When creating a node using a key pair, IAM users can select only the key pairs created by themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and such a key pair is not displayed in the drop-down list on the CCE console.

        -
        -
      -
    • Advanced ECS Settings (optional): Click to show advanced ECS settings.
      • ECS Group: An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.
        • Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.
        -

        Select an existing ECS group, or click Create ECS Group to create one. After the ECS group is created, click the refresh button.

        -
      • Resource Tags: By adding tags to resources, you can classify resources.

        You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

        -

        CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag. A maximum of 5 tags can be added.

        -
      • Agency: An agency is created by a tenant administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources. To authorize an ECS or BMS to call cloud services, select Cloud service as the agency type, click Select, and then select ECS BMS.
      • Pre-installation Script: Enter a maximum of 1,000 characters.

        The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The script is usually used to format data disks.

        -
      • Post-installation Script: Enter a maximum of 1,000 characters.

        The script will be executed after Kubernetes software is installed and will not affect the installation. The script is usually used to modify Docker parameters.

        -
      • Subnet IP Address: Select Automatically assign IP address (recommended) or Manually assign IP address.
      -
    • Advanced Kubernetes Settings: (Optional) Click to show advanced cluster settings.
      • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node.

        This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

        -
      • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
      -
    • Nodes: The value cannot exceed the management scale you select when configuring cluster parameters. Set this parameter based on service requirements and the remaining quota displayed on the page. Click to view the factors that affect the number of nodes to be added (depending on the factor with the minimum value).
    -

  4. Click Next: Install Add-on, and select the add-ons to be installed in the Install Add-on step.

    System resource add-ons must be installed. Advanced functional add-ons are optional.

    -

    You can also install all add-ons after the cluster is created. To do so, choose Add-ons in the navigation pane of the CCE console and select the add-on you will install. For details, see Add-ons.

    -

  5. Click Next: Confirm. Read the product instructions and select I am aware of the above limitations. Confirm the configured parameters, specifications, and fees.
  6. Click Submit.

    It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details. If the cluster status is Available, the cluster is successfully created.

    -

-
-

Related Operations

-
  • Create a namespace. You can create multiple namespaces in a cluster and organize resources in the cluster into different namespaces. These namespaces serve as logical groups and can be managed separately. For more information about how to create a namespace for a cluster, see Namespaces.
  • Create a workload. Once the cluster is created, you can use an image to create an application that can be accessed from public networks. For details, see Creating a Deployment or Creating a StatefulSet.
  • Click the cluster name to view cluster details. -
    Table 2 Cluster details

    Tab

    -

    Description

    -

    Cluster Details

    -

    View the details and operating status of the cluster.

    -

    Monitoring

    -

    You can view the CPU and memory allocation rates of all nodes in the cluster (that is, the maximum allocated amount), as well as the CPU usage, memory usage, and specifications of the master node(s).

    -

    Events

    -
    • View cluster events on the Events tab page.
    • Set search criteria. For example, you can set the time segment or enter an event name to view corresponding events.
    -

    Auto Scaling

    -

    You can configure auto scaling to add or reduce worker nodes in a cluster to meet service requirements. For details, see Setting Cluster Auto Scaling.

    -

    Clusters of v1.17 do not support auto scaling using AOM. You can use node pools for auto scaling. For details, see Node Pool Overview.

    -

    kubectl

    -

    To access a Kubernetes cluster from a PC, you need to use the Kubernetes command line tool kubectl. For details, see Connecting to a Cluster Using kubectl. A short connectivity check is sketched after this table.

    -
    -
    -
-
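
  As mentioned in the kubectl row above, after connecting kubectl to the cluster you can run a quick check. This is a hedged sketch; the commands are standard kubectl and carry no CCE-specific assumptions.

  kubectl cluster-info       # confirm that the API server endpoint is reachable
  kubectl get nodes -o wide  # list the cluster nodes and their status
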
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0030.html b/docs/cce/umn/cce_01_0030.html deleted file mode 100644 index ece577f5..00000000 --- a/docs/cce/umn/cce_01_0030.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Namespaces

-
- - diff --git a/docs/cce/umn/cce_01_0031.html b/docs/cce/umn/cce_01_0031.html deleted file mode 100644 index 7891cf88..00000000 --- a/docs/cce/umn/cce_01_0031.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Managing a Cluster

-
- - diff --git a/docs/cce/umn/cce_01_0033.html b/docs/cce/umn/cce_01_0033.html deleted file mode 100644 index 6aeabcec..00000000 --- a/docs/cce/umn/cce_01_0033.html +++ /dev/null @@ -1,76 +0,0 @@ - - -

Creating a Node

-

Scenario

A node is a virtual or physical machine that provides computing resources. Sufficient nodes must be available in your project to ensure that operations, such as creating workloads, can be performed.

-
-

Prerequisites

  • At least one cluster is available. For details on how to create a cluster, see Creating a CCE Cluster.
  • A key pair has been created. The key pair will be used for identity authentication upon remote node login.
-
-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • Only KVM nodes can be created. Non-KVM nodes cannot be used after being created.
  • Once a node is created, its AZ cannot be changed.
  • CCE supports GPUs through an add-on named gpu-beta. You need to install this add-on to use GPU-enabled nodes in your cluster.
-
-

Procedure

  1. Log in to the CCE console. Use either of the following methods to add a node:

    • In the navigation pane, choose Resource Management > Nodes. Select the cluster to which the node will belong and click Create Node on the upper part of the node list page.
    • In the navigation pane, choose Resource Management > Clusters. In the card view of the cluster to which you will add nodes, click Create Node.
    -

  2. Select a region and an AZ.

    • Current Region: geographic location of the nodes to be created.
    • AZ: Set this parameter based on the site requirements. An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network.

      You are advised to deploy worker nodes in different AZs after the cluster is created to make your workloads more reliable. When creating a cluster, you can deploy nodes only in one AZ.

      -
    -

  3. Configure node parameters.

    • Node Type
      • VM node: A VM node will be created in the cluster.
      -
    • Node Name: Enter a node name. A node name contains 1 to 56 characters starting with a lowercase letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.
    • Specifications: Select node specifications that best fit your business needs.
      • General-purpose: provides a balance of computing, memory, and network resources. It is a good choice for many applications, such as web servers, workload development, workload testing, and small-scale databases.
      • Memory-optimized: provides higher memory capacity than general-purpose nodes and is suitable for relational databases, NoSQL, and other workloads that are both memory-intensive and data-intensive.
      • GPU-accelerated: provides powerful floating-point computing and is suitable for real-time, highly concurrent massive computing. Graphical processing units (GPUs) of P series are suitable for deep learning, scientific computing, and CAE. GPUs of G series are suitable for 3D animation rendering and CAD. GPU-accelerated nodes can be created only in clusters of v1.11 or later. GPU-accelerated nodes are available only in certain regions.
      • General computing-plus: provides stable performance and exclusive resources to enterprise-class workloads with high and stable computing performance.
      • Disk-intensive: supports local disk storage and provides high network performance. It is designed for workloads requiring high throughput and data switching, such as big data workloads.
      -

      To ensure node stability, CCE automatically reserves some resources to run necessary system components. For details, see Formula for Calculating the Reserved Resources of a Node.

      -
    • OS: Select an OS for the node to be created. -

      Reinstalling the OS or modifying OS configurations could make the node unavailable. Exercise caution when performing these operations.

      -
    • System Disk: Set the system disk space of the worker node. The value ranges from 40 GB to 1,024 GB. The default value is 40 GB.

      By default, system disks support Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD) EVS disks.

      -
      Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function. This function is available only in certain regions.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
      -
    • Data Disk: Set the data disk space of the worker node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

      If the data disk is uninstalled or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

      -
      -
      • LVM: If this option is selected, CCE data disks are managed by the Logical Volume Manager (LVM). In this case, you can adjust the disk space allocation for different resources. This option is selected for the first disk by default and cannot be deselected. You can choose to enable or disable LVM for new data disks.
        • This option is selected by default, indicating that LVM management is enabled.
        • You can deselect the check box to disable LVM management.
          • Disk space of the data disks managed by LVM will be allocated according to the ratio you set.
          • When creating a node in a cluster of v1.13.10 or later, if LVM is not selected for a data disk, follow instructions in Adding a Second Data Disk to a Node in a CCE Cluster to fill in the pre-installation script and format the data disk. Otherwise, the data disk will still be managed by LVM.
          • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
          -
          -
        -
      • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
        This function is supported only for clusters of v1.13.10 or later in certain regions, and is not displayed for clusters of v1.13.10 or earlier.
        • Encryption is not selected by default.
        • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
        -
        -
      • Add Data Disk: Currently, a maximum of two data disks can be attached to a node. After the node is created, you can go to the ECS console to attach more data disks. This function is available only to clusters of certain versions.
      • Data disk space allocation: Click to specify the resource ratio for Kubernetes Space and User Space. Disk space of the data disks managed by LVM will be allocated according to the ratio you set. This function is available only to clusters of certain versions. (A hedged sketch for inspecting the resulting disk layout on the node is provided after this data disk section.)
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.

          The Docker space cannot be less than 10%, and the space size cannot be less than 60 GB. The kubelet space cannot be less than 10%.

          -

          The Docker space size is determined by your service requirements. For details, see Data Disk Space Allocation.

          -
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.

          Note that the mount path cannot be /, /home/paas, /var/paas, /var/lib, /var/script, /var/log, /mnt/paas, or /opt/cloud, and cannot conflict with the system directories (such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr). Otherwise, the system or node installation will fail.

          -
          -
        -
      -
      If the cluster version is v1.13.10-r0 or later and the node specification is Disk-intensive, the following options are displayed for data disks:
      • EVS: Parameters are the same as those when the node type is not Disk-intensive. For details, see Data Disk above.
      • Local disk: Local disks may break down and do not ensure data reliability. It is recommended that you store service data in EVS disks, which are more reliable than local disks.
        Local disk parameters are as follows:
        • Disk Mode: If the node type is disk-intensive, the supported disk mode is HDD.
        • Read/Write Mode: When multiple local disks exist, you can set the read/write mode. The serial and sequential modes are supported. Sequential indicates that data is read and written in linear mode. When a disk is used up, the next disk is used. Serial indicates that data is read and written in striping mode, allowing multiple local disks to be read and written at the same time.
        • Kubernetes Space: You can specify the ratio of the data disk space for storing Docker and kubelet resources. Docker resources include the Docker working directory, Docker images, and image metadata. kubelet resources include pod configuration files, secrets, and emptyDirs.
        • User Space: You can set the ratio of the disk space that is not allocated to Kubernetes resources and the path to which the user space is mounted.
        -
        -
      -
      • The ratio of disk space allocated to the Kubernetes space and user space must be equal to 100% in total. You can click to refresh the data after you have modified the ratio.
      • By default, disks run in the direct-lvm mode. If data disks are removed, the loop-lvm mode will be used and this will impair system stability.
      -
      -
      -
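
      As noted in the space allocation items above, after the node is created you can log in to it and inspect how the LVM-managed data disk was divided. This is a hedged sketch using standard LVM and Linux commands; the exact volume group, logical volume, and mount point names on a CCE node are not specified here.

      vgs        # list the LVM volume groups created on the data disk
      lvs        # list the logical volumes and their sizes
      df -h      # show where each logical volume is mounted and its usable space
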
    • VPC: A VPC where the current cluster is located. This parameter cannot be changed and is displayed only for clusters of v1.13.10-r0 or later.
    • Subnet: A subnet improves network security by providing exclusive network resources that are isolated from other networks. You can select any subnet in the cluster VPC. Cluster nodes can belong to different subnets.

      During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

      -

      -
    -

  4. EIP: an independent public IP address. If the nodes to be created require public network access, select Automatically assign or Use existing.

    An EIP bound to the node allows public network access. EIP bandwidth can be modified at any time. An ECS without a bound EIP cannot access the Internet or be accessed by public networks.
    • Do not use: A node without an EIP cannot be accessed from public networks. It can be used only as a cloud server for deploying services or clusters on a private network.
    • Automatically assign: An EIP with specified configurations is automatically assigned to each node. If the number of EIPs is smaller than the number of nodes, the EIPs are randomly bound to the nodes.

      Configure the EIP specifications, billing factor, bandwidth type, and bandwidth size as required. When creating an ECS, ensure that the elastic IP address quota is sufficient.

      -
    • Use existing: Existing EIPs are assigned to the nodes to be created.
    -

    By default, VPC's SNAT feature is disabled for CCE. If SNAT is enabled, you do not need to use EIPs to access public networks. For details about SNAT, see Custom Policies.

    -
    -
    -

  5. Login Mode:

    • Key pair: Select the key pair used to log in to the node. You can select a shared key.

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

      -

      When creating a node using a key pair, IAM users can select only the key pairs created by themselves, regardless of whether these users are in the same group. For example, user B cannot use the key pair created by user A to create a node, and such a key pair is not displayed in the drop-down list on the CCE console.

      -
      -
    -

  6. Advanced ECS Settings (optional): Click to show advanced ECS settings.

    • ECS Group: An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.
      • Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.
      -

      Select an existing ECS group, or click Create ECS Group to create one. After the ECS group is created, click the refresh button.

      -
    • Resource Tags: By adding tags to resources, you can classify resources.

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag. A maximum of 5 tags can be added.

      -
    • Agency: An agency is created by a tenant administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources. To authorize an ECS or BMS to call cloud services, select Cloud service as the agency type, click Select, and then select ECS BMS.
    • Pre-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. The script is usually used to format data disks.

      -
    • Post-installation Script: Enter a maximum of 1,000 characters.

      The script will be executed after Kubernetes software is installed and will not affect the installation. The script is usually used to modify Docker parameters. (A hedged example is provided after this step.)

      -
    • Subnet IP Address: Select Automatically assign IP address (recommended) or Manually assign IP address.
    -
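
    The following is a minimal, hedged sketch of a post-installation script that adjusts a Docker daemon parameter (log rotation), as mentioned above. The file path, option values, and the guard against overwriting an existing configuration are assumptions; on a real CCE node, merge any change with the configuration the platform manages rather than replacing it.

    # Hedged example: set Docker log rotation only if no daemon.json exists yet.
    if [ ! -s /etc/docker/daemon.json ]; then
        echo '{"log-opts": {"max-size": "50m", "max-file": "3"}}' > /etc/docker/daemon.json
        systemctl restart docker   # reload the Docker daemon with the new options
    fi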

  7. Advanced Kubernetes Settings: (Optional) Click to show advanced cluster settings.

    • Max Pods: maximum number of pods that can be created on a node, including the system's default pods. If the cluster uses the VPC network model, the maximum value is determined by the number of IP addresses that can be allocated to containers on each node. (A kubectl sketch for checking the value on a running node is provided after this step.)

      This limit prevents the node from being overloaded by managing too many pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

      -
    • Maximum Data Space per Container: maximum data space that can be used by a container. The value ranges from 10 GB to 500 GB. If the value of this field is larger than the data disk space allocated to Docker resources, the latter will override the value specified here. Typically, 90% of the data disk space is allocated to Docker resources. This parameter is displayed only for clusters of v1.13.10-r0 and later.
    -
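
    After the node is running, you can verify the effective maximum pod count with kubectl, as referenced above. This is a hedged sketch; the node name is an assumption.

    kubectl get node my-node-01 -o jsonpath='{.status.allocatable.pods}'   # pod capacity the scheduler can use on this node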

  8. Nodes: The value cannot exceed the management scale you select when configuring cluster parameters. Set this parameter based on service requirements and the remaining quota displayed on the page. Click to view the factors that affect the number of nodes to be added (depending on the factor with the minimum value).
  9. Click Next: Confirm. After confirming that the configuration is correct, click Submit.

    The node list page is displayed. If the node status is Available, the node is added successfully. It takes about 6 to 10 minutes to create a node.
    • Do not delete the security groups and related rules automatically configured during cluster creation. Otherwise, the cluster will exhibit unexpected behavior.
    -
    -
    -

  10. Click Back to Node List. The node has been created successfully if it changes to the Available state.

    The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node.

    -

    The calculation formula is as follows:

    -
    • Allocatable CPUs = Total CPUs – Requested CPUs of all pods – Reserved CPUs for other resources
    • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
    -
    -
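
    For example (a hedged illustration with assumed numbers): on a node with 8 CPU cores and 16 GiB of memory, if the pods on the node request 3 cores and 5 GiB in total and 1 core and 2 GiB are reserved for other resources, the allocatable amounts shown are 8 - 3 - 1 = 4 cores and 16 - 5 - 2 = 9 GiB of memory.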

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0035.html b/docs/cce/umn/cce_01_0035.html deleted file mode 100644 index 578ca003..00000000 --- a/docs/cce/umn/cce_01_0035.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

Node Pools

-
- - diff --git a/docs/cce/umn/cce_01_0036.html b/docs/cce/umn/cce_01_0036.html deleted file mode 100644 index 553e85b1..00000000 --- a/docs/cce/umn/cce_01_0036.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

Stopping a Node

-

Scenario

After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that interrupting the services running on it will not adversely affect your applications.

-
-

Notes and Constraints

  • Deleting a node will lead to pod migration, which may affect services. Therefore, delete nodes during off-peak hours.
  • Unexpected risks may occur during node deletion. Back up related data in advance.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
  • Only worker nodes can be stopped.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the node list, click the name of the node to be stopped.
  3. On the node details page displayed, click the node name to go to the ECS details page.

    Figure 1 Nodes details page
    -

  4. In the upper right corner of the ECS details page, click Stop. In the Stop ECS dialog box, click Yes.

    Figure 2 ECS details page
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0042.html b/docs/cce/umn/cce_01_0042.html deleted file mode 100644 index 08ca90ec..00000000 --- a/docs/cce/umn/cce_01_0042.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

Storage (CSI)

-

-
- - diff --git a/docs/cce/umn/cce_01_0044.html b/docs/cce/umn/cce_01_0044.html deleted file mode 100644 index 77545b16..00000000 --- a/docs/cce/umn/cce_01_0044.html +++ /dev/null @@ -1,18 +0,0 @@ - - -

EVS Volumes

-

-
- - diff --git a/docs/cce/umn/cce_01_0045.html b/docs/cce/umn/cce_01_0045.html deleted file mode 100644 index d6f81ab3..00000000 --- a/docs/cce/umn/cce_01_0045.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Configuration Center

-
- - diff --git a/docs/cce/umn/cce_01_0046.html b/docs/cce/umn/cce_01_0046.html deleted file mode 100644 index eef0c613..00000000 --- a/docs/cce/umn/cce_01_0046.html +++ /dev/null @@ -1,29 +0,0 @@ - - -

Workloads

-
- - diff --git a/docs/cce/umn/cce_01_0047.html b/docs/cce/umn/cce_01_0047.html deleted file mode 100644 index 0d933869..00000000 --- a/docs/cce/umn/cce_01_0047.html +++ /dev/null @@ -1,294 +0,0 @@ - - -

Creating a Deployment

-

Scenario

Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.

-
-

Prerequisites

  • Before creating a containerized workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
  • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

    If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the Deployment will fail.

    -
    -
-
-

Using the CCE Console

CCE provides multiple methods for creating a workload. You can use any of the following methods:
  • Use an image in Third-Party Images. You do not need to upload any image before using it.
  • Use an image that you have uploaded to SWR.
  • Use a shared image to create a workload. Specifically, other tenants share an image with you by using the SWR service.
  • Use a YAML file to create a workload. You can click Create YAML on the right of the Configure Advanced Settings page when creating a Deployment. For details about YAML, see Table 3. After the YAML file is written, click Create to create a workload.

    Settings in the YAML file are synchronized with those on the console. You can edit the YAML file on the console to create a workload. For example:

    -
    • If you enter a workload name on the console, the name will automatically appear in the YAML file.
    • If you add an image on the console, the image will be automatically added to the YAML file.
    -

    When you click Create YAML on the right of the console, do not create multiple YAML files in the YAML definition pane displayed. You need to create them one by one. Otherwise, an error will be reported during the creation.

    -
    -
-
-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments. On the page displayed, click Create Deployment. Set basic workload parameters as described in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Basic workload parameters

    Parameter

    -

    Description

    -

    * Workload Name

    -

    Name of the workload to be created. The name must be unique.

    -

    Enter 4 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster Name

    -

    Cluster to which the workload belongs.

    -

    * Namespace

    -

    In a single cluster, data in different namespaces is isolated from each other. This enables applications to share the services of the same cluster without interfering with each other. If no namespace is set, the default namespace is used.

    -

    * Instances

    -

    Number of pods in the workload. A workload can have one or more pods. You can set the number of pods. The default value is 2 and can be set to 1.

    -

    Each workload pod consists of the same containers. Configuring multiple pods for a workload ensures that the workload can still run properly even if a pod is faulty. If only one pod is used, a node or pod exception may cause service exceptions.

    -

    * Container runtime

    -

    Select a container runtime, which cannot be changed after creation. This parameter is available only for CCE Turbo clusters.

    -
    • runc: Common containers will run on the node.
    • kata: Secure containers will be used and the workload can run only on the node that uses the secure runtime.
    -

    For details about common containers and secure containers, see Secure Containers and Common Containers.

    -

    Time Zone Synchronization

    -

    If this parameter is enabled, the container and the node use the same time zone.

    -
    NOTICE:

    After time zone synchronization is enabled, disks of the hostPath type will be automatically added and listed in the Data Storage > Local Volume area. Do not modify or delete the disks.

    -
    -

    Description

    -

    Description of the workload.

    -
    -
    -

  2. Click Next: Add Container.

    1. Click Add Container and select the image to be deployed.
      • My Images: Create a workload using an image in the image repository you created.
      • Third-Party Images: Create a workload using an image from any third-party image repository. When you create a workload using a third-party image, ensure that the node where the workload is running can access public networks. For details on how to create a workload using a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image pull address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: Create a workload using an image shared by another tenant through the SWR service.
      -
    2. Configure basic image information.

      A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately.

      Table 2 Image parameters

      Parameter

      -

      Description

      -

      Image Name

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Privileged Container

      -

      Programs in a privileged container have certain privileges.

      -

      If Privileged Container is On, the container is granted superuser permissions. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 512 MiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses this GPU accordingly.

      -
      -
      -
    3. Lifecycle: Commands for starting and running containers can be set. -
    4. Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
      • Liveness Probe: used to restart the unhealthy container.
      • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
      -
    5. Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters. (A hedged YAML sketch showing how container resources and the three variable types appear in a container spec is provided after this step.)
      On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

        To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

        -
        -
      -
      -
    6. Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local volume and cloud storage are supported. For details, see Storage (CSI).

      Currently, cloud storage cannot be mounted to secure (Kata) containers in a CCE Turbo cluster.

      -
      -
    7. Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

      Enter the user ID to set container permissions and prevent systems and other containers from being affected.

      -
    8. Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
    -
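
    For reference, the following hedged fragment shows how the container settings above (resources and the three environment variable types) appear in the containers section of a pod template. The image, values, and the my-secret/my-config references are illustrative assumptions.

    containers:
    - name: container-1
      image: nginx:alpine          # illustrative image
      resources:
        requests:
          cpu: 250m                # Request: 0.25 cores
          memory: 512Mi            # Request: 512 MiB
        limits:
          cpu: 500m                # Limit
          memory: 1Gi              # Limit
      env:
      - name: LOG_LEVEL            # added manually
        value: "info"
      - name: DB_PASSWORD          # added from a Secret (my-secret is an assumption)
        valueFrom:
          secretKeyRef:
            name: my-secret
            key: password
      - name: APP_MODE             # added from a ConfigMap (my-config is an assumption)
        valueFrom:
          configMapKeyRef:
            name: my-config
            key: mode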

  3. Click Next: Set Application Access. Then, click Add Service and set the workload access type.

    If your workload will be reachable to other workloads or public networks, add a Service to define the workload access type.

    -

    The workload access type determines the network attributes of the workload. Workloads with different access types can provide different network capabilities. For details, see Overview.

    -

  4. Click Next: Configure Advanced Settings to configure advanced policies.

    • Upgrade Mode: You can specify the upgrade mode of a Deployment, including Rolling upgrade and In-place upgrade. (A hedged YAML sketch showing how these advanced settings map to Deployment fields follows this step.)
      • Rolling upgrade: Old pods are gradually replaced with new ones. During the upgrade, service traffic is evenly distributed to both pods to ensure service continuity.
        • Maximum Number of Unavailable Pods: maximum number of unavailable pods allowed in a rolling upgrade. If the number is equal to the total number of pods, services may be interrupted. Minimum number of alive pods = Total pods – Maximum number of unavailable pods
        -
      • In-place upgrade: Old pods are deleted before new pods are created. Services will be interrupted during an in-place upgrade.
      -
    • Graceful Deletion: A time window can be set for workload deletion and reserved for executing commands in the pre-stop phase in the lifecycle. If workload processes are not terminated after the time window elapses, the workload will be forcibly deleted.
      • Graceful Time Window (s): Set a time window (0–9999s) for pre-stop commands to finish execution before a workload is deleted. The default value is 30s.
      • Scale Order: Choose Prioritize new pods or Prioritize old pods based on service requirements. Prioritize new pods indicates that new pods will be first deleted when a scale-in is triggered.
      -
    • Migration Policy: When the node where a workload's pods are located is unavailable for the specified amount of time, the pods will be rescheduled to other available nodes.
      • Migration Time Window (s): Set a time window for migration. The default value is 300s.
      -
    • Scheduling Policies: You can combine static global scheduling policies or dynamic runtime scheduling policies as required. For details, see Scheduling Policy Overview.
    • Advanced Pod Settings
      • Pod Label: The built-in app label is specified when the workload is created. It is used to set affinity and anti-affinity scheduling and cannot be modified. You can click Add Label to add labels.
      -
      Figure 1 Advanced pod settings
      -
    • Client DNS Configuration: A CCE cluster has a built-in DNS add-on (CoreDNS) to provide domain name resolution for workloads in the cluster.
      • DNS Policy
        • ClusterFirst: The default DNS configuration overrides the Nameserver and DNS Search Domain configurations of the client.
        • None: Only the Nameserver and DNS Search Domain configurations are used for domain name resolution.
        • Default: The pod inherits the DNS configuration from the node on which the pod runs.
        -
      • Nameserver: You can configure a domain name server for a user-defined domain name. The value is one or a group of DNS IP addresses, for example, 1.2.3.4.
      • DNS Search Domain: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried.
      • Timeout (s): amount of time the resolver will wait for a response from a remote name server before retrying the query on a different name server. Set it based on the site requirements.
      • ndots: threshold for the number of dots that must appear in a domain name before an initial absolute query is made. If a domain name has at least ndots dots, it is treated as a fully qualified domain name (FQDN) and is tried first as an absolute name. If a domain name has fewer than ndots dots, the operating system looks up the name by appending each of the search domains in turn.
      -
    -
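
    The following hedged fragment shows how the advanced settings above map to Deployment and pod template fields (upgrade mode, graceful deletion, and client DNS configuration). All values are illustrative assumptions.

    spec:
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1                 # Maximum Number of Unavailable Pods
      template:
        spec:
          terminationGracePeriodSeconds: 30 # Graceful Time Window (s)
          dnsPolicy: "None"                 # DNS Policy: None
          dnsConfig:
            nameservers:
            - 1.2.3.4                       # Nameserver
            searches:
            - example.com                   # DNS Search Domain (assumption)
            options:
            - name: ndots
              value: "2"
            - name: timeout
              value: "3"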

  5. After the preceding configurations are complete, click Create. On the page displayed, click Return to Workload List to view the workload status.

    If the workload is in the Running state, it has been successfully created.

    -

    Workload status is not updated in real time. Click the refresh icon in the upper right corner or press F5 to refresh the page.

    -

  6. To access the workload in a browser, go to the workload list on the Deployments page. Copy the corresponding External Access Address and paste it into the address box in the browser.

    • External access addresses are available only if the Deployment access type is set to NodePort and an EIP is assigned to any node in the cluster, or if the Deployment access type is set to LoadBalancer (ELB).
    • If the workload list contains more than 500 records, the Kubernetes pagination mechanism will be used. Specifically, you can only go to the first page or the next page, but cannot go to the previous page. In addition, if resources are divided into discrete pages, the total number of resources displayed is the maximum number of resources that can be queried at a time, not the actual total number of resources.
    -
    -

-
-

Using kubectl

The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name. You can rename it as required.

    vi nginx-deployment.yaml

    -

    The following is an example YAML file. For more information about Deployments, see Kubernetes documentation.

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      strategy:
        type: RollingUpdate
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - image: nginx    # If you use an image in My Images, obtain the image path from SWR.
            imagePullPolicy: Always
            name: nginx
          imagePullSecrets:
          - name: default-secret
    -

    For details about these parameters, see Table 3.

    Table 3 Deployment YAML parameters

    Parameter

    -

    Description

    -

    Mandatory/Optional

    -

    apiVersion

    -

    API version.

    -
    NOTE:

    Set this parameter based on the cluster version.

    -
    • For clusters of v1.17 or later, the apiVersion format of Deployments is apps/v1.
    • For clusters of v1.15 or earlier, the apiVersion format of Deployments is extensions/v1beta1.
    -
    -

    Mandatory

    -

    kind

    -

    Type of a created object.

    -

    Mandatory

    -

    metadata

    -

    Metadata of a resource object.

    -

    Mandatory

    -

    name

    -

    Name of the Deployment.

    -

    Mandatory

    -

    spec

    -

    Detailed description of the Deployment.

    -

    Mandatory

    -

    replicas

    -

    Number of pods.

    -

    Mandatory

    -

    selector

    -

    Determines container pods that can be managed by the Deployment.

    -

    Mandatory

    -

    strategy

    -

    Upgrade mode. Possible values:

    -
    • RollingUpdate
    • Recreate
    -

    By default, rolling update is used.

    -

    Optional

    -

    template

    -

    Detailed description of a created container pod.

    -

    Mandatory

    -

    metadata

    -

    Metadata.

    -

    Mandatory

    -

    labels

    -

    metadata.labels: Container labels.

    -

    Optional

    -

    spec:

    -

    containers

    -
    • image (mandatory): Name of a container image.
    • imagePullPolicy (optional): Policy for obtaining an image. The options include Always (attempting to download images each time), Never (only using local images), and IfNotPresent (using local images if they are available; downloading images if local images are unavailable). The default value is Always.
    • name (mandatory): Container name.
    -

    Mandatory

    -

    imagePullSecrets

    -

    Name of the secret used during image pulling. If a private image is used, this parameter is mandatory.

    -
    • To pull an image from the Software Repository for Container (SWR), set this parameter to default-secret.
    • To pull an image from a third-party image repository, set this parameter to the name of the created secret.
    -

    Optional

    -
    -
    -

  3. Create a Deployment.

    kubectl create -f nginx-deployment.yaml

    -

    If the following information is displayed, the Deployment is being created.

    -
    deployment "nginx" created
    -

  4. Query the Deployment status.

    kubectl get deployment

    -

    If the following information is displayed, the Deployment is running.

    -
    NAME           READY     UP-TO-DATE   AVAILABLE   AGE 
    nginx          1/1       1            1           4m5s
    -

    Parameter description

    -
    • NAME: name of the Deployment
    • READY: number of ready pod replicas/number of desired pod replicas
    • UP-TO-DATE: number of pod replicas updated to the latest desired state
    • AVAILABLE: number of pod replicas available to users
    • AGE: how long the Deployment has been running
    -

  5. If the Deployment will be accessed through a ClusterIP or NodePort Service, add the corresponding Service. For details, see Networking. A hedged sketch of a NodePort Service for this Deployment follows.
-
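
    The following is a minimal, hedged sketch of a NodePort Service for the nginx Deployment above, as referenced in the last step. The port numbers are illustrative assumptions; nodePort must fall within the cluster's NodePort range (30000-32767 by default). Save it to a file and create it with kubectl create -f, the same way as the Deployment.

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
    spec:
      type: NodePort
      selector:
        app: nginx            # matches the Deployment's pod label
      ports:
      - port: 80              # Service port inside the cluster
        targetPort: 80        # container port
        nodePort: 30080       # assumption; any free port in the NodePort range works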
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0048.html b/docs/cce/umn/cce_01_0048.html deleted file mode 100644 index 18d3e23c..00000000 --- a/docs/cce/umn/cce_01_0048.html +++ /dev/null @@ -1,224 +0,0 @@ - - -

Creating a StatefulSet

-

Scenario

StatefulSets are a type of workload whose data or status is stored while the workload is running. For example, MySQL is a StatefulSet because it needs to store new data.

-

A container can be migrated between different hosts, but data is not stored on the hosts. To store StatefulSet data persistently, attach HA storage volumes provided by CCE to the container.

-
-
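
For context, the following is a minimal, hedged sketch of how a StatefulSet typically attaches persistent storage through a volumeClaimTemplate. The image, password, storage class name (csi-disk), and size are illustrative assumptions; use the storage classes and credentials appropriate to your cluster.

    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: mysql-example
    spec:
      serviceName: mysql-example          # a matching headless Service must exist
      replicas: 1
      selector:
        matchLabels:
          app: mysql-example
      template:
        metadata:
          labels:
            app: mysql-example
        spec:
          containers:
          - name: mysql
            image: mysql:5.7              # illustrative image
            env:
            - name: MYSQL_ROOT_PASSWORD
              value: "example"            # assumption; use a secret in practice
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
      volumeClaimTemplates:
      - metadata:
          name: data
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: csi-disk      # assumption: an EVS-backed storage class
          resources:
            requests:
              storage: 10Gi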

Prerequisites

  • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Cluster.
  • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

    If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

    -
    -
-
-

Using the CCE Console

CCE provides multiple methods for creating a workload. You can use any of the following methods:
  1. Use an image in Third-Party Images. You do not need to upload any image before using it.
  2. Use an image that you have uploaded to SWR.
  3. Use a shared image to create a workload. Specifically, other tenants share an image with you by using the SWR service.
  4. Use a YAML file to create a workload. You can click Create YAML on the right of the Create StatefulSet page. For details about YAML, see Using kubectl. After the YAML file is written, click Create to create a workload.

    Settings in the YAML file are synchronized with those on the console. You can edit the YAML file on the console to create a workload. For example:

    -
    • If you enter a workload name on the console, the name will automatically appear in the YAML file.
    • If you add an image on the console, the image will be automatically added to the YAML file.
    -

    When you click Create YAML on the right of the console, do not create multiple YAML files in the YAML definition pane displayed. You need to create them one by one. Otherwise, an error will be reported during the creation.

    -
    -
-
-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > StatefulSets. On the displayed page, click Create StatefulSet. Set basic workload parameters as described in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Basic workload parameters

    Parameter

    -

    Description

    -

    * Workload Name

    -

    Name of a workload, which must be unique.

    -

    Enter 4 to 52 characters starting with a lowercase letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster Name

    -

    Cluster to which the workload belongs.

    -

    * Namespace

    -

    In a single cluster, data in different namespaces is isolated from each other. This enables applications to share the services of the same cluster without interfering with each other. If no namespace is set, the default namespace is used.

    -

    * Instances

    -

    Number of pods in a workload. A workload can have one or more pods. The default value is 2. You can customize the value, for example, setting it to 1.

    -

    Each workload pod consists of the same containers. You can configure multiple pods for a workload to ensure high reliability. For such a workload, if one pod is faulty, the workload can still run properly. If only one pod is used, a node or pod exception may cause service exceptions.

    -

    Time Zone Synchronization

    -

    If this parameter is enabled, the container and the node use the same time zone.

    -
    NOTICE:

    After time zone synchronization is enabled, disks of the hostPath type will be automatically added and listed in the Data Storage > Local Volume area. Do not modify or delete the disks.

    -
    -

    Description

    -

    Description of the workload.

    -
    -
    -

  2. Click Next: Add Container.

    1. Click Add Container and select the image to be deployed.
      • My Images: Create a workload using an image in the image repository you created.
      • Third-Party Images: Create a workload using an image from any third-party image repository. When you create a workload using a third-party image, ensure that the node where the workload is running can access public networks. For details on how to create a workload using a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image pull address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: Create a workload using an image shared by another tenant through the SWR service.
      -
    2. Configure basic image information.
      A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately. -
      Table 2 Image parameters

      Parameter

      -

      Description

      -

      Image Name

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Privileged Container

      -

      Programs in a privileged container have certain privileges.

      -

      If Privileged Container is On, the container is granted superuser permissions. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 512 MiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses this GPU accordingly.

      -
      -
      -
      -
    3. Lifecycle: Commands for starting and running containers can be set. -
    4. Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
      • Liveness Probe: used to restart the unhealthy container.
      • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
      -
    5. Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
      On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

        To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

        -
        -
      -
      -
    6. Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local volume and cloud storage are supported. For details, see Storage (CSI).

      You can add data storage volumes only when creating a StatefulSet.

      -
      -
    7. Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

      Enter the user ID to set container permissions and prevent systems and other containers from being affected.

      -
    8. Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
    -

  3. Click Next: Set Application Access and set Headless Service and workload access type.

    Table 3 describes the parameters in the Headless Service area. -
    Table 3 Parameter description

    Parameter

    -

    Description

    -

    Service Name

    -

    Name of the Service corresponding to the workload for mutual access between pods. This Service is used for internal discovery of pods, and does not require an independent IP address or load balancing.

    -

    Port Name

    -

    Name of the container port. You are advised to enter a name that indicates the function of the port.

    -

    Container Port

    -

    Listening port inside the container.

    -
    -
    -
    -

    Click Add Service and set the workload access type.

    -

    If your workload will be reachable to other workloads or public networks, add a Service to define the workload access type.

    -

    The workload access type determines the network attributes of the workload. Workloads with different access types can provide different network capabilities. For details, see Overview.

    -

  4. Click Next: Configure Advanced Settings.

    • Upgrade Policy: Only Rolling upgrade is supported.

      During a rolling upgrade, old pods are gradually replaced with new ones, and service traffic is evenly distributed to both pods to ensure service continuity.

      -
    • Pod Management Policy: There are two types of policies: ordered and parallel.

      Ordered: The StatefulSet will deploy, delete, or scale pods in order and one by one (the StatefulSet waits until each pod is ready before continuing). This is the default policy.

      -

      Parallel: The StatefulSet will create pods in parallel to match the desired scale without waiting, and will delete all pods at once.

      -
    • Graceful Deletion: A time window can be set for workload deletion and reserved for executing commands in the pre-stop phase in the lifecycle. If workload processes are not terminated after the time window elapses, the workload will be forcibly deleted.
      • Graceful Time Window (s): Set a time window (0–9999s) for pre-stop commands to finish execution before a workload is deleted. The default value is 30s.
      • Scale Order: Choose Prioritize new pods or Prioritize old pods based on service requirements. Prioritize new pods indicates that new pods will be first deleted when a scale-in is triggered.
      -
    • Scheduling Policies: You can combine static global scheduling policies or dynamic runtime scheduling policies as required. For details, see Scheduling Policy Overview.
    • Advanced Pod Settings
      • Pod Label: The built-in app label is specified when the workload is created. It is used to set affinity and anti-affinity scheduling and cannot be modified. You can click Add Label to add labels.
      -
      Figure 1 Advanced pod settings
      -
    • Client DNS Configuration: A CCE cluster has a built-in DNS add-on (CoreDNS) to provide domain name resolution for workloads in the cluster.
      • DNS Policy
        • ClusterFirst: The default DNS configuration overrides the Nameserver and DNS Search Domain configurations of the client.
        • None: Only the Nameserver and DNS Search Domain configurations are used for domain name resolution.
        • Default: The pod inherits the DNS configuration from the node on which the pod runs.
        -
      • Nameserver: You can configure a domain name server for a user-defined domain name. The value is one or a group of DNS IP addresses, for example, 1.2.3.4.
      • DNS Search Domain: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried.
      • Timeout (s): amount of time the resolver will wait for a response from a remote name server before retrying the query on a different name server. Set it based on the site requirements.
      • ndots: threshold for the number of dots that must appear in a domain name before an initial absolute query will be made. If a domain name has ndots or more dots, the name is treated as a fully qualified domain name (FQDN) and is tried first as an absolute name. If a domain name has fewer than ndots dots, the operating system will look up the name in the list of search domains.
      -
    -

  5. Click Create and then Back to StatefulSet List. If the workload is in the Running state, it has been successfully created. If the workload status is not updated, click in the upper right corner or press F5 to refresh the page.

    • When a node is unavailable, pods become Unready. In this case, you need to manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
    • If the workload list contains more than 500 records, the Kubernetes pagination mechanism will be used. Specifically, you can only go to the first page or the next page, but cannot go to the previous page. In addition, if resources are divided into discrete pages, the total number of resources displayed is the maximum number of resources that can be queried at a time, not the actual total number of resources.
    -
    -

-
-

Using kubectl

The following procedure uses an etcd workload as an example to describe how to create a workload using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the etcd-statefulset.yaml file.

    etcd-statefulset.yaml is an example file name, and you can change it as required.

    -

    vi etcd-statefulset.yaml

    -

    The following provides an example of the file contents. For more information on StatefulSet, see the Kubernetes documentation.

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: etcd
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: etcd
      serviceName: etcd-svc
      template:
        metadata:
          labels:
            app: etcd
        spec:
          containers:
          - env:
            - name: PAAS_APP_NAME
              value: tesyhhj
            - name: PAAS_NAMESPACE
              value: default
            - name: PAAS_PROJECT_ID
              value: 9632fae707ce4416a0ab1e3e121fe555
            image: etcd # If you use an image in My Images, obtain the image path from SWR.
            imagePullPolicy: IfNotPresent
            name: container-0
      updateStrategy:
        type: RollingUpdate
    -
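    The advanced settings described in the console procedure (pod management policy, graceful deletion, security context, and client DNS configuration) map to standard StatefulSet and pod fields. The following is a minimal sketch of the optional fields you might merge into the manifest above; all values are examples only:

    -
    # Optional fields; merge them into the StatefulSet above as needed. Values are examples.
    spec:
      podManagementPolicy: Parallel          # Pod Management Policy: OrderedReady (default) or Parallel
      updateStrategy:
        type: RollingUpdate                  # Upgrade Policy: rolling upgrade
      template:
        spec:
          terminationGracePeriodSeconds: 30  # Graceful Time Window (s)
          securityContext:
            runAsUser: 1000                  # Security Context: user ID used to run the containers
          dnsPolicy: None                    # DNS Policy: ClusterFirst, Default, or None
          dnsConfig:                         # Client DNS Configuration (required when dnsPolicy is None)
            nameservers:
            - 1.2.3.4
            searches:
            - svc.cluster.local
            options:
            - name: ndots
              value: "2"
            - name: timeout
              value: "3"
    -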

    vi etcd-headless.yaml

    -
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: etcd
      name: etcd-svc
    spec:
      clusterIP: None
      ports:
      - name: etcd-svc
        port: 3120
        protocol: TCP
        targetPort: 3120
      selector:
        app: etcd
      sessionAffinity: None
      type: ClusterIP
    -

  3. Create a workload and the corresponding headless service.

    kubectl create -f etcd-statefulset.yaml

    -

    If the following information is displayed, the StatefulSet has been successfully created.

    -
    statefulset.apps/etcd created
    -

    kubectl create -f etcd-headless.yaml

    -

    If the following information is displayed, the headless service has been successfully created.

    -
    service/etcd-svc created
    -

  4. If the workload will be accessed through a ClusterIP or NodePort Service, set the corresponding workload access type. For details, see Networking.
-
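    For example, a NodePort Service that exposes the etcd workload created above might look like the following. This is a minimal sketch; the Service name and port values are examples, and the node port is assigned automatically unless you specify one:

    -
    apiVersion: v1
    kind: Service
    metadata:
      name: etcd-nodeport        # example Service name
    spec:
      type: NodePort
      selector:
        app: etcd                # selects the etcd pods created above
      ports:
      - name: etcd
        protocol: TCP
        port: 3120               # Service port inside the cluster
        targetPort: 3120         # container port of the etcd pods
    -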
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0051.html b/docs/cce/umn/cce_01_0051.html deleted file mode 100644 index cbc5181d..00000000 --- a/docs/cce/umn/cce_01_0051.html +++ /dev/null @@ -1,29 +0,0 @@ - - -

Scheduling Policy Overview

-

Custom Scheduling Policies

You can configure node affinity, workload affinity, and workload anti-affinity in custom scheduling policies.

- -

Custom scheduling policies depend on node labels and pod labels. You can use default labels or customize labels as required.

-
-
-

Simple Scheduling Policies

A simple scheduling policy allows you to configure affinity between workloads and AZs, between workloads and nodes, and between workloads.

-
-
  • Workload-AZ affinity: Multiple AZ-based scheduling policies (including affinity and anti-affinity policies) can be configured. However, scheduling is performed as long as one of the scheduling policies is met. -
  • Workload-node affinity: Multiple node-based scheduling policies (including affinity and anti-affinity scheduling) can be configured. However, scheduling is performed as long as one of the scheduling policies is met. For example, if a cluster contains nodes A, B, and C and two scheduling policies are set (one policy defines node A as an affinity node and the other policy defines node B as an anti-affinity node), then the workload can be scheduled to any node other than B. -
  • Workload-workload affinity: Multiple workload-based scheduling policies can be configured, but the labels in these policies must belong to the same workload.
    • Affinity between workloads: For details, see Workload-Workload Affinity. You can deploy workloads on the same node to reduce consumption of network resources.
      Figure 1 shows an example of affinity deployment, in which all workloads are deployed on the same node.
      Figure 1 Affinity between workloads
      -
      -
    • Anti-affinity between workloads: For details, see Workload-Workload Anti-Affinity. Constraining multiple instances of the same workload from being deployed on the same node reduces the impact of system breakdowns. Anti-affinity deployment is also recommended for workloads that may interfere with each other.
      Figure 2 shows an example of anti-affinity deployment, in which four workloads are deployed on four different nodes.
      Figure 2 Anti-affinity between workloads
      -
      -
    -
-

When setting workload-workload affinity and workload-node affinity, ensure that the affinity relationships do not contradict each other; otherwise, workload deployment will fail.

-

For example, Workload 3 will fail to be deployed when the following conditions are met:

-
  • Anti-affinity is configured for Workload 1 and Workload 2. Workload 1 is deployed on Node A and Workload 2 is deployed on Node B.
  • Affinity is configured between Workload 2 and Workload 3, but the target node on which Workload 3 is to be deployed is Node C or Node A.
-
-
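For reference, the workload anti-affinity described above corresponds to Kubernetes pod anti-affinity. The following minimal sketch keeps the pods of workload-2 away from nodes that already run pods labeled app=workload-1; the names and labels are examples only:

-
apiVersion: apps/v1
kind: Deployment
metadata:
  name: workload-2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: workload-2
  template:
    metadata:
      labels:
        app: workload-2
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: workload-1            # do not co-locate with pods of workload-1
            topologyKey: kubernetes.io/hostname
      containers:
      - name: container-0
        image: nginx
-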
- - diff --git a/docs/cce/umn/cce_01_0053.html b/docs/cce/umn/cce_01_0053.html deleted file mode 100644 index 9a3fd38b..00000000 --- a/docs/cce/umn/cce_01_0053.html +++ /dev/null @@ -1,231 +0,0 @@ - - -

Using Local Disks as Storage Volumes

-

You can mount a file directory of the host where a container is located to a specified container path (the hostPath mode in Kubernetes) for persistent data storage. Alternatively, you can leave the source path empty (the emptyDir mode in Kubernetes), and a temporary directory of the host will be mounted to the mount point of the container for temporary storage.

-

Using Local Volumes

CCE supports four types of local volumes.

-
-
  • hostPath: mounts a file directory of the host where the container is located to the specified mount point of the container. For example, if the container needs to access /etc/hosts, you can use a hostPath volume to map /etc/hosts.
  • emptyDir: stores data temporarily. An emptyDir volume is first created when a pod is assigned to a node and exists as long as that pod is running on that node. When the pod is terminated, the emptyDir volume is deleted and its data is permanently lost.
  • ConfigMap: A ConfigMap can be mounted as a volume, and all contents stored in its key are mounted onto the specified container directory. A ConfigMap is a type of resource that stores configuration information required by a workload. Its content is user-defined. For details about how to create a ConfigMap, see Creating a ConfigMap. For details about how to use a ConfigMap, see Using a ConfigMap.
  • Secret: You can store sensitive information such as passwords, in secrets and mount them as files for use by pods. A secret is a type of resource that holds sensitive data, such as authentication and key information. All content is user-defined. For details about how to create a secret, see Creating a Secret. For details about how to use a secret, see Using a Secret.
-

The following describes how to mount these four types of volumes.

-

hostPath

You can mount a path on the host to a specified container path. A hostPath volume is usually used to store workload logs permanently or used by workloads that need to access internal data structure of the Docker engine on the host.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set parameters for adding a local volume, as listed in Table 1.

    -

    Table 1 Setting parameters for mounting a hostPath volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select HostPath.

    -

    Host Path

    -

    Path of the host to which the local volume is to be mounted, for example, /etc/hosts.

    -
    NOTE:

    Host Path cannot be set to the root directory /. Otherwise, the mounting fails. Mount paths can be as follows:

    -
    • /opt/xxxx (excluding /opt/cloud)
    • /mnt/xxxx (excluding /mnt/paas)
    • /tmp/xxx
    • /var/xxx (excluding key directories such as /var/lib, /var/script, and /var/paas)
    • /xxxx (It cannot conflict with the system directory, such as bin, lib, home, root, boot, dev, etc, lost+found, mnt, proc, sbin, srv, tmp, var, media, opt, selinux, sys, and usr.)
    -

    Do not set this parameter to /home/paas, /var/paas, /var/lib, /var/script, /mnt/paas, or /opt/cloud. Otherwise, the system or node installation will fail.

    -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Permission
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-

emptyDir

emptyDir applies to temporary data storage, disaster recovery, and runtime data sharing. It will be deleted upon deletion or transfer of workload pods.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set the local volume type to emptyDir and set parameters for adding a local volume, as described in Table 2.

    -

    Table 2 Setting parameters for mounting an emptyDir volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select emptyDir.

    -

    Medium

    -
    • Default: Data is stored in hard disks, which is applicable to a large amount of data with low requirements on reading and writing efficiency.
    • Memory: Selecting this option can improve the running speed, but the storage capacity is subject to the memory size. This mode applies to scenarios where the data volume is small and the read and write efficiency is high.
    -
    NOTE:
    • If you select Memory, any files you write will count against your container's memory limit. Pay attention to the memory quota. If the memory usage exceeds the threshold, OOM may occur.
    • If Memory is selected, the size of an emptyDir volume is 50% of the pod specifications and cannot be changed.
    • If Memory is not selected, emptyDir volumes will not occupy the system memory.
    -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Permission
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-
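The console settings above correspond to a standard emptyDir volume definition. The following minimal pod sketch mounts a memory-backed emptyDir volume to /tmp; the names are examples only:

-
apiVersion: v1
kind: Pod
metadata:
  name: emptydir-example
spec:
  containers:
  - name: container-0
    image: nginx
    volumeMounts:
    - name: cache-volume
      mountPath: /tmp          # Container Path
  volumes:
  - name: cache-volume
    emptyDir:
      medium: Memory           # omit this field to use the default disk-backed medium
-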

ConfigMap

The data stored in a ConfigMap can be referenced in a volume of type ConfigMap. You can mount such a volume to a specified container path. The platform supports the separation of workload codes and configuration files. ConfigMap volumes are used to store workload configuration parameters. Before that, you need to create ConfigMaps in advance. For details, see Creating a ConfigMap.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set the local volume type to ConfigMap and set parameters for adding a local volume, as shown in Table 3.

    -

    Table 3 Setting parameters for mounting a ConfigMap volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select ConfigMap.

    -

    Option

    -

    Select the desired ConfigMap name.

    -

    A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set the permission to Read-only. Data volumes in the path are read-only.
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-
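The console settings above correspond to a standard ConfigMap volume definition. The following minimal pod sketch mounts a ConfigMap as read-only files; the ConfigMap name and paths are examples only:

-
apiVersion: v1
kind: Pod
metadata:
  name: configmap-volume-example
spec:
  containers:
  - name: container-0
    image: nginx
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config   # Container Path; each key in the ConfigMap becomes a file here
      readOnly: true
  volumes:
  - name: config-volume
    configMap:
      name: cce-configmap      # example ConfigMap name; the ConfigMap must exist in the same namespace
-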

Secret

You can mount a secret as a volume to the specified container path. Contents in a secret are user-defined. Before that, you need to create a secret. For details, see Creating a Secret.

-
-
  1. Log in to the CCE console.
  2. When creating a workload, click Data Storage in the Container Settings. Click the Local Volumes tab and click .
  3. Set the local volume type to Secret and set parameters for adding a local volume, as shown in Table 4.

    -

    Table 4 Setting parameters for mounting a secret volume

    Parameter

    -

    Description

    -

    Storage Type

    -

    Select Secret.

    -

    Secret

    -

    Select the desired secret name.

    -

    A secret must be created in advance. For details, see Creating a Secret.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter a subpath, for example, tmp.

      A subpath is used to mount a local disk so that the same data volume is used in a single pod. If this parameter is left blank, the root path is used by default.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the container to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      When the container is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set the permission to Read-only. Data volumes in the path are read-only.
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

-
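Similarly, the console settings above correspond to a standard secret volume definition. A minimal pod sketch, with example names:

-
apiVersion: v1
kind: Pod
metadata:
  name: secret-volume-example
spec:
  containers:
  - name: container-0
    image: nginx
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret   # Container Path; mounted read-only
      readOnly: true
  volumes:
  - name: secret-volume
    secret:
      secretName: mysecret     # example secret name; the secret must exist in the same namespace
-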

Mounting a hostPath Volume Using kubectl

You can use kubectl to mount a file directory of the host where the container is located to a specified mount path of the container.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the hostPath-pod-example.yaml file, which is used to create a pod.

    touch hostPath-pod-example.yaml

    -

    vi hostPath-pod-example.yaml

    -

    Mount the hostPath volume for the Deployment. The following is an example:

    -
    apiVersion: apps/v1 
    kind: Deployment
    metadata:
      name: hostpath-pod-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: hostpath-pod-example
      template:
        metadata:
          labels:
            app: hostpath-pod-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp
              name: hostpath-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: hostpath-example
            hostPath:
              path: /tmp/test
    -
    Table 5 Local disk storage dependency parameters

    Parameter

    -

    Description

    -

    mountPath

    -

    Mount path of the container. In this example, the volume is mounted to the /tmp directory.

    -

    hostPath

    -

    Host path. In this example, the host path is /tmp/test.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f hostPath-pod-example.yaml

    -

  4. Verify the mounting.

    1. Query the pod name of the workload (hostpath-pod-example is used as an example).
      kubectl get po|grep hostpath-pod-example
      -

      Expected outputs:

      -
      hostpath-pod-example-55c8d4dc59-md5d9   1/1     Running   0          35s
      -
    2. Create the test1 file in the container mount path /tmp.
      kubectl exec hostpath-pod-example-55c8d4dc59-md5d9 -- touch /tmp/test1
      -
    3. Verify that the file is created in the host path /tmp/test/.
      ll /tmp/test/
      -

      Expected outputs:

      -
      -rw-r--r--  1 root root    0 Jun  1 16:12 test1
      -
    4. Create the test2 file in the host path /tmp/test/.
      touch /tmp/test/test2
      -
    5. Verify that the file is created in the container mount path.
      kubectl exec hostpath-pod-example-55c8d4dc59-md5d9 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root 0 Jun  1 08:12 test1
      -rw-r--r-- 1 root root 0 Jun  1 08:14 test2
      -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0057.html b/docs/cce/umn/cce_01_0057.html deleted file mode 100644 index 274f2512..00000000 --- a/docs/cce/umn/cce_01_0057.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Scaling a Workload

-
  • Auto scaling: You can set metric-based, scheduled, and periodic policies. After configuration, pods can be automatically added or deleted based on resource changes or the specified schedule.
  • Manual scaling: Pods are immediately added or deleted after the configuration is complete.
-

Scaling policy priority: If you do not manually adjust the number of pods, auto scaling policies will take effect for resource scheduling. If manual scaling is triggered, auto scaling policies will be temporarily invalid.

-
-

Auto Scaling - HPA

HPA policies can be used for auto scaling. You can view all policies or perform more operations in Auto Scaling.

-
-
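An HPA policy corresponds to a standard Kubernetes HorizontalPodAutoscaler object. The following is a minimal sketch that scales an example Deployment between 1 and 5 pods based on average CPU usage; the workload name and threshold are examples, and clusters earlier than v1.23 use the autoscaling/v2beta2 API version instead:

-
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-example
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                    # example workload to scale
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70     # scale out when average CPU usage exceeds 70%
-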

Auto Scaling - AOM

You can define auto scaling policies as required, which can intelligently adjust resources in response to service changes and data traffic spikes.

-

Auto scaling policies can be backed by Application Operations Management (AOM). However, AOM-based auto scaling is not available for clusters of v1.17 or later.

-

Currently, CCE supports the following types of auto scaling policies:

-

Metric-based policy: After a workload is created, pods will be automatically scaled when the workload's CPU or memory usage exceeds or falls below a preset limit.

-

Scheduled policy: scaling at a specified time. Scheduled auto scaling is applicable to flash sales, premier shopping events, and other regular events that bring a high burst of traffic load.

-

Periodic policy: scaling at a specified time on a daily, weekly, or monthly basis. Periodic scheduling is applicable to scenarios where traffic changes periodically.

-
-
  • Metric-based policy: Supports auto scaling of a workload based on the CPU/memory usage.
    1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments or StatefulSets. In the same row as the target workload, choose More > Scaling.
    2. In the Auto Scaling area, click Add Scaling Policy.
    3. Set the policy parameters as listed in Table 1. -
      Table 1 Parameters for adding a metric-based policy

      Parameter

      -

      Description

      -

      Policy Name

      -

      Enter the name of the scaling policy.

      -

      The policy name must be 1 to 64 characters in length and start with a letter. Only letters, digits, underscores (_), and hyphens (-) are allowed.

      -

      Policy Type

      -

      Set this parameter to Metric-based policy.

      -

      The alarm policy is triggered based on historical data. The system checks whether the indicators set by the user in the monitoring window meet the triggering conditions every minute. If the triggering conditions are met for N consecutive periods, the system performs the action specified by the policy.

      -

      Metric

      -

      Set the metrics that describe the resource performance data or status.

      -
      • CPU Usage: CPU usage of the measured object. The value is the percentage of the used CPU cores to the total CPU cores.
      • Physical Memory Usage: percentage of the physical memory size used by the measured object to the physical memory size that the measured object has applied for.
      -

      Trigger Condition

      -

      The value can be higher (>) or lower (<) than a threshold. When the usage of the preceding metrics reaches the specified value, the scaling policy is triggered.

      -

      For example, if Metric is set to CPU Usage and this parameter is set to > 70%, the scaling policy is triggered when the CPU usage exceeds 70%.

      -

      Monitoring window

      -

      Size of the data aggregation window.

      -

      If the value is set to 60, metric statistics are collected every 60 seconds.

      -

      Threshold Crossings

      -

      Number of consecutive times that the threshold is reached within the monitoring window. The calculation cycle is fixed at one minute.

      -

      If the parameter is set to 3, the action is triggered if the threshold is reached for three consecutive measurement periods.

      -

      Action

      -

      Action executed after a policy is triggered. Two actions are available: add or reduce pods.

      -
      -
      -
    4. Click OK.
    5. In the Auto Scaling area, check that the policy has been started.

      When the trigger condition is met, the auto scaling policy starts automatically.

      -
    -
  • Scheduled policy: scaling at a specified time.
    1. In the Auto Scaling area, click Add Scaling Policy. Select Scheduled policy. -
      Table 2 Parameters for adding a scheduled policy

      Parameter

      -

      Description

      -

      Policy Name

      -

      Enter the name of the scaling policy.

      -

      The policy name must be 1 to 64 characters in length and start with a letter. Only letters, digits, underscores (_), and hyphens (-) are allowed.

      -

      Policy Type

      -

      Set this parameter to Scheduled policy.

      -

      Trigger Time

      -

      Time at which the policy is enforced.

      -

      Action

      -

      Action executed after a policy is triggered. Three actions are available: add pods, reduce pods, and set the number of pods.

      -
      -
      -
    2. Click OK.
    3. In the Auto Scaling area, check that the policy has been started.

      When the trigger time is reached, you can see on the Pods tab page that the auto scaling policy has taken effect.

      -
    -
  • Periodic policy: scaling at a specified time on a daily, weekly, or monthly basis.
    1. In the Auto Scaling area, click Add Scaling Policy. Select Periodic policy. -
      Table 3 Parameters for adding a periodic policy

      Parameter

      -

      Description

      -

      Policy Name

      -

      Enter the name of the scaling policy.

      -

      The policy name must be 1 to 64 characters in length and start with a letter. Only letters, digits, underscores (_), and hyphens (-) are allowed.

      -

      Policy Type

      -

      Set this parameter to Periodic policy.

      -

      Time Range

      -

      Specify the time for triggering the policy.

      -

      Action

      -

      Action executed after a policy is triggered. Three actions are available: add pods, reduce pods, and set the number of pods.

      -
      -
      -
    2. Click OK.
    3. In the Auto Scaling area, check that the policy has been started.

      When the trigger condition is met, the auto scaling policy starts automatically.

      -
    -
-

Manual Scaling

  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments or StatefulSets. In the same row as the target workload, choose More > Scaling.
  2. In the Manual Scaling area, click and change the number of pods to, for example, 3. Then, click Save. The scaling takes effect immediately.
  3. On the Pods tab page, check that a new pod is being created. When the pod status becomes Running, pod scaling is complete.
-
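If you prefer the CLI, the same manual scaling can be performed with kubectl; the workload names, namespace, and replica count below are examples:

-
kubectl scale statefulset etcd --replicas=3 -n default
kubectl scale deployment nginx --replicas=3 -n default
-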
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0059.html b/docs/cce/umn/cce_01_0059.html deleted file mode 100644 index 50e9139b..00000000 --- a/docs/cce/umn/cce_01_0059.html +++ /dev/null @@ -1,147 +0,0 @@ - - -

Network Policies

-

As the service logic becomes increasingly complex, many applications require network calls between modules. Traditional external firewalls or application-based firewalls cannot meet the requirements. Network policies are urgently needed between modules, service logic layers, or functional teams in a large cluster.

-

CCE has enhanced the Kubernetes-based network policy feature, allowing network isolation in a cluster by configuring network policies. This means that a firewall can be set between pods.

-

For example, to make a payment system accessible only to specified components for security purposes, you can configure network policies.

-

Notes and Constraints

  • Only clusters that use the tunnel network model support network policies.
  • Network isolation is not supported for IPv6 addresses.
  • Network policies do not support egress rules except for clusters of v1.23 or later.

    Egress rules are supported only in the following operating systems:

    -
    • EulerOS 2.9: kernel version 4.18.0-147.5.1.6.h541.eulerosv2r9.x86_64
    • CentOS 7.7: kernel version 3.10.0-1062.18.1.el7.x86_64
    • EulerOS 2.5: kernel version 3.10.0-862.14.1.5.h591.eulerosv2r7.x86_64
    -
  • If a cluster is upgraded to v1.23 in in-place mode, you cannot use egress rules because the node OS is not upgraded. In this case, reset the node.
-
-

Precautions

If no network policies have been configured for a workload, such as workload-1, other workloads in the same cluster can access workload-1.

-
-

Using Ingress Rules

  • Using podSelector to specify the access scope
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: test-network-policy
      namespace: default
    spec:
      podSelector:                  # The rule takes effect for pods with the role=db label.
        matchLabels:
          role: db
      ingress:                      # This is an ingress rule.
      - from:
        - podSelector:              # Only traffic from the pods with the role=frontend label is allowed.
            matchLabels:
              role: frontend
        ports:                      # Only TCP can be used to access port 6379.
        - protocol: TCP
          port: 6379
    -

    Diagram:

    -
    Figure 1 podSelector
    -
-
  • Using namespaceSelector to specify the access scope
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: test-network-policy
    spec:
      podSelector:                  # The rule takes effect for pods with the role=db label.
        matchLabels:
          role: db
      ingress:                      # This is an ingress rule.
      - from:
        - namespaceSelector:        # Only traffic from the pods in the namespace with the "project=myproject" label is allowed.
            matchLabels:
              project: myproject
        ports:                      # Only TCP can be used to access port 6379.
        - protocol: TCP
          port: 6379
    -

    Figure 2 shows how namespaceSelector selects ingress sources.

    -
    Figure 2 namespaceSelector
    -
-
-

Using Egress Rules

Egress supports not only podSelector and namespaceSelector, but also ipBlock.

-

Only clusters of version 1.23 or later support egress rules. Currently, only EulerOS 2.5, EulerOS 2.9, and CentOS 7.7 nodes are supported.

-
-
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-client-a-via-except-cidr-egress-rule
  namespace: default
spec:
  policyTypes:                  # Must be specified for an egress rule.
    - Egress
  podSelector:                  # The rule takes effect for pods with the role=db label.
    matchLabels:
      role: db
  egress:                       # Egress rule
  - to:
    - ipBlock:
        cidr: 172.16.0.16/16    # Allow access to this CIDR block.
        except:
        - 172.16.0.40/32        # This CIDR block cannot be accessed. This value must fall within the range specified by cidr.
-

Diagram:

-
Figure 3 ipBlock
-

You can define ingress and egress in the same rule.

-
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: default
spec:
  policyTypes:
  - Ingress
  - Egress
  podSelector:                  # The rule takes effect for pods with the role=db label.
    matchLabels:
      role: db
  ingress:                      # Ingress rule
  - from:
    - podSelector:              # Only traffic from the pods with the "role=frontend" label is allowed.
        matchLabels:
          role: frontend
    ports:                      # Only TCP can be used to access port 6379.
    - protocol: TCP
      port: 6379
  egress:                       # Egress rule
  - to:
    - podSelector:              # Only pods with the role=web label can be accessed.
        matchLabels:
          role: web
-

Diagram:

-
Figure 4 Using both ingress and egress
-
-

Adding a Network Policy on the Console

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network. On the Network Policies tab page, click Create Network Policy.

    • Network Policy Name: Specify a network policy name.
    • Cluster Name: Select a cluster to which the network policy belongs.
    • Namespace: Select a namespace in which the network policy is applied.
    • Workload

      Click Select Workload. In the dialog box displayed, select a workload for which the network policy is to be created, for example, workload-1. Then, click OK.

      -
    • Rules: Click Add Rule, set the parameters listed in Table 1, and click OK. -
      Table 1 Parameters for adding a rule

      Parameter

      -

      Description

      -

      Direction

      -

      Only Inbound is supported, indicating that the whitelisted workloads access the current workload (workload-1 in this example).

      -

      Protocol

      -

      Select a protocol. Currently, the TCP and UDP protocols are supported. The ICMP protocol is not supported.

      -

      Destination Container Port

      -

      Specify a port on which the workload in the container image listens. The Nginx application listens on port 80.

      -

      If no container port is specified, all ports can be accessed by default.

      -

      Whitelisted Workloads

      -

      Select other workloads that can access the current workload. These workloads will access the current workload at the destination container port.

      -
      • Namespace: All workloads in the selected namespace(s) are added to the whitelist. That is, all workloads in the namespace(s) can access workload-1.
      • Workload: The selected workloads can access workload-1. Only other workloads in the same namespace as workload-1 can be selected.
      -
      -
      -
    -

  2. Click Create.
  3. Repeat the preceding steps to add more network policies for the current workload when other ports need to be accessed by some workloads.

    After the network policies are created, only the specified workloads or workloads in the specified namespaces can access the current workload.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0063.html b/docs/cce/umn/cce_01_0063.html deleted file mode 100644 index 46fcb364..00000000 --- a/docs/cce/umn/cce_01_0063.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Managing Node Scaling Policies

-

Scenario

After a node scaling policy is created, you can delete, edit, disable, enable, or clone the policy.

-
-

Viewing a Node Scaling Policy

You can view the associated node pool, rules, and scaling history of a node scaling policy and rectify faults according to the error information displayed.

-
  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click in front of the policy to be viewed.
  2. In the expanded area, the Associated Node Pool, Execution Rules, and Scaling Records tab pages are displayed. If the policy is abnormal, locate and rectify the fault based on the error information.

    You can also enable or disable auto scaling in Node Pools. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools, and click Edit in the upper right corner of the node pool to be operated. In the Edit Node Pool dialog box displayed, you can enable Autoscaler and set the limits of the number of nodes.

    -
    -

-
-

Deleting a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click Delete in the Operation column of the policy to be deleted.
  2. In the Delete Node Policy dialog box displayed, confirm whether to delete the policy.
  3. Enter DELETE in the text box.
  4. Click OK to delete the policy.
-
-

Editing a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click Edit in the Operation column of the policy.
  2. On the Edit Node Scaling Policy page displayed, modify policy parameter values listed in Table 1.
  3. After the configuration is complete, click OK.
-
-

Cloning a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click More > Clone in the Operation column of the policy.
  2. On the Create Node Scaling Policy page displayed, certain parameters have been cloned. Add or modify other policy parameters based on service requirements.
  3. Click Create Now to clone the policy. The cloned policy is displayed in the policy list on the Node Scaling tab page.
-
-

Enabling or Disabling a Node Scaling Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click More > Disable or Enable in the Operation column of the policy.
  2. In the dialog box displayed, confirm whether to disable or enable the node policy.
  3. Click Yes. The policy status is displayed in the node scaling list.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0064.html b/docs/cce/umn/cce_01_0064.html deleted file mode 100644 index 42705e9c..00000000 --- a/docs/cce/umn/cce_01_0064.html +++ /dev/null @@ -1,23 +0,0 @@ - - -

Add-ons

-
- - diff --git a/docs/cce/umn/cce_01_0066.html b/docs/cce/umn/cce_01_0066.html deleted file mode 100644 index 752fedd9..00000000 --- a/docs/cce/umn/cce_01_0066.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

everest (System Resource Add-on, Mandatory)

-

Introduction

Everest is a cloud-native container storage system. Based on Container Storage Interface (CSI), clusters of Kubernetes v1.15 or later can interconnect with cloud storage services such as EVS, OBS, SFS, and SFS Turbo.

-

everest is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.15 or later is created.

-
-

Notes and Constraints

  • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions. For details about CSI and FlexVolume, see Differences Between CSI and FlexVolume Plug-ins.
  • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
  • By default, this add-on is installed in clusters of v1.15 and later. For clusters of v1.13 and earlier, the storage-driver add-on is installed by default.
-
-

Installing the Add-on

This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

-
  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under everest.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Select Single or HA for Add-on Specifications, and click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under everest.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • When the upgrade is complete, the original everest version on cluster nodes will be replaced by the latest version.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Select Single or HA for Add-on Specifications, and click Upgrade.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Uninstall under everest.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0068.html b/docs/cce/umn/cce_01_0068.html deleted file mode 100644 index 58318d38..00000000 --- a/docs/cce/umn/cce_01_0068.html +++ /dev/null @@ -1,68 +0,0 @@ - - -

CCE Kubernetes Release Notes

-

CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.

-

After the latest Kubernetes version is released, CCE will provide you with the changes in this version. For details, see Table 1.

Table 1 Cluster version differences

  • Source version v1.19 → target version v1.21
  • Source version v1.17 → target version v1.19
  • Source version v1.15 → target version v1.17
  • Source version v1.13 → target version v1.15

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0081.html b/docs/cce/umn/cce_01_0081.html deleted file mode 100644 index 5387f3ce..00000000 --- a/docs/cce/umn/cce_01_0081.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Node Pool Overview

-

Introduction

CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a cluster.

-

You can create custom node pools on the CCE console. With node pools, you can quickly create, manage, and destroy nodes without affecting the cluster. All nodes in a custom node pool have identical parameters and node type. You cannot configure a single node in a node pool; any configuration changes affect all nodes in the node pool.

-

You can also use node pools for auto scaling.

-
  • When a pod in a cluster cannot be scheduled due to insufficient resources, scale-out can be automatically triggered.
  • When there is an idle node or a monitoring metric threshold is met, scale-in can be automatically triggered.
-

This section describes how node pools work in CCE and how to create and manage node pools.

-
-

Node Pool Architecture

Figure 1 Overall architecture of a node pool
-

Generally, all nodes in a node pool have the following same attributes:

-
  • Node OS
  • Startup parameters of Kubernetes components on a node
  • User-defined startup script of a node
  • K8S Labels and Taints
-

CCE provides the following extended attributes for node pools:

-
  • Node pool OS
  • Maximum number of pods on each node in a node pool
-
-

Description of DefaultPool

DefaultPool is not a real node pool. It only classifies nodes that are not in any node pool. These nodes are directly created on the console or by calling APIs. DefaultPool does not support any node pool functions, including scaling and parameter configuration. DefaultPool cannot be edited, deleted, expanded, or auto scaled, and nodes in it cannot be migrated.

-
-

Applicable Scenarios

When a large-scale cluster is required, you are advised to use node pools to manage nodes.

-

The following table describes multiple scenarios of large-scale cluster management and the functions of node pools in each scenario.

-
Table 1 Using node pools for different management scenarios

Scenario

-

Function

-

Multiple heterogeneous nodes (with different models and configurations) in the cluster

-

Nodes can be grouped into different pools for management.

-

Frequent node scaling required in a cluster

-

Node pools support auto scaling to dynamically add or reduce nodes.

-

Complex application scheduling rules in a cluster

-

Node pool tags can be used to quickly specify service scheduling rules.

-
-
-
-

Functions and Precautions

-

Function

-

Description

-

Notes

-

Creating a node pool

-

Add a node pool.

-

It is recommended that a cluster contain no more than 100 node pools.

-

Deleting a node pool

-

Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.

-

If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable.

-

Enabling auto scaling for a node pool

-

After auto scaling is enabled, nodes will be automatically created or deleted in the node pool based on the cluster loads.

-

You are advised not to store important data on nodes in a node pool because after auto scaling, data cannot be restored as nodes may be deleted.

-

Disabling auto scaling for a node pool

-

After auto scaling is disabled, the number of nodes in a node pool will not automatically change with the cluster loads.

-

/

-

Adjusting the size of a node pool

-

The number of nodes in a node pool can be directly adjusted. If the number of nodes is reduced, nodes are randomly removed from the current node pool.

-

After auto scaling is enabled, you are not advised to manually adjust the node pool size.

-

Changing node pool configurations

-

You can modify the node pool name, node quantity, Kubernetes labels, taints, and resource tags.

-

The modified Kubernetes labels and taints will apply to all nodes in the node pool, which may cause pod re-scheduling. Therefore, exercise caution when performing this operation.

-

Adding an existing node to a node pool

-

Nodes that do not belong to the cluster can be added to a node pool. The following requirements must be met:

-
  • The node to be added and the CCE cluster are in the same VPC and subnet.
  • The node is not used by other clusters and has the same configurations (such as specifications and billing mode) as the node pool.
-

Unless required, you are not advised to add existing nodes. You are advised to create a node pool.

-

Removing a node from a node pool

-

Nodes in a node pool can be migrated to the default node pool of the same cluster.

-

Nodes in the default node pool cannot be migrated to other node pools, and nodes in a user-created node pool cannot be migrated to other user-created node pools.

-

Cloning a node pool

-

You can copy the configuration of an existing node pool to create a new node pool.

-

/

-

Setting Kubernetes parameters

-

You can configure core components with fine granularity.

-
  • This function is supported only for clusters of v1.15 and later. It is not displayed for versions earlier than v1.15
  • The default node pool DefaultPool does not support this type of configuration.
-
-
-
-

Deploying a Workload in a Specified Node Pool

When creating a workload, you can constrain pods to run in a specified node pool.

-

For example, on the CCE console, you can set the affinity between the workload and the node on the Scheduling Policies tab of the workload details page to forcibly deploy the workload to a specific node pool. The workload then runs only on nodes in that node pool. If you need finer control over where the workload is scheduled, you can use the affinity and anti-affinity policies between workloads and nodes described in Scheduling Policy Overview.

For example, you can use a container's resource request as a scheduling constraint so that the workload runs only on nodes that meet the resource request.

-

If the workload definition file defines a container that requires four CPUs, the scheduler will not choose the nodes with two CPUs to run workloads.
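For example, assuming the target node pool has been given the Kubernetes label nodepool=pool-demo (a label name chosen here purely for illustration; use the labels actually configured on your node pool), a Deployment can pin its pods to that pool with a nodeSelector and also declare its CPU request, as in the following sketch:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      nodeSelector:
        nodepool: pool-demo        # Assumed node pool label; check the labels set on your node pool
      containers:
      - name: nginx
        image: nginx
        resources:
          requests:
            cpu: "4"               # Nodes with fewer than 4 allocatable CPUs will not be selected
      imagePullSecrets:
      - name: default-secret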

-
-

Related Operations

You can log in to the CCE console and refer to the following sections to perform operations on node pools:

- -
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0083.html b/docs/cce/umn/cce_01_0083.html deleted file mode 100644 index 8e8ce9a2..00000000 --- a/docs/cce/umn/cce_01_0083.html +++ /dev/null @@ -1,107 +0,0 @@ - - -

Managing Workload Scaling Policies

-

Scenario

After an HPA policy is created, you can update, clone, edit, and delete the policy, as well as edit the YAML file.

-
-

Checking an HPA Policy

You can view the rules, status, and events of an HPA policy and handle exceptions based on the error information displayed.

-
  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click the expand arrow in front of the target policy.
  2. In the expanded area, you can view the Rules, Status, and Events tab pages. If the policy is abnormal, locate and rectify the fault based on the error information.

    You can also view the created HPA policy on the workload details page. Log in to the CCE console, choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane, and choose More > Scaling in the Operation column. On the workload details page, click the Scaling tab. You can see the Auto Scaling-HPA pane, as well as the HPA policy you have configured on the Auto Scaling page.

    Table 1 Event types and names

    • Normal events:
      • SuccessfulRescale: The scaling is performed successfully.
    • Abnormal events:
      • InvalidTargetRange: Invalid target range.
      • InvalidSelector: Invalid selector.
      • FailedGetObjectMetric: Objects fail to be obtained.
      • FailedGetPodsMetric: Pods fail to be obtained.
      • FailedGetResourceMetric: Resources fail to be obtained.
      • FailedGetExternalMetric: External metrics fail to be obtained.
      • InvalidMetricSourceType: Invalid metric source type.
      • FailedConvertHPA: HPA conversion failed.
      • FailedGetScale: The scale fails to be obtained.
      • FailedComputeMetricsReplicas: Failed to calculate metric-defined replicas.
      • FailedGetScaleWindow: Failed to obtain ScaleWindow.
      • FailedRescale: Failed to scale the service.

-
-
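If you prefer the command line, the same rules, status, and events can be inspected with kubectl (the policy name and namespace below are placeholders):

# List HPA policies in a namespace
kubectl get hpa -n default
# Show the rules, current status, and recent events of one policy
kubectl describe hpa <policy-name> -n default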

Updating an HPA Policy

An HPA policy is used as an example.

-
  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click Update in the Operation column of the policy to be updated.
  2. On the Update HPA Policy page displayed, set the policy parameters listed in Table 1.
  3. Click Update.
-
-

Cloning an HPA Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click Clone in the Operation column of the target policy.
  2. For example, for an HPA policy, on the Create HPA Policy page, you can view that parameters such as Pod Range, Cooldown Period, and Rules have been cloned. Add or modify other policy parameters as needed.
  3. Click Create to complete policy cloning. On the Workload Scaling tab page, you can view the cloned policy in the policy list.
-
-

Editing the YAML File (HPA Policy)

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, choose More > Edit YAML in the Operation column of the target policy.
  2. In the Edit YAML dialog box displayed, edit or download the YAML file.
  3. Click the close button in the upper right corner.
-
-
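For reference, the HPA policy corresponds to a Kubernetes HorizontalPodAutoscaler object, which is what the Edit YAML dialog shows. The following is only a minimal sketch; the workload name, replica range, and CPU threshold are placeholders, and the exact apiVersion depends on your cluster version:

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-example
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx                 # Workload to scale (assumed name)
  minReplicas: 1                # Pod range: lower bound
  maxReplicas: 10               # Pod range: upper bound
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70  # Scale out when average CPU usage exceeds 70%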

Deleting an HPA Policy

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, choose More > Delete in the Operation column of the target policy.
  2. In the Delete HPA Policy dialog box displayed, confirm whether to delete the HPA policy.
  3. Click Yes to delete the policy.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0085.html b/docs/cce/umn/cce_01_0085.html deleted file mode 100644 index 984889cb..00000000 --- a/docs/cce/umn/cce_01_0085.html +++ /dev/null @@ -1,49 +0,0 @@ - - -

Controlling Cluster Permissions

-

Scenario

This section describes how to control permissions on resources in a cluster, for example, allow user A to read and write application data in a namespace, and user B to only read resource data in a cluster.

-
-

Procedure

  1. If you need to perform permission control on the cluster, select Enhanced authentication for Authentication Mode during cluster creation, upload your own CA certificate, client certificate, and client certificate private key (for details about how to create a certificate, see Certificates), and select I have confirmed that the uploaded certificates are valid. For details, see Table 1.

    • Upload a file smaller than 1 MB. The CA certificate and client certificate can be in .crt or .cer format. The private key of the client certificate can only be uploaded unencrypted.
    • The validity period of the client certificate must be longer than five years.
    • The uploaded CA certificate is used for both the authentication proxy and the kube-apiserver aggregation layer configuration. If the certificate is invalid, the cluster cannot be created.
    -
    -

  2. Create a role using kubectl.

    The following example shows how to create a role and allow the role to read all pods in the default namespace. For details about the parameters, see the official Kubernetes documentation.
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      namespace: default
      name: pod-reader
    rules:
    - apiGroups: [""]
      resources: ["pods"]
      verbs: ["get", "watch", "list"]

  3. Bind the role to a user by using kubectl.

    In the following example, the RoleBinding assigns the role of pod-reader in the default namespace to user jane. This policy allows user jane to read all pods in the default namespace. For details about the parameters, see the official Kubernetes documentation.
    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: read-pods
      namespace: default
    subjects:
    - kind: User
      name: jane                         # User name
      apiGroup: rbac.authorization.k8s.io
    roleRef:
      kind: Role
      name: pod-reader                   # Name of the role that is created
      apiGroup: rbac.authorization.k8s.io

  4. After a role is created and bound to a user, call a Kubernetes API by initiating an API request message where headers carry user information and the certificate uploaded during cluster creation. For example, to call the pod query API, run the following command:

    curl -k -H "X-Remote-User: jane" --cacert /root/tls-ca.crt --key /root/tls.key --cert /root/tls.crt https://192.168.23.5:5443/api/v1/namespaces/default/pods

    -

    If 200 is returned, user jane is authorized to read pods in the cluster's default namespace. If 403 is returned, user jane is not authorized to read pods in the cluster's default namespace.

    -

    To prevent the command execution failure, upload the certificate to the /root directory in advance.

    -
    -

    The parameter descriptions are as follows:

    -
    • X-Remote-User: jane: The request header is fixed at X-Remote-User, and jane is the username.
    • tls-ca.crt: CA root certificate uploaded during cluster creation.
    • tls.crt: client certificate that matches the CA root certificate uploaded during cluster creation.
    • tls.key: client key corresponding to the CA root certificate uploaded during cluster creation.
    • 192.168.23.5:5443: address for connecting to the cluster. To obtain the address, perform the following steps:

      Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. Click the name of the cluster to be connected and obtain the IP address and port number from Internal API Server Address on the cluster details page.

      -
      Figure 1 Obtaining the access address
      -
    -

    In addition, the X-Remote-Group header field, that is, the user group name, is supported. During role binding, a role can be bound to a group and carry user group information when you access the cluster.

    -
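    For example, a RoleBinding that grants the same role to a user group rather than a single user might look like the following sketch (the group name dev-group is an assumption; it is the value carried in the X-Remote-Group header):

    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: read-pods-group
      namespace: default
    subjects:
    - kind: Group
      name: dev-group                    # Assumed group name carried in the X-Remote-Group header
      apiGroup: rbac.authorization.k8s.io
    roleRef:
      kind: Role
      name: pod-reader
      apiGroup: rbac.authorization.k8s.io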

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0105.html b/docs/cce/umn/cce_01_0105.html deleted file mode 100644 index b702311d..00000000 --- a/docs/cce/umn/cce_01_0105.html +++ /dev/null @@ -1,209 +0,0 @@ - - -

Setting Container Lifecycle Parameters

-

Scenario

CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before stopping, you can register a hook function.

-

CCE provides the following lifecycle callback functions:

-
  • Start Command: executed to start a container. For details, see Setting Container Startup Commands.
  • Post-Start: executed immediately after a container is started. For details, see Post-Start Processing.
  • Pre-Stop: executed before a container is stopped. The pre-stop processing function helps you ensure that the services running on the pods can be completed in advance in the case of pod upgrade or deletion. For details, see Pre-Stop Processing.
-
-

Commands and Parameters Used to Run a Container

A Docker image has metadata that stores image information. If lifecycle commands and arguments are not set, CCE runs the default commands and arguments, that is, Docker instructions ENTRYPOINT and CMD, provided during image creation.

-

If the commands and arguments used to run a container are set during application creation, they overwrite the default ENTRYPOINT and CMD instructions defined during image build. The rules are as follows:

Table 1 Commands and parameters used to run a container

  • Image ENTRYPOINT [touch], image CMD [/root/test], container command not set, container parameters not set: the command executed is [touch /root/test].
  • Image ENTRYPOINT [touch], image CMD [/root/test], container command [mkdir], container parameters not set: the command executed is [mkdir].
  • Image ENTRYPOINT [touch], image CMD [/root/test], container command not set, container parameters [/opt/test]: the command executed is [touch /opt/test].
  • Image ENTRYPOINT [touch], image CMD [/root/test], container command [mkdir], container parameters [/opt/test]: the command executed is [mkdir /opt/test].
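In a pod spec, these overrides correspond to the command and args fields of the container. A brief sketch matching the last row of the table (the image and values are illustrative only):

apiVersion: v1
kind: Pod
metadata:
  name: command-demo
spec:
  restartPolicy: Never             # The command runs once and exits
  containers:
  - name: demo
    image: nginx                   # Illustrative image
    command: ["mkdir"]             # Overrides the image ENTRYPOINT
    args: ["/opt/test"]            # Overrides the image CMD; executed command: mkdir /opt/test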

Startup Commands

By default, the command defined in the image is executed when the container starts. To run a different command or override the default image settings, configure the container startup commands. For details, see Setting Container Startup Commands.

-
-

Post-Start Processing

  1. Log in to the CCE console. Expand Lifecycle when adding a container during workload creation.
  2. Set the post-start processing parameters, as listed in Table 2.

    Table 2 Post-start processing parameters

    • CLI: Set commands to be executed in the container for post-start processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write them into a script for execution. Commands that are executed in the background or asynchronously are not supported.
      Example command:
      exec:
        command:
        - /install.sh
        - install_agent
      Enter /install.sh install_agent in the script. This command indicates that install.sh will be executed after the container is created successfully.
    • HTTP request: Send an HTTP request for post-start processing. The related parameters are described as follows:
      • Path: (optional) request URL.
      • Port: (mandatory) request port.
      • Host Address: (optional) IP address of the request. The default value is the IP address of the node where the container resides.

-
-

Pre-Stop Processing

  1. When creating a workload and adding a container, expand Lifecycle.
  2. Set the pre-stop parameters, as listed in Table 3.

    Table 3 Pre-stop parameters

    • CLI: Set commands to be executed in the container for pre-stop processing. The command format is Command Args[1] Args[2].... Command is a system command or a user-defined executable program. If no path is specified, an executable program in the default path will be selected. If multiple commands need to be executed, write them into a script for execution.
      Example command:
      exec:
        command:
        - /uninstall.sh
        - uninstall_agent
      Enter /uninstall.sh uninstall_agent in the script. This command indicates that the uninstall.sh script will be executed before the container completes its execution and stops running.
    • HTTP request: Send an HTTP request for pre-stop processing. The related parameters are described as follows:
      • Path: (optional) request URL.
      • Port: (mandatory) request port.
      • Host Address: (optional) IP address of the request. The default value is the IP address of the node where the container resides.

-
-

Container Restart Policy

The restartPolicy field is used to specify the pod restart policy. The restart policy type can be Always, OnFailure, or Never. The default value is Always.

-

When restartPolicy is used, containers are restarted only through kubelet on the same node.

  • Always: When a container fails, kubelet automatically restarts the container.
  • OnFailure: When the container stops running and the exit code is not 0, kubelet automatically restarts the container.
  • Never: kubelet does not restart the container regardless of the container running status.

Controllers that can manage pods include ReplicaSet Controllers, jobs, DaemonSets, and kubelet (static pod).

-
  • ReplicaSet controllers and DaemonSets: The policy must be set to Always to ensure that containers run continuously.
  • Jobs: The policy can be set to OnFailure or Never to ensure that containers are not restarted after being executed (see the example manifest after this list).
  • kubelet (static pods): kubelet restarts a static pod whenever it fails, regardless of the value of restartPolicy, and no health check is performed on the pod.
-
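A minimal sketch of a Job that uses the OnFailure restart policy (the image and command are illustrative only):

apiVersion: batch/v1
kind: Job
metadata:
  name: pi-once
spec:
  template:
    spec:
      restartPolicy: OnFailure        # Restart the container only when it exits with a non-zero code
      containers:
      - name: pi
        image: perl                   # Illustrative image
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"]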
-
-

Example YAML for Setting the Container Lifecycle

This section uses Nginx as an example to describe how to set the container lifecycle.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name, and you can change it as required.

    vi nginx-deployment.yaml

    -

    In the following configuration file, the postStart command runs the install.sh script using /bin/bash, and the preStop command runs the uninstall.sh script.

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      strategy:
        type: RollingUpdate
      template:
        metadata:
          labels:
            app: nginx
        spec:
          restartPolicy: Always                 # Restart policy
          containers:
          - image: nginx
            command:                            # Startup command
            - sleep
            - "3600"
            imagePullPolicy: Always
            lifecycle:
              postStart:
                exec:
                  command:
                  - /bin/bash
                  - install.sh                  # Post-start command
              preStop:
                exec:
                  command:
                  - /bin/bash
                  - uninstall.sh                # Pre-stop command
            name: nginx
          imagePullSecrets:
          - name: default-secret

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0107.html b/docs/cce/umn/cce_01_0107.html deleted file mode 100644 index 13076760..00000000 --- a/docs/cce/umn/cce_01_0107.html +++ /dev/null @@ -1,39 +0,0 @@ - - -

Connecting to a Cluster Using kubectl

-

Scenario

This section uses a CCE cluster as an example to describe how to connect to a CCE cluster using kubectl.

-
-

Permission Description

When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user.

-

For details about user permissions, see Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

-
-

Using kubectl

Background

-

To connect a client to a Kubernetes cluster, you can use kubectl. For details, see Install Tools.

-

Prerequisites

-
CCE allows you to access a cluster through a VPC network or a public network.
  • VPC internal access: Clusters in the same VPC can access each other.
  • Public network access: You need to prepare an ECS that can connect to a public network.
-
-

If public network access is used, the kube-apiserver of the cluster will be exposed to the public network and may be attacked. You are advised to configure Advanced Anti-DDoS for the EIP of the node where the kube-apiserver is located.

-
-

Downloading kubectl

-

You need to download kubectl and the configuration file, copy them to your client, and configure kubectl. After the configuration is complete, you can use kubectl to access your Kubernetes clusters.

-

On the Kubernetes release page, click the corresponding link based on the cluster version, click Client Binaries, and download the corresponding platform software package.

-
Figure 1 Downloading kubectl
-

Installing and configuring kubectl

-
  1. Log in to the CCE console, click Resource Management > Clusters, and choose Command Line Tool > Kubectl under the cluster to be connected.
  2. On the Kubectl tab page of the cluster details page, connect to the cluster as prompted.

    • You can download the kubectl configuration file (kubeconfig.json) on the kubectl tab page. This file is used for user cluster authentication. If the file is leaked, your clusters may be attacked.
    • If two-way authentication is enabled for the current cluster and an EIP has been bound to the cluster, when the authentication fails (x509: certificate is valid), you need to bind the EIP and download the kubeconfig.json file again.
    • By default, two-way authentication is disabled for domain names in the current cluster. You can run the kubectl config use-context externalTLSVerify command to enable two-way authentication. For details, see Two-Way Authentication for Domain Names. For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, you need to bind the EIP again and download kubeconfig.json again.
    • The Kubernetes permissions assigned by the configuration file downloaded by IAM users are the same as those assigned to the IAM users on the CCE console.
    • If the KUBECONFIG environment variable is configured in the Linux OS, kubectl preferentially loads the KUBECONFIG environment variable instead of $home/.kube/config.
    -
    -

-
-
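After downloading kubeconfig.json, a common way to configure kubectl on a Linux client is shown below (the paths are illustrative; keep the file secure because it grants access to your cluster):

# Place the downloaded configuration file where kubectl expects it by default
mkdir -p $HOME/.kube
mv -f kubeconfig.json $HOME/.kube/config

# Verify that kubectl can reach the cluster
kubectl cluster-info
kubectl get nodes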

Two-Way Authentication for Domain Names

Currently, CCE supports two-way authentication for domain names.

-
  • Two-way authentication is disabled for domain names by default. You can run the kubectl config use-context externalTLSVerify command to switch to the externalTLSVerify context to enable it.
  • When an EIP is bound to or unbound from a cluster, or a custom domain name is configured or updated, the latest cluster access address (including the EIP bound to the cluster and all custom domain names configured for the cluster) will be added to the cluster server certificate.
  • Asynchronous cluster synchronization takes about 5 to 10 minutes.
  • For a cluster that has been bound to an EIP, if the authentication fails (x509: certificate is valid) when two-way authentication is used, you need to bind the EIP again and download kubeconfig.json again.
  • If the domain name two-way authentication is not supported, kubeconfig.json contains the "insecure-skip-tls-verify": true field, as shown in Figure 2. To use two-way authentication, you can download the kubeconfig.json file again and enable two-way authentication for the domain names.
    Figure 2 Two-way authentication disabled for domain names
    -
-
-

Common Issue (Error from server Forbidden)

When you use kubectl to create or query Kubernetes resources, the following output is returned:

-

# kubectl get deploy
Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"

-

The cause is that the user does not have the permissions to operate the Kubernetes resources. For details about how to assign permissions, see Namespace Permissions (Kubernetes RBAC-based).

-
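To check in advance whether the credentials in your kubeconfig file are allowed to perform a given operation, you can use kubectl auth can-i, for example:

# Returns "yes" if the current user may list Deployments in the default namespace, "no" otherwise
kubectl auth can-i list deployments --namespace default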
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0110.html b/docs/cce/umn/cce_01_0110.html deleted file mode 100644 index 704f4b5d..00000000 --- a/docs/cce/umn/cce_01_0110.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Monitoring and Logs

-

-
- - diff --git a/docs/cce/umn/cce_01_0111.html b/docs/cce/umn/cce_01_0111.html deleted file mode 100644 index c0f06569..00000000 --- a/docs/cce/umn/cce_01_0111.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

SFS Volumes

- -

-
- - - diff --git a/docs/cce/umn/cce_01_0112.html b/docs/cce/umn/cce_01_0112.html deleted file mode 100644 index 6469710c..00000000 --- a/docs/cce/umn/cce_01_0112.html +++ /dev/null @@ -1,50 +0,0 @@ - - -

Setting Health Check for a Container

-

Scenario

Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect service exceptions or automatically restart the service to restore it. This will result in a situation where the pod status is normal but the service in the pod is abnormal.

-

CCE provides the following health check probes:

-
  • Liveness probe: checks whether a container is still alive. It is similar to the ps command that checks whether a process exists. If the liveness check of a container fails, the cluster restarts the container. If the liveness check is successful, no operation is executed.
  • Readiness probe: checks whether a container is ready to process user requests. If a container is detected as not ready, service traffic will not be directed to it. Some applications take a long time to start up before they can provide services, for example, because they need to load data from disks or depend on the startup of an external module. In this case, the application process is running, but the application cannot provide services yet. This probe addresses exactly that situation. If the container readiness check fails, the cluster masks all requests sent to the container. If the check is successful, the container can be accessed.
-
-

Health Check Methods

  • HTTP request

    This health check mode is applicable to containers that provide HTTP/HTTPS services. The cluster periodically initiates an HTTP/HTTPS GET request to such containers. If the return code of the HTTP/HTTPS response is within 200–399, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port and an HTTP/HTTPS request path.

    -

    For example, for a container that provides HTTP services, the HTTP check path is /health-check, the port is 80, and the host address is optional (which defaults to the container IP address). Here, 172.16.0.186 is used as an example, and we can get such a request: GET http://172.16.0.186:80/health-check. The cluster periodically initiates this request to the container.

    -
  • TCP port

    For a container that provides TCP communication services, the cluster periodically establishes a TCP connection to the container. If the connection is successful, the probe is successful. Otherwise, the probe fails. In this health check mode, you must specify a container listening port.

    -

    For example, if you have a Nginx container with service port 80, after you specify TCP port 80 for container listening, the cluster will periodically initiate a TCP connection to port 80 of the container. If the connection is successful, the probe is successful. Otherwise, the probe fails.

    -
  • CLI

    CLI is an efficient tool for health check. When using the CLI, you must specify an executable command in a container. The cluster periodically runs the command in the container. If the command output is 0, the health check is successful. Otherwise, the health check fails.

    -

    The CLI mode can be used to replace the HTTP request-based and TCP port-based health check.

    -
    • For a TCP port, you can write a program script to connect to a container port. If the connection is successful, the script returns 0. Otherwise, the script returns –1.
    • For an HTTP request, you can write a program script to run the wget command for a container.

      wget http://127.0.0.1:80/health-check

      -

      Check the return code of the response. If the return code is within 200–399, the script returns 0. Otherwise, the script returns –1.

      -
      • Put the program to be executed in the container image so that the program can be executed.
      • If the command to be executed is a shell script, do not directly specify the script as the command, but add a script parser. For example, if the script is /data/scripts/health_check.sh, you must specify sh /data/scripts/health_check.sh for command execution. The reason is that the cluster is not in the terminal environment when executing programs in a container.
      -
      -
    -
-
-

Common Parameter Description

Table 1 Common parameter description

  • Initial Delay (s): Check delay time in seconds. Set this parameter according to the normal startup time of services. For example, if this parameter is set to 30, the health check will be started 30 seconds after the container is started. The time is reserved for containerized services to start.
  • Timeout (s): Timeout duration, in seconds. For example, if this parameter is set to 10, the timeout wait time for performing a health check is 10s. If the wait time elapses, the health check is regarded as a failure. If the parameter is left blank or set to 0, the default timeout time is 1s.
-
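Put together, a container that combines an HTTP liveness probe with a CLI readiness probe might be declared as follows (the image, paths, port, and timings are illustrative only):

apiVersion: v1
kind: Pod
metadata:
  name: probe-demo
spec:
  containers:
  - name: web
    image: nginx                        # Illustrative image
    livenessProbe:
      httpGet:
        path: /health-check             # HTTP GET http://<pod IP>:80/health-check
        port: 80
      initialDelaySeconds: 30           # Initial Delay (s)
      timeoutSeconds: 10                # Timeout (s)
    readinessProbe:
      exec:
        command:
        - sh
        - /data/scripts/health_check.sh # Script must exit 0 when the service is ready
      initialDelaySeconds: 30
      timeoutSeconds: 10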
-
- -
- diff --git a/docs/cce/umn/cce_01_0113.html b/docs/cce/umn/cce_01_0113.html deleted file mode 100644 index d557e687..00000000 --- a/docs/cce/umn/cce_01_0113.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

Setting an Environment Variable

-

Scenario

An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deployed, increasing flexibility in workload configuration.

-

The function of setting environment variables on CCE is the same as that of specifying ENV in a Dockerfile.

-

CCE provides three ways to add environment variables: Manually add environment variables, import environment variables from a secret, and import environment variables from a configMap.

-

After a container is started, do not modify configurations in the container. If configurations in the container are modified (for example, passwords, certificates, and environment variables of a containerized application are added to the container), the configurations will be lost after the container restarts and container services will become abnormal. An example scenario of container restart is pod rescheduling due to node anomalies.

-

Configurations must be imported to a container as arguments. Otherwise, configurations will be lost after the container restarts.

-
-
-

Manually Adding Environment Variables

  1. When creating a workload, add a container image. Then, expand Environment Variables and click Add Environment Variables.
  2. Configure the following parameters as required:

    • Type: Set this to Added manually.
    • Variable Name: Enter a variable name, for example, demo.
    • Variable Value/Reference: Enter a variable value, for example, value.
    -
    Figure 1 Manually adding environment variables
    -

-
-

Importing Environment Variables from a Secret

  1. You need to create a key first. For details, see Creating a Secret.
  2. When creating a workload, add a container image. Then, expand Environment Variables and click Add Environment Variables.
  3. Configure the following parameters as required:

    • Type: Set this to Added from Secret.
    • Variable Name: Enter a variable name.
    • Variable Value/Reference: Select the corresponding secret name and key.
    -
    Figure 2 Importing environment variables from a secret
    -

-
-

Importing Environment Variables from a ConfigMap

  1. Create a ConfigMap first. For details, see Creating a ConfigMap.
  2. When creating a workload, add a container image. Then, expand Environment Variables and click Add Environment Variables.
  3. Configure the following parameters as required:

    • Type: Set this to Added from ConfigMap.
    • Variable Name: Enter a variable name.
    • Variable Value/Reference: Select the corresponding ConfigMap name and key.
    -

-
-
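In YAML terms, the three ways of adding environment variables map to env entries such as the following (the variable names and the secret and ConfigMap names are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: env-demo
spec:
  containers:
  - name: demo-container
    image: nginx                         # Illustrative image
    env:
    - name: demo                         # Added manually
      value: value
    - name: DB_PASSWORD                  # Added from a secret
      valueFrom:
        secretKeyRef:
          name: my-secret                # Placeholder secret name
          key: password                  # Placeholder key
    - name: APP_CONFIG                   # Added from a ConfigMap
      valueFrom:
        configMapKeyRef:
          name: my-configmap             # Placeholder ConfigMap name
          key: config-key                # Placeholder key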
-
- -
- diff --git a/docs/cce/umn/cce_01_0114.html b/docs/cce/umn/cce_01_0114.html deleted file mode 100644 index 4658db94..00000000 --- a/docs/cce/umn/cce_01_0114.html +++ /dev/null @@ -1,91 +0,0 @@ - - -

ENI LoadBalancer

-

Scenario

An ENI LoadBalancer Service routes traffic from a load balancer directly to backend pods, reducing latency and avoiding performance loss for containerized applications.

-

External access requests are directly forwarded from a load balancer to pods. Internal access requests can be forwarded to a pod through a Service.

-

-
-

Notes and Constraints

  • ENI LoadBalancer is available only in certain regions.
  • Only dedicated load balancers are supported, and they must support layer-4 networking (TCP/UDP).
  • After a load balancer is created, its flavor cannot be changed. Therefore, in CCE, after you create a Service, you cannot connect the automatically created load balancer to other objects. If no load balancer is automatically created, you can connect any existing one to the Service.
  • The cluster version must be 1.17 or later.
  • ENI LoadBalancer Services can be created only for workloads (containers) bound with elastic network interfaces (ENIs).
-
-

Adding a Service When Creating a Workload

You can set the Service when creating a workload on the CCE console. An Nginx workload is used as an example.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select ENI LoadBalancer (ELB). This option is available only if you have selected Attach ENI to Pod when specifying basic workload information during workload creation.
    • Service Name: Specify a Service name, which can be the same as the workload name.
    -

    ELB Configuration

    -
    • Elastic Load Balancer: Only dedicated load balancers are supported.

      Dedicated: Resources are exclusively used by each load balancer, so the performance of a load balancer is not affected by other load balancers. IPv6 is supported.

      -

      You can create public network or private network load balancers.

      -
      • Public network: You can select an existing public network load balancer or have the system automatically create a new one.
      • Private network: You can select an existing private network load balancer or have the system automatically create a new one.
      -
      The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).
      • Enterprise Project: Select an enterprise project in which the load balancer is created.
      • Specifications: This field is displayed only when you select Public network and Automatically created for Elastic Load Balancer. You can click the edit icon to modify the name, specifications, billing mode, and bandwidth of the load balancer.
      • Configure Dedicated Load Balancer
        • AZ: Dedicated load balancers can be deployed across AZs to provide higher reliability.
        • Subnet: subnet where the backend server of the load balancer is located.

          Load balancers occupy different number of subnet IP addresses based on their specifications. Therefore, you are not advised to use the subnet CIDR blocks of other resources (such as clusters and nodes) as the load balancer CIDR block.

          -
        • Specifications: Specifications determine the types of listeners that can be added to a load balancer. Select specifications that best fit your needs. For details, see Specifications of Dedicated Load Balancers.
        -
      • Algorithm Type: You can select Weighted round robin, Weighted least connections, or Source IP hash. The weight is dynamically adjusted based on the number of pods of the workload associated with the Service on each node.
        • Weighted round robin: Requests are forwarded to different servers based on their weights, which indicate server processing performance. Backend servers with higher weights receive proportionately more requests, whereas equal-weighted servers receive the same number of requests. This algorithm is often used for short connections, such as HTTP services.
        • Weighted least connections: In addition to the weight assigned to each server, the number of connections processed by each backend server is also considered. Requests are forwarded to the server with the lowest connections-to-weight ratio. Building on least connections, the weighted least connections algorithm assigns a weight to each server based on their processing performance. This algorithm is often used for persistent connections, such as database connections.
        • Source IP hash: The source IP address of each request is calculated using the hash algorithm to obtain a unique hash key, and all backend servers are numbered. The generated key allocates the client to a particular server. This allows requests from different clients to be routed based on source IP addresses and ensures that a client is directed to the same server as always. This algorithm applies to TCP connections without cookies.
        -
        -
      • Sticky Session: This function is disabled by default. You can select Based on source IP address. Listeners ensure session stickiness based on IP addresses. Requests from the same IP address will be forwarded to the same backend server.
      • Health Check: This function is enabled by default. Enabling it will perform health checks on your load balancer. For details about how to configure the ELB health check parameters, see Configuring a Health Check.
      -
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port defined in the container image and on which the workload listens. The Nginx application listens on port 80.
      • Access Port: port mapped to the container port at the load balancer's IP address. The workload can be accessed at <Load balancer's IP address>:<Access port>. The port number range is 1–65535.
      -
    -

  2. After the configuration is complete, click OK.
  3. On the workload creation page, click Next: Configure Advanced Settings. On the page displayed, click Create.
  4. After the workload is successfully created, choose Workloads > Deployments or Workloads > StatefulSets on the CCE console. Click the name of the workload to view its details. On the workload details page, click the Services tab and obtain the access address.
  5. Click the access address.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network.
  2. On the Services tab page, click Create Service.

    The parameters are the same as those in Adding a Service When Creating a Workload.

    -

  3. Click Create. An ENI LoadBalancer Service will be added for the workload.
-
-

Using kubectl to Create a Service (Automatically Creating a Load Balancer)

An ENI LoadBalancer Service supports only dedicated ELBs. You do not need to specify NodePort when creating a Service.

-
apiVersion: v1
kind: Service
metadata:
    name: example
    annotations:
        kubernetes.io/elb.class: performance
        kubernetes.io/elb.autocreate: 
          '
          {
              "type": "public",
              "bandwidth_name": "cce-bandwidth-1630813564682",
              "bandwidth_chargemode": "traffic",
              "bandwidth_size": 5,
              "bandwidth_sharetype": "PER",
              "eip_type": "5_bgp",
              "available_zone": [
                  "eu-de-01"
              ],
              "l7_flavor_name": "L7_flavor.elb.s2.medium",
              "l4_flavor_name": "L4_flavor.elb.s1.small"
          }
          '
spec:
    selector:
        app: example
    ports:
        -   name: cce-service-0
            targetPort: 80
            port: 8082
            protocol: TCP
    type: LoadBalancer

For details about the parameters, see Table 4.

-
-

Using kubectl to Create a Service (Using an Existing Load Balancer)

When creating a Service using an existing load balancer, you only need to specify the ID of the load balancer.

-
apiVersion: v1
kind: Service
metadata:
    name: example
    annotations:
        kubernetes.io/elb.id: bcc44e84-d0b5-4192-8bec-b2ca55ce5025     # ID of the load balancer. Replace it with the actual value.
spec:
    selector:
        app: example
    ports:
        -   name: cce-service-0
            targetPort: 80
            port: 8082
            protocol: TCP
    type: LoadBalancer

ELB Forwarding

After an ENI LoadBalancer Service is created, you can view the listener forwarding rules of the load balancer on the ELB console.

-
Figure 1 ELB forwarding
-

You can find that a listener is created for the load balancer. The backend server address is the IP address of the pod, and the service port is the container port. This is because the pod uses an ENI or sub-ENI. When traffic passes through the load balancer, it directly forwards the traffic to the pod. This is the same as that described in Scenario.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0120.html b/docs/cce/umn/cce_01_0120.html deleted file mode 100644 index 6070c05b..00000000 --- a/docs/cce/umn/cce_01_0120.html +++ /dev/null @@ -1,62 +0,0 @@ - - -

Performing Replace/Rolling Upgrade (v1.13 and Earlier)

-

Scenario

You can upgrade your clusters to a newer Kubernetes version on the CCE console.

-

Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what ways, and the upgrade impacts. For details, see Overview and Before You Start.

-
-

Precautions

  • If the coredns add-on needs to be upgraded during the cluster upgrade, ensure that the number of nodes is greater than or equal to the number of coredns instances and all coredns instances are running. Otherwise, the upgrade will fail. Before upgrading a cluster of v1.11 or v1.13, you need to upgrade the coredns add-on to the latest version available for the cluster.
  • When a cluster of v1.11 or earlier is upgraded to v1.13, the impacts on the cluster are as follows:
    • All cluster nodes will be restarted as their OSs are upgraded, which affects application running.
    • The cluster signing certificate mechanism is changed. As a result, the original cluster certificate becomes invalid. You need to obtain the certificate or kubeconfig file again after the cluster is upgraded.
    -
  • During the upgrade from one release of v1.13 to a later release of v1.13, applications in the cluster are interrupted for a short period of time only during the upgrade of network components.
  • During the upgrade from Kubernetes 1.9 to 1.11, the kube-dns of the cluster will be uninstalled and replaced with CoreDNS, which may cause loss of the cascading DNS configuration in the kube-dns or temporary interruption of the DNS service. Back up the DNS address configured in the kube-dns so you can configure the domain name in the CoreDNS again when domain name resolution is abnormal.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. In the cluster list, check the cluster version.
  2. Click More for the cluster you want to upgrade, and select Upgrade from the drop-down menu.

    • If your cluster version is up-to-date, the Upgrade button is grayed out.
    • If the cluster status is Unavailable, the upgrade flag in the upper right corner of the cluster card view will be grayed out. Check the cluster status by referring to Before You Start.
    -
    -

  3. In the displayed Pre-upgrade Check dialog box, click Check Now.
  4. The pre-upgrade check starts. While the pre-upgrade check is in progress, the cluster status will change to Pre-checking and new nodes/applications will not be able to be deployed on the cluster. However, existing nodes and applications will not be affected. It takes 3 to 5 minutes to complete the pre-upgrade check.
  5. When the status of the pre-upgrade check is Completed, click Upgrade.
  6. On the cluster upgrade page, review or configure basic information by referring to Table 1.

    Table 1 Basic information

    • Cluster Name: Review the name of the cluster to be upgraded.
    • Current Version: Review the version of the cluster to be upgraded.
    • Target Version: Review the target version after the upgrade.
    • Node Upgrade Policy: Replace (replace upgrade). Worker nodes will be reset. Their OSs will be reinstalled, and data on the system and data disks will be cleared. Exercise caution when performing this operation.
      NOTE:
      • The lifecycle management function of the nodes and workloads in the cluster is unavailable.
      • APIs cannot be called temporarily.
      • Running workloads will be interrupted because nodes are reset during the upgrade.
      • Data in the system and data disks on the worker nodes will be cleared. Back up important data before resetting the nodes.
      • Data disks without LVM mounted to worker nodes need to be mounted again after the upgrade, and data on the disks will not be lost during the upgrade.
      • The EVS disk quota must be greater than 0.
      • The container IP addresses change, but the communication between containers is not affected.
      • Custom labels on the worker nodes will be cleared.
      • It takes about 20 minutes to upgrade a master node and about 30 to 120 minutes to upgrade worker nodes (about 3 minutes for each worker node), depending on the number of worker nodes and upgrade batches.
    • Login Mode: Key pair. Select the key pair used to log in to the node. You can select a shared key. A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

  7. Click Next. In the dialog box displayed, click OK.
  8. Upgrade add-ons. If an add-on needs to be upgraded, a red dot is displayed. Click the Upgrade button in the lower left corner of the add-on card view. After the upgrade is complete, click Upgrade in the lower right corner of the page.

    • Master nodes will be upgraded first, and then the worker nodes will be upgraded concurrently. If there are a large number of worker nodes, they will be upgraded in different batches.
    • Select a proper time window for the upgrade to reduce impacts on services.
    • Clicking OK will start the upgrade immediately, and the upgrade cannot be canceled. Do not shut down or restart nodes during the upgrade.
    -
    -

  9. In the displayed Upgrade dialog box, read the information and click OK. Note that the cluster cannot be rolled back after the upgrade.
  10. Back to the cluster list, you can see that the cluster status is Upgrading. Wait until the upgrade is completed.

    After the upgrade is successful, you can view the cluster status and version on the cluster list or cluster details page.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0125.html b/docs/cce/umn/cce_01_0125.html deleted file mode 100644 index 34d1cf91..00000000 --- a/docs/cce/umn/cce_01_0125.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

SFS Turbo Volumes

- -

-
- - - diff --git a/docs/cce/umn/cce_01_0127.html b/docs/cce/umn/cce_01_0127.html deleted file mode 100644 index af45efbc..00000000 --- a/docs/cce/umn/cce_01_0127.html +++ /dev/null @@ -1,28 +0,0 @@ - - -

storage-driver (System Resource Add-on, Mandatory)

-

Introduction

storage-driver functions as a standard Kubernetes FlexVolume plug-in to allow containers to use IaaS storage resources. By installing and upgrading storage-driver, you can quickly install and update cloud storage capabilities.

-

storage-driver is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.13 or earlier is created.

-
-

Notes and Constraints

  • For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume plug-in (storage-driver) is compatible with the CSI plug-in (everest). Clusters of v1.17 and later versions do not support FlexVolume any more. You need to use the everest add-on. For details about CSI and FlexVolume, see Differences Between CSI and FlexVolume Plug-ins.
  • The FlexVolume plug-in will be maintained by Kubernetes developers, but new functionality will only be added to CSI. You are advised not to create storage that connects to the FlexVolume plug-in (storage-driver) in CCE any more. Otherwise, the storage resources may not function normally.
  • This add-on can be installed only in clusters of v1.13 or earlier. By default, the everest add-on is installed when clusters of v1.15 or later are created.

    In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.

    -
    -
-
-

Installing the Add-on

This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

-

If storage-driver is not installed in a cluster, perform the following steps to install it:

-
  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under storage-driver.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Click Install to install the add-on. Note that the storage-driver has no configurable parameters and can be directly installed.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the target cluster and click Upgrade under storage-driver.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • When the upgrade is complete, the original storage-driver version on cluster nodes will be replaced by the latest version.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Click Upgrade to upgrade the storage-driver add-on. Note that the storage-driver has no configurable parameters and can be directly upgraded.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the target cluster and click Uninstall under storage-driver.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0129.html b/docs/cce/umn/cce_01_0129.html deleted file mode 100644 index 96a904ff..00000000 --- a/docs/cce/umn/cce_01_0129.html +++ /dev/null @@ -1,173 +0,0 @@ - - -

coredns (System Resource Add-on, Mandatory)

-

Introduction

The coredns add-on is a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.

-

coredns is an open-source software and has been a part of CNCF. It provides a means for cloud services to discover each other in cloud-native deployments. Each of the plug-ins chained by coredns provides a particular DNS function. You can integrate coredns with only the plug-ins you need to make it fast, efficient, and flexible. When used in a Kubernetes cluster, coredns can automatically discover services in the cluster and provide domain name resolution for these services. By working with a cloud DNS server, coredns can resolve external domain names for workloads in a cluster.

-

coredns is a system resource add-on. It is installed by default when a cluster of Kubernetes v1.11 or later is created.

-

In Kubernetes v1.11 and later, CoreDNS is the official default DNS server for all clusters going forward.

-

CoreDNS official website: https://coredns.io/

-

Open source community: https://github.com/coredns/coredns

-
-

Notes and Constraints

When CoreDNS is running properly or being upgraded, ensure that the number of available nodes is greater than or equal to the number of CoreDNS instances and all CoreDNS instances are running. Otherwise, the upgrade will fail.

-
-

Installing the Add-on

This add-on has been installed by default. If it is uninstalled due to some reasons, you can reinstall it by performing the following steps:

-
  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under coredns.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. In the Configuration step, set the following parameters:

    Table 1 coredns add-on parameters

    • Add-on Specifications: Concurrent domain name resolution ability. Select add-on specifications that best fit your needs.
    • Instances: Number of pods that will be created to match the selected add-on specifications. The number cannot be modified.
    • Container: CPU and memory quotas of the container allowed for the selected add-on specifications. The quotas cannot be modified.
    • Notes: Add-on precautions. Read the precautions before you proceed with the step.
    • stub domain: A domain name server for a user-defined domain name. The format is a key-value pair. The key is a suffix of a DNS domain name, and the value is one or more DNS IP addresses. For example, acme.local -- 1.2.3.4,6.7.8.9 means that DNS requests with the .acme.local suffix are forwarded to a DNS server listening at 1.2.3.4,6.7.8.9.

  4. After the preceding configurations are complete, click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Configuring the Stub Domain for CoreDNS

Cluster administrators can modify the ConfigMap for the CoreDNS Corefile to change how service discovery works. They can configure stub domains for CoreDNS using the proxy plug-in.

-

Assume that a cluster administrator has a Consul DNS server located at 10.150.0.1 and all Consul domain names have the suffix .consul.local.

-

To configure this Consul DNS server in CoreDNS, run the following command to edit the CoreDNS ConfigMap:

-

kubectl edit configmap coredns -n kube-system

-

Example configuration:

-
consul.local:5353 {
    errors
    cache 30
    proxy . 10.150.0.1
}

In clusters of v1.15.11 and later, the modified ConfigMap is as follows:

-
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: 00cb8f29-62d7-4df8-a769-0a16237903c1
  resourceVersion: '2074614'
  creationTimestamp: '2021-04-07T03:52:42Z'
  labels:
    app: coredns
    k8s-app: coredns
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: CoreDNS
    release: cceaddon-coredns
data:
  Corefile: |-
    .:5353 {
        bind {$POD_IP}
        cache 30
        errors
        health {$POD_IP}:8080
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          upstream /etc/resolv.conf
          fallthrough in-addr.arpa ip6.arpa
        }
        loadbalance round_robin
        prometheus {$POD_IP}:9153
        forward . /etc/resolv.conf
        reload
    }

    consul.local:5353 {
        errors
        cache 30
        proxy . 10.150.0.1
    }

In clusters earlier than v1.15.11, the modified ConfigMap is as follows:

-
apiVersion: v1
data:
  Corefile: |-
    .:5353 {
        cache 30
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          upstream /etc/resolv.conf
          fallthrough in-addr.arpa ip6.arpa
        }
        loadbalance round_robin
        prometheus 0.0.0.0:9153
        proxy . /etc/resolv.conf
        reload
    }

    consul.local:5353 {
        errors
        cache 30
        proxy . 10.150.0.1
    }
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
-

How Does Domain Name Resolution Work in Kubernetes?

DNS policies can be set on a per-pod basis. Currently, Kubernetes supports four types of DNS policies: Default, ClusterFirst, ClusterFirstWithHostNet, and None. For details, see https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/. These policies are specified in the dnsPolicy field in the pod-specific.

-
-
  • Default: Pods inherit the name resolution configuration from the node that the pods run on. The custom upstream DNS server and the stub domain cannot be used together with this policy.
  • ClusterFirst: Any DNS query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream name server inherited from the node. Cluster administrators may have extra stub domains and upstream DNS servers configured.
  • ClusterFirstWithHostNet: For pods running with hostNetwork, set the DNS policy to ClusterFirstWithHostNet.
  • None: It allows a pod to ignore DNS settings from the Kubernetes environment. All DNS settings are provided using the dnsConfig field in the pod specification (a minimal example is provided after the notes below).
-
  • Clusters of Kubernetes v1.10 and later support Default, ClusterFirst, ClusterFirstWithHostNet, and None. Clusters earlier than Kubernetes v1.10 support only Default, ClusterFirst, and ClusterFirstWithHostNet.
  • Default is not the default DNS policy. If dnsPolicy is not explicitly specified, ClusterFirst is used.
-
-
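
The following is a minimal example pod manifest that combines the None policy with the dnsConfig field; the pod name, DNS server address, and search domain are illustrative only.

apiVersion: v1
kind: Pod
metadata:
  name: dns-example               # illustrative name
spec:
  containers:
  - name: test
    image: nginx:alpine
  dnsPolicy: "None"               # ignore the cluster DNS settings
  dnsConfig:
    nameservers:
    - 1.2.3.4                     # custom DNS server (illustrative)
    searches:
    - acme.local                  # custom search domain (illustrative)
    options:
    - name: ndots
      value: "2"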

Routing

-

Without stub domain configurations: Any query that does not match the configured cluster domain suffix, such as www.kubernetes.io, is forwarded to the upstream DNS server inherited from the node.

-

With stub domain configurations: If stub domains and upstream DNS servers are configured, DNS queries are routed according to the following flow:

-
  1. The query is first sent to the DNS caching layer in coredns.
  2. From the caching layer, the suffix of the request is examined and then the request is forwarded to the corresponding DNS:
    • Names with the cluster suffix, for example, .cluster.local: The request is sent to coredns.
    -
    • Names with the stub domain suffix, for example, .acme.local: The request is sent to the configured custom DNS resolver that listens, for example, on 1.2.3.4.
    • Names that do not match the suffix (for example, widget.com): The request is forwarded to the upstream DNS.
    -
-
Figure 1 Routing
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under coredns.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • During the upgrade, the previous configurations are lost and need to be specified again.
    • When the upgrade is complete, the original coredns version on cluster nodes will be replaced by the latest version. If an exception occurs during the upgrade, uninstall the add-on and then re-install it.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Configure the parameters listed in Table 2. After the configuration is complete, click Upgrade to upgrade the coredns add-on.

    -

    - - - - - - - - - - -
    Table 2 Parameters for installing coredns

    Parameter

    -

    Description

    -

    Add-on Specifications

    -

    Concurrent domain name resolution ability. Select add-on specifications that best fit your needs.

    -

    stub domain

    -

    A DNS server for a user-defined domain name. The format is a key-value pair. The key is a DNS domain name suffix, and the value is one or more DNS server IP addresses. For example, acme.local -- 1.2.3.4,6.7.8.9 means that DNS requests with the .acme.local suffix are forwarded to the DNS servers listening at 1.2.3.4 and 6.7.8.9.

    -
    -
    -

-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Uninstall under coredns.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0130.html b/docs/cce/umn/cce_01_0130.html deleted file mode 100644 index adf3daba..00000000 --- a/docs/cce/umn/cce_01_0130.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Configuring a Container

-
- - diff --git a/docs/cce/umn/cce_01_0139.html b/docs/cce/umn/cce_01_0139.html deleted file mode 100644 index 2b3031a0..00000000 --- a/docs/cce/umn/cce_01_0139.html +++ /dev/null @@ -1,186 +0,0 @@ - - -

Common kubectl Commands

-

Getting Started

get

-

The get command displays one or many resources of a cluster.

-

This command prints a table of the most important information about all resources, including cluster nodes, running pods, Deployments, and Services.

-

A cluster can have multiple namespaces. If no namespace is specified, this command will run with the --namespace=default flag.

-
-

Examples:

-

To list all pods with detailed information:

-
kubectl get po -o wide
-

To display pods in all namespaces:

-
kubectl get po --all-namespaces
-

To list labels of pods in all namespaces:

-
kubectl get po --show-labels
-

To list all namespaces in the cluster:

-
kubectl get namespace
-

To query resources on another API server, run this command with the -s (--server) flag. To list a specific type of resource, add the resource type to this command, for example, kubectl get svc, kubectl get nodes, or kubectl get deploy.

-
-

To list a pod with a specified name in YAML output format:

-
kubectl get po <podname> -o yaml
-

To list a pod with a specified name in JSON output format:

-
kubectl get po <podname> -o json
-
kubectl get po rc-nginx-2-btv4j -o=custom-columns=LABELS:.metadata.labels.app
-

LABELS is a user-defined column title (multiple titles can be specified as a comma-separated list), and .metadata.labels.app is the expression that selects the data to be displayed in that column.

-
-

create

-

The create command creates a cluster resource from a file or input.

-

If there is already a resource descriptor (a YAML or JSON file), you can create the resource from the file by running the following command:

-
kubectl create -f filename
-

expose

-

The expose command exposes a resource as a new Kubernetes service. Possible resources include a pod, Service, and Deployment.

-
kubectl expose deployment deployname --port=81 --type=NodePort --target-port=80 --name=service-name
-

The example command creates a NodePort Service for the Deployment named deployname. The Service serves on port 81 specified by --port and forwards traffic to the containers on port 80 specified by --target-port. More specifically, the Service is reachable at <cluster-internal IP address>:<port> within the cluster and at <node IP address>:<node port> from outside the cluster, and traffic is forwarded to the containers on <target-port>.

-
-

run

-

Examples:

-

To run a particular image in the cluster:

-
kubectl run deployname --image=nginx:latest
-

To run a particular image using a specified command:

-
kubectl run deployname --image=busybox --command -- ping baidu.com
-

set

-

The set command configures object resources.

-

Example:

-

To change the image of a deployment with the name specified in deployname to image 1.0:

-
kubectl set image deploy deployname containername=containername:1.0
-

edit

-

The edit command edits a resource from the default editor.

-

Examples:

-

To update a pod:

-
kubectl edit po po-nginx-btv4j
-

The example command yields the same effect as the following command:

-
kubectl get po po-nginx-btv4j -o yaml >> /tmp/nginx-tmp.yaml
-vim /tmp/nginx-tmp.yaml
-/*do some changes here */
-kubectl replace -f /tmp/nginx-tmp.yaml
-

explain

-

The explain command displays the documentation of resources and their fields.

-

Example:

-

To get documentation of pods:

-
kubectl explain pod
-

delete

-

The delete command deletes resources by resource name or label.

-

Example:

-

To delete a pod with minimal delay:

-
kubectl delete po podname --now 
-
kubectl delete -f nginx.yaml
-kubectl delete deployment deployname
-
-

Deployment Commands

rolling-update*

-

rolling-update is a very important command. It updates a running service with zero downtime. Pods are incrementally replaced by new ones. One pod is updated at a time. The old pod is deleted only after the new pod is up. New pods must be distinct from old pods by name, version, and label. Otherwise, an error message will be reported.

-
kubectl rolling-update poname -f newfilename
-kubectl rolling-update poname --image=image:v2
-

If any problem occurs during the rolling update, run the command with the --rollback flag to abort the rolling update and revert to the previous pod.

-
kubectl rolling-update poname --rollback
-

rollout

-

The rollout command manages the rollout of a resource.

-

Examples:

-

To check the rollout status of a particular deployment:

-
kubectl rollout status deployment/deployname
-

To view the rollout history of a particular deployment:

-
kubectl rollout history deployment/deployname
-

To roll back to the previous revision (by default, a resource is rolled back to its previous version):

-
kubectl rollout undo deployment/test-nginx
-

scale

-

The scale command sets a new size for a resource by adjusting the number of resource replicas.

-
kubectl scale deployment deployname --replicas=newnumber
-

autoscale

-

The autoscale command automatically chooses and sets the number of pods. This command specifies the range for the number of pod replicas maintained by a replication controller. If there are too many pods, the replication controller terminates the extra pods. If there are too few, the replication controller starts more pods.

-
kubectl autoscale deployment deployname --min=minnumber --max=maxnumber
-
-

Cluster Management Commands

cordon, drain, uncordon*

-

If a node to be upgraded is running many pods or is already down, perform the following steps to prepare the node for maintenance:

-
  1. Run the cordon command to mark a node as unschedulable. This means that new pods will not be scheduled onto the node.

    kubectl cordon nodename
    -

    Note: In CCE, nodename indicates the private network IP address of a node.

    -

  2. Run the drain command to smoothly migrate the running pods from the node to another node.

    kubectl drain nodename --ignore-daemonsets --delete-emptydir-data
    -

    --ignore-daemonsets skips DaemonSet-managed pods, and --delete-emptydir-data allows pods that use emptyDir volumes to be evicted (in kubectl versions earlier than v1.20, this flag is named --delete-local-data).

    -

  3. Perform maintenance operations on the node, such as upgrading the kernel and upgrading Docker.
  4. After node maintenance is completed, run the uncordon command to mark the node as schedulable.

    kubectl uncordon nodename
    -

-

cluster-info

-

To display the endpoint information about the master and services running in the cluster:

-
kubectl cluster-info
-

To dump current cluster information to stdout:

-
kubectl cluster-info dump
-

top*

-

The top command displays resource (CPU/memory/storage) usage. This command requires a metrics add-on (such as Heapster or metrics-server) to be correctly configured and working in the cluster.

-
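
For example, assuming the metrics add-on is running, the following commands display node and pod resource usage:

kubectl top node
kubectl top pod -n kube-system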

taint*

-

The taint command updates the taints on one or more nodes.

-
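
For example, the following commands add a taint with the NoSchedule effect to a node and then remove it (key1 and value1 are placeholders; the trailing hyphen removes the taint):

kubectl taint nodes nodename key1=value1:NoSchedule
kubectl taint nodes nodename key1=value1:NoSchedule-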

certificate*

-

The certificate command modifies the certificate resources.

-
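
For example, to approve or deny a CertificateSigningRequest (csrname is a placeholder):

kubectl certificate approve csrname
kubectl certificate deny csrname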
-

Fault Diagnosis and Debugging Commands

describe

-

The describe command is similar to the get command. The difference is that the describe command shows details of a specific resource or group of resources, whereas the get command lists one or more resources in a cluster. The describe command does not support the -o flag. For resources of the same type, resource details are printed out in the same format.

-

To query the basic information about a resource, use the get command. To check the status of a specific resource, for example, whether a pod is in the Running state, run the describe command to obtain more detailed status information.

-
kubectl describe po <podname>
-
-

logs

-

The logs command prints logs for a container in a pod or specified resource to stdout. To display logs in the tail -f mode, run this command with the -f flag.

-
kubectl logs -f podname
-

exec

-

The kubectl exec command is similar to the Docker exec command and executes a command in a container. If there are multiple containers in a pod, use the -c flag to choose a container.

-
kubectl exec -it podname -- bash
-kubectl exec -it podname -c containername -- bash
-

port-forward*

-

The port-forward command forwards one or more local ports to a pod.

-

Example:

-

To listen on local port 5000 and forward data to/from port 6000 in the pod:

-
kubectl port-forward podname 5000:6000
-

proxy*

-

The proxy command creates a proxy server between localhost and the Kubernetes API server.

-

Example:

-

To expose the Kubernetes REST API over HTTP through a local proxy that accepts requests from any host:

-
kubectl proxy --accept-hosts='.*' --port=8001 --address='0.0.0.0'
-

cp

-

The cp command copies files and directories to and from containers.

-
kubectl cp filename <podname>:<path>/newfilename
-

auth*

-

The auth command inspects authorization.

-
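
For example, to check whether the current user is allowed to create Deployments in the default namespace:

kubectl auth can-i create deployments --namespace default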

attach*

-

The attach command is similar to the logs -f command and attaches to a process that is already running inside an existing container. To exit, press Ctrl+C. If a pod contains multiple containers, to view the output of a specific container, use the -c flag followed by the container name to specify the container.

-
kubectl attach podname -c containername
-
-

Advanced Commands

replace

-

The replace command updates or replaces an existing resource by attributes including the number of replicas, labels, image versions, and ports. You can directly modify the original YAML file and then run the replace command.

-
kubectl replace -f filename
-

Resource names cannot be updated.

-
-

apply*

-

The apply command provides stricter control over resource updating than the patch and edit commands. The apply command applies a configuration to a resource and maintains a set of configuration files in source control. Whenever there is an update, the configuration file is pushed to the server, and then the kubectl apply command applies the latest configuration to the resource. Kubernetes compares the new configuration file with the original one and updates only the changed configuration instead of the whole file. Configuration not contained in the file specified by the -f flag remains unchanged. Unlike the replace command, which deletes the resource and creates a new one, the apply command directly updates the original resource. Similar to a git operation, the apply command adds an annotation to the resource to mark the current apply.

-
kubectl apply -f filename
-

patch

-

If you want to modify attributes of a running container without first deleting the container or using the replace command, use the patch command. The patch command updates field(s) of a resource using a strategic merge patch, a JSON merge patch, or a JSON patch. For example, to change a pod label from app=nginx1 to app=nginx2 while the pod is running, use the following command:

-
kubectl patch pod podname -p '{"metadata":{"labels":{"app":"nginx2"}}}'
-

convert*

-

The convert command converts configuration files between different API versions.

-
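
For example, assuming a kubectl release that still ships the convert command (it was later moved to the kubectl-convert plugin), the following converts a manifest to a specified API version (old-manifest.yaml is a placeholder):

kubectl convert -f old-manifest.yaml --output-version apps/v1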
-

Configuration Commands

label

-

The label command updates labels on a resource.

-
kubectl label pods my-pod new-label=newlabel
-

annotate

-

The annotate command updates annotations on a resource.

-
kubectl annotate pods my-pod icon-url=http://......
-

completion

-

The completion command provides shell autocompletion.

-
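
For example, to enable kubectl autocompletion for bash in the current shell:

source <(kubectl completion bash)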
-

Other Commands

api-versions

-

The api-versions command prints the supported API versions.

-
kubectl api-versions
-

api-resources

-

The api-resources command prints the supported API resources.

-
kubectl api-resources
-

config*

-

The config command modifies kubeconfig files. An example use case of this command is to configure authentication information in API calls.

-
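
For example, to view the merged kubeconfig and switch to another context (contextname is a placeholder):

kubectl config view
kubectl config use-context contextname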

help

-

The help command displays the reference information for kubectl commands.

-

version

-

The version command prints the client and server version information for the current context.

-
kubectl version
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0141.html b/docs/cce/umn/cce_01_0141.html deleted file mode 100644 index 8ee6eb75..00000000 --- a/docs/cce/umn/cce_01_0141.html +++ /dev/null @@ -1,37 +0,0 @@ - - -

gpu-beta

-

Introduction

gpu-beta is a device management add-on that supports GPUs in containers. It supports only NVIDIA Tesla drivers.

-
-

Notes and Constraints

  • This add-on is available only in certain regions.
  • This add-on can be installed only in CCE clusters of v1.11 or later.
  • If GPU nodes are used in the cluster, the gpu-beta add-on must be installed.
  • The driver to be downloaded must be a .run file.
  • Only Tesla drivers are supported, not GRID drivers.
-
  • If the download link is a public network address, for example, https://us.download.nvidia.com/tesla/396.37/NVIDIA-Linux-x86_64-396.37.run, bind an EIP to each GPU node. For details about how to obtain the driver link, see Obtaining the Driver Link from Public Network.
  • If the download link is an OBS URL, you do not need to bind an EIP to GPU nodes.
  • Ensure that the NVIDIA driver version matches the GPU node.
  • After the driver version is changed, restart the node for the change to take effect.
-
-
-

Installing the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under gpu-beta.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. In the Configuration step, enter the link to download the NVIDIA driver.
  4. Click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each GPU node in the cluster.

    -

-
-

Verifying the Add-on

After the add-on is installed, run the nvidia-smi command on the GPU node and in a container that uses GPU resources to verify the availability of the GPU device and driver.

-
GPU node:
cd /opt/cloud/cce/nvidia/bin && ./nvidia-smi
-
-

Container:

-
cd /usr/local/nvidia/bin && ./nvidia-smi
-

If GPU information is returned, the device is available and the add-on is successfully installed.

-

-
-

Obtaining the Driver Link from Public Network

  1. Log in to the CCE console.
  2. Click Create Node and select the GPU node to be created in the Specifications area. The GPU card model of the node is displayed in the lower part of the page.
  3. Visit https://www.nvidia.com/Download/Find.aspx?lang=en.
  4. Select the driver information on the NVIDIA Driver Downloads page, as shown in Figure 1. Operating System must be Linux 64-bit.

    Figure 1 Setting parameters
    -

  5. After confirming the driver information, click SEARCH. A page is displayed, showing the driver information, as shown in Figure 2. Click DOWNLOAD.

    Figure 2 Driver information
    -

  6. Obtain the driver link in either of the following ways:

    • Method 1: As shown in Figure 3, find url=/tesla/396.37/NVIDIA-Linux-x86_64-396.37.run in the browser address box. Then, prepend the domain name to obtain the full driver link https://us.download.nvidia.com/tesla/396.37/NVIDIA-Linux-x86_64-396.37.run. With this method, you must bind an EIP to each GPU node.
    • Method 2: As shown in Figure 3, click AGREE & DOWNLOAD to download the driver. Then, upload the driver to OBS and record the OBS URL. By using this method, you do not need to bind an EIP to GPU nodes.
      Figure 3 Obtaining the link
      -
    -

-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the cluster and click Uninstall under gpu-beta.
  2. In the dialog box displayed, click Yes to uninstall the add-on.

    The driver will not be uninstalled when the gpu-beta add-on is uninstalled. If the driver is reinstalled, you must restart all GPU nodes.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0142.html b/docs/cce/umn/cce_01_0142.html deleted file mode 100644 index fcc48061..00000000 --- a/docs/cce/umn/cce_01_0142.html +++ /dev/null @@ -1,151 +0,0 @@ - - -

NodePort

-

Scenario

A Service is exposed on each node's IP address at a static port (NodePort). A ClusterIP Service, to which the NodePort Service will route, is automatically created. By requesting <NodeIP>:<NodePort>, you can access a NodePort Service from outside the cluster.

-
Figure 1 NodePort access
-
-

Notes and Constraints

  • By default, a NodePort Service is accessed within a VPC. If you need to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.
  • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracing table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
  • The service port of a NodePort Service created on the CCE console is the same as the configured container port.
  • CCE Turbo clusters support only cluster-level service affinity.
-
-

Adding a Service When Creating a Workload

You can set the access type when creating a workload on the CCE console. An Nginx workload is used as an example.

-
  1. In the Set Application Access step of Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet, click Add Service and set the following parameters:

    • Access Type: Select NodePort.

      If you want to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.

      -
      -
    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload in the container image listens. The value ranges from 1 to 65535.
      • Access Port: node port (with a private IP address) to which the container port will be mapped. You are advised to select Automatically generated.
        • Automatically generated: The system automatically assigns a port number.
        • Specified port: You have to manually specify a fixed node port number in the range of 30000–32767. Ensure that the port is unique in a cluster.
        -
      -
    -

  2. After the configuration is complete, click OK.
  3. Click Next: Configure Advanced Settings. On the page displayed, click Create.
  4. Click View Deployment Details or View StatefulSet Details. On the Services tab page, obtain the access address, for example, 192.168.0.160:30358.
-
-

Adding a Service After Creating a Workload

You can set the Service after creating a workload. This has no impact on the workload status and takes effect immediately. The procedure is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Workloads > Deployments. On the workload list, click the name of the workload for which you will create a Service.

    If the Service is associated with an ingress, the ingress is unavailable after the port information of the Service is updated. In this case, you need to delete and recreate the Service.

    -
    -

  2. On the Services tab page, click Add Service.
  3. On the Create Service page, select NodePort from the Access Type drop-down list.

    If you want to use an EIP to access a NodePort Service through public networks, bind an EIP to the node in the cluster in advance.

    -
    -

  4. Set node access parameters.

    • Service Name: Service name, which can be the same as the workload name.
    • Cluster Name: name of the cluster where the workload runs. The value is inherited from the workload creation page and cannot be changed.
    • Namespace: namespace where the workload is located. The value is inherited from the workload creation page and cannot be changed.
    • Workload: workload for which you want to add a Service. The value is inherited from the workload creation page and cannot be changed.
    • Service Affinity
      • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
      • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      -
    • Port Settings
      • Protocol: protocol used by the Service.
      • Container Port: port on which the workload in the container image listens. The Nginx workload listens on port 80.
      • Access Port: node port (with a private IP address) to which the container port will be mapped. You are advised to select Automatically generated.
        • Automatically generated: The system automatically assigns a port number.
        • Specified port: You have to manually specify a fixed node port number in the range of 30000–32767. Ensure that the port is unique in a cluster.
        -
      -
    -

  5. Click Create. A NodePort Service will be added for the workload.
-
-

Using kubectl

You can run kubectl commands to set the access type. This section uses an Nginx workload as an example to describe how to set a NodePort Service using kubectl.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the nginx-deployment.yaml and nginx-nodeport-svc.yaml files.

    The file names are user-defined. nginx-deployment.yaml and nginx-nodeport-svc.yaml are merely example file names.

    -

    vi nginx-deployment.yaml

    -
    apiVersion: apps/v1
    -kind: Deployment
    -metadata:
    -  name: nginx
    -spec:
    -  replicas: 1
    -  selector:
    -    matchLabels:
    -      app: nginx
    -  template:
    -    metadata:
    -      labels:
    -        app: nginx
    -    spec:
    -      containers:
    -      - image: nginx:latest
    -        name: nginx
    -      imagePullSecrets:
    -      - name: default-secret
    -

    vi nginx-nodeport-svc.yaml

    -
    apiVersion: v1
    -kind: Service
    -metadata:
    -  labels:
    -    app: nginx
    -  name: nginx-nodeport
    -spec:
    -  ports:
    -  - name: service
    -    nodePort: 30000     # Node port. The value ranges from 30000 to 32767.
    -    port: 8080          # Port for accessing a Service.
    -    protocol: TCP       # Protocol used for accessing a Service. The value can be TCP or UDP.
    -    targetPort: 80      # Port used by a Service to access the target container. This port is closely related to the applications running in a container. In this example, the Nginx image uses port 80 by default.
    -  selector:             # Label selector. A Service selects a pod based on the label and forwards the requests for accessing the Service to the pod. In this example, select the pod with the app:nginx label.
    -    app: nginx
    -  type: NodePort        # Service type. NodePort indicates that the Service is accessed through a node port.
    -

  3. Create a workload.

    kubectl create -f nginx-deployment.yaml

    -

    If information similar to the following is displayed, the workload has been created.

    -
    deployment "nginx" created
    -

    kubectl get po

    -

    If information similar to the following is displayed, the workload is running.

    -
    NAME                     READY     STATUS             RESTARTS   AGE
    -nginx-2601814895-qhxqv   1/1       Running            0          9s
    -

  4. Create a Service.

    kubectl create -f nginx-nodeport-svc.yaml

    -

    If information similar to the following is displayed, the Service is being created.

    -
    service "nginx-nodeport" created
    -

    kubectl get svc

    -

    If information similar to the following is displayed, the Service has been created.

    -
    # kubectl get svc
    -NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
    -kubernetes       ClusterIP   10.247.0.1     <none>        443/TCP          4d8h
    -nginx-nodeport   NodePort    10.247.30.40   <none>        8080:30000/TCP   18s
    -

  5. Access the Service.

    By default, a NodePort Service can be accessed by using Any node IP address:Node port.

    -

    The Service can be accessed from a node in another cluster in the same VPC or from another pod in the cluster. If a public IP address is bound to the node, you can also use the public IP address to access the Service. In the following example, a container is created in the cluster and the Service is accessed using Node IP address:Node port.

    -
    # kubectl get node -owide
    -NAME           STATUS   ROLES    AGE    INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
    -10.100.0.136   Ready    <none>   152m   10.100.0.136   <none>        CentOS Linux 7 (Core)   3.10.0-1160.25.1.el7.x86_64   docker://18.9.0
    -10.100.0.5     Ready    <none>   152m   10.100.0.5     <none>        CentOS Linux 7 (Core)   3.10.0-1160.25.1.el7.x86_64   docker://18.9.0
    -# kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
    -If you don't see a command prompt, try pressing enter.
    -/ # curl 10.100.0.136:30000
    -<!DOCTYPE html>
    -<html>
    -<head>
    -<title>Welcome to nginx!</title>
    -<style>
    -    body {
    -        width: 35em;
    -        margin: 0 auto;
    -        font-family: Tahoma, Verdana, Arial, sans-serif;
    -    }
    -</style>
    -</head>
    -<body>
    -<h1>Welcome to nginx!</h1>
    -<p>If you see this page, the nginx web server is successfully installed and
    -working. Further configuration is required.</p>
    -
    -<p>For online documentation and support please refer to
    -<a href="http://nginx.org/">nginx.org</a>.<br/>
    -Commercial support is available at
    -<a href="http://nginx.com/">nginx.com</a>.</p>
    -
    -<p><em>Thank you for using nginx.</em></p>
    -</body>
    -</html>
    -/ # 
    -

-
-

externalTrafficPolicy (Service Affinity)

For a NodePort Service, requests are first sent to the node port, then the Service, and finally the pod backing the Service. The backing pod may not be located on the node receiving the requests. By default, the backend workload can be accessed from any node IP address and service port. If the pod is not on the node that receives the request, the request will be redirected to the node where the pod is located, which may cause performance loss.

-

externalTrafficPolicy is a configuration parameter of the Service.

-
apiVersion: v1
-kind: Service
-metadata:
-  name: nginx-nodeport
-spec:
-  externalTrafficPolicy: Local
-  ports:
-  - name: service
-    nodePort: 30000
-    port: 80
-    protocol: TCP
-    targetPort: 80
-  selector:
-    app: nginx
-  type: NodePort
-

If the value of externalTrafficPolicy is Local, requests sent from Node IP address:Service port will be forwarded only to the pod on the local node. If the node does not have such a pod, the requests are dropped.

-

The other value of externalTrafficPolicy is Cluster (default value), which indicates that requests are forwarded across the cluster.

-

You can set this parameter when creating a Service of the NodePort type on the CCE console.

-

-

The values of externalTrafficPolicy are as follows:

-
  • Cluster: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
  • Local: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0143.html b/docs/cce/umn/cce_01_0143.html deleted file mode 100644 index 2fc7af47..00000000 --- a/docs/cce/umn/cce_01_0143.html +++ /dev/null @@ -1,21 +0,0 @@ - - -

My Charts

-
- - diff --git a/docs/cce/umn/cce_01_0144.html b/docs/cce/umn/cce_01_0144.html deleted file mode 100644 index 509a9a85..00000000 --- a/docs/cce/umn/cce_01_0144.html +++ /dev/null @@ -1,81 +0,0 @@ - - -

Preparing a Chart

-

You can prepare a chart using one of the following methods:

- -

Customizing a Chart

  1. Customize the content of a chart as required.

    For details about how to create a chart, see https://helm.sh/docs/chart_template_guide/getting_started/.

    -

  2. Set the chart directory structure and name the chart based on the requirements defined in Chart Specifications.
-
-

Using a Kubernetes Official Chart

  1. Visit https://artifacthub.io/ to obtain the required chart.
  2. Log in to a Linux host.
  3. Upload the chart obtained in 1.
  4. Run the following command to compress the chart.

    • If the Helm client is not installed on the Linux host, run the following command:

      tar pzcf {name}-{version}.tgz {name}/

      -

      In the preceding command,

      -

      {name} indicates the actual chart name.

      -

      {version} indicates the actual chart version.

      -

      The values of {name} and {version} must be the same as the values of name and version in the Chart.yaml file in the chart.

      -
      -
    • If the Helm client is installed on the Linux host, run the following command:

      helm package {name}/

      -

      In the preceding command, replace {name} with the actual chart name.

      -
    -

  5. Set the chart directory structure and name the chart based on the requirements defined in Chart Specifications.
-
-

Chart Specifications

This section uses the redis chart as an example to illustrate the chart specifications.

-
  • Naming Requirement

    A chart package is named in the format of {name}-{version}.tgz, where {version} indicates the version number in the format of Major version number.Minor version number.Revision number, for example, redis-0.4.2.tgz.

    -

    The chart name {name} can contain a maximum of 64 characters.

    -

    The version number must comply with the semantic versioning rules.

    -
    • The major and minor version numbers are mandatory, and the revision number is optional.
    • The major and minor version numbers and revision number must be integers, greater than or equal to 0, and less than or equal to 99.
    -
    -
  • Directory Structure

    The directory structure of a chart is as follows:

    -
    redis/
    -  templates/
    -  values.yaml
    -  README.md
    -  Chart.yaml
    -  .helmignore
    -
    As listed in Table 1, the parameters marked with * are mandatory. -
    - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters in the directory structure of a chart

    Parameter

    -

    Description

    -

    * templates

    -

    Stores all templates.

    -

    * values.yaml

    -

    Describes configuration parameters required by templates.

    -
    NOTICE:

    Make sure that the image address set in the values.yaml file is the same as the image address in the container image repository. Otherwise, an exception occurs when you create a workload, and the system displays a message indicating that the image fails to be pulled.

    -

    To obtain the image address, perform the following operations: Log in to the CCE console. In the navigation pane, choose Image Repository to access the SWR console. Choose My Images > Private Images and click the name of the uploaded image. On the Image Tags tab page, obtain the image address from the pull command. You can click the copy button in the Image Pull Command column to copy the command.

    -
    -

    README.md

    -

    A markdown file, including:

    -
    • The workload or services provided by the chart.
    • Prerequisites for running the chart.
    • Configurations in the values.yaml file.
    • Information about chart installation and configuration.
    -

    * Chart.yaml

    -

    Basic information about the chart. A minimal example is provided after this table.

    -

    .helmignore

    -

    Files or patterns to be ignored when the chart is read during workload installation.

    -
    -
    -
    -
-
-
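
    For reference, a minimal Chart.yaml for the redis-0.4.2.tgz example above might look as follows. The description and appVersion values are illustrative; name and version must match the values used in the chart package name.

    apiVersion: v1          # v1 for Helm 2 charts; Helm 3 charts use v2
    name: redis
    version: 0.4.2
    appVersion: "5.0"       # illustrative application version
    description: A sample Redis chart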
-
- -
- diff --git a/docs/cce/umn/cce_01_0145.html b/docs/cce/umn/cce_01_0145.html deleted file mode 100644 index 942cc1d3..00000000 --- a/docs/cce/umn/cce_01_0145.html +++ /dev/null @@ -1,49 +0,0 @@ - - -

Uploading a Chart

-

Scenario

Upload a chart to Charts > Uploaded Charts for subsequent workload creation.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts and click Upload Chart.
  2. Click Select File, select the chart to be uploaded, and click Upload.

    When you upload a chart, the naming rule of the OBS bucket is changed from cce-charts-{region}-{domain_name} to cce-charts-{region}-{domain_id}. In the old naming rule, the system converts the domain_name value into a Base64 string and uses the first 63 characters. If you cannot find the chart in the OBS bucket with the new name, search for the bucket with the old name.

    -
    -

-
-

Related Operations

After a chart is created, you can perform operations listed in Table 1 on the Uploaded Charts page.

- -
- - - - - - - - - - - - - - - - -
Table 1 Related operations

Operation

-

Description

-

Installing a chart

-

Click Install Chart to install the chart for creating workloads. For details, see Creating a Workload from a Chart.

-

Updating a chart

-

The chart content will be updated while the chart version remains unchanged. The procedure is similar to that of uploading a chart.

-

Downloading a chart

-

Click More > Download to download the chart to the local host.

-

Deleting a chart

-

Click More > Delete to delete the installed chart.

-

Deleted charts cannot be restored. Exercise caution when performing this operation.

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0146.html b/docs/cce/umn/cce_01_0146.html deleted file mode 100644 index b8577983..00000000 --- a/docs/cce/umn/cce_01_0146.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Creating a Workload from a Chart

-

Creating a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Chart.
  2. In the list of uploaded charts, click Install.
  3. Set the installation parameters listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - - - - -
    Table 1 Installation parameters

    Parameter

    -

    Description

    -

    * Release Name

    -

    Unique name of the chart release.

    -

    * Chart Version

    -

    Chart version by default.

    -

    * Cluster

    -

    Cluster where the workload will be deployed.

    -

    * Namespace

    -

    Namespace to which the workload will be deployed.

    -

    Advanced Settings

    -

    You can import and replace the values.yaml file or directly edit the chart parameters online.

    -
    NOTE:

    An imported values.yaml file must comply with YAML specifications, that is, KEY:VALUE format. The fields in the file are not restricted.

    -

    The keys in the imported values.yaml file must be the same as those in the selected chart package. Otherwise, the values.yaml file does not take effect. That is, the keys cannot be changed.

    -
    -
    1. Click Import Configuration File.
    2. Select the corresponding values.yaml file and click Open.
    -
    -
    -

  4. After the configuration is complete, click Next.
  5. Confirm the configuration and click Submit.
  6. Click Back to Release List to view the running status of the chart-based workload (also called release), or click View Release Details to view details about the release.
-
-

Upgrading a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts. Click the Template Instances tab.
  2. Click Upgrade in the row where the desired workload resides and set the parameters for the workload.
  3. Select a chart version for Chart Version.
  4. Follow the prompts to modify the chart parameters. Click Upgrade, and then click Submit.
  5. Click Back to Release List. If the chart status changes to Upgrade successful, the workload is successfully upgraded.
-
-

Rolling Back a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts. Click the Template Instances tab.
  2. Click More > Roll Back for the workload to be rolled back, select the workload version, and click Roll back to this version.

    In the workload list, if the status is Rollback successful, the workload is rolled back successfully.

    -

-
-

Uninstalling a Chart-based Workload

  1. Log in to the CCE console. In the navigation pane, choose Charts > Uploaded Charts. Click the Template Instances tab.
  2. Click More > Uninstall next to the release to be uninstalled, and click Yes. Exercise caution when performing this operation because releases cannot be restored after being uninstalled.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0149.html b/docs/cce/umn/cce_01_0149.html deleted file mode 100644 index 99035f48..00000000 --- a/docs/cce/umn/cce_01_0149.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

Affinity and Anti-Affinity Scheduling

-
- - diff --git a/docs/cce/umn/cce_01_0150.html b/docs/cce/umn/cce_01_0150.html deleted file mode 100644 index 8de85033..00000000 --- a/docs/cce/umn/cce_01_0150.html +++ /dev/null @@ -1,236 +0,0 @@ - - -

Creating a Job

-

Scenario

Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. A job is considered complete after it exits normally (exit 0).

-

A job is a resource object that is used to control batch tasks. It is different from a continuously running workload (such as a Deployment or StatefulSet).

-

A job is started and terminated at specific times, while a continuously running workload runs unceasingly unless it is terminated. The pods managed by a job automatically exit after successfully completing the job based on user configurations. The success flag varies according to the spec.completions policy.

-
  • One-off jobs: A single pod runs once until successful termination.
  • Jobs with a fixed success count: N pods run until successful termination.
  • Queue jobs: A job is considered complete when the application confirms overall success.
-
-

Prerequisites

Resources have been created. For details, see Creating a Node. If clusters and nodes are available, you need not create them again.

- -
-

Procedure

  1. (Optional) If you use a private container image to create your job, upload the container image to the image repository.

    -

  2. Log in to the CCE console. In the navigation pane, choose Workloads > Jobs. Click Create Job.
  3. Configure the basic job information listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - - - - -
    Table 1 Basic job information

    Parameter

    -

    Description

    -

    * Job Name

    -

    Name of a new job. The name must be unique.

    -

    Enter 4 to 63 characters starting with a lowercase letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster

    -

    Cluster to which a new job belongs.

    -

    * Namespace

    -

    Namespace to which the new job belongs. By default, this parameter is set to default.

    -

    *Instances

    -

    Number of pods in this job. A job can have one or more pods. You can specify the number of pods. The default value is 1.

    -

    Each job pod consists of the same containers. Configuring multiple job pods can ensure high availability. The job can continue to run even if one of the pods is faulty.

    -

    Description

    -

    Description of a job.

    -
    -
    -

  4. Click Next: Add Container to add a container and an image.

    1. Click Select Container Image to select the image to be deployed.
      • My Images: displays all image repositories you created.
      • Third-Party Images: Create a job using an image from any third-party image repository. When you create a job using a third-party image, ensure that the node where the job is running can access public networks. For details about how to use a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image address in Image Address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: The images shared by other tenants using the SWR service are displayed here. You can create workloads based on the shared images.
      -
    2. Set image parameters. -
      - - - - - - - - - - - - - - - - -
      Table 2 Image parameters

      Parameter

      -

      Description

      -

      Image

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 0.5 GiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses that GPU accordingly.

      -
      -
      -
    3. (Optional) Configure advanced settings. -
      - - - - - - - - - - - - - - - - -
      Table 3 Advanced settings

      Parameter

      -

      Description

      -

      Lifecycle

      -
      Lifecycle scripts define the actions taken for container-related jobs when a lifecycle event occurs. -
      -

      Environment Variables

      -
      Environment variables can be added to a container. In general, environment variables are used to set parameters. On the Environment Variables tab page, click Add Environment Variable. Currently, environment variables can be added using any of the following methods:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.
      -
      -

      Data Storage

      -

      The local disk or cloud storage can be mounted to a container to implement persistent data file storage.

      -

      For details, see Storage (CSI).

      -

      Log Policies

      -

      Set a log policy and log path for collecting workload logs and preventing logs from being over-sized. For details, see Container Logs.

      -
      -
      -
    4. (Optional) One job pod contains one or more related containers. If your job contains multiple containers, click Add Container to add containers.
    -

  5. Click Create.

    If the status of the job is Executing, the job has been created successfully.

    -

-
-

Using kubectl

A job has the following configuration parameters:

-
  • spec.template: has the same schema as a pod.
  • RestartPolicy: can only be set to Never or OnFailure.
  • For a single-pod job, the job ends after the pod runs successfully by default.
  • .spec.completions: indicates the number of pods that need to run successfully to end a job. The default value is 1.
  • .spec.parallelism: indicates the number of pods that run concurrently. The default value is 1.
  • spec.backoffLimit: indicates the maximum number of retries performed if a pod fails. When the limit is reached, the pod will not try again.
  • .spec.activeDeadlineSeconds: indicates the running time of pods. Once the time is reached, all pods of the job are terminated. The priority of .spec.activeDeadlineSeconds is higher than that of .spec.backoffLimit. That is, if a job reaches the .spec.activeDeadlineSeconds, the spec.backoffLimit is ignored.
-

Based on the .spec.completions and .spec.parallelism settings, jobs are classified into the following types.

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 4 Job types

Job Type

-

Description

-

Example

-

One-off jobs

-

A single pod runs once until successful termination.

-

Database migration

-

Jobs with a fixed completion count

-

One pod runs until reaching the specified completions count.

-

Work queue processing pod

-

Parallel jobs with a fixed completion count

-

Multiple pods run until reaching the specified completions count.

-

Multiple pods for processing work queues concurrently

-

Parallel jobs

-

One or more pods run until successful termination.

-

Multiple pods for processing work queues concurrently

-
-
-

The following is an example job, which calculates π to the 2,000th digit and prints the output.

-
apiVersion: batch/v1
-kind: Job
-metadata:
-  name: myjob
-spec:
-  completions: 50        # 50 pods need to run successfully to finish the job. In this example, π is computed 50 times.
-  parallelism: 5        # 5 pods are run in parallel.
-  backoffLimit: 5        # The maximum number of retries is 5.
-  template:
-    spec:
-      containers:
-      - name: pi
-        image: perl
-        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
-      restartPolicy: Never
-

Description

-
  • apiVersion: batch/v1 indicates the version of the current job.
  • kind: Job indicates that the current resource is a job.
  • restartPolicy: Never indicates the current restart policy. For jobs, this parameter can only be set to Never or OnFailure. For other controllers (for example, Deployments), you can set this parameter to Always.
-

Run the job.

-
  1. Start the job.

    [root@k8s-master k8s]# kubectl apply -f myjob.yaml
    -job.batch/myjob created
    -

  2. View the job details.

    kubectl get job

    -
    [root@k8s-master k8s]# kubectl get job
    -NAME    COMPLETIONS   DURATION   AGE
    -myjob   50/50         23s        3m45s
    -

    If the value of COMPLETIONS is 50/50, the job is successfully executed.

    -

  3. Query the pod status.

    kubectl get pod

    -
    [root@k8s-master k8s]# kubectl get pod
    -NAME          READY   STATUS      RESTARTS   AGE
    -myjob-29qlw   0/1     Completed   0          4m5s
    -...
    -

    If the status is Completed, the job is complete.

    -

  4. View the pod logs.

    kubectl logs

    -
    # kubectl logs myjob-29qlw
    -3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275901
    -

-
-

Related Operations

After a one-off job is created, you can perform operations listed in Table 5.

- -
- - - - - - - - - - -
Table 5 Other operations

Operation

-

Description

-

Viewing a YAML

-

Click View YAML next to the job name to view the YAML file corresponding to the current job.

-

Deleting a one-off job

-
  1. Select the job to be deleted and click Delete in the Operation column.
  2. Click OK.

    Deleted jobs cannot be restored. Exercise caution when deleting a job.

    -
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0151.html b/docs/cce/umn/cce_01_0151.html deleted file mode 100644 index 27a7ac29..00000000 --- a/docs/cce/umn/cce_01_0151.html +++ /dev/null @@ -1,215 +0,0 @@ - - -

Creating a Cron Job

-

Scenario

A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.

-
A cron job runs periodically at the specified time. It is similar to Linux crontab. A cron job has the following characteristics:
  • Runs only once at the specified time.
  • Runs periodically at the specified time.
-
-

The typical usage of a cron job is as follows:

-
  • Schedules jobs at the specified time.
  • Creates jobs to run periodically, for example, database backup and email sending.
-
-

Prerequisites

Resources have been created. For details, see Creating a Node. If clusters and nodes are available, you need not create them again.

-
-

Procedure

  1. (Optional) If you use a private container image to create your cron job, upload the container image to the image repository.

    -

  2. Log in to the CCE console. In the navigation pane, choose Workloads > Cron Jobs. Then, click Create Cron Job.
  3. Configure the basic cron job information listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - -
    Table 1 Basic cron job information

    Parameter

    -

    Description

    -

    * Job Name

    -

    Name of a new cron job. The name must be unique.

    -

    Enter 4 to 52 characters starting with a lowercase letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    * Cluster

    -

    Cluster to which a new cron job belongs.

    -

    * Namespace

    -

    Namespace to which a cron job belongs. If you do not specify this parameter, the namespace default is used.

    -

    Description

    -

    Description of a cron job.

    -
    -
    -

  4. Click Next: Configure Timing Rule.
  5. Set the timing rule.

    -

    - - - - - - - - - - - - - -
    Table 2 Timing rule parameters

    Parameter

    -

    Description

    -

    * Concurrency Policy

    -

    The following policies are supported:

    -
    • Forbid: A new job cannot be created before the previous job is complete.
    • Allow: The cron job allows concurrently running jobs, which preempt cluster resources.
    • Replace: A new job replaces the previous job when it is time to create the job but the previous job is not complete.
    -

    * Schedule

    -

    Time when a new cron job is executed.

    -

    Job Records

    -

    You can set the number of successfully executed and failed jobs to be retained. Setting a limit to 0 means that none of the jobs are kept after they finish.

    -
    -
    -

  6. Click Next: Add Container to add a container.

    1. Click Select Container Image to select the image to be deployed.
      • My Images: displays all image repositories you created.
      • Third-Party Images: Create a job using an image from any third-party image repository. When you create a job using a third-party image, ensure that the node where the job is running can access public networks. For details about how to use a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image address in Image Address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: The images shared by other tenants using the SWR service are displayed here. You can create workloads based on the shared images.
      -
    2. Set image parameters. -
      - - - - - - - - - - - - - - - - -
      Table 3 Image parameters

      Parameter

      -

      Description

      -

      Image

      -

      Name of the image. You can click Change Image to update it.

      -

      *Image Version

      -

      Select the image tag to be deployed.

      -

      *Container Name

      -

      Name of the container. You can modify it.

      -

      Container Resources

      -

      CPU

      -
      • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
      • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
      -

      Memory

      -
      • Request: minimum amount of memory required by a container. The default value is 0.5 GiB.
      • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
      -

      For more information about Request and Limit, see Setting Container Specifications.

      -

      GPU: configurable only when the cluster contains GPU nodes.

      -

      It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.

      -

      GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU.

      -

      If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses that GPU accordingly.

      -
      -
      -
    3. (Optional) Configure advanced settings. -
      - - - - - - - - - - -
      Table 4 Advanced settings

      Parameter

      -

      Description

      -

      Lifecycle

      -
      Actions defined in the lifecycle scripts are executed when the corresponding container lifecycle events occur. -
      -

      Environment Variables

      -
      Environment variables can be added to a container. In general, environment variables are used to set parameters. On the Environment Variables tab page, click Add Environment Variable. Currently, environment variables can be added using any of the following methods:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.
      -
      -
      -
      -
    4. (Optional) One job pod contains one or more related containers. If your cron job contains multiple containers, click Add Container to add containers.
    -

  7. Click Create.

    If the status is Started, the cron job has been created successfully.

    -

-
-

Using kubectl

A cron job has the following configuration parameters:

-
  • .spec.schedule: takes a Cron format string, for example, 0 * * * * or @hourly, as schedule time of jobs to be created and executed.
  • .spec.jobTemplate: specifies jobs to be run, and has the same schema as when you are Creating a Job Using kubectl.
  • .spec.startingDeadlineSeconds: specifies the deadline for starting a job.
  • .spec.concurrencyPolicy: specifies how to treat concurrent executions of a job created by the Cron job. The following options are supported:
    • Allow (default value): allows concurrently running jobs.
    • Forbid: forbids concurrent runs, skipping the next run if the previous one has not finished yet.
    • Replace: cancels the currently running job and replaces it with a new one.
    -
-

The following is an example cron job, which is saved in the cronjob.yaml file.

-
apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
-  name: hello
-spec:
-  schedule: "*/1 * * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: hello
-            image: busybox
-            args:
-            - /bin/sh
-            - -c
-            - date; echo Hello from the Kubernetes cluster
-          restartPolicy: OnFailure
-
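
The concurrencyPolicy, startingDeadlineSeconds, and job history fields described above are optional and can be added to the same spec. The following is a minimal sketch; the values shown are illustrative assumptions, not recommendations.

apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  concurrencyPolicy: Forbid          # Allow, Forbid, or Replace
  startingDeadlineSeconds: 120       # deadline (in seconds) for starting a job that missed its scheduled time
  successfulJobsHistoryLimit: 3      # number of finished jobs to keep (corresponds to Job Records)
  failedJobsHistoryLimit: 1          # number of failed jobs to keep (corresponds to Job Records)
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date
          restartPolicy: OnFailure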

Run the job.

-
  1. Create a cron job.

    kubectl create -f cronjob.yaml

    -

    Information similar to the following is displayed:

    -
    cronjob.batch/hello created
    -

  2. Query the running status of the cron job:

    kubectl get cronjob

    -
    NAME      SCHEDULE      SUSPEND   ACTIVE    LAST SCHEDULE   AGE
    -hello     */1 * * * *   False     0         <none>          9s
    -

    kubectl get jobs

    -
    NAME               COMPLETIONS   DURATION   AGE
    -hello-1597387980   1/1           27s        45s
    -

    kubectl get pod

    -
    NAME                           READY     STATUS      RESTARTS   AGE
    -hello-1597387980-tjv8f         0/1       Completed   0          114s
    -hello-1597388040-lckg9         0/1       Completed   0          39s
    -

    kubectl logs hello-1597387980-tjv8f

    -
    Fri Aug 14 06:56:31 UTC 2020
    -Hello from the Kubernetes cluster
    -

    kubectl delete cronjob hello

    -
    cronjob.batch "hello" deleted
    -

    When a cron job is deleted, the related jobs and pods are deleted too.

    -
    -

-
-

Related Operations

After a cron job is created, you can perform operations listed in Table 5.

- -
- - - - - - - - - - - - - -
Table 5 Other operations

Operation

-

Description

-

Editing a YAML file

-

Click More > View YAML next to the cron job name to view the YAML file of the current job.

-

Stopping a cron job

-
  1. Select the job to be stopped and click Stop in the Operation column.
  2. Click OK.
-

Deleting a cron job

-
  1. Select the cron job to be deleted and click More > Delete in the Operation column.
  2. Click OK.

    Deleted jobs cannot be restored. Therefore, exercise caution when deleting a job.

    -
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0152.html b/docs/cce/umn/cce_01_0152.html deleted file mode 100644 index 16644290..00000000 --- a/docs/cce/umn/cce_01_0152.html +++ /dev/null @@ -1,123 +0,0 @@ - - -

Creating a ConfigMap

-

Scenario

A ConfigMap is a type of resource that stores configuration information required by a workload. Its content is user-defined. After creating ConfigMaps, you can use them as files or environment variables in a containerized workload.

-

ConfigMaps allow you to decouple configuration files from container images to enhance the portability of containerized workloads.

-

Benefits of ConfigMaps:

-
  • Manage configurations of different environments and services.
  • Deploy workloads in different environments. Multiple versions are supported for configuration files so that you can update and roll back workloads easily.
  • Quickly import configurations in the form of files to containers.
-
-

Prerequisites

Cluster and node resources have been created. For more information, see Creating a CCE Cluster. If you have available clusters and node resources, skip this operation.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Configuration Center > ConfigMaps. Click Create ConfigMap.
  2. You can create a ConfigMap directly or based on YAML. If you create a ConfigMap based on YAML, go to 4.
  3. Method 1: Create a ConfigMap directly.

    Set the parameters by referring to Table 1. -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for creating a ConfigMap

    Parameter

    -

    Description

    -

    Name

    -

    Name of a ConfigMap, which must be unique in a namespace.

    -

    Cluster

    -

    Cluster that will use the ConfigMap you create.

    -

    Namespace

    -

    Namespace to which the ConfigMap belongs. If you do not specify this parameter, the namespace default is used.

    -

    Description

    -

    Description of the ConfigMap.

    -

    Data

    -

    Configuration data to be used by the workload. When the ConfigMap is mounted into a container, Key indicates a file name and Value indicates the content of the file.

    -
    1. Click Add Data.
    2. Set Key and Value.
    -

    Labels

    -

    Labels are attached to objects such as workloads, nodes, and Services in key-value pairs.

    -

    Labels define the identifiable attributes of these objects and are used to manage and select the objects.

    -
    1. Click Add Label.
    2. Set Key and Value.
    -
    -
    -
    -

  4. Method 2: Create a ConfigMap based on YAML.

    To create ConfigMaps by uploading a file, ensure that the resource description file has been created. CCE supports files in YAML format. For more information, see ConfigMap Requirements.

    -
    -
    Click Create YAML on the right of the page.
    • Method 1: Import the orchestration file.

      Click Add File to import the file in YAML format. The orchestration content can be directly displayed.

      -
    • Method 2: Directly orchestrate the content.

      In the orchestration content area, enter the content of the YAML file.

      -
    -
    -

  5. After the configuration is complete, click Create.

    The new ConfigMap is displayed in the ConfigMap list.

    -

-
-

ConfigMap Requirements

A ConfigMap resource file must be in YAML format, and the file size cannot exceed 2 MB.

-
The file name is configmap.yaml and the following shows a configuration example.
apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: test-configmap
-data:
-  data-1: value-1
-  data-2: value-2
-
-
-

Creating a ConfigMap Using kubectl

  1. Configure the kubectl command to connect an ECS to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create and edit the cce-configmap.yaml file.

    vi cce-configmap.yaml

    -
    apiVersion: v1
    -kind: ConfigMap
    -metadata:
    -  name: cce-configmap
    -data:
    -  SPECIAL_LEVEL: Hello
    -  SPECIAL_TYPE: CCE
    -

  3. Run the following commands to create a ConfigMap.

    kubectl create -f cce-configmap.yaml

    -

    kubectl get cm

    -
    NAME               DATA            AGE
    -cce-configmap      3               3h
    -cce-configmap1     3               7m
    -
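
    As mentioned in the scenario, a ConfigMap can be consumed by a workload as environment variables or as files. The following is a minimal sketch for the cce-configmap created above; the pod name and image are placeholders.

    apiVersion: v1
    kind: Pod
    metadata:
      name: configmap-demo
    spec:
      containers:
      - name: demo
        image: busybox
        command: ["/bin/sh", "-c", "env; ls /etc/config; sleep 3600"]
        envFrom:
        - configMapRef:
            name: cce-configmap       # injects SPECIAL_LEVEL and SPECIAL_TYPE as environment variables
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config      # each ConfigMap key appears as a file under this path
      volumes:
      - name: config-volume
        configMap:
          name: cce-configmap
      restartPolicy: Never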

-
-

Related Operations

After creating a configuration item, you can update or delete it as described in Table 2. -
- - - - - - - - - - - - - -
Table 2 Related operations

Operation

-

Description

-

Viewing a YAML file

-

Click View YAML next to the ConfigMap name to view the YAML file corresponding to the current ConfigMap.

-

Updating a ConfigMap

-
  1. Select the name of the ConfigMap to be updated and click Update.
  2. Modify the ConfigMap data. For more information, see Table 1.
  3. Click Update.
-

Deleting a ConfigMap

-

Select the ConfigMap you want to delete and click Delete.

-

Follow the prompts to delete the ConfigMap.

-
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0153.html b/docs/cce/umn/cce_01_0153.html deleted file mode 100644 index c9e1b4e0..00000000 --- a/docs/cce/umn/cce_01_0153.html +++ /dev/null @@ -1,145 +0,0 @@ - - -

Creating a Secret

-

Scenario

A secret is a type of resource that holds sensitive data, such as authentication and key information. Its content is user-defined. After creating secrets, you can use them as files or environment variables in a containerized workload.

-
-

Prerequisites

Cluster and node resources have been created. For more information, see Creating a CCE Cluster. If you have available clusters and node resources, skip this operation.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Configuration Center > Secrets. Click Create Secret.
  2. You can create a secret directly or based on YAML. If you want to create a secret based on YAML, go to 4.
  3. Method 1: Create a secret directly.

    Set the basic information by referring to Table 1. -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for creating a secret

    Parameter

    -

    Description

    -

    Name

    -

    Name of the secret you create, which must be unique.

    -

    Cluster

    -

    Cluster that will use the secret you create.

    -

    Namespace

    -

    Namespace to which the secret belongs. If you do not specify this parameter, the namespace default is used.

    -

    Description

    -

    Description of a secret.

    -

    Type

    -

    Type of the secret you create.

    -
    • Opaque: common secret.
    • kubernetes.io/dockerconfigjson: a secret that stores the authentication information required for pulling images from a private repository.
    • IngressTLS: a secret that stores the certificate required by ingresses (layer-7 load balancing Services).
    • Other: another type of secret, which is specified manually.
    -

    Secret Data

    -

    Workload secret data can be used in containers.

    -
    • If the secret is of the Opaque type:
      1. Click Add Data.
      2. Enter the key and value. The value must be Base64-encoded. For details, see Base64 Encoding.
      -
    • If the secret type is kubernetes.io/dockerconfigjson, enter the account and password of the private image repository.
    • If the secret type is IngressTLS, upload the certificate file and private key file.
      NOTE:
      • A certificate is a self-signed or CA-signed credential used for identity authentication.
      • A certificate request is a request for a signature with a private key.
      -
      -
    -

    Secret Label

    -

    Labels are attached to objects such as workloads, nodes, and Services in key-value pairs.

    -

    Labels define the identifiable attributes of these objects and are used to manage and select the objects.

    -
    1. Click Add Label.
    2. Enter the key and value.
    -
    -
    -
    -

  4. Method 2: Create a secret based on the YAML file.

    To create a resource by uploading a file, ensure that the resource description file has been created. CCE supports files in JSON or YAML format. For more information, see Secret Resource File Configuration.

    -
    -
    You can import or directly write the file content in YAML or JSON format.
    • Method 1: Import the orchestration file.

      Click Add File to import the file in YAML or JSON format. The orchestration content can be directly displayed.

      -
    • Method 2: Directly orchestrate the content.

      In the orchestration content area, enter the content of the YAML or JSON file.

      -
    -
    -

  5. After the configuration is complete, click Create.

    The new secret is displayed in the secret list.

    -

-
-

Secret Resource File Configuration

This section describes configuration examples of secret resource description files.

-

For example, you can retrieve the username and password for a workload through a secret.

-
  • YAML format

    The secret.yaml file is defined as shown below. The values must be Base64-encoded. For details, see Base64 Encoding.

    -
    apiVersion: v1
    -kind: Secret
    -metadata:
    -  name: mysecret           #Secret name
    -  namespace: default       #Namespace. The default value is default.
    -data:
    -  username: ******  #The value must be Base64-encoded.
    -  password: ******  #The value must be encoded using Base64.
    -type: Opaque     #You are advised not to change this parameter value.
    -
-
-

Creating a Secret Using kubectl

  1. According to Connecting to a Cluster Using kubectl, configure the kubectl command to connect an ECS to the cluster.
  2. Create and edit the Base64-encoded cce-secret.yaml file.

    # echo -n "content to be encoded" | base64
    -******
    -

    vi cce-secret.yaml

    -
    apiVersion: v1
    -kind: Secret
    -metadata:
    -  name: mysecret
    -type: Opaque
    -data:
    -  username: ******
    -  password: ******
    -

  3. Create a secret.

    kubectl create -f cce-secret.yaml

    -

    You can query the secret after creation.

    -

    kubectl get secret

    -
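
    As described in the scenario, a secret can be referenced by a workload as environment variables or mounted as files. The following is a minimal sketch for the mysecret created above; the pod name and image are placeholders.

    apiVersion: v1
    kind: Pod
    metadata:
      name: secret-demo
    spec:
      containers:
      - name: demo
        image: busybox
        command: ["/bin/sh", "-c", "echo $USERNAME; ls /etc/secret; sleep 3600"]
        env:
        - name: USERNAME
          valueFrom:
            secretKeyRef:
              name: mysecret
              key: username           # the Base64-encoded value is decoded when injected
        volumeMounts:
        - name: secret-volume
          mountPath: /etc/secret      # each secret key appears as a file under this path
      volumes:
      - name: secret-volume
        secret:
          secretName: mysecret
      restartPolicy: Never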

-
-

Related Operations

After creating a secret, you can update or delete it as described in Table 2.

The secret list contains system secret resources that can be queried only. The system secret resources cannot be updated or deleted.

-
- -
- - - - - - - - - - - - - - - - -
Table 2 Related Operations

Operation

-

Description

-

Viewing a YAML file

-

Click View YAML next to the secret name to view the YAML file corresponding to the current secret.

-

Updating a secret

-
  1. Select the name of the secret to be updated and click Update.
  2. Modify the secret data. For more information, see Table 1.
  3. Click Update.
-

Deleting a secret

-

Select the secret you want to delete and click Delete.

-

Follow the prompts to delete the secret.

-

Deleting secrets in batches

-
  1. Select the secrets to be deleted.
  2. Click Delete above the secret list.
  3. Follow the prompts to delete the secrets.
-
-
-
-
-

Base64 Encoding

To encode a character string using Base64, run the echo -n <content to be encoded> | base64 command. The following is an example.

-
root@ubuntu:~# echo -n "content to be encoded" | base64
-******
-
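
To check an encoded value, you can decode it with the -d option of the same tool, for example:

echo "<Base64-encoded string>" | base64 -d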
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0154.html b/docs/cce/umn/cce_01_0154.html deleted file mode 100644 index 26b3e3f6..00000000 --- a/docs/cce/umn/cce_01_0154.html +++ /dev/null @@ -1,146 +0,0 @@ - - -

autoscaler

-

Introduction

Autoscaler is an important Kubernetes controller. It supports microservice scaling and is key to serverless design.

-

When the CPU or memory usage of a microservice is too high, horizontal pod autoscaling is triggered to add pods to reduce the load. These pods can be automatically reduced when the load is low, allowing the microservice to run as efficiently as possible.

-

CCE simplifies the creation, upgrade, and manual scaling of Kubernetes clusters, in which traffic loads change over time. To balance resource usage and workload performance of nodes, Kubernetes introduces the autoscaler add-on to automatically resize a cluster based on the resource usage required for workloads deployed in the cluster. For details, see Creating a Node Scaling Policy.

-

Open source community: https://github.com/kubernetes/autoscaler

-
-

How the Add-on Works

autoscaler controls auto scale-out and scale-in.

-
  • Auto scale-out

    If pods in a cluster cannot be scheduled due to insufficient worker nodes, cluster scaling is triggered to add nodes. The nodes to be added have the same specification as configured for the node pool to which the nodes belong. For details, see Creating a Node Scaling Policy.

    -
    The add-on follows the "No Less, No More" policy. For example, if three cores are required for creating a pod and the system supports four-core and eight-core nodes, autoscaler will preferentially create a four-core node.

    Auto scale-out will be performed when:

    -
    • Node resources are insufficient.
    • No node affinity policy is set in the pod scheduling configuration. That is, if a node has been configured as an affinity node for pods, no node will be automatically added when the pods cannot be scheduled. For details about how to configure the node affinity policy, see Node Affinity.
    -
    -
    -
  • Auto scale-in

    When a cluster node is idle for a period of time (10 minutes by default), cluster scale-in is triggered, and the node is automatically deleted. However, a node cannot be deleted from a cluster if it runs any of the following pods:

    -
    • Pods that do not meet specific requirements set in PodDisruptionBudget
    • Pods that cannot be scheduled to other nodes due to constraints such as affinity and anti-affinity policies
    • Pods that have the cluster-autoscaler.kubernetes.io/safe-to-evict: 'false' annotation (see the sketch after this list)
    • Pods (except those created by kube-system DaemonSet) that exist in the kube-system namespace on the node
    • Pods that are not created by the controller (Deployment/ReplicaSet/job/StatefulSet)
    -
-
-
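
For reference, the following is a minimal sketch of the safe-to-evict annotation mentioned above, which prevents the node running the pod from being scaled in; the Deployment name and image are placeholders.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: critical-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: critical-app
  template:
    metadata:
      labels:
        app: critical-app
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"   # autoscaler will not delete the node running this pod
    spec:
      containers:
      - name: app
        image: nginx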

Notes and Constraints

  • Only clusters of v1.9.10-r2 and later support autoscaler.
  • Ensure that there are sufficient resources for installing the add-on.
-
-

Installing the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under autoscaler.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Configure add-on installation parameters listed in Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Basic settings

    Parameter

    -

    Add-on Version

    -

    Description

    -

    Add-on Specifications

    -

    Available in all versions

    -

    The add-on can be deployed in the following specifications:

    -
    • Single: The add-on is deployed with only one pod.
    • HA50: The add-on is deployed with two pods, serving a cluster with 50 nodes and ensuring high availability.
    • HA200: The add-on is deployed with two pods, serving a cluster with 200 nodes and ensuring high availability. Each pod uses more resources than those of the HA50 specification.
    • Custom: You can customize the number of pods and specifications as required.
    -

    Instances

    -

    Available in all versions

    -

    Number of pods that will be created to match the selected add-on specifications. The number cannot be modified.

    -

    Container

    -

    Available in all versions

    -

    CPU and memory quotas of the container allowed for the selected add-on specifications. The quotas cannot be modified.

    -

    Login Mode

    -

    Available only in certain versions

    -

    Select a login mode for the worker nodes to be added during auto scale-up.

    -

    If you select Key pair:

    -

    Key pair: Select an existing key pair or create a new one for identity authentication during remote login to the added nodes.

    -

    Auto Scale-In

    -

    Available in all versions

    -

    Off: Auto scale-down is not allowed. Only auto scale-up is allowed.

    -

    On: Enable auto scale-in. The scale-in policy is valid for node pools in the cluster with auto scaling enabled.

    -
    • Idle Time (min): Time for which a node should be unneeded before it is eligible for scale-down. Default value: 10 minutes.
    • Resource Usage: If the percentage of both CPU and memory usage on a node is below this threshold, auto scale-down will be triggered to delete the node from the cluster. The default value is 0.5, which means 50%.
    • Scale-in Cooldown After Scale-out: The time after scale-up that the scale-down evaluation will resume. Default value: 10 minutes.
      NOTE:

      If both auto scale-out and scale-in exist in a cluster, you are advised to set Scale-in Cooldown After Scale-out to 0 minutes. This can prevent the node scale-in from being blocked due to continuous scale-out of some node pools or retries upon a scale-out failure, resulting in unexpected waste of node resources.

      -
      -
    • Scale-in Cooldown After Node Deletion: The time after node deletion that the scale-down evaluation will resume. Default value: 10 minutes.
    • Scale-in Cooldown After Failure: The time after a scale-down failure that the scale-down evaluation will resume. Default value: 3 minutes. For details about the impact and relationship between the scale-in cooling intervals configured in the node pool and autoscaler, see Scale-in Cooling Interval.
    • Max empty bulk delete: The maximum number of empty nodes that can be deleted at the same time. Default value: 10.
    • Node Recheck Timeout: The timeout before autoscaler checks again the node that could not be previously removed. Default value: 5 minutes.
    -

    Node Pool Configuration

    -

    Available only in certain versions

    -

    Configuration of the default node pool. A node pool is a group of compute nodes with the same node type (VM or BMS), specifications, and labels. When a cluster needs to be scaled up, autoscaler will automatically add nodes from node pools to the cluster. If no custom node pool is available, autoscaler will use the default node pool.

    -

    Click Add Node Pool Configuration and set the following parameters:

    -
    • AZ: A physical region where resources use independent power supplies and networks. AZs are physically isolated but interconnected through the internal network.
    • OS: OS of the nodes to be created.
    • Taints: No taints are added by default.
      Taints allow nodes to repel a set of pods. You can add a maximum of 10 taints for each node pool. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      -
      NOTICE:
      • If taints are used, you must configure tolerations in the YAML files of pods (see the toleration sketch after this table). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
      • Taints cannot be modified after configuration. Incorrect taints may cause a scale-up failure or prevent pods from being scheduled onto the added nodes.
      -
      -
      -
    • Resource Tags: Resource tags can be added to classify resources.
      NOTE:

      You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

      -
      -
    • Specifications: CPU and memory of the added nodes.
    -
    -
    -
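
    The following is a minimal sketch of a pod toleration matching a node pool taint; the key, value, and effect are assumptions and must match the taint actually configured for the node pool.

    apiVersion: v1
    kind: Pod
    metadata:
      name: toleration-demo
    spec:
      containers:
      - name: demo
        image: nginx
      tolerations:
      - key: "dedicated"            # must match the taint key
        operator: "Equal"
        value: "autoscaling"        # must match the taint value
        effect: "NoSchedule"        # NoSchedule, PreferNoSchedule, or NoExecute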

    To configure more add-on parameters, click Advanced Settings at the bottom of this page.

    - -
    - - - - - - - - - - - - - - - - - - - - - -
    Table 2 Advanced settings

    Parameter

    -

    Add-on Version

    -

    Description

    -

    Total Nodes

    -

    Available in all versions

    -

    Maximum number of nodes that can be managed by the cluster, within which cluster scale-out is performed.

    -

    Total Cores

    -

    Available in all versions

    -

    Maximum sum of CPU cores of all nodes in a cluster, within which cluster scale-out is performed.

    -

    Total Memory (GB)

    -

    Available in all versions

    -

    Maximum sum of memory of all nodes in a cluster, within which cluster scale-out is performed.

    -

    Auto Scale-Out

    -

    Available only in certain versions

    -

    Triggered when there are pods unscheduled: Selected by default.

    -
    -
    -

  4. When the configuration is complete, click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under autoscaler.

    • If the Upgrade button is unavailable, the current add-on is already up-to-date and no upgrade is required.
    • If the Upgrade button is available, click Upgrade to upgrade the add-on.
    • During the upgrade, the autoscaler add-on of the original version on cluster nodes will be discarded, and the add-on of the target version will be installed.
    -
    -

  2. In the dialog box displayed, set parameters and upgrade the add-on. For details about the parameters, see the parameter description in Installing the Add-on.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, select the target cluster and click Uninstall under autoscaler.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0157.html b/docs/cce/umn/cce_01_0157.html deleted file mode 100644 index d246e191..00000000 --- a/docs/cce/umn/cce_01_0157.html +++ /dev/null @@ -1,135 +0,0 @@ - - -

Setting Cluster Auto Scaling

-

Scenario

The Cluster Auto Scaling feature allows CCE to automatically scale out a cluster (adding worker nodes to a cluster) according to custom policies when workloads cannot be scheduled into the cluster due to insufficient cluster resources.

-
-

Notes and Constraints

  • Currently, master nodes cannot be automatically added to or removed from clusters.
  • If both auto scale-in and auto scale-out are required, use the autoscaler add-on. For details, see autoscaler.
  • Clusters of v1.17 do not support auto scaling using AOM. You can use node pools for auto scaling. For details, see Node Pool Overview.
-
-

Automatic Cluster Scale-out

  1. Log in to the CCE console. Choose Resource Management > Clusters in the navigation pane. In the card view of the cluster to be scaled, choose More > Auto Scaling.
  2. Click the Scale-out Settings tab and then Edit. Set the maximum number of nodes, minimum number of nodes, cooldown period, and node configuration.

    -

    - - - - - - - - - - - - - -
    Table 1 Scale-out settings

    Parameter

    -

    Description

    -

    Cooldown Period

    -

    Interval between consecutive scale-out operations, in seconds. The cooldown period ensures that a scale-out operation is initiated only when the previous scaling operation has finished and the system is running stably.

    -

    The value ranges from 60 to 3600, in seconds. The default value is 900. If the cooldown period is less than 900 seconds (15 minutes), auto scaling may not work well because creating a node can take 2 to 10 minutes.

    -

    Maximum Nodes

    -

    Maximum number of nodes to which the cluster can scale out.

    -

    1 ≤ Maximum Nodes < cluster node quota

    -
    NOTE:

    The cluster node quota depends on the cluster size (maximum number of nodes that can be managed by a cluster) and the node quota of the account. The cluster node quota used here is the smaller of the two.

    -
    -

    Node Configuration

    -

    If a scale-out is triggered by the scale-out policy, the system creates a node based on this configuration.

    -
    1. Click Set and set the node parameters. For details about how to set the node parameters, see Creating a Node.
    2. After the parameters are configured, click Submit.
    -
    -
    -

  3. After confirming the scale-out configuration and node parameters, click OK.
  4. Set the scale-out policy for the cluster. Click the Scale-out Policies tab and click Add Policy.

    • Policy Name: Enter a policy name, for example, policy01.
    • Policy Type: Currently, the following types of auto scale-out policies are supported:
      • Metric-based policy: Scale-out is performed based on the CPU or memory settings. -
        - - - - - - - - - - - - - - - - - - - -
        Table 2 Parameters for adding a metric-based policy

        Parameter

        -

        Description

        -

        *Metric

        -

        Select Allocated CPU or Allocated Memory.

        -

        *Trigger Condition

        -

        Set a condition for triggering a scale-out policy, that is, when the average CPU or memory allocation value is greater than or less than a specified percentage.

        -

        *Monitoring Window

        -

        Size of the data aggregation window. Select a value from the drop-down list.

        -

        If you select 15min, the selected metric is measured every 15 minutes.

        -

        *Threshold Crossings

        -

        Number of consecutive times that the threshold is reached within the monitoring window. The calculation cycle is fixed at one minute. If you set this parameter to 3, the configured action will be triggered when the metrics meet the specified threshold for three consecutive times.

        -

        *Action

        -

        Action executed after a policy is triggered.

        -
        -
        -
      • Scheduled policy: Scale-out is performed at a specified time. -
        - - - - - - - - - - - - - -
        Table 3 Parameters for adding a scheduled policy

        Parameter

        -

        Description

        -

        *Policy Type

        -

        Set this parameter to Scheduled policy.

        -

        *Trigger Time

        -

        Time at which the policy is triggered.

        -

        *Action

        -

        Action executed after a policy is triggered.

        -
        -
        -
      • Periodic policy: Scale-out can be performed by day, week, or month. -
        - - - - - - - - - - - - - -
        Table 4 Parameters for adding a periodic policy

        Parameter

        -

        Description

        -

        *Policy Type

        -

        Set the parameter to Periodic policy.

        -

        *Time Range

        -

        Specify the time for triggering the policy.

        -

        *Action

        -

        Action executed after a policy is triggered.

        -
        -
        -
      -
    -

  5. Click OK.

    After the auto scale-out is completed, choose Resource Management > Nodes in the navigation pane. On the node list, you can view the worker nodes added during cluster auto scaling.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0160.html b/docs/cce/umn/cce_01_0160.html deleted file mode 100644 index 9909a5f2..00000000 --- a/docs/cce/umn/cce_01_0160.html +++ /dev/null @@ -1,23 +0,0 @@ - - - -

OBS Volumes

- -

-
- - - diff --git a/docs/cce/umn/cce_01_0163.html b/docs/cce/umn/cce_01_0163.html deleted file mode 100644 index 3d7aa871..00000000 --- a/docs/cce/umn/cce_01_0163.html +++ /dev/null @@ -1,74 +0,0 @@ - - -

Setting Container Specifications

-

Scenario

CCE allows you to set resource limits for added containers during workload creation. You can request and limit the CPU and memory quotas used by each pod in the workload.

-
-

Meanings

For CPU and Memory, the meanings of Request and Limit are as follows:
  • If Request is selected, the system schedules the pod to the node that meets the requirements for workload deployment based on the request value.
  • If Request is deselected, the system schedules the pod to a random node for workload deployment.
  • If Limit is selected, the system limits the resources used by the workload based on the preset value.
  • If Limit is deselected, the system does not limit the resources used by the pod. If the memory resources used by the pod exceed the memory allocated to the node, the workload or node may be unavailable.
-
-

When creating a workload, you are advised to set the upper and lower limits of CPU and memory resources. If the upper and lower resource limits are not set for a workload, a resource leak of this workload will make resources unavailable for other workloads deployed on the same node. In addition, workloads that do not have upper and lower resource limits cannot be accurately monitored.

-
-
-

For GPU quotas, the meanings of Use and Any GPU type are as follows:

-
  • If Use is selected, the system schedules the pod to a node that meets the requirements for workload deployment based on the configured value.
  • Any GPU type is selected by default and cannot be deselected. This option indicates that the resources used by pods are not limited.
-

Configuration Description

  • CPU quotas: -
    - - - - - - - - - - -
    Table 1 Description of CPU quotas

    Parameter

    -

    Description

    -

    CPU request

    -

    Minimum number of CPU cores required by a container. Resources are scheduled for the container based on this value. A container can be scheduled to a node only when the node's remaining allocatable CPU is greater than or equal to the container's CPU request.

    -

    CPU limit

    -

    Maximum number of CPU cores available for a container.

    -
    -
    -

    Recommended configuration

    -

    Actual available CPU of a node ≥ Sum of CPU limits of all containers on the current node ≥ Sum of CPU requests of all containers on the current node. You can view the actual available CPUs of a node on the CCE console (Resource Management > Nodes > Allocatable).

    -
-
  • Memory quotas: -
    - - - - - - - - - - -
    Table 2 Description of memory quotas

    Parameter

    -

    Description

    -

    Memory request

    -

    Minimum amount of memory required by a container. Resources are scheduled for the container based on this value. A container can be scheduled to a node only when the node's remaining allocatable memory is greater than or equal to the container's memory request.

    -

    Memory Limit

    -

    Maximum amount of memory available for a container. When the memory usage exceeds the configured memory limit, the instance may be restarted, which affects the normal use of the workload.

    -
    -
    -

    Recommended configuration

    -

    Actual available memory of a node ≥ Sum of memory limits of all containers on the current node ≥ Sum of memory requests of all containers on the current node. You can view the actual available memory of a node on the CCE console (Resource Management > Nodes > Allocatable).

    -
-

The allocatable resources are calculated based on the resource request value (Request), which indicates the upper limit of resources that can be requested by pods on this node, but does not indicate the actual available resources of the node. The calculation formula is as follows:

-
  • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
  • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
-
-
-

Example

Assume that a cluster contains a node with 4 CPU cores and 8 GB of memory. A workload containing two pods has been deployed on the cluster. The resources of the two pods (pods 1 and 2) are as follows: {CPU request, CPU limit, memory request, memory limit} = {1 core, 2 cores, 2 GB, 2 GB}.

-
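
Expressed as a container spec fragment, the per-pod resources in this example would look as follows; the container name and image are placeholders.

containers:
- name: app
  image: nginx
  resources:
    requests:
      cpu: "1"              # CPU request: 1 core
      memory: 2Gi           # memory request: 2 GB
    limits:
      cpu: "2"              # CPU limit: 2 cores
      memory: 2Gi           # memory limit: 2 GB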

The CPU and memory usage of the node is as follows:

-
  • Allocatable CPU = 4 cores - (1 core requested by pod 1 + 1 core requested by pod 2) = 2 cores
  • Allocatable memory = 8 GB - (2 GB requested by pod 1 + 2 GB requested by pod 2) = 4 GB
-

Therefore, the remaining 2 cores and 4 GB can be used by the next new pod.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0164.html b/docs/cce/umn/cce_01_0164.html deleted file mode 100644 index b427f78f..00000000 --- a/docs/cce/umn/cce_01_0164.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Permissions Management

-
- - diff --git a/docs/cce/umn/cce_01_0175.html b/docs/cce/umn/cce_01_0175.html deleted file mode 100644 index f9fc9165..00000000 --- a/docs/cce/umn/cce_01_0175.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Obtaining a Cluster Certificate

-

Scenario

Before accessing cluster resources through open-source Kubernetes APIs, obtain the cluster's certificate.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. In the card view of the target cluster, choose More > Download X.509 Certificate.
  3. In the Download X.509 Certificate dialog box displayed, select the certificate expiration time and download the X.509 certificate of the cluster as prompted.

    Figure 1 Downloading a certificate
    -
    • The downloaded certificate contains three files: client.key, client.crt, and ca.crt. Keep these files secure.
    • Certificates are not required for mutual access between containers in a cluster.
    -
    -
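
    For example, the downloaded files can be used to call the Kubernetes API with kubectl; the API server address below is a placeholder for the address shown on the cluster details page.

    kubectl --server=https://<cluster-api-server-address> \
            --certificate-authority=./ca.crt \
            --client-certificate=./client.crt \
            --client-key=./client.key \
            get nodes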

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0178.html b/docs/cce/umn/cce_01_0178.html deleted file mode 100644 index 0438c9ba..00000000 --- a/docs/cce/umn/cce_01_0178.html +++ /dev/null @@ -1,129 +0,0 @@ - - -

Formula for Calculating the Reserved Resources of a Node

-

Some node resources are required to run the necessary Kubernetes system components that make the node part of your cluster. Therefore, the total amount of node resources and the amount of allocatable node resources in Kubernetes are different. The larger the node specifications, the more containers can be deployed on the node, and the more resources Kubernetes needs to reserve.

-

To ensure node stability, a certain amount of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications.

-

CCE calculates the resources that can be allocated to user nodes as follows:

-

Allocatable resources = Total amount - Reserved amount - Eviction threshold

-

-

Rules for Reserving Node Memory

You can use the following formula to calculate how much memory is reserved on a node (and is therefore not available for running containers):

-

Total reserved amount = Reserved memory for system components + Reserved memory for kubelet to manage pods

- -
- - - - - - - - - - - - - - - - -
Table 1 Reservation rules for system components

Total Memory (TM)

-

Reserved Memory for System Components

-

TM ≤ 8 GB

-

0 MB

-

8 GB < TM ≤ 16 GB

-

[(TM – 8 GB) x 1024 x 10%] MB

-

16 GB < TM ≤ 128 GB

-

[8 GB x 1024 x 10% + (TM – 16 GB) x 1024 x 6%] MB

-

TM > 128 GB

-

(8 GB x 1024 x 10% + 112 GB x 1024 x 6% + (TM – 128 GB) x 1024 x 2%) MB

-
-
- -
- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 2 Reservation rules for kubelet

Total Memory (TM)

-

Number of Pods

-

Reserved Memory for kubelet

-

TM ≤ 2 GB

-

-

-

TM x 25%

-

TM > 2 GB

-

0 < Max. pods on a node ≤ 16

-

700 MB

-

16 < Max. pods on a node ≤ 32

-

[700 + (Max. pods on a node – 16) x 18.75] MB

-

32 < Max. pods on a node ≤ 64

-

[1024 + (Max. pods on a node – 32) x 6.25] MB

-

64 < Max. pods on a node ≤ 128

-

[1230 + (Max. pods on a node – 64) x 7.80] MB

-

Max. pods on a node > 128

-

[1740 + (Max. pods on a node – 128) x 11.20] MB

-
-
-
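
As a worked illustration under assumed specifications (a node with 32 GB of total memory and a maximum of 64 pods):
  • Reserved memory for system components = 8 GB x 1024 x 10% + (32 GB – 16 GB) x 1024 x 6% ≈ 819 MB + 983 MB ≈ 1802 MB
  • Reserved memory for kubelet = [1024 + (64 – 32) x 6.25] MB = 1224 MB
  • Total reserved memory ≈ 1802 MB + 1224 MB ≈ 3026 MB, plus the 100 MiB reserved for kubelet eviction noted at the end of this section.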

For a small-capacity node, adjust the maximum number of pods based on the site requirements. Alternatively, when creating a node on the CCE console, you can adjust the maximum number of pods for the node based on the node specifications.

-
-
-

Rules for Reserving Node CPU

-
- - - - - - - - - - - - - - - - -
Table 3 Node CPU reservation rules

Total CPU Cores (Total)

-

Reserved CPU Cores

-

Total ≤ 1 core

-

Total x 6%

-

1 core < Total ≤ 2 cores

-

1 core x 6% + (Total – 1 core) x 1%

-

2 cores < Total ≤ 4 cores

-

1 core x 6% + 1 core x 1% + (Total – 2 cores) x 0.5%

-

Total > 4 cores

-

1 core x 6% + 1 core x 1% + 2 cores x 0.5% + (Total – 4 cores) x 0.25%

-
-
-
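
For example, for an assumed 8-core node: Reserved CPU cores = 1 core x 6% + 1 core x 1% + 2 cores x 0.5% + (8 – 4) cores x 0.25% = 0.06 + 0.01 + 0.01 + 0.01 = 0.09 cores.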

CCE reserves an extra 100 MiB for kubelet eviction.

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0180.html b/docs/cce/umn/cce_01_0180.html deleted file mode 100644 index 72bc1ec6..00000000 --- a/docs/cce/umn/cce_01_0180.html +++ /dev/null @@ -1,284 +0,0 @@ - - -

Overview

-

Introduction

A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (PM), depending on your service requirements. The components on a node include kubelet, container runtime, and kube-proxy.

-

A Kubernetes cluster consists of master nodes and worker nodes. The nodes described in this section refer to worker nodes, the computing nodes of a cluster that run containerized applications.

-
-

CCE uses high-performance Elastic Cloud Servers (ECSs) as nodes to build highly available Kubernetes clusters.

-
-

Notes

  • To ensure node stability, a certain amount of CCE node resources will be reserved for Kubernetes components (such as kubelet, kube-proxy, and docker) based on the node specifications. Therefore, the total number of node resources and the amount of allocatable node resources for your cluster are different. The larger the node specifications, the more the containers deployed on the node. Therefore, more node resources need to be reserved to run Kubernetes components.
  • The node networking (such as the VM networking and container networking) is taken over by CCE. You are not allowed to add NICs or change routes. If you modify the networking configuration, the availability of CCE may be affected.
-
-

Node Lifecycle

A lifecycle indicates the node statuses recorded from the time when the node is created through the time when the node is deleted or released.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Node statuses

Status

-

Status Attribute

-

Description

-

Available

-

Stable state

-

The node is running properly and is connected to the cluster.

-

Nodes in this state can provide services.

-

Unavailable

-

Stable state

-

The node is not running properly.

-

Instances in this state no longer provide services. In this case, perform the operations in Resetting a Node.

-

Creating

-

Intermediate state

-

The node has been created but is not running.

-

Installing

-

Intermediate state

-

The Kubernetes software is being installed on the node.

-

Deleting

-

Intermediate state

-

The node is being deleted.

-

If this state stays for a long time, an exception occurs.

-

Stopped

-

Stable state

-

The node is stopped properly.

-

A node in this state cannot provide services. You can start the node on the ECS console.

-

Error

-

Stable state

-

The node is abnormal.

-

Instances in this state no longer provide services. In this case, perform the operations in Resetting a Node.

-
-
-
-

Mapping between Node OSs and Container Engines

-
- - - - - - - - - - - - - - - - -
Table 2 Node OSs and container engines in CCE clusters

OS

-

Kernel Version

-

Container Engine

-

Container Storage Rootfs

-

Container Runtime

-

CentOS 7.x

-

3.x

-

Docker

-

Clusters of v1.19 and earlier use Device Mapper.

-

Clusters of v1.21 and later use OverlayFS.

-

runC

-

EulerOS 2.5

-

Device Mapper

-
-
- -
- - - - - - - - - - - - - - - - - - - - - - -
Table 3 Node OSs and container engines in CCE Turbo clusters

Node Type

-

OS

-

Kernel Version

-

Container Engine

-

Container Storage Rootfs

-

Container Runtime

-

VM

-

CentOS 7.x

-

3.x

-

Docker

-

OverlayFS

-

runC

-

BMS in the shared resource pool

-

EulerOS 2.9

-

4.x

-

containerd

-

Device Mapper

-

Kata

-
-
-
-

Secure Containers and Common Containers

Secure (Kata) containers are distinguished from common containers in a few aspects.

-

The most significant difference is that each secure container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualization layer. CCE provides container isolation that is more secure than independent private Kubernetes clusters. With isolated OS kernels, computing resources, and networks, pod resources and data will not be preempted and stolen by other pods.

-

You can run common or secure containers on a single node in a CCE Turbo cluster. The differences between them are as follows:

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Category

-

Secure Container (Kata)

-

Common Container (Docker)

-

Common Container (containerd)

-

Node type used to run containers

-

Bare-metal server (BMS)

-

VM

-

VM

-

Container engine

-

containerd

-

Docker

-

Default container engine for common containers created on the console.

-

containerd

-

Container runtime

-

Kata

-

runC

-

runC

-

Container kernel

-

Exclusive kernel

-

Sharing the kernel with the host

-

Sharing the kernel with the host

-

Container isolation

-

Lightweight VMs

-

cgroups and namespaces

-

cgroups and namespaces

-

Container engine storage driver

-

Device Mapper

-

OverlayFS2

-

OverlayFS

-

Pod overhead

-

Memory: 50 MiB

-

CPU: 0.1 cores

-

Pod overhead is a feature for accounting for the resources consumed by the pod infrastructure on top of the container requests and limits. For example, if limits.cpu is set to 0.5 cores and limits.memory to 256 MiB for a pod, the pod will request 0.6-core CPUs and 306 MiB of memory.

-

None

-

None

-

Minimal specifications

-

Memory: 256 MiB

-

CPU: 0.25 cores

-

None

-

None

-

Container engine CLI

-

crictl

-

docker

-

crictl

-

Pod computing resources

-

The request and limit values must be the same for both CPU and memory.

-

The request and limit values can be different for both CPU and memory.

-

The request and limit values can be different for both CPU and memory.

-

Host network

-

Not supported

-

Supported

-

Supported

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0182.html b/docs/cce/umn/cce_01_0182.html deleted file mode 100644 index 9950a338..00000000 --- a/docs/cce/umn/cce_01_0182.html +++ /dev/null @@ -1,100 +0,0 @@ - - -

Monitoring Overview

-

CCE works with AOM to comprehensively monitor clusters. When a node is created, the ICAgent (the DaemonSet named icagent in the kube-system namespace of the cluster) of AOM is installed by default. The ICAgent collects monitoring data of underlying resources and workloads running on the cluster. It also collects monitoring data of custom metrics of the workload.

-
  • Resource metrics

    Basic resource monitoring includes CPU, memory, and disk monitoring. For details, see Resource Metrics. You can view these metrics of clusters, nodes, and workloads on the CCE or AOM console.

    -
-

AOM is available only in certain regions.

-
-

Resource Metrics

-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Resource metrics

Metric

-

Description

-

CPU Allocation Rate

-

Indicates the percentage of CPUs allocated to workloads.

-

Memory Allocation Rate

-

Indicates the percentage of memory allocated to workloads.

-

CPU Usage

-

Indicates the CPU usage.

-

Memory Usage

-

Indicates the memory usage.

-

Disk Usage

-

Indicates the disk usage.

-

Down

-

Indicates the speed at which data is downloaded to a node. The unit is KB/s.

-

Up

-

Indicates the speed at which data is uploaded from a node. The unit is KB/s.

-

Disk Read Rate

-

Indicates the data volume read from a disk per second. The unit is KB/s.

-

Disk Write Rate

-

Indicates the data volume written to a disk per second. The unit is KB/s.

-
-
-
-

Viewing Cluster Monitoring Data

In the navigation pane of the CCE console, choose Resource Management > Clusters. Click on the cluster card to access the cluster monitoring page.

-

-

The cluster monitoring page displays the monitoring status of cluster resources, CPU, memory, and disk usage of all nodes in a cluster, and CPU and memory allocation rates.

-

Explanation of monitoring metrics:

-
  • CPU allocation rate = Sum of CPU quotas requested by pods in the cluster/Sum of CPU quotas that can be allocated of all nodes (excluding master nodes) in the cluster
  • Memory allocation rate = Sum of memory quotas requested by pods in the cluster/Sum of memory quotas that can be allocated of all nodes (excluding master nodes) in the cluster
  • CPU usage: Average CPU usage of all nodes (excluding master nodes) in a cluster
  • Memory usage: Average memory usage of all nodes (excluding master nodes) in a cluster
-

Allocatable node resources (CPU or memory) = Total amount – Reserved amount – Eviction thresholds. For details, see Formula for Calculating the Reserved Resources of a Node.

-
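
The capacity and allocatable values of a specific node can also be checked with kubectl; the node name is a placeholder, and the output includes Capacity and Allocatable sections listing cpu, memory, and pods.

kubectl describe node <node-name>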
-

On the cluster monitoring page, you can also view monitoring data of nodes, workloads, and pods. You can click to view the detailed data.

-

-
-

Viewing Monitoring Data of Master Nodes

CCE allows you to view monitoring data of master nodes. You can view the monitoring data of a master node in the upper right corner of the cluster details page. Clicking More will direct you to the AOM console.

-

-
-

Viewing Monitoring Data of Worker Nodes

In addition to the cluster monitoring page, you can also view node monitoring data on the node console by clicking Monitoring in the row where the node resides.

-

-

The node list page also displays the data about the allocable resources of the node. Allocatable resources indicate the upper limit of resources that can be requested by pods on a node, and are calculated based on the requests. Allocatable resources do not indicate the actual available resources of the node.

-

The calculation formulas are as follows:

-
  • Allocatable CPU = Total CPU – Requested CPU of all pods – Reserved CPU for other resources
  • Allocatable memory = Total memory – Requested memory of all pods – Reserved memory for other resources
-

-
-

Viewing Workload Monitoring Data

You can view monitoring data of a workload on the Monitoring tab page of the workload details page.

-

-

You can also click AOM to go to the AOM console to view monitoring data of the workload.

-

-

-
-

Viewing Pod Monitoring Data

You can view monitoring data of a pod on the Pods tab page of the workload details page.

-

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0183.html b/docs/cce/umn/cce_01_0183.html deleted file mode 100644 index 281f03a3..00000000 --- a/docs/cce/umn/cce_01_0183.html +++ /dev/null @@ -1,41 +0,0 @@ - - -

Nodes

-
- - diff --git a/docs/cce/umn/cce_01_0184.html b/docs/cce/umn/cce_01_0184.html deleted file mode 100644 index 882614b4..00000000 --- a/docs/cce/umn/cce_01_0184.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Synchronizing Node Data

-

Scenario

Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required.

-

Some information about CCE nodes is maintained independently from the ECS console. After you change the name, EIP, billing mode, or specifications of an ECS on the ECS console, you need to synchronize the ECS information to the corresponding node on the CCE console. After the synchronization, information on both consoles is consistent.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes.
  2. In the same row as the node whose data will be synchronized, choose More > Sync Node data.

    Alternatively, click the node name, and click Sync Node Data in the upper right corner of the node details page.

    -
    -
    Figure 1 Synchronizing node data
    -

    After the synchronization is complete, the "Sync success" message is displayed in the upper right corner.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0185.html b/docs/cce/umn/cce_01_0185.html deleted file mode 100644 index af83d767..00000000 --- a/docs/cce/umn/cce_01_0185.html +++ /dev/null @@ -1,52 +0,0 @@ - - -

Logging In to a Node

-

Notes and Constraints

  • If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).
  • Only login to a running ECS is allowed.
  • You can log in to a Linux server only as the user linux.
-
-

Login Modes

You can log in to an ECS in either of the following modes:

-
  • Management console (VNC)

    If an ECS has no EIP, log in to the ECS console and click Remote Login in the same row as the ECS.

    -
  • SSH

    This mode applies only to ECSs running Linux. Usually, you can use a remote login tool, such as PuTTY, Xshell, and SecureCRT, to log in to your ECS. If none of the remote login tools can be used, log in to the ECS console and click Remote Login in the same row as the ECS to view the connection status and running status of the ECS.

    -
    • When you use the Windows OS to log in to a Linux node, set Auto-login username to linux.
    • The CCE console does not support node OS upgrade. Do not upgrade the node OS using the yum update command. Otherwise, the container networking components will be unavailable.
    -
    -
-
- -
- - - - - - - - - - - - - - - - - -
Table 1 Linux ECS login modes

EIP Binding

-

On-Premises OS

-

Connection Method

-

Yes

-

Windows

-

Use a remote login tool, such as PuTTY or Xshell.

-

Yes

-

Linux

-

Run commands.

-

Yes/No

-

Windows/Linux

-

Use the remote login function available on the console.

-
-
-
-
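
For reference, an SSH login from a Linux client typically looks like the following. The EIP placeholder and the key file path are examples; whether you authenticate with a password or a key pair depends on how the ECS was created.

ssh linux@<node-eip>
ssh -i ~/.ssh/my-key.pem linux@<node-eip>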
- -
- diff --git a/docs/cce/umn/cce_01_0186.html b/docs/cce/umn/cce_01_0186.html deleted file mode 100644 index 455684ab..00000000 --- a/docs/cce/umn/cce_01_0186.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Deleting a Node

-

Scenario

When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.

-
-

Notes and Constraints

  • After a CCE cluster is deleted, the ECS nodes in the cluster are also deleted.
-
-

Notes

  • Deleting a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
  • Unexpected risks may occur during the operation. Back up related data in advance.
  • During the operation, the backend will set the node to the unschedulable state.
  • Only worker nodes can be deleted.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes. In the same row as the node you will delete, choose More > Delete.
  2. In the Delete Node dialog box, enter DELETE and click Yes.

    • After the node is deleted, pods on it are automatically migrated to other available nodes.
    • If the disks and EIPs bound to the node are important resources, unbind them first. Otherwise, they will be deleted with the node.
    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0187.html b/docs/cce/umn/cce_01_0187.html deleted file mode 100644 index b8d36291..00000000 --- a/docs/cce/umn/cce_01_0187.html +++ /dev/null @@ -1,71 +0,0 @@ - - -

Permissions Overview

-

CCE permissions management allows you to assign permissions to IAM users and user groups under your tenant accounts. CCE combines the advantages of Identity and Access Management (IAM) and Kubernetes Role-based Access Control (RBAC) authorization to provide a variety of authorization methods, including IAM fine-grained authorization, IAM token authorization, cluster-scoped authorization, and namespace-wide authorization.

-

If you need to perform refined permissions management on CCE clusters and related resources, for example, to control the access of employees in different departments to cloud resources, you can perform multi-dimensional permissions management on CCE.

-

This section describes the CCE permissions management mechanism and related concepts. If your account has met your service requirements, you can skip the configurations in this chapter.

-

CCE Permissions Management

CCE permissions are described as follows:
  • Cluster-level permissions: Cluster-level permissions management evolves out of the system policy authorization feature of IAM. IAM users in the same user group have the same permissions. On IAM, you can configure system policies to describe which IAM user groups can perform which operations on cluster resources. For example, you can grant user group A permissions to create and delete cluster X, add nodes, or install add-ons, while granting user group B permissions only to view information about cluster X.

    Cluster-level permissions involve CCE non-Kubernetes APIs and support fine-grained IAM policies and enterprise project management capabilities.

    -
  • Namespace-level permissions: You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. CCE has also been enhanced based on open-source capabilities. It supports RBAC authorization based on IAM user or user group, and RBAC authentication on access to APIs using IAM tokens.

    Namespace-level permissions involve CCE Kubernetes APIs and are enhanced based on the Kubernetes RBAC capabilities. Namespace-level permissions can be granted to IAM users or user groups for authentication and authorization, but are independent of fine-grained IAM policies.

    -

    Starting from version 1.11.7-r2, CCE clusters allow you to configure namespace permissions. Clusters earlier than v1.11.7-r2 have all namespace permissions by default.

    -
-
-

In general, you configure CCE permissions in two scenarios. The first is creating and managing clusters and related resources, such as nodes. The second is creating and using Kubernetes resources in the cluster, such as workloads and Services.

-
Figure 1 Illustration on CCE permissions
-

These permissions allow you to manage resource users at a finer granularity.

-
-

Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based)

Users with different cluster permissions (assigned using IAM) have different namespace permissions (assigned using Kubernetes RBAC). Table 1 lists the namespace permissions of different users.

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Differences in namespace permissions

User

-

Clusters Earlier Than v1.11.7-r2

-

Clusters of v1.11.7-r2

-

User with the Tenant Administrator permissions

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, an administrator group is added. Therefore, the user has all namespace permissions.

-
-

IAM user with the CCE Administrator role

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, an administrator group is added. Therefore, the user has all namespace permissions.

-
-

IAM user with the CCE Viewer role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-

IAM user with the Tenant Guest role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-
-
-
-

kubectl Permissions

You can use kubectl to access Kubernetes resources in a cluster.

-

When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user. The permissions that a user has are listed in Table 1.
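
As a minimal sketch, after downloading the kubeconfig.json file of a cluster, you can point kubectl at it as follows (the file path is an example; you can also merge the content into $HOME/.kube/config):

export KUBECONFIG=$HOME/kubeconfig.json
kubectl get nodes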

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0188.html b/docs/cce/umn/cce_01_0188.html deleted file mode 100644 index 8441b6b1..00000000 --- a/docs/cce/umn/cce_01_0188.html +++ /dev/null @@ -1,92 +0,0 @@ - - -

Cluster Permissions (IAM-based)

-

CCE cluster permissions are assigned based on IAM system policies and custom policies. You can use user groups to assign permissions to IAM users.

-

Cluster permissions are configured only for cluster-related resources (such as clusters and nodes). You must also configure namespace permissions to operate Kubernetes resources (such as workloads and Services).

-
-

Prerequisites

  • A user with the Security Administrator role has all IAM permissions except role switching. Only these users can view user groups and their permissions on the Permissions Management page on the CCE console.
-
-

Configuration

On the CCE console, when you choose Permissions Management > Cluster-Level Permissions to create a user group, you will be directed to the IAM console to complete the process. After the user group is created and its permissions are configured, you can view the information on the Cluster-Level Permissions tab page. This section describes the operations in IAM.

-
-

Process Flow

Figure 1 Process of assigning CCE permissions
-

-
  1. Create a user group and assign permissions to it.

    Create a user group on the IAM console, and assign CCE permissions, for example, the CCE Viewer policy to the group.

    -

    CCE is deployed by region. On the IAM console, select Region-specific projects when assigning CCE permissions.

    -
    -
  2. Create a user and add it to a user group.

    Create a user on the IAM console and add the user to the group created in 1.

    -
  3. Log in and verify permissions.

    Log in to the management console as the user you created, and verify that the user has the assigned permissions.

    -
    • Log in to the management console and switch to the CCE console. Click Create Cluster in the upper right corner. If the operation fails (assuming that only the CCE Viewer role is assigned), the permission control policy has taken effect.
    • Switch to the console of any other service. If a message appears indicating that you do not have the required permissions to access the service, the CCE Viewer policy takes effect.
    -
-
-

Custom Policies

Custom policies can be created as a supplement to the system-defined policies of CCE.

-

You can create custom policies in either of the following ways:

-
  • Visual editor: Select cloud services, actions, resources, and request conditions. This does not require knowledge of policy syntax.
  • JSON: Edit JSON policies from scratch or based on an existing policy.
-

This section provides examples of common custom CCE policies.

-
-

Example Custom Policies:

-
  • Example 1: Creating a cluster named test
    {
    -    "Version": "1.1",
    -    "Statement": [
    -        {
    -            "Effect": "Allow",
    -            "Action": [
    -                "cce:cluster:create"
    -            ]
    -        }
    -    ]
    -}
    -
  • Example 2: Denying node deletion

    A policy with only "Deny" permissions must be used in conjunction with other policies to take effect. If the permissions assigned to a user contain both "Allow" and "Deny", the "Deny" permissions take precedence over the "Allow" permissions.

    -

    The following method can be used if you need to assign permissions of the CCEFullAccess policy to a user but you want to prevent the user from deleting nodes (cce:node:delete). Create a custom policy for denying node deletion, and attach both policies to the group to which the user belongs. Then, the user can perform all operations on CCE except deleting nodes. The following is an example of a deny policy:

    -
    {
    -    "Version": "1.1",
    -    "Statement": [
    -        {
    -            "Effect": "Deny",
    -            "Action": [
    -                "cce:node:delete"
    -            ]
    -        }
    -    ]
    -}
    -
  • Example 3: Defining permissions for multiple services in a policy

    A custom policy can contain the actions of multiple services that are of the global or project-level type. The following is an example policy containing actions of multiple services:

    -
    {
    -    "Version": "1.1",
    -    "Statement": [
    -        {
    -            "Action": [
    -                "ecs:cloudServers:resize",
    -                "ecs:cloudServers:delete",
    -                "ims:images:list",
    -                "ims:serverImages:create"
    -            ],
    -            "Effect": "Allow"
    -        }
    -    ]
    -}
    -
-

CCE Cluster Permissions and Enterprise Projects

CCE supports resource management and permission allocation by cluster and enterprise project.

-

Note that:

-
  • IAM projects are based on physical isolation of resources, whereas enterprise projects provide global logical groups of resources, which better meet the actual requirements of enterprises. In addition, IAM policies can be managed based on enterprise projects. Therefore, you are advised to use enterprise projects for permissions management.
  • When there are both IAM projects and enterprise projects, IAM preferentially matches the IAM project policies.
  • When creating a cluster or node using purchased cloud resources, ensure that IAM users have been granted the required permissions in the enterprise project to use these resources. Otherwise, the cluster or node may fail to be created.
-
-

CCE Cluster Permissions and IAM RBAC

CCE is compatible with IAM system roles for permissions management. You are advised to use fine-grained policies provided by IAM to simplify permissions management.

-

CCE supports the following roles:

-
  • Basic IAM roles:
    • te_admin (Tenant Administrator): Users with this role can call all APIs of all services except IAM.
    • readonly (Tenant Guest): Users with this role can call APIs with the read-only permissions of all services except IAM.
    -
  • Custom CCE administrator role: CCE Administrator
-
  • Tenant Administrator and Tenant Guest are special IAM system roles. After any system or custom policy is configured, Tenant Administrator and Tenant Guest take effect as system policies to achieve compatibility with IAM RBAC and ABAC scenarios.
  • If a user has the Tenant Administrator or CCE Administrator system role, the user has the cluster-admin permissions in Kubernetes RBAC and the permissions cannot be removed after the cluster is created.
    If the user is the cluster creator, the cluster-admin permissions in Kubernetes RBAC are granted to the user by default. The permissions can be manually removed after the cluster is created.
    • Method 1: On the CCE console, choose Permissions Management > Namespace-Level Permissions and click Delete in the row of the cluster-creator role.
    • Method 2: Delete the cluster-creator ClusterRoleBinding through the API or kubectl.
    -
    -
-
-

When RBAC and IAM policies co-exist, the backend authentication logic for open APIs or console operations on CCE is as follows:

-

-

Certain CCE APIs involve namespace-level permissions or key operations and therefore, they require special permissions:

-

Using clusterCert to obtain the cluster kubeconfig: cceadm/teadmin

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0189.html b/docs/cce/umn/cce_01_0189.html deleted file mode 100644 index 4577b49d..00000000 --- a/docs/cce/umn/cce_01_0189.html +++ /dev/null @@ -1,234 +0,0 @@ - - -

Namespace Permissions (Kubernetes RBAC-based)

-

Namespace Permissions (Kubernetes RBAC-based)

You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles. The RBAC API declares four kinds of Kubernetes objects: Role, ClusterRole, RoleBinding, and ClusterRoleBinding, which are described as follows:

-
  • Role: defines a set of rules for accessing Kubernetes resources in a namespace.
  • RoleBinding: defines the relationship between users and roles.
  • ClusterRole: defines a set of rules for accessing Kubernetes resources in a cluster (including all namespaces).
  • ClusterRoleBinding: defines the relationship between users and cluster roles.
-

Role and ClusterRole specify actions that can be performed on specific resources. RoleBinding and ClusterRoleBinding bind roles to specific users, user groups, or ServiceAccounts. Illustration:

-
Figure 1 Role binding
-

On the CCE console, you can assign permissions to a user or user group to access resources in one or multiple namespaces. By default, the CCE console provides the following ClusterRoles:

-
  • view: read-only permission on most resources in all or selected namespaces.
  • edit: read and write permissions on most resources in all or selected namespaces. If this ClusterRole is configured for all namespaces, its capability is the same as the O&M permission.
  • admin: read and write permissions on most resources in all namespaces, and read-only permission on nodes, storage volumes, namespaces, and quota management.
  • cluster-admin: read and write permissions on all resources in all namespaces.
-
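
These four ClusterRoles are the standard user-facing roles delivered with Kubernetes RBAC. To see exactly which verbs and resources each of them allows, you can inspect them with kubectl, for example:

kubectl get clusterrole view edit admin cluster-admin
kubectl describe clusterrole view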
-

Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based)

Users with different cluster permissions (assigned using IAM) have different namespace permissions (assigned using Kubernetes RBAC). Table 1 lists the namespace permissions of different users.

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Differences in namespace permissions

User

-

Clusters Earlier Than v1.11.7-r2

-

Clusters of v1.11.7-r2

-

User with the Tenant Administrator permissions

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, an administrator group is added. Therefore, the user has all namespace permissions.

-
-

IAM user with the CCE Administrator role

-

All namespace permissions

-
  • Has all namespace permissions when using CCE on the console.
  • Requires Kubernetes RBAC authorization when using CCE via kubectl.
-
NOTE:

When such a user accesses the CCE console, an administrator group is added. Therefore, the user has all namespace permissions.

-
-

IAM user with the CCE Viewer role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-

IAM user with the Tenant Guest role

-

All namespace permissions

-

Requires Kubernetes RBAC authorization.

-
-
-
-

Prerequisites

  • Kubernetes RBAC authorization can be used for clusters of v1.11.7-r2 and later. Ensure that you have deployed a supported cluster version. For details about upgrading a cluster, see Performing Replace/Rolling Upgrade (v1.13 and Earlier).
  • After you create a cluster of v1.11.7-r2 or later, CCE automatically assigns the cluster-admin permission to you, which means you have full control on all resources in all namespaces in the cluster.
  • A user with the Security Administrator role has all IAM permissions except role switching. Only these users can assign permissions on the Permissions Management page on the CCE console.
-
-

Configuring Namespace Permissions (on the Console)

You can regulate users' or user groups' access to Kubernetes resources in a single namespace based on their Kubernetes RBAC roles.

-
  1. Log in to the CCE console. In the navigation pane, choose Permissions Management.
  2. On the displayed page, click the Namespace-Level Permissions tab. In the upper right corner of the namespace permissions list, select the cluster that contains the namespace whose access will be managed, and click Add Permissions.
  3. Confirm the cluster name and select the namespace for which permissions will be assigned (for example, All namespaces). Then select the target user or user group and the permissions.

    -

  4. Click Create.
-
-

Using kubectl to Configure Namespace Permissions

When you access a cluster using kubectl, CCE uses the kubeconfig.json file generated on the cluster for authentication. This file contains user information, based on which CCE determines which Kubernetes resources can be accessed by kubectl. The permissions recorded in a kubeconfig.json file vary from user to user. The permissions that a user has are listed in Cluster Permissions (IAM-based) and Namespace Permissions (Kubernetes RBAC-based).

-
-

In addition to cluster-admin, admin, edit, and view, you can define Roles and RoleBindings to configure the permissions to add, delete, modify, and query resources, such as pods, Deployments, and Services, in the namespace.

-

The procedure for creating a Role is simple: specify a namespace and then define rules. The rules in the following example allow the GET and LIST operations on pods in the default namespace.

-
kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  namespace: default                          # Namespace
-  name: role-example
-rules:
-- apiGroups: [""]
-  resources: ["pods"]                         # The pod can be accessed.
-  verbs: ["get", "list"]                      # The GET and LIST operations can be performed.
-
  • apiGroups indicates the API group to which the resource belongs.
  • resources indicates the resources that can be operated. Pods, Deployments, ConfigMaps, and other Kubernetes resources are supported.
  • verbs indicates the operations that can be performed. get indicates querying a specific object, and list indicates listing all objects of a certain type. Other value options include create, update, and delete.
-

For details, see Using RBAC Authorization.

-

After creating a Role, you can bind the Role to a specific user, which is called RoleBinding. The following is an example.

-
kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: RoleBinding-example
-  namespace: default
-  annotations:
-    CCE.com/IAM: 'true'
-roleRef:
-  kind: Role
-  name: role-example
-  apiGroup: rbac.authorization.k8s.io
-subjects:
-- kind: User
-  name: 0c97ac3cb280f4d91fa7c0096739e1f8 # User ID of the user-example
-  apiGroup: rbac.authorization.k8s.io
-

The subjects section binds a Role with an IAM user so that the IAM user can obtain the permissions defined in the Role, as shown in the following figure.

-
Figure 2 A RoleBinding binds the Role to the user.
-

You can also specify a user group in the subjects section. In this case, all users in the user group obtain the permissions defined in the Role.

-
subjects:
-- kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7    # User group ID
-  apiGroup: rbac.authorization.k8s.io
-

Use the IAM user user-example to connect to the cluster and obtain the pod information. The following is an example of the returned pod information.

-
# kubectl get pod
-NAME                                   READY   STATUS    RESTARTS   AGE
-deployment-389584-2-6f6bd4c574-2n9rk   1/1     Running   0          4d7h
-deployment-389584-2-6f6bd4c574-7s5qw   1/1     Running   0          4d7h
-deployment-3895841-746b97b455-86g77    1/1     Running   0          4d7h
-deployment-3895841-746b97b455-twvpn    1/1     Running   0          4d7h
-nginx-658dff48ff-7rkph                 1/1     Running   0          4d9h
-nginx-658dff48ff-njdhj                 1/1     Running   0          4d9h
-# kubectl get pod nginx-658dff48ff-7rkph
-NAME                     READY   STATUS    RESTARTS   AGE
-nginx-658dff48ff-7rkph   1/1     Running   0          4d9h
-

Try querying Deployments and Services in the namespace. The output shows that user-example does not have the required permissions. Try querying the pods in the kube-system namespace. The output shows that user-example does not have the required permissions either. This indicates that the IAM user user-example has only the GET and LIST permissions on pods in the default namespace, as expected.

-
# kubectl get deploy
-Error from server (Forbidden): deployments.apps is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "deployments" in API group "apps" in the namespace "default"
-# kubectl get svc
-Error from server (Forbidden): services is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "services" in API group "" in the namespace "default"
-# kubectl get pod --namespace=kube-system
-Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "pods" in API group "" in the namespace "kube-system"
-
-

Example: Assigning All Cluster Permissions (cluster-admin)

You can use the cluster-admin role to assign all permissions on a cluster. This role contains the permissions for cluster resources (such as PVs and StorageClasses).

-

In the following example kubectl output, a ClusterRoleBinding has been created and binds the cluster-admin role to the user group cce-role-group.

-
# kubectl get clusterrolebinding
-NAME                                                              ROLE                           AGE
-clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/cluster-admin      61s
-
-# kubectl get clusterrolebinding clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7 -oyaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  annotations:
-    CCE.com/IAM: "true"
-  creationTimestamp: "2021-06-23T09:15:22Z"
-  name: clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7
-  resourceVersion: "36659058"
-  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/clusterrole_cluster-admin_group0c96fad22880f32a3f84c009862af6f7
-  uid: d6cd43e9-b4ca-4b56-bc52-e36346fc1320
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cluster-admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7
-

Connect to the cluster as an authorized user. If the PVs and StorageClasses can be queried, the permission configuration takes effect.

-
# kubectl get pv
-No resources found
-# kubectl get sc
-NAME                PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
-csi-disk            everest-csi-provisioner         Delete          Immediate              true                   75d
-csi-disk-topology   everest-csi-provisioner         Delete          WaitForFirstConsumer   true                   75d
-csi-nas             everest-csi-provisioner         Delete          Immediate              true                   75d
-csi-obs             everest-csi-provisioner         Delete          Immediate              false                  75d
-csi-sfsturbo        everest-csi-provisioner         Delete          Immediate              true                   75d
-
-

Example: Assigning All Namespace Permissions (admin)

The admin role contains all permissions on a namespace. You can assign permissions to users to access one or multiple namespaces.

-

In the following example kubectl output, a RoleBinding has been created, the admin role is bound to the user group cce-role-group, and the target namespace is the default namespace.

-
# kubectl get rolebinding
-NAME                                                      ROLE                AGE
-clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/admin   18s
-# kubectl get rolebinding clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7 -oyaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  annotations:
-    CCE.com/IAM: "true"
-  creationTimestamp: "2021-06-24T01:30:08Z"
-  name: clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7
-  namespace: default
-  resourceVersion: "36963685"
-  selfLink: /apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings/clusterrole_admin_group0c96fad22880f32a3f84c009862af6f7
-  uid: 6c6f46a6-8584-47da-83f5-9eef1f7b75d6
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7
-

Connect to a cluster as an authorized user. In this example, you can create and query resources in the default namespace, but cannot query resources in the kube-system namespace or cluster resources.

-
# kubectl get pod
-NAME                    READY   STATUS    RESTARTS   AGE
-test-568d96f4f8-brdrp   1/1     Running   0          33m
-test-568d96f4f8-cgjqp   1/1     Running   0          33m
-# kubectl get pod -nkube-system
-Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "pods" in API group "" in the namespace "kube-system"
-# kubectl get pv
-Error from server (Forbidden): persistentvolumes is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot list resource "persistentvolumes" in API group "" at the cluster scope
-
-

Example: Assigning Read-Only Namespace Permissions (view)

The view role has the read-only permissions on a namespace. You can assign permissions to users to view one or multiple namespaces.

-

In the following example kubectl output, a RoleBinding has been created, the view role is bound to the user group cce-role-group, and the target namespace is the default namespace.

-
# kubectl get rolebinding
-NAME                                                     ROLE               AGE
-clusterrole_view_group0c96fad22880f32a3f84c009862af6f7   ClusterRole/view   7s
-
-# kubectl get rolebinding clusterrole_view_group0c96fad22880f32a3f84c009862af6f7 -oyaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  annotations:
-    CCE.com/IAM: "true"
-  creationTimestamp: "2021-06-24T01:36:53Z"
-  name: clusterrole_view_group0c96fad22880f32a3f84c009862af6f7
-  namespace: default
-  resourceVersion: "36965800"
-  selfLink: /apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings/clusterrole_view_group0c96fad22880f32a3f84c009862af6f7
-  uid: b86e2507-e735-494c-be55-c41a0c4ef0dd
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: view
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: 0c96fad22880f32a3f84c009862af6f7
-

Connect to the cluster as an authorized user. In this example, you can query resources in the default namespace but cannot create resources.

-
# kubectl get pod
-NAME                    READY   STATUS    RESTARTS   AGE
-test-568d96f4f8-brdrp   1/1     Running   0          40m
-test-568d96f4f8-cgjqp   1/1     Running   0          40m
-# kubectl run -i --tty --image tutum/dnsutils dnsutils --restart=Never --rm /bin/sh
-Error from server (Forbidden): pods is forbidden: User "0c97ac3cb280f4d91fa7c0096739e1f8" cannot create resource "pods" in API group "" in the namespace "default"
-
-

Example: Assigning Permissions for a Specific Kubernetes Resource Object

You can assign permissions on a specific Kubernetes resource object, such as pod, Deployment, and Service. For details, see Using kubectl to Configure Namespace Permissions.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0191.html b/docs/cce/umn/cce_01_0191.html deleted file mode 100644 index f22ae1e0..00000000 --- a/docs/cce/umn/cce_01_0191.html +++ /dev/null @@ -1,17 +0,0 @@ - - -

Overview

-

CCE uses Helm, a Kubernetes package manager, to simplify deployment and management of packages (also called charts). A chart is a collection of files that describe a related set of Kubernetes resources. The use of charts handles all the complexity in Kubernetes resource installation and management, making it possible to achieve unified resource scheduling and management.
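
As noted above, a chart is a collection of files. For orientation, a custom chart is typically laid out as a small directory like the following (the chart name and the files under templates/ are placeholders):

mychart/
  Chart.yaml          # Chart metadata such as the chart name and version
  values.yaml         # Default configuration values referenced by the templates
  templates/          # Kubernetes resource templates rendered by Helm
    deployment.yaml
    service.yaml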

-

Helm is a tool for packaging Kubernetes applications. For more information, see Helm documentation.

-
-

Custom charts simplify workload deployment.

-

This section describes how to create a workload using a custom chart. You can use multiple methods to create an orchestration chart on the CCE console.

-

Notes and Constraints

  • The number of charts that can be uploaded by a single user is limited. The value displayed on the console of each region is the allowed quantity.
  • CCE uses Helm v2.12. If you use Helm v3 or later to manage CCE, compatibility problems may occur.
  • Each version of a chart consumes a portion of the chart quota.
  • Users with chart operation permissions can perform multiple operations on clusters. Therefore, exercise caution when assigning users the chart lifecycle management permissions, including uploading charts and creating, deleting, and updating chart releases.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0197.html b/docs/cce/umn/cce_01_0197.html deleted file mode 100644 index b3ed445e..00000000 --- a/docs/cce/umn/cce_01_0197.html +++ /dev/null @@ -1,200 +0,0 @@ - - -

Overview

-

To enable interoperability from one Kubernetes installation to the next, you must upgrade your Kubernetes clusters before the maintenance period ends.

-

After the latest Kubernetes version is available in CCE, CCE will describe the changes in this version.

-

You can use the CCE console to upgrade the Kubernetes version of a cluster.

-

An upgrade flag will be displayed on the cluster card view if there is a new version for the cluster to upgrade.

-

How to check:

-

Choose Resource Management > Clusters and check whether there is an upgrade flag in the upper right corner of the cluster card view. If yes, the cluster can be upgraded.

-
Figure 1 Cluster with the upgrade flag
-

Cluster Upgrade

The following table describes the target version to which each cluster version can be upgraded, the supported upgrade modes, and upgrade impacts.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 1 Cluster upgrade paths and impacts

Source Version

-

Target Version

-

Upgrade Modes

-

Impacts

-

v1.21

-

v1.23

-

In-place upgrade

-

You need to identify the differences between versions.

-

v1.19

-

v1.21

-

In-place upgrade

-

You need to identify the differences between versions.

-

v1.17

-

v1.15

-

v1.19

-

In-place upgrade

-

You need to identify the differences between versions.

-

v1.13

-

v1.15

-

Rolling upgrade

-

Replace upgrade

-
  • The proxy configuration item in the coredns add-on configuration is not supported and needs to be replaced with forward.
  • The storage add-on is changed from storage-driver to everest.
-

v1.11

-

v1.9

-

v1.15

-

Replace upgrade

-
  • The cluster signature certificate mechanism is changed. As a result, the original cluster certificate becomes invalid. You need to obtain the certificate or kubeconfig file again after the cluster is upgraded.
  • RBAC is enabled for clusters of Kubernetes v1.13 by default. Applications need to adapt to RBAC.
  • After the cluster is upgraded from v1.9 to v1.15, kube-dns in the cluster will be replaced with CoreDNS. Before the upgrade, you need to back up the kube-dns configuration. After the upgrade, you need to reconfigure kube-dns in the coredns add-on.
-

v1.9

-

v1.7

-

Latest version that can be created on the console

-

Migration

-

You need to identify the differences between versions.

-
-
-
-

Upgrade Modes

CCE provides the following upgrade modes based on the cluster version and deployment site. The upgrade processes are the same for master nodes. The differences between the upgrade modes of worker nodes are described as follows:

- -
- - - - - - - - - - - - - - - - - - - - - -
Table 2 Differences between upgrade modes and their advantages and disadvantages

Upgrade Mode

-

Method

-

Advantage

-

Disadvantage

-

In-place upgrade

-

Kubernetes components, network components, and CCE management components are upgraded on the node. During the upgrade, service pods and networks are not affected. The SchedulingDisabled label will be added to all existing nodes. After the upgrade is complete, you can properly use existing nodes.

-

You do not need to migrate services, ensuring service continuity.

-

In-place upgrade does not upgrade the OS of a node. If you want to upgrade the OS, clear the corresponding node after the node upgrade is complete and reset the node to upgrade the OS to a new version.

-

Rolling upgrade

-

Only the Kubernetes components and certain network components are upgraded on the node. The SchedulingDisabled label will be added to all existing nodes to ensure that the running applications are not affected. After the upgrade is complete, you need to manually create nodes and gradually release the old nodes, thereby migrating your applications to the new nodes. In this mode, you can control the upgrade process.

-

Services are not interrupted.

-

-

-

Replace upgrade

-

The latest worker node image is used to reset the node OS.

-

This is the fastest upgrade mode and requires few manual interventions.

-

Data or configurations on the node will be lost, and services will be interrupted for a period of time.

-
-
-
-

Cluster Upgrade Between Major Versions

-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Table 3 Changelog between minor versions

Source Version

-

Target Version

-

Description

-

v1.21

-

v1.23

-
-

v1.19

-

v1.21

-
-

v1.17

-

v1.19

-
-

v1.15

-

v1.17

-
-

v1.13

-

v1.15

-
-

v1.11

-

v1.9

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0200.html b/docs/cce/umn/cce_01_0200.html deleted file mode 100644 index 8926fd6b..00000000 --- a/docs/cce/umn/cce_01_0200.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Creating a Linux LVM Disk Partition for Docker

-

Scenario

This section describes how to check whether there are available raw disks and Linux LVM disk partitions and how to create Linux LVM disk partitions.

-
-

Prerequisites

To improve system stability, attach a data disk to Docker and use the direct-lvm mode.

-
-

Procedure

  1. Check whether available raw disks exist on the current node.

    1. Log in to the target node as the root user.
    2. Check the raw disk device.

      lsblk -l | grep disk

      -

      If the following information is displayed, the raw disks named xvda and xvdb exist on the node.

      -
      xvda  202:0    0   40G  0 disk
      -xvdb  202:16   0  100G  0 disk
      -
    3. Check whether the raw disk is in use.

      lsblk /dev/<devicename>

      -

      devicename indicates the raw disk name, for example, xvda and xvdb in the previous step.

      -

      Run the lsblk /dev/xvda and lsblk /dev/xvdb commands. If the following information is displayed, xvda has been partitioned and used while xvdb is available. If no raw disk is available, bind an EVS disk to the node. It is advised that the disk space be no less than 80 GB.

      -
      NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -xvda    202:0    0   40G  0 disk
      -├─xvda1 202:1    0  100M  0 part /boot
      -└─xvda2 202:2    0 39.9G  0 part /
      -
      NAME MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -xvdb 202:16   0  100G  0 disk
      -
    -

  2. Check whether there are partitions available. Currently, only Linux LVM partitions are supported.

    1. Log in to the target node as the root user.
    2. Check the partition whose system type is Linux LVM.

      sfdisk -l 2>>/dev/null| grep "Linux LVM"

      -

      If the following information is displayed, two Linux LVM partitions, /dev/nvme0n1p1 and /dev/nvme0n1p2, exist in the system.

      -
      /dev/nvme0n1p1          1  204800  204800  209715200   8e  Linux LVM
      -/dev/nvme0n1p2     204801  409600  204800  209715200   8e  Linux LVM
      -
    3. Check whether the partition is in use.

      lsblk <partdevice>

      -

      <partdevice> is the Linux LVM partition found in the previous step.

      -

      In this example, run the lsblk /dev/nvme0n1p1 and lsblk /dev/nvme0n1p2 commands. If the following information is displayed, partition nvme0n1p1 is in use while nvme0n1p2 is available.

      -
      NAME                       MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -nvme0n1p1                   259:3    0  200G  0 part
      -└─vgpaas-thinpool_tdata   251:8    0  360G  0 lvm
      -  └─vgpaas-thinpool       251:10   0  360G  0 lvm
      -
      NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
      -nvme0n1p2 259:1    0  100G  0 part
      -

      If no Linux LVM partition is available, perform 3 to create a partition for Docker.

      -
    -

  3. Create a Linux LVM disk partition for Docker.

    1. Run the following command to create a disk partition. devicename indicates the available raw disk name, for example, xvdb in 1.

      fdisk /dev/devicename

      -
    2. Enter n to create a new partition. Enter p to select the primary partition type. Enter 4 to create the fourth primary partition.
      Figure 1 Creating a partition
      -
    3. Configure the start and last sectors, for example, as follows:
      Start sector (1048578048-4294967295, 1048578048 by default):
      -1048578048
      -Last sector, +sector or size {K, M, or G} (1048578048-4294967294, 4294967294 by default): +100G
      -

      This configuration indicates that partition 4 has been set to the Linux type and the size is 100 GiB.

      -
    4. Enter t to change the partition system type. Enter the hex code 8e when prompted to change the system type to Linux LVM.
      Command (enter m to obtain help): t
      -Partition ID (ranging from 1 to 4, 4 by default): 4
      -Hex code (enter L to list all codes): 8e
      -This configuration changes the type of the partition Linux to Linux LVM.
      -
    5. Enter w to save the modification.
      Command (enter m to obtain help): w
      -The partition table has been altered!
      -
    6. Run the partprobe command to refresh the disk partition.
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0205.html b/docs/cce/umn/cce_01_0205.html deleted file mode 100644 index 7db26d83..00000000 --- a/docs/cce/umn/cce_01_0205.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

metrics-server

-

From version 1.8 onwards, Kubernetes provides resource usage metrics, such as the container CPU and memory usage, through the Metrics API. These metrics can be directly accessed by users (for example, by using the kubectl top command) or used by controllers (for example, Horizontal Pod Autoscaler) in a cluster for decision-making. The component that serves these metrics is metrics-server, which replaces heapster and provides similar functions. heapster has been gradually deprecated since v1.11.

-

metrics-server is an aggregator for monitoring data of core cluster resources. You can quickly install this add-on on the CCE console.

-

After metrics-server is installed, you can create an HPA policy on the Workload Scaling tab page of the Auto Scaling page. For details, see Creating an HPA Policy for Workload Auto Scaling.

-

The official community project and documentation are available at https://github.com/kubernetes-sigs/metrics-server.

-

Notes and Constraints

This add-on can be installed only in CCE clusters of v1.13 or later.

-
-

Installing the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Marketplace tab page, click Install Add-on under metrics-server.
  2. On the Install Add-on page, select the cluster and the add-on version, and click Next: Configuration.
  3. Select Single or HA for Add-on Specifications, and click Install.

    After the add-on is installed, click Go Back to Previous Page. On the Add-on Instance tab page, select the corresponding cluster to view the running instance. This indicates that the add-on has been installed on each node in the cluster.

    -

-
-
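
After the add-on instances are running, you can verify that the Metrics API is serving data by running kubectl top against the cluster, for example:

kubectl top nodes
kubectl top pods -n kube-system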

Upgrading the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Upgrade under metrics-server.

    • If the Upgrade button is not available, the current add-on is already up-to-date and no upgrade is required.
    • During the upgrade, the metrics-server add-on of the original version on cluster nodes will be discarded, and the add-on of the target version will be installed.
    -
    -

  2. On the Basic Information page, select the add-on version and click Next.
  3. Set the parameters by referring to the parameter description in Installing the Add-on and click Upgrade.
-
-

Uninstalling the Add-on

  1. Log in to the CCE console. In the navigation pane, choose Add-ons. On the Add-on Instance tab page, click Uninstall under metrics-server.
  2. In the dialog box displayed, click Yes to uninstall the add-on.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0207.html b/docs/cce/umn/cce_01_0207.html deleted file mode 100644 index a66fda91..00000000 --- a/docs/cce/umn/cce_01_0207.html +++ /dev/null @@ -1,15 +0,0 @@ - - -

Auto Scaling

-
- - diff --git a/docs/cce/umn/cce_01_0208.html b/docs/cce/umn/cce_01_0208.html deleted file mode 100644 index fc05f43b..00000000 --- a/docs/cce/umn/cce_01_0208.html +++ /dev/null @@ -1,78 +0,0 @@ - - -

Creating an HPA Policy for Workload Auto Scaling

-

Horizontal Pod Autoscaling (HPA) in Kubernetes implements horizontal scaling of pods. In a CCE HPA policy, you can configure different cooldown time windows and scaling thresholds for different applications based on the Kubernetes HPA.
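
For reference, the fields configured on the console correspond to a native Kubernetes HorizontalPodAutoscaler object. The following minimal sketch uses the autoscaling/v1 API; the Deployment name nginx, the pod range of 2 to 10, and the 70% CPU target are example values rather than CCE defaults:

apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-example
  namespace: default
spec:
  scaleTargetRef:                        # Associated workload
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  minReplicas: 2                         # Lower limit of the pod range
  maxReplicas: 10                        # Upper limit of the pod range
  targetCPUUtilizationPercentage: 70     # Expected average CPU usage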

-

Prerequisites

The metrics-server add-on has been installed. This add-on collects resource metrics from kubelet in Kubernetes clusters, including the CPU usage and memory usage.

-
-

Notes and Constraints

  • HPA policies can be created only for clusters of v1.13 or later.
  • Only one policy can be created for each workload. That is, if you have created an HPA policy, you cannot create other HPA policies for the workload. You can delete the created HPA policy and create a new one.
  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

    -
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Workload Scaling tab page, click Create HPA Policy.
  2. In the Check Add-ons step:

    • If is displayed next to the add-on name, click Install, set add-on parameters as required, and click Install to install the add-on.
    • If is displayed next to the add-on name, the add-on has been installed.
    -

  3. After the required add-ons have been installed, click Next: Policy configuration.

    If the add-ons have already been installed, clicking Create HPA Policy takes you directly to the second step for configuring the policy, because the first step (checking the add-ons) completes almost instantly.

    -
    -

  4. Set policy parameters by referring to Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 HPA policy parameters

    Parameter

    -

    Description

    -

    Policy Name

    -

    Name of the policy to be created. Set this parameter as required.

    -

    Cluster Name

    -

    Cluster to which the workload belongs.

    -

    Namespace

    -

    Namespace to which the workload belongs.

    -

    Associated Workload

    -

    Workload with which the HPA policy is associated.

    -

    Pod Range

    -

    Minimum and maximum numbers of pods.

    -

    When a policy is triggered, the workload pods are scaled within this range.

    -

    Cooldown Period

    -

    Interval between a scale-in and a scale-out. The unit is minute. The interval cannot be shorter than 1 minute.

    -

    This parameter is available only for clusters of v1.15 and later. It is not supported in clusters of v1.13 or earlier.

    -

    This parameter indicates the interval between consecutive scaling operations. The cooldown period ensures that a scaling operation is initiated only when the previous one is completed and the system is running stably.

    -

    Rules

    -

    Policy rules can be based on system metrics.

    -

    System metrics

    -
    • Metric: You can select CPU usage or Memory usage.
      NOTE:

      Usage = CPUs or memory used by pods/Requested CPUs or memory.

      -
      -
    • Expected Value: Enter the expected average resource usage.

      This parameter indicates the expected value of the selected metric. The number of new pods required (rounded up) = Current metric value/Expected value x Number of current pods

      -
    • Threshold: Enter the scaling thresholds.

      If the metric value is greater than the scale-in threshold and less than the scale-out threshold, no scaling is triggered. This parameter is supported only in clusters of v1.15 or later.

      -
    -

    You can click Add Rule again to add more scaling policies.

    -
    NOTE:

    When calculating the number of pods to be added or reduced, the HPA policy uses the maximum metrics values in the last 5 minutes.

    -
    -
    -
    -

  5. After the configuration is complete, click Create. If the system displays a message indicating that the request to create workload policy *** is successfully submitted, click Back to Workload Scaling.
  6. On the Workload Scaling tab page, you can view the newly created HPA policy.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0209.html b/docs/cce/umn/cce_01_0209.html deleted file mode 100644 index d7c4ce4e..00000000 --- a/docs/cce/umn/cce_01_0209.html +++ /dev/null @@ -1,207 +0,0 @@ - - -

Creating a Node Scaling Policy

-

CCE provides auto scaling through the autoscaler add-on. Nodes with different specifications can be automatically added across AZs on demand.

-

If a node scaling policy and the configuration in the autoscaler add-on take effect at the same time, for example, there are pods that cannot be scheduled and the value of a metric reaches the threshold at the same time, scale-out is performed first for the unschedulable pods.

-
  • If the scale-out succeeds for the unschedulable pods, the system skips the metric-based rule logic and enters the next loop.
  • If the scale-out fails for the unschedulable pods, the metric-based rule is executed.
-

Prerequisites

Before using the node scaling function, you must install the autoscaler add-on of v1.13.8 or later.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Auto Scaling. On the Node Scaling tab page, click Create Node Scaling Policy.
  2. In the Check Add-ons step:

    • If is displayed next to the add-on name, click Install, set add-on parameters as required, and click Install to install the add-on.
    • If is displayed next to the add-on name, the add-on has been installed.
    -

  3. After the required add-ons have been installed, click Next: Policy configuration.

    If the add-ons have already been installed, clicking Create Node Scaling Policy takes you directly to the second step for configuring the policy, because the first step (checking the add-ons) completes almost instantly.

    -
    -

  4. On the Create Node Scaling Policy page, set the following policy parameters.

    • Policy Name: name of the policy to be created, which can be customized.
    • Associated Node Pool: Click Add Node Pool and select the node pool to be associated. You can associate multiple node pools to use the same scaling policy.

      Priority is now supported for node pools. CCE will select a node pool for auto scaling based on the following policies:

      -
      1. CCE uses algorithms to determine whether a node pool meets the conditions to allow scheduling of a pod in the pending state, including whether the node resources are greater than those requested by the pod, and whether the nodeSelector, nodeAffinity, and taints meet the conditions. In addition, the node pools that fail to be scaled (due to insufficient resources or other reasons) and are still in the 15-minute cool-down interval are filtered out.
      2. If multiple node pools meet the scaling requirements, the system checks the priority of each node pool and selects the node pool with the highest priority for scaling. The value ranges from 0 to 100 and the default priority is 0. The value 100 indicates the highest priority, and the value 0 indicates the lowest priority.
      3. If multiple node pools have the same priority or no priority is configured for them, the system selects the node pool that will consume the least resources based on the configured VM specification.
      4. If the VM specifications of multiple node pools are the same but the node pools are deployed in different AZs, the system randomly selects a node pool to trigger scaling.
      5. If the resources of the preferred node pool are insufficient, the system automatically selects the next node pool based on the priority.
      -

      For details about the node pool priority, see Autoscaler.

      -
      -
    • Execution Rules: Click Add Rule. In the dialog box displayed, set the following parameters:

      Name: Enter a rule name.

      -

      Type: You can select Metric-based or Periodic. The differences between the two types are as follows:

      -
      • Metric-based:
        • Condition: Select CPU allocation or Memory allocation and enter a value. The value must be greater than the scale-in percentage configured in the autoscaler add-on.
          • Resource allocation (%) = Resources requested by pods in the node pool/Resources allocatable to pods in the node pool
          • If multiple rules meet the conditions, the rules are executed in either of the following modes:

            If rules based on the CPU allocation rate and memory allocation rate are configured and two or more rules meet the scale-out conditions, the rule that will add the most nodes will be executed.

            -

            If a rule based on the CPU allocation rate and a periodic rule are configured and they both meet the scale-out conditions, one of them will be executed randomly. The rule executed first (rule A) changes the node pool to the scaling state. As a result, the other rule (rule B) cannot be executed. After rule A is executed and the node pool status becomes normal, rule B will not be executed.

            -
          • If rules based on the CPU allocation rate and memory allocation rate are configured, the policy detection period varies with the processing logic of each loop of the autoscaler add-on. Scale-out is triggered once the conditions are met, but it is constrained by other factors such as the cool-down interval and node pool status.
          -
          -
        • Action: Set an action to be performed when the trigger condition is met.
        -
      • Periodic:
        • Triggered At: You can select a specific time point every day, every week, every month, or every year.
        • Action: Set an action to be performed when the Triggered At value is reached.
        -
      -

      You can click Add Rule again to add more node scaling policies. You can add a maximum of one CPU usage-based rule and one memory usage-based rule. The total number of rules cannot exceed 10.

      -
    -

  5. After the configuration is complete, click Create. If the system displays a message indicating that the request to create a node scaling policy is submitted successfully, click Back to Node Scaling Policy List.
  6. On the Node Scaling tab page, you can view the created node scaling policy.
-
-

Constraints on Scale-in

CCE cannot trigger scale-in by using node scaling policies. You can set a scale-in policy when installing the autoscaler add-on.

-

Node scale-in can be triggered only by the resource allocation rate. When CPU and memory allocation rates in a cluster are lower than the specified thresholds (set when the autoscaler add-on is installed or modified), scale-in is triggered for nodes in the node pool (this function can be disabled).

-
-

Example YAML File

The following is a YAML example of a node scaling policy:

-
apiVersion: autoscaling.cce.io/v1alpha1
-kind: HorizontalNodeAutoscaler
-metadata:
-  creationTimestamp: "2020-02-13T12:47:49Z"
-  generation: 1
-  name: xxxx
-  namespace: kube-system
-  resourceVersion: "11433270"
-  selfLink: /apis/autoscaling.cce.io/v1alpha1/namespaces/kube-system/horizontalnodeautoscalers/xxxx
-  uid: c2bd1e1d-60aa-47b5-938c-6bf3fadbe91f
-spec:
-  disable: false
-  rules:
-  - action:
-      type: ScaleUp
-      unit: Node
-      value: 1
-    cronTrigger:
-      schedule: 47 20 * * *
-    disable: false
-    ruleName: cronrule
-    type: Cron
-  - action:
-      type: ScaleUp
-      unit: Node
-      value: 2
-    disable: false
-    metricTrigger:
-      metricName: Cpu
-      metricOperation: '>'
-      metricValue: "40"
-      unit: Percent
-    ruleName: metricrule
-    type: Metric
-  targetNodepoolIds:
-  - 7d48eca7-3419-11ea-bc29-0255ac1001a8
-
Table 1 Key parameters

  • spec.disable (Bool): Whether to enable the scaling policy. This parameter takes effect for all rules in the policy.
  • spec.rules (Array): All rules in a scaling policy.
  • spec.rules[x].ruleName (String): Rule name.
  • spec.rules[x].type (String): Rule type. Currently, Cron and Metric are supported.
  • spec.rules[x].disable (Bool): Rule switch. Currently, only false is supported.
  • spec.rules[x].action.type (String): Rule action type. Currently, only ScaleUp is supported.
  • spec.rules[x].action.unit (String): Rule action unit. Currently, only Node is supported.
  • spec.rules[x].action.value (Integer): Rule action value.
  • spec.rules[x].cronTrigger: Optional. This parameter is valid only in periodic rules.
  • spec.rules[x].cronTrigger.schedule (String): Cron expression of a periodic rule.
  • spec.rules[x].metricTrigger: Optional. This parameter is valid only in metric-based rules.
  • spec.rules[x].metricTrigger.metricName (String): Metric of a metric-based rule. Currently, Cpu and Memory are supported.
  • spec.rules[x].metricTrigger.metricOperation (String): Comparison operator of a metric-based rule. Currently, only > is supported.
  • spec.rules[x].metricTrigger.metricValue (String): Metric threshold of a metric-based rule. The value can be any integer from 1 to 100 and must be a character string.
  • spec.rules[x].metricTrigger.unit (String): Unit of the metric-based rule threshold. Currently, only Percent (%) is supported.
  • spec.targetNodepoolIds (Array): All node pools associated with the scaling policy.
  • spec.targetNodepoolIds[x] (String): ID of the node pool associated with the scaling policy.

-
-
-
-
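If you keep the policy as a YAML file, it can in principle be applied and inspected with kubectl, assuming your kubeconfig targets the cluster and the autoscaler add-on has registered the HorizontalNodeAutoscaler CRD. The file name below is a placeholder.

kubectl apply -f node-scaling-policy.yaml
kubectl get horizontalnodeautoscalers.autoscaling.cce.io -n kube-system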
- -
- diff --git a/docs/cce/umn/cce_01_0210.html b/docs/cce/umn/cce_01_0210.html deleted file mode 100644 index b5cfa4ec..00000000 --- a/docs/cce/umn/cce_01_0210.html +++ /dev/null @@ -1,60 +0,0 @@ - - -

Migrating Services Across Clusters of Different Versions

-

Application Scenarios

This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.

-

This operation is applicable when a cross-version cluster upgrade is required (for example, an upgrade from v1.7.* or v1.9.* to v1.17.*) and new clusters can be created for service migration.

-
-

Prerequisites

-
Table 1 Checklist before migration

  • Cluster: NodeIP-related: Check whether node IP addresses (including EIPs) of the cluster before the migration have been used in other configurations or whitelists.
  • Workloads: Record the number of workloads for post-migration checks.
  • Storage:
    1. Check whether the storage resources in use are provisioned by the cloud or by your organization.
    2. Change the automatically created storage to the existing storage in the new cluster.
  • Network:
    1. Pay special attention to the ELB and ingress.
    2. Clusters of an earlier version support only the classic load balancer. To migrate services to a new cluster, change the load balancer type to shared load balancer. The corresponding ELB service will then be re-created.
  • O&M: Private configuration: Check whether kernel parameters or system data have been configured on nodes in the cluster.

-
-
-
-

Procedure

  1. Create a CCE cluster.

    Create a cluster with the same specifications and configurations as the cluster of the earlier version. For details, see Creating a CCE Cluster.

    -

  2. Add a node.

    Add nodes with the same specifications and manual configuration items. For details, see Creating a Node.

    -

  3. Create a storage volume in the new cluster.

    Use an existing storage volume to create a PVC in the new cluster. The PVC name remains unchanged. For details, see PersistentVolumeClaims (PVCs). A minimal PVC sketch is provided after this procedure.

    -

    Storage switching supports only OBS buckets, SFS file systems, and shared EVS disks. If a non-shared EVS disk is used, you need to suspend the workloads in the old cluster to switch the storage resources. As a result, services will be interrupted.

    -
    -

  4. Create a workload in the new cluster.

    The workload name and specifications remain unchanged. For details about how to create a workload, see Creating a Deployment or Creating a StatefulSet. For details about how to mount a storage volume to the workload, see Creating a Pod Mounted with an EVS Volume.

    -

  5. Create a Service in the new cluster.

    The Service name and specifications remain unchanged. For details about how to create a Service, see Services.

    -

  6. Commission services.

    After all resources are created, commission the containerized services. If the commissioning is successful, migrate the services to the new cluster.

    -

  7. Delete the old cluster.

    When all functions of the new cluster are stable, delete the old cluster. For details about how to delete a cluster, see Deleting a Cluster.

    -

-
-
-
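Step 3 mentions reusing an existing storage volume in the new cluster. As a generic Kubernetes sketch (names and capacity are placeholders; the CCE-specific annotations for importing an existing EVS, SFS, or OBS volume are described in the PVC documentation referenced above), a PVC can be bound to a pre-created PV by name:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-existing-data            # Keep the same PVC name as in the old cluster
  namespace: default
spec:
  accessModes:
  - ReadWriteMany                    # Shared storage such as SFS or OBS; use ReadWriteOnce for EVS
  resources:
    requests:
      storage: 10Gi
  storageClassName: ""               # An empty class disables dynamic provisioning
  volumeName: pv-existing-data       # Name of the pre-created PV that wraps the existing volume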
- -
- diff --git a/docs/cce/umn/cce_01_0211.html b/docs/cce/umn/cce_01_0211.html deleted file mode 100644 index ea44b5ae..00000000 --- a/docs/cce/umn/cce_01_0211.html +++ /dev/null @@ -1,67 +0,0 @@ - - -

Snapshots and Backups

-

CCE works with EVS to support snapshots. A snapshot is a complete copy or image of EVS disk data at a certain point in time, which can be used for data disaster recovery (DR).

-

You can create snapshots to rapidly save disk data at specific points in time. In addition, you can use snapshots to create disks so that the new disks contain the snapshot data from the start.

-

Precautions

  • The snapshot function is available only for clusters of v1.15 or later and requires the CSI-based everest add-on.
  • The subtype (common I/O, high I/O, or ultra-high I/O), disk mode (SCSI or VBD), data encryption, sharing status, and capacity of an EVS disk created from a snapshot must be the same as those of the disk associated with the snapshot. These attributes cannot be modified after the disk is created.
  • Snapshots can be created only for available or in-use CSI disks. During the free trial, you can create up to 7 snapshots per disk.
  • Snapshot data of encrypted disks is stored encrypted, and that of non-encrypted disks is stored non-encrypted.
-
-

Application Scenario

The snapshot feature helps address your following needs:

-
  • Routine data backup

    You can create snapshots for EVS disks regularly and use them to restore your data in case data loss or data inconsistency occurs due to misoperations, viruses, or attacks.

    -
  • Rapid data restoration

    You can create a snapshot or multiple snapshots before an OS change, application software upgrade, or a service data migration. If an exception occurs during the upgrade or migration, service data can be rapidly restored to the time point when the snapshot was created.

    -
    For example, a fault occurred on system disk A of ECS A, and therefore ECS A cannot be started. Because system disk A is already faulty, the data on system disk A cannot be restored by rolling back snapshots. In this case, you can use an existing snapshot of system disk A to create EVS disk B and attach it to ECS B that is running properly. Then, ECS B can read data from system disk A using EVS disk B.

    The snapshot capability provided by CCE is the same as the CSI snapshot function provided by the Kubernetes community. Snapshots can only be used to create EVS disks; they cannot be rolled back to the source EVS disks.

    -
    -
    -
  • Rapid deployment of multiple services

    You can use a snapshot to create multiple EVS disks containing the same initial data, and these disks can be used as data resources for various services, for example, data mining, report query, and development and testing. This method protects the initial data and creates disks rapidly, meeting the diversified service data requirements.

    -
-
-

Creating a Snapshot

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the Snapshots and Backups tab.
  3. Click Create Snapshot in the upper right corner. In the dialog box displayed, set related parameters.

    • Snapshot Name: Enter a snapshot name.
    • Storage: Select the PVC for which you want to create a snapshot.
    -

  4. Click Create.
-

Using YAML

-
kind: VolumeSnapshot
-apiVersion: snapshot.storage.k8s.io/v1beta1
-metadata:
-  finalizers:
-    - snapshot.storage.kubernetes.io/volumesnapshot-as-source-protection
-    - snapshot.storage.kubernetes.io/volumesnapshot-bound-protection
-  name: cce-disksnap-test
-  namespace: default
-spec:
-  source:
-    persistentVolumeClaimName: pvc-evs-test     # PVC name. Only an EVS PVC can be created.
-  volumeSnapshotClassName: csi-disk-snapclass
-
-
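Assuming the manifest above is saved as snapshot.yaml (a placeholder file name), the snapshot can be created and checked with standard kubectl commands:

kubectl create -f snapshot.yaml
kubectl get volumesnapshot cce-disksnap-test -n default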

Using a Snapshot to Create a PVC

The disk type, encryption setting, and disk mode of the created EVS PVC are consistent with those of the snapshot's source EVS disk.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Mode: Select Snapshot.
    • PVC Name: name of a PVC.
    • Snapshot: Select the snapshot to be used.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the source EVS disk of the snapshot.
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: '10Gi'                     # Requested capacity, for example, 10Gi
-  storageClassName: csi-disk
-  dataSource:
-    name: cce-disksnap-test             # Snapshot name
-    kind: VolumeSnapshot
-    apiGroup: snapshot.storage.k8s.io
-
-
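Assuming the manifest above is saved as pvc-from-snapshot.yaml (a placeholder file name), you can create the PVC and confirm that it is bound before mounting it to a workload:

kubectl create -f pvc-from-snapshot.yaml
kubectl get pvc pvc-test -n default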
-
- -
- diff --git a/docs/cce/umn/cce_01_0212.html b/docs/cce/umn/cce_01_0212.html deleted file mode 100644 index 19db47f2..00000000 --- a/docs/cce/umn/cce_01_0212.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Deleting a Cluster

-

Scenario

This section describes how to delete a cluster.

-
-

Precautions

  • Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be restored. Before performing this operation, ensure that data has been backed up or migrated. Deleted data cannot be restored.
    Resources that are not created in CCE will not be deleted:
    • Accepted nodes (only the nodes created in CCE are deleted);
    • ELB load balancers associated with Services and ingresses (only the automatically created load balancers are deleted);
    • Manually created cloud storage resources associated with PVs or imported cloud storage resources (only the cloud storage resources automatically created by PVCs are deleted)
    -
    -
  • A hibernated cluster cannot be deleted. Wake up the cluster and try again.
  • If a cluster whose status is Unavailable is deleted, some storage resources of the cluster may need to be manually deleted.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Delete.
  3. Delete the cluster.

    Figure 1 Deleting a cluster
    -

  4. Click Yes to start deleting the cluster.

    The delete operation takes 1 to 3 minutes to complete.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0213.html b/docs/cce/umn/cce_01_0213.html deleted file mode 100644 index 90563979..00000000 --- a/docs/cce/umn/cce_01_0213.html +++ /dev/null @@ -1,196 +0,0 @@ - - -

Configuring Kubernetes Parameters

-

Scenario

CCE clusters allow you to manage Kubernetes parameters so that you can tune the behavior of core components to match your requirements.

-
-

Notes and Constraints

This function is supported only in clusters of v1.15 and later. It is not displayed for versions earlier than v1.15.

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Configuration.
  3. On the Configuration page on the right, change the values of the following Kubernetes parameters:

    -

    Table 1 Kubernetes parameters

    kube-apiserver
    • default-not-ready-toleration-seconds: Tolerance time, in seconds, for the notReady:NoExecute taint. A toleration with this duration is added by default to every pod that does not already have such a toleration (see the example toleration after this procedure). Default: 300
    • default-unreachable-toleration-seconds: Tolerance time, in seconds, for the unreachable:NoExecute taint. A toleration with this duration is added by default to every pod that does not already have such a toleration. Default: 300
    • max-mutating-requests-inflight: Maximum number of concurrent mutating requests. When this value is exceeded, the server rejects requests. The value 0 indicates no limitation. Manual configuration is no longer supported since cluster version 1.21; the value is automatically specified based on the cluster scale: 200 for clusters with 50 or 200 nodes, 500 for clusters with 1000 nodes, and 1000 for clusters with 2000 nodes. Default: 1000
    • max-requests-inflight: Maximum number of concurrent non-mutating requests. When this value is exceeded, the server rejects requests. The value 0 indicates no limitation. Manual configuration is no longer supported since cluster version 1.21; the value is automatically specified based on the cluster scale: 400 for clusters with 50 or 200 nodes, 1000 for clusters with 1000 nodes, and 2000 for clusters with 2000 nodes. Default: 2000
    • service-node-port-range: Range of node port numbers. Default: 30000-32767. Options: min > 20105, max < 32768

    kube-controller-manager
    • concurrent-deployment-syncs: Number of Deployments that are allowed to synchronize concurrently. Default: 5
    • concurrent-endpoint-syncs: Number of endpoints that are allowed to synchronize concurrently. Default: 5
    • concurrent-gc-syncs: Number of garbage collector workers that are allowed to synchronize concurrently. Default: 20
    • concurrent-job-syncs: Number of jobs that can be synchronized at the same time. Default: 5
    • concurrent-namespace-syncs: Number of namespaces that are allowed to synchronize concurrently. Default: 10
    • concurrent-replicaset-syncs: Number of ReplicaSets that are allowed to synchronize concurrently. Default: 5
    • concurrent-resource-quota-syncs: Number of resource quotas that are allowed to synchronize concurrently. Default: 5
    • concurrent-service-syncs: Number of Services that are allowed to synchronize concurrently. Default: 10
    • concurrent-serviceaccount-token-syncs: Number of service account tokens that are allowed to synchronize concurrently. Default: 5
    • concurrent-ttl-after-finished-syncs: Number of TTL-after-finished controller workers that are allowed to synchronize concurrently. Default: 5
    • concurrent_rc_syncs: Number of replication controllers that are allowed to synchronize concurrently. Default: 5
    • horizontal-pod-autoscaler-sync-period: How often HPA audits metrics in a cluster. Default: 15 seconds
    • kube-api-qps: Queries per second (QPS) to use while talking with kube-apiserver. Default: 100
    • kube-api-burst: Burst to use while talking with kube-apiserver. Default: 100

    kube-scheduler
    • kube-api-qps: Queries per second (QPS) to use while talking with kube-apiserver. Default: 100
    • kube-api-burst: Burst to use while talking with kube-apiserver. Default: 100

    -
    -
    -

  4. Click OK.
-
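For reference, the two toleration parameters in the table above control the default tolerations that the API server injects into every pod that does not define its own. On a pod they look roughly as follows (300 is the default value):

tolerations:
- key: node.kubernetes.io/not-ready
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300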
- -
-
- -
- diff --git a/docs/cce/umn/cce_01_0214.html b/docs/cce/umn/cce_01_0214.html deleted file mode 100644 index fa37a1e8..00000000 --- a/docs/cce/umn/cce_01_0214.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Hibernating and Waking Up a Cluster

-

Scenario

If you do not need to use a cluster temporarily, you are advised to hibernate the cluster to save cluster management costs.

-

After a cluster is hibernated, resources such as workloads cannot be created or managed in the cluster.

-

A hibernated cluster can be quickly woken up and used normally.

-
-

Hibernating a Cluster

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Hibernate for the target cluster.
  3. In the dialog box displayed, check the precautions and click Yes. Wait until the cluster is hibernated.

    • After a cluster is hibernated, resources, such as worker nodes (ECSs), bound EIPs, and bandwidth, are still billed based on their own billing modes. To shut down nodes, select Stop all nodes in the cluster in the dialog box or see Stopping a Node.
    -
    -

  4. When the cluster status changes from Hibernating to Hibernation, the cluster is hibernated.
-
-

Waking Up a Cluster

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters.
  2. Choose More > Wake.
  3. In the dialog box displayed, click Yes and wait until the cluster is woken up.
  4. When the cluster status changes from Waking to Available, the cluster is woken up.

    After the cluster is woken up, billing will be resumed for the resources on the master node.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0215.html b/docs/cce/umn/cce_01_0215.html deleted file mode 100644 index b8a84ec8..00000000 --- a/docs/cce/umn/cce_01_0215.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Upgrading a Cluster

-
- - diff --git a/docs/cce/umn/cce_01_0216.html b/docs/cce/umn/cce_01_0216.html deleted file mode 100644 index 4c551437..00000000 --- a/docs/cce/umn/cce_01_0216.html +++ /dev/null @@ -1,127 +0,0 @@ - - -

Creating a DaemonSet

-

Scenario

CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, monitoring, scaling, upgrade, uninstall, service discovery, and load balancing.

-

A DaemonSet ensures that all (or some) nodes run a copy of a pod. When a node is added to the cluster, a pod is automatically added to it. When a node is removed from the cluster, that pod is reclaimed. If a DaemonSet is deleted, all pods created by it will be deleted.

-

The typical application scenarios of a DaemonSet are as follows:

-
  • Run the cluster storage daemon, such as glusterd or Ceph, on each node.
  • Run the log collection daemon, such as Fluentd or Logstash, on each node.
  • Run the monitoring daemon, such as Prometheus Node Exporter, collectd, Datadog agent, New Relic agent, or Ganglia (gmond), on each node.
-

You can deploy one DaemonSet for each type of daemon on all nodes, or deploy multiple DaemonSets for the same type of daemon. In the latter case, the DaemonSets can use different flags and different CPU and memory requests for different hardware types.

-
-

Prerequisites

You must have one cluster available before creating a DaemonSet. For details on how to create a cluster, see Creating a CCE Cluster.

- -
-

Procedure

  1. Log in to the CCE console.
  2. In the navigation pane on the left, choose Workloads > DaemonSets. Click Create DaemonSet in the upper right corner of the page. Set basic workload parameters as described in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    Table 1 Basic workload parameters
    • * Workload Name: Name of the containerized workload to be created. The name must be unique. Enter 4 to 63 characters starting with a letter and ending with a letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
    • * Cluster Name: Cluster to which the workload belongs.
    • * Namespace: In a single cluster, data in different namespaces is isolated from each other. This enables applications to share the services of the same cluster without interfering with each other. If no namespace is set, the default namespace is used.
    • Time Zone Synchronization: If this parameter is enabled, the container and the node use the same time zone.
      NOTICE: After time zone synchronization is enabled, disks of the hostPath type will be automatically added and listed in the Data Storage > Local Volume area. Do not modify or delete the disks.
    • Description: Description of the workload.

    -
    -
    -

  3. Click Next: Add Container.

    1. Click Add Container and select the image to be deployed.
      • My Images: Create a workload using an image in the image repository you created.
      • Third-Party Images: Create a workload using an image from any third-party image repository. When you create a workload using a third-party image, ensure that the node where the workload is running can access public networks. For details on how to create a workload using a third-party image, see Using a Third-Party Image.
        • If your image repository does not require authentication, set Secret Authentication to No, enter an image pull address, and then click OK.
        • If your image repository must be authenticated (account and password), you need to create a secret and then use a third-party image. For details, see Using a Third-Party Image.
        -
      • Shared Images: Create a workload using an image shared by another tenant through the SWR service.
      -
    2. Configure basic image information.

      A workload is an abstract model of a group of pods. One pod can encapsulate one or more containers. You can click Add Container in the upper right corner to add multiple container images and set them separately.

      - -
      Table 2 Image parameters
      • Image Name: Name of the image. You can click Change Image to update it.
      • *Image Version: Select the image tag to be deployed.
      • *Container Name: Name of the container. You can modify it.
      • Privileged Container: Programs in a privileged container have certain privileges. If Privileged Container is On, the container is granted superuser permissions. For example, privileged containers can manipulate network devices on the host machine and modify kernel parameters.
      • Container Resources:
        CPU
        • Request: minimum number of CPU cores required by a container. The default value is 0.25 cores.
        • Limit: maximum number of CPU cores available for a container. Do not leave Limit unspecified. Otherwise, intensive use of container resources will occur and your workload may exhibit unexpected behavior.
        Memory
        • Request: minimum amount of memory required by a container. The default value is 512 MiB.
        • Limit: maximum amount of memory available for a container. When memory usage exceeds the specified memory limit, the container will be terminated.
        For more information about Request and Limit, see Setting Container Specifications.
        GPU: configurable only when the cluster contains GPU nodes. It indicates the percentage of GPU resources reserved for a container. Select Use and set the percentage. For example, if this parameter is set to 10%, the container is allowed to use 10% of GPU resources. If you do not select Use or set this parameter to 0, no GPU resources can be used.
        GPU/Graphics Card: The workload's pods will be scheduled to the node with the specified GPU. If Any GPU type is selected, the container uses a random GPU in the node. If you select a specific GPU, the container uses this GPU accordingly.
      -
      -
    3. Lifecycle: Commands for starting and running containers can be set. -
    4. Health Check: CCE provides two types of probes: liveness probe and readiness probe. They are used to determine whether containers and user services are running properly. For more information, see Setting Health Check for a Container.
      • Liveness Probe: used to restart the unhealthy container.
      • Readiness Probe: used to change the container to the unready state when detecting that the container is unhealthy. In this way, service traffic will not be directed to the container.
      -
    5. Environment Variables: Environment variables can be added to a container. In general, environment variables are used to set parameters.
      On the Environment Variables tab page, click Add Environment Variable. Currently, three types of environment variables are supported:
      • Added manually: Set Variable Name and Variable Value/Reference.
      • Added from Secret: Set Variable Name and select the desired secret name and data. A secret must be created in advance. For details, see Creating a Secret.
      • Added from ConfigMap: Set Variable Name and select the desired ConfigMap name and data. A ConfigMap must be created in advance. For details, see Creating a ConfigMap.

        To edit an environment variable that has been set, click Edit. To delete an environment variable that has been set, click Delete.

        -
        -
      -
      -
    6. Data Storage: Data storage can be mounted to containers for persistent storage and high disk I/O. Local volume and cloud storage are supported. For details, see Storage (CSI).

      Currently, cloud storage cannot be mounted to secure (Kata) containers in a CCE Turbo cluster.

      -
      -
    7. Security Context: Container permissions can be configured to protect CCE and other containers from being affected.

      Enter the user ID to set container permissions and prevent systems and other containers from being affected.

      -
    8. Log Policies: Log collection policies and log directory can be configured to collect container logs for unified management and analysis. For details, see Container Logs.
    -

  4. Click Next: Set Application Access. Then, click Add Service and set the workload access type.

    If your workload needs to be reachable from other workloads or from public networks, add a Service to define the workload access type.

    -

    The workload access type determines the network attributes of the workload. Workloads with different access types can provide different network capabilities. For details, see Overview.

    -

  5. Click Next: Configure Advanced Settings to configure advanced policies.

    • Upgrade Policy:
      • Upgrade Mode: Only Rolling upgrade is supported. During a rolling upgrade, old pods are gradually replaced with new ones. During the upgrade, service traffic is evenly distributed to both the old and new pods to ensure service continuity.
      • Maximum Number of Unavailable Pods: Maximum number of unavailable pods allowed in a rolling upgrade. If the number is equal to the total number of pods, services may be interrupted. Minimum number of alive pods = Total pods – Maximum number of unavailable pods
      -
    • Graceful Deletion:

      Graceful Time Window: Enter the time. The graceful scale-in policy provides a time window for workload deletion and is reserved for executing commands in the PreStop phase in the lifecycle. If workload processes are not terminated after the time window elapses, the workload will be forcibly deleted.

      -
    • Scheduling Policies: You can combine static global scheduling policies or dynamic runtime scheduling policies as required. For details, see Scheduling Policy Overview.
    • Advanced Pod Settings
      • Pod Label: The built-in app label is specified when the workload is created. It is used to set affinity and anti-affinity scheduling and cannot be modified. You can click Add Label to add labels.
      -
      Figure 1 Advanced pod settings
      -
    • Client DNS Configuration: A CCE cluster has a built-in DNS add-on (CoreDNS) to provide domain name resolution for workloads in the cluster.
      • DNS Policy
        • ClusterFirst: The default DNS configuration overrides the Nameserver and DNS Search Domain configurations of the client.
        • None: Only the Nameserver and DNS Search Domain configurations are used for domain name resolution.
        • Default: The pod inherits the DNS configuration from the node on which the pod runs.
        -
      • Nameserver: You can configure a domain name server for a user-defined domain name. The value is one or a group of DNS IP addresses, for example, 1.2.3.4.
      • DNS Search Domain: a search list for host-name lookup. When a domain name cannot be resolved, DNS queries will be attempted combining the domain name with each domain in the search list in turn until a match is found or all domains in the search list are tried.
      • Timeout (s): amount of time the resolver will wait for a response from a remote name server before retrying the query on a different name server. Set it based on the site requirements.
      • ndots: threshold for the number of dots that must appear in a domain name before an initial absolute query is made. If a domain name has at least ndots dots, the name is treated as a fully qualified domain name (FQDN) and is tried first as an absolute name. If a domain name has fewer than ndots dots, the operating system looks up the name in the list of search domain names.
      -
    -

  6. After the preceding configurations are complete, click Create. On the page displayed, click Return to Workload List to view the workload status.

    If the workload is in the Running state, it has been successfully created.

    -

    Workload status is not updated in real time. Click the refresh button in the upper right corner or press F5 to refresh the page.

    -

-
-
-
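For users who prefer kubectl, the following is a minimal DaemonSet manifest that mirrors the console procedure above. It is a sketch only: the name and image are placeholders, the resource values follow the console defaults mentioned in Table 2, and default-secret is the secret CCE typically uses for pulling images from SWR.

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-agent                    # Placeholder name
  namespace: default
spec:
  selector:
    matchLabels:
      app: log-agent
  template:
    metadata:
      labels:
        app: log-agent
    spec:
      containers:
      - name: log-agent
        image: nginx:latest          # Replace with your daemon image, for example, a log collector
        resources:
          requests:
            cpu: 250m
            memory: 512Mi
          limits:
            cpu: 500m
            memory: 1Gi
      imagePullSecrets:
      - name: default-secret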
- -
- diff --git a/docs/cce/umn/cce_01_0220.html b/docs/cce/umn/cce_01_0220.html deleted file mode 100644 index 7c37b420..00000000 --- a/docs/cce/umn/cce_01_0220.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Workload Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Inter-Pod Affinity and Anti-affinity > Affinity with Pods > Add.
  2. Select the workloads that will be co-located with the current workload on the same node, and click OK.

    The workload to be created will be deployed on the same node as the selected affinity workloads.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-workload affinity. The following is an example YAML file for workload-workload affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        podAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app          #workload's label key
-                operator: In
-                values:
-                - test            #workload's label value
-            topologyKey: kubernetes.io/hostname     #co-locate the pods on the same node
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Affinity Object.
  3. Set Object Type to Workload and select the workloads to be deployed on the same node as the created workload. The created workload and the selected workloads will be deployed on the same node.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0222.html b/docs/cce/umn/cce_01_0222.html deleted file mode 100644 index 37001a83..00000000 --- a/docs/cce/umn/cce_01_0222.html +++ /dev/null @@ -1,214 +0,0 @@ - - -

Managing a Node Pool

-

Notes and Constraints

The default node pool DefaultPool does not support the following management operations.

-
-

Configuring Kubernetes Parameters

CCE allows you to highly customize Kubernetes parameter settings on core components in a cluster. For more information, see kubelet.

-

This function is supported only in clusters of v1.15 and later. It is not displayed for clusters earlier than v1.15.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Click Configuration next to the node pool name.
  4. On the Configuration page on the right, change the values of the following Kubernetes parameters:

    -

    Table 1 Kubernetes parameters

    docker
    • native-umask (--exec-opt native.umask). Default: normal. Cannot be changed.
    • docker-base-size (--storage-opts dm.basesize). Default: 10G. Cannot be changed.
    • insecure-registry: Address of an insecure image registry. Default: false. Cannot be changed.
    • limitcore: Limit on the size of core dump files, in bytes. Default: 5368709120
    • default-ulimit-nofile: Limit on the number of handles in a container. Default: {soft}:{hard}

    kube-proxy (values can be modified during the node pool lifecycle)
    • conntrack-min (sysctl -w net.nf_conntrack_max). Default: 131072
    • conntrack-tcp-timeout-close-wait (sysctl -w net.netfilter.nf_conntrack_tcp_timeout_close_wait). Default: 1h0m0s

    kubelet (values can be modified during the node pool lifecycle)
    • cpu-manager-policy (--cpu-manager-policy). Default: none
    • kube-api-qps: Queries per second (QPS) to use while talking with kube-apiserver. Default: 100
    • kube-api-burst: Burst to use while talking with kube-apiserver. Default: 100
    • max-pods: Maximum number of pods managed by kubelet. Default: 110
    • pod-pids-limit: PID limit in Kubernetes. Default: -1
    • with-local-dns: Whether to use the local IP address as the ClusterDNS of the node. Default: false
    • allowed-unsafe-sysctls: Insecure sysctls allowed. Starting from v1.17.17, CCE enables pod security policies for kube-apiserver. You need to add the corresponding configurations to allowedUnsafeSysctls of a pod security policy to make the policy take effect. (This configuration is not required for clusters earlier than v1.17.17.) For details, see Example of Enabling Unsafe Sysctls in Pod Security Policy. Default: []

    -
    -
    -

  5. Click OK.
-
-

Editing a Node Pool

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Click Edit next to the name of the node pool you will edit. In the Edit Node Pool dialog box, edit the following parameters:

    -

    Table 2 Node pool parameters
    • Name: Name of the node pool.
    • Nodes: Modify the number of nodes based on service requirements.
    • Autoscaler: Disabled by default. After you enable autoscaler by clicking the toggle, nodes in the node pool are automatically created or deleted based on service requirements.
      • Maximum Nodes and Minimum Nodes: You can set the maximum and minimum number of nodes to ensure that the number of nodes to be scaled is within a proper range.
      • Priority: A larger value indicates a higher priority. For example, if this parameter is set to 1 and 4 for node pools A and B respectively, B has a higher priority than A, and auto scaling is first triggered for B. If the priorities of multiple node pools are set to the same value, for example, 2, the node pools are not prioritized and the system performs scaling based on the minimum resource waste principle.
      If Autoscaler is set to on, install the autoscaler add-on to use the autoscaler feature.
    • Taints: This field is left blank by default. Taints allow nodes to repel a set of pods. You can add a maximum of 10 taints for each node pool. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      NOTICE: If taints are used, you must configure tolerations in the YAML files of pods (see the example toleration after this procedure). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
    • K8S Labels: K8S labels are key/value pairs that are attached to objects, such as pods. Labels are used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system. For more information, see Labels and Selectors.
    • Resource Tags: It is recommended that you use TMS's predefined tag function to add the same tag to different cloud resources. Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and migration efficiency. Tag changes do not affect the node.

    -
    -
    -

  4. After the configuration is complete, click Save.

    In the node pool list, the node pool status becomes Scaling. After the status changes to Completed, the node pool parameters are modified successfully. The modified configuration will be synchronized to all nodes in the node pool.

    -

-
-
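As noted for the Taints parameter in Table 2, pods must tolerate a node pool's taints before they can be scheduled onto its nodes. A minimal sketch, assuming a hypothetical taint with key dedicated, value gpu, and effect NoSchedule, is shown below; add it to the pod template of the workload.

tolerations:
- key: dedicated
  operator: Equal
  value: gpu
  effect: NoSchedule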

Deleting a Node Pool

Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools. If pods in the node pool have a specific node selector and none of the other nodes in the cluster satisfies the node selector, the pods will become unschedulable.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Choose More > Delete next to a node pool name to delete the node pool.
  4. Read the precautions in the Delete Node Pool dialog box.
  5. Enter DELETE in the text box and click Yes to confirm that you want to continue the deletion.
-
-

Copying a Node Pool

You can copy the configuration of an existing node pool to create a new node pool on the CCE console.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Choose More > Copy next to a node pool name to copy the node pool.
  4. The configuration of the selected node pool is replicated to the Create Node Pool page. You can edit the configuration as required and click Next: Confirm.
  5. On the Confirm page, confirm the node pool configuration and click Create Now. Then, a new node pool is created based on the edited configuration.
-
-

Migrating a Node

Nodes in a node pool can be migrated. Currently, nodes in a node pool can be migrated only to the default node pool (defaultpool) in the same cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
  2. In the upper right corner of the displayed page, select a cluster to filter node pools by cluster.
  3. Click More > Migrate next to the name of the node pool.
  4. In the dialog box displayed, select the destination node pool and the node to be migrated.

    After node migration, original resource tags, Kubernetes labels, and taints will be retained, and new Kubernetes labels and taints from the destination node pool will be added.

    -
    -

  5. Click OK.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0225.html b/docs/cce/umn/cce_01_0225.html deleted file mode 100644 index b1ba45e5..00000000 --- a/docs/cce/umn/cce_01_0225.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Node Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Workload-Node Affinity and Anti-affinity > Affinity with Nodes > Add.
  2. Select the node on which you want to deploy the workload, and click OK.

    If you select multiple nodes, the system automatically chooses one of them during workload deployment.

    -

-
-

Using kubectl

This section uses an Nginx workload as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-node affinity. The following is an example YAML file for workload-node affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: nodeName          #node's label key
-                operator: In
-                values:
-                - test-node-1          #node's label value
-
-
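The rule above assumes that the target node carries the label nodeName=test-node-1. If the label does not exist yet, one way to add and verify it is with kubectl; the node name 192.168.0.100 is a placeholder.

kubectl label node 192.168.0.100 nodeName=test-node-1
kubectl get node -L nodeName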

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Affinity Object.
  3. Set Object Type to Node and select the node where the workload is to be deployed. The workload will be deployed on the selected node.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0226.html b/docs/cce/umn/cce_01_0226.html deleted file mode 100644 index ec597e04..00000000 --- a/docs/cce/umn/cce_01_0226.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Node Anti-Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Workload-Node Affinity and Anti-affinity > Anti-affinity with Nodes > Add.
  2. Select the node on which the workload is ineligible to be deployed, and click OK.

    If you select multiple nodes, the workload will not be deployed on these nodes.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-node anti-affinity. The following is an example YAML file for workload-node anti-affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: nodeName          #node's label key
-                operator: NotIn        #Indicates that the workload will not be deployed on the node.
-                values:
-                - test-node-1          #node's label value
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Anti-affinity Object.
  3. Set Object Type to Node and select the node on which the workload is ineligible to be deployed. The workload will be constrained from being deployed on the selected node.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0227.html b/docs/cce/umn/cce_01_0227.html deleted file mode 100644 index 47e3e3ef..00000000 --- a/docs/cce/umn/cce_01_0227.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-Workload Anti-Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, choose Inter-Pod Affinity and Anti-affinity > Anti-affinity with Pods > Add.
  2. Select the workloads that the target workload should not share a node with, and click OK.

    The workload to be created and the selected workloads will be deployed on different nodes.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-workload anti-affinity. The following is an example YAML file for workload-workload anti-affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app          #workload's label key
-                operator: In
-                values:
-                - test            #workload's label value
-            topologyKey: kubernetes.io/hostname     #keep the pods on different nodes
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Anti-affinity Object.
  3. Set Object Type to Workload and select the workloads to be deployed on a different node from the created workload. The created workload and the selected workloads will be deployed on different nodes.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0228.html b/docs/cce/umn/cce_01_0228.html deleted file mode 100644 index 4a855538..00000000 --- a/docs/cce/umn/cce_01_0228.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-AZ Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, click the add button next to Workload-AZ Affinity and Anti-affinity > Affinity with AZs.
  2. Select the AZ in which you want to deploy the workload.

    The created workload will be deployed in the selected AZ.

    -

-
-

Using kubectl

This section uses an Nginx workload as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-AZ affinity. The following is an example YAML file for workload-AZ affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: az-in-deployment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: az-in-deployment
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: az-in-deployment
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: failure-domain.beta.kubernetes.io/zone #node's label key
-                operator: In        
-                values:
-                - az1                              #node's label value
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Affinity Object.
  3. Set Object Type to Availability Zone, and select the AZ in which the workload is eligible to be deployed. The workload will be deployed in the selected AZ.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0229.html b/docs/cce/umn/cce_01_0229.html deleted file mode 100644 index 139c19d9..00000000 --- a/docs/cce/umn/cce_01_0229.html +++ /dev/null @@ -1,54 +0,0 @@ - - -

Workload-AZ Anti-Affinity

-

Using the CCE Console

  1. When Creating a Deployment or Creating a StatefulSet, in the Scheduling Policies area on the Configure Advanced Settings page, click the add button next to Workload-AZ Affinity and Anti-affinity > Anti-affinity with AZs.
  2. Select an AZ in which the workload is ineligible to be deployed.

    The created workload is not deployed on the selected AZ.

    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to create a workload using kubectl.

-

Prerequisites

-

The ECS where the kubectl client runs has been connected to your cluster. For details, see Connecting to a Cluster Using kubectl.

-

Procedure

-

When using kubectl to create a Deployment or using kubectl to create a StatefulSet, configure workload-AZ anti-affinity. The following is an example YAML file for workload-AZ anti-affinity.

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  strategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: failure-domain.beta.kubernetes.io/zone       #node's label key   
-                operator: NotIn        
-                values:
-                - az1                                   #node's label value
-
-

Setting the Object Type After Creating a Workload

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click the name of the workload for which you will add a scheduling policy. On the workload details page, choose Scheduling Policies > Add Simple Scheduling Policy > Add Anti-affinity Object.
  3. Set Object Type to Availability Zone and select the AZ in which the workload is ineligible to be deployed. The workload will be constrained from being deployed in the selected AZ.

    This method can be used to add, edit, or delete scheduling policies.

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0230.html b/docs/cce/umn/cce_01_0230.html deleted file mode 100644 index 859c94f5..00000000 --- a/docs/cce/umn/cce_01_0230.html +++ /dev/null @@ -1,25 +0,0 @@ - - -

Simple Scheduling Policies

-
- - diff --git a/docs/cce/umn/cce_01_0231.html b/docs/cce/umn/cce_01_0231.html deleted file mode 100644 index 81fe1dbf..00000000 --- a/docs/cce/umn/cce_01_0231.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Custom Scheduling Policies

-
- - diff --git a/docs/cce/umn/cce_01_0232.html b/docs/cce/umn/cce_01_0232.html deleted file mode 100644 index d6ce9c4e..00000000 --- a/docs/cce/umn/cce_01_0232.html +++ /dev/null @@ -1,111 +0,0 @@ - - -

Node Affinity

-

Using the CCE Console

  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click a workload name in the Deployment or StatefulSet list. On the displayed workload details page, click the Scheduling Policies tab and then click Add Custom Scheduling Policy.
  3. In the Node Affinity area, you can specify node labels to meet required or preferred rules in scheduling.

    -

    - - - - - - - - - - -
    Table 1 Node affinity settings

    Parameter

    -

    Description

    -

    Required

    -

    It specifies a rule that must be met in scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple required rules. A pod will be scheduled on a node that meets any of the rules configured.

    -

    Preferred

    -

    It specifies a preference in scheduling. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple preferred rules. The scheduler tries to enforce the rules but does not guarantee that they are met; even if none of the rules can be satisfied, the pod is still scheduled. A minimal preferred-rule sketch is provided after the kubectl example below.

    -
    -
    -

  4. Set a rule according to the following table. You can click Add Selector to configure multiple selectors for a rule.

    -

    Table 2 Selector settings

    Parameter

    -

    Description

    -

    Weight

    -
    • This parameter is unavailable for a required rule.
    • Set the weight of a preferred rule. A higher weight indicates a higher priority.
    -

    Label

    -

    Node label. You can use the default label or customize a label.

    -

    Operator

    -

    The following relations are supported: In, NotIn, Exists, DoesNotExist, Gt, and Lt

    -

    Value

    -

    Label value.

    -

    Operators In and NotIn allow one or more label values. Values are separated with semicolons (;). Operators Exists and DoesNotExist only check whether a label exists and do not require a label value. If the operator is set to Gt or Lt, the label value must be a single integer, and a node matches when its label value is greater than or less than that integer.

    -

    Operation

    -

    You can click Delete to delete a selector.

    -

    Add Selector

    -

    A selector corresponds to matchExpressions in Kubernetes. You can click Add Selector to add multiple selectors for a scheduling rule. The rule is applied in scheduling only when all its selectors are satisfied.

    -
    -
    -
    Figure 1 Node affinity scheduling policy
    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to configure node affinity.

-

Prerequisites

-

A workload that uses the nginx container image has been deployed on a node.

-

Procedure

-

Set Label to kubernetes.io/hostname, add affinity nodes, and set the operator to In. Then, click OK.

-

YAML file of the workload with node affinity:

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-              - matchExpressions:
-                  - key: kubernetes.io/hostname
-                    operator: In
-                    values:
-                     - 192.168.6.174
-
-
-
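The example above uses only a required rule. A preferred (soft) rule, which corresponds to the Preferred setting in Table 1, adds a weight and is placed under preferredDuringSchedulingIgnoredDuringExecution. The following is a minimal sketch that can replace the affinity section of the preceding Deployment; the label key node-flavor and its value are only illustrative and not part of the original example.

affinity:
  nodeAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 80                     # 1-100; a higher weight indicates a higher priority
      preference:
        matchExpressions:
        - key: node-flavor           # Illustrative custom node label
          operator: In
          values:
          - high-performance

Nodes that carry this label are preferred, but if none is available, the pods are still scheduled onto other nodes.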
- -
- diff --git a/docs/cce/umn/cce_01_0233.html b/docs/cce/umn/cce_01_0233.html deleted file mode 100644 index 866f8ba1..00000000 --- a/docs/cce/umn/cce_01_0233.html +++ /dev/null @@ -1,133 +0,0 @@ - - -

Workload Affinity

-

Using the CCE Console

Workload affinity determines the pods with which the target workload is deployed in the same topology domain.

-
  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click a workload name in the Deployment or StatefulSet list. On the displayed workload details page, click the Scheduling Policies tab and then click Add Custom Scheduling Policy.
  3. In the Pod Affinity area, set the namespace, topology key, and the label requirements to be met.

    There are two types of pod affinity rules: Required (hard rule) and Preferred (soft rule). The label operators include In, NotIn, Exists, and DoesNotExist.

    - -
    Table 1 Pod affinity settings

    Parameter

    -

    Description

    -

    Required

    -

    It specifies a rule that must be met in scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple required rules. Ensure that all labels specified in a rule belong to the same workload. Each rule requires a namespace and a topology key.

    -

    Preferred

    -

    It specifies a preference in scheduling. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can click Add Rule to add multiple preferred rules. The scheduler tries to enforce the rules but does not guarantee that they are met; even if none of the rules can be satisfied, the pod is still scheduled.

    -
    -
    -

  4. Set a rule according to the following table. You can click Add Selector to configure multiple selectors for a rule.

    -

    Table 2 Selector settings

    Parameter

    -

    Description

    -

    Weight

    -
    • This parameter is unavailable for a required rule.
    • Set the weight of a preferred rule. A higher weight indicates a higher priority.
    -

    Namespace

    -

    By default, the namespace of the current pod is used. You can also use another namespace.

    -

    Topology Key

    -

    Key of the worker node label that the system uses to denote a topology domain in which scheduling can be performed. Default and custom node labels can be used.

    -

    Label

    -

    Label of the workload. You can customize the label name.

    -

    Operator

    -

    The following relations are supported: In, NotIn, Exists, and DoesNotExist

    -

    Value

    -

    Label value.

    -

    Operators In and NotIn allow one or more label values. Values are separated with semicolons (;). Operators Exists and DoesNotExist only check whether a label exists and do not require a label value.

    -

    Operation

    -

    You can click Delete to delete a selector.

    -

    Add Selector

    -

    A selector corresponds to matchExpressions in Kubernetes. You can click Add Selector to add multiple selectors for a scheduling rule. The rule is applied in scheduling only when all its selectors are satisfied.

    -
    -
    -
    Figure 1 Pod affinity scheduling policy
    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to configure pod affinity.

-

Prerequisites

-

A workload that uses the nginx container image has been deployed on a node.

-

Procedure

-

Set Namespace to default and Topology Key to the built-in node label kubernetes.io/hostname, which means that the scheduling scope is a node. Set labels app and type and their value to redis and database, respectively. Set Operator to In and click OK.

-

The YAML of the workload with pod affinity is as follows:

-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity: {}
-        podAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                matchExpressions:
-                  - key: app
-                    operator: In
-                    values:
-                      - redis
-                  - key: type
-                    operator: In
-                    values:
-                      - database
-              namespaces:
-                - default
-              topologyKey: kubernetes.io/hostname
-
-

In this example, the workload Nginx can be scheduled only onto a node that is already running a pod carrying both labels app=redis and type=database (for example, a pod of workload A), as illustrated by the sketch below.
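For reference, a candidate workload that carries both labels could look like the following minimal sketch. The name, image, and replica count are only illustrative; what matters is that the pod template carries the labels app=redis and type=database matched by the affinity rule above.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis           # Matched by the first matchExpressions entry above
        type: database       # Matched by the second matchExpressions entry above
    spec:
      containers:
      - name: redis
        image: redis         # Illustrative image
      imagePullSecrets:
      - name: default-secret

Once a pod of this Deployment is running, the Nginx pods can be scheduled onto the node where that pod runs.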

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0234.html b/docs/cce/umn/cce_01_0234.html deleted file mode 100644 index 88593f42..00000000 --- a/docs/cce/umn/cce_01_0234.html +++ /dev/null @@ -1,128 +0,0 @@ - - -

Workload Anti-Affinity

-

Using the CCE Console

Workload anti-affinity determines the pods from which the target workload is kept apart so that they are deployed in different topology domains.

-
  1. Log in to the CCE console and choose Workloads > Deployments or Workloads > StatefulSets in the navigation pane.
  2. Click a workload name in the Deployment or StatefulSet list. On the displayed workload details page, click the Scheduling Policies tab and then click Add Custom Scheduling Policy.
  3. In the Pod Anti-Affinity area, set the namespace, topology key, and the label requirements to be met.

    There are two types of pod anti-affinity rules: Required (hard rule) and Preferred (soft rule), and the label operators include In, NotIn, Exists, and DoesNotExist.

    - -
    Table 1 Workload anti-affinity settings

    Parameter

    -

    Description

    -

    Required

    -

    It specifies a rule that must be met in scheduling. It corresponds to requiredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can add multiple required rules. Ensure that all labels specified in a rule belong to the same workload. Each rule requires a namespace and a topology key.

    -

    Preferred

    -

    It specifies a preference in scheduling. It corresponds to preferredDuringSchedulingIgnoredDuringExecution in Kubernetes. You can add multiple preferred rules. The scheduler tries to enforce the rules but does not guarantee that they are met; even if none of the rules can be satisfied, the pod is still scheduled. A minimal preferred-rule sketch is provided after the kubectl example below.

    -
    -
    -

  4. Set a rule according to the following table. You can click Add Selector to configure multiple selectors for a rule.

    -

    Table 2 Selector settings

    Parameter

    -

    Description

    -

    Weight

    -
    • This parameter is unavailable for a required rule.
    • Set the weight of a preferred rule. A higher weight indicates a higher priority.
    -

    Namespace

    -

    By default, the namespace of the current pod is used. You can also use another namespace.

    -

    Topology Key

    -

    Key of the worker node label that the system uses to denote a topology domain in which scheduling can be performed. Default and custom node labels can be used.

    -

    Label

    -

    Label of the workload. You can customize the label name.

    -

    Operator

    -

    The following relations are supported: In, NotIn, Exists, and DoesNotExist

    -

    Value

    -

    Label value.

    -

    Operators In and NotIn allow one or more label values. Values are separated with semicolons (;). Operators Exists and DoesNotExist only check whether a label exists and do not require a label value.

    -

    Operation

    -

    You can click Delete to delete a selector.

    -

    Add Selector

    -

    A selector corresponds to matchExpressions in Kubernetes. You can click Add Selector to add multiple selectors for a scheduling rule. The rule is applied in scheduling only when all its selectors are satisfied.

    -
    -
    -
    Figure 1 Pod anti-affinity scheduling policy
    -

-
-

Using kubectl

This section uses Nginx as an example to describe how to configure pod anti-affinity.

-

Prerequisites

-

A workload that uses the nginx container image has been deployed on a node.

-

Procedure

-

Set Namespace to default and Topology Key to the built-in node label kubernetes.io/hostname, which means that the scheduling scope is a node. Set the label app and its value to redis. Set Operator to In and click OK.

-

The YAML of the workload with pod anti-affinity:

-
-
apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  namespace: default
-spec:
-  replicas: 2
-  selector:
-    matchLabels:
-      app: nginx 
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx 
-        imagePullPolicy: Always
-        name: nginx
-      imagePullSecrets:
-      - name: default-secret
-      affinity:
-        nodeAffinity: {}
-        podAffinity: {}
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                matchExpressions:
-                  - key: app
-                    operator: In
-                    values:
-                      - redis
-              namespaces:
-                - default
-              topologyKey: kubernetes.io/hostname
-
-
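A common variation is a preferred (soft) anti-affinity rule that spreads replicas of the same workload across nodes instead of strictly excluding a node, corresponding to the Preferred setting in Table 1. The following is a minimal sketch that can replace the podAntiAffinity section of the preceding Deployment; the weight value is only illustrative.

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100                          # 1-100; a higher weight indicates a higher priority
      podAffinityTerm:
        labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values:
            - nginx                        # The workload's own label, so its replicas avoid sharing a node
        namespaces:
        - default
        topologyKey: kubernetes.io/hostname

With this rule, the two nginx replicas are placed on different nodes when possible, but they can still start on a single node if no other node is schedulable.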
- -
- diff --git a/docs/cce/umn/cce_01_0247.html b/docs/cce/umn/cce_01_0247.html deleted file mode 100644 index 0a04fdcf..00000000 --- a/docs/cce/umn/cce_01_0247.html +++ /dev/null @@ -1,27 +0,0 @@ - - - -

Services

- -

-
- - - diff --git a/docs/cce/umn/cce_01_0248.html b/docs/cce/umn/cce_01_0248.html deleted file mode 100644 index 7fa196e8..00000000 --- a/docs/cce/umn/cce_01_0248.html +++ /dev/null @@ -1,19 +0,0 @@ - - -

Ingress

-
- - diff --git a/docs/cce/umn/cce_01_0251.html b/docs/cce/umn/cce_01_0251.html deleted file mode 100644 index 13aa656a..00000000 --- a/docs/cce/umn/cce_01_0251.html +++ /dev/null @@ -1,51 +0,0 @@ - - -

Using ELB Ingresses on the Console

-

Prerequisites

  • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
  • A NodePort Service has been configured for the workload. For details about how to configure the Service, see NodePort.
-
-

Precautions

  • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, when the ingress is deleted, the load balancer remains occupied by these resources and cannot be released, resulting in residual resources.
  • After an ingress is created, upgrade and maintain the configuration of the selected load balancers on the CCE console. Do not modify the configuration on the ELB console. Otherwise, the ingress service may be abnormal.
  • The URL registered in an ingress forwarding policy must be the same as the URL exposed by the backend Service. Otherwise, a 404 error will be returned.
-
-

Adding an ELB Ingress

This section uses an Nginx workload as an example to describe how to add an ELB ingress.

-
  1. Log in to the CCE console.
  2. In the navigation pane, choose Resource Management > Network. On the Ingresses tab page, select the corresponding cluster and namespace.
  3. Click Create Ingress to access the ingress configuration page.

    Set the ingress parameters as required. The key parameters are as follows:
    • Access Type: Use a load balancer to access Services. Requests can be forwarded only to NodePort Services.
    • Ingress Name: Specify a name of an ingress, for example, ingress-demo.
    • Cluster Name: Select the cluster to which the ingress is to be added.
    • Namespace: Select the namespace to which the ingress is to be added.
    • ELB Configuration: Ingress uses the load balancer of the ELB service to provide layer-7 network access. You can select an existing load balancer or have the system automatically create a new one. To manually create a load balancer, click Create Load Balancer and then click the refresh button.
      • It is recommended that other resources not use the load balancer automatically created by an ingress. Otherwise, when the ingress is deleted, the load balancer remains occupied by these resources and cannot be released, resulting in residual resources.
      • Dedicated load balancers are supported only when the cluster version is 1.17 or later.
      • To interconnect with an existing dedicated load balancer, ensure that HTTP is supported and the network type supports private networks.
      -
      -

      Elastic Load Balancer: The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).

      -

      You can create public network or private network load balancers. The default value is Public network.

      -
      • Public network: After you attach an EIP to a load balancer, the load balancer can distribute requests from the Internet to backend servers.
        • Enterprise Project: Select an enterprise project in which the load balancer is created.
        • Change Configuration: When selecting Public network > Automatically created, you can click Change Configuration to modify the name, specifications, billing mode, and bandwidth of the ELB instance to be created.
        -
      • Private network: After you attach a private IP address to a load balancer, the load balancer can distribute requests from the clients in the same VPC to backends.
        • Enterprise Project: Select an enterprise project in which the load balancer is created.
        -
      -
    • Listener Configuration: Ingress configures a listener for the load balancer, which listens to requests from the load balancer and distributes traffic. After the configuration is complete, a listener is created on the load balancer. The default listener name is k8s__<Protocol type>_<Port number>, for example, k8s_HTTP_80.
      • Front-End Protocol: HTTP and HTTPS are available.
      • External Port: Port number that is open to the ELB service address. The port number can be specified randomly.
      • Server Certificate: When an HTTPS listener is created for a load balancer, you need to bind a certificate to the load balancer to support encrypted authentication for HTTPS data transmission. For details on how to create a secret, see Creating a Secret.

        If there is already an HTTPS ingress for the chosen port on the load balancer, the certificate of the new HTTPS ingress must be the same as the certificate of the existing ingress. This means that a listener has only one certificate. If two certificates, each with a different ingress, are added to the same listener of the same load balancer, only the certificate added earliest takes effect on the load balancer.

        -
        -
      • SNI: Click to enable the Server Name Indication (SNI) function. SNI is an extended protocol of TLS. It allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port number. Different domain names can use different security certificates. After SNI is enabled, the client is allowed to submit the requested domain name when initiating a TLS handshake request. After receiving the TLS request, the load balancer searches for the certificate based on the domain name in the request. If the certificate corresponding to the domain name is found, the load balancer returns the certificate for authorization. Otherwise, the default certificate (server certificate) is returned for authorization.
        • The SNI option is available only when HTTPS is selected.
        -
        • This function is supported only for clusters of v1.15.11 and later.
        • Specify the domain name for the SNI certificate. Only one domain name can be specified for each certificate. Wildcard-domain certificates are supported.
        -
        -
      • Security Policy: combinations of different TLS versions and supported cipher suites available to HTTPS listeners.

        For details about security policies, see ELB User Guide.

        -
        • Security Policy is available only when HTTPS is selected.
        • This function is supported only for clusters of v1.17.9 and later.
        -
        -
      -
    • Forwarding Policies: When the access address of a request matches the forwarding policy (a forwarding policy consists of a domain name and URL, for example, 10.117.117.117:80/helloworld), the request is forwarded to the corresponding target Service for processing. Click Add Forwarding Policies to add multiple forwarding policies.
      • Domain Name: actual domain name. Ensure that the domain name has been registered and archived. Once a domain name rule is configured, you must use the domain name for access.
      • Rule Matching
        • Prefix match: If the URL is set to /healthz, the URL that meets the prefix can be accessed. For example, /healthz/v1 and /healthz/v2.
        • Exact match: The URL can be accessed only when it is fully matched. For example, if the URL is set to /healthz, only /healthz can be accessed.
        • Regular expression: The URL is matched based on the regular expression. For example, if the regular expression is /[A-Za-z0-9_.-]+/test, all URLs that comply with this rule can be accessed, for example, /abcA9/test and /v1-Ab/test. Two regular expression standards are supported: POSIX and Perl.
        -
      • URL: access path to be registered, for example, /healthz.
      • Target Service: Select an existing Service or create a Service. Services that do not meet search criteria are automatically filtered out.
      • Service Access Port: Select the access port of the target Service.
      • ELB Settings: If multiple routes use the same Service, they share the same load balancing configuration.
        • Algorithm Type: Three algorithms are available: weighted round robin, weighted least connections, and source IP hash. For details about the allocation policies, see LoadBalancer.
        • Sticky Session: This function is disabled by default. After this function is enabled, you need to select a sticky session type and set the sticky session duration.

          ELB cookie: The load balancer generates a cookie after receiving a request from the client. All subsequent requests with the cookie are routed to the same backend server for processing.

          -

          Application cookie: The application deployed on the backend server generates a cookie after receiving the first request from the client. All subsequent requests that contain the cookie are routed to this backend server. This sticky session type is supported by shared load balancers.

          -
        • Health Check: This function is disabled by default. To enable this function, set parameters as prompted. For details about the parameters, see Configuring a Health Check.
        -
      • Operation: Click Delete to delete the configuration.
      -
    -
    -

  4. After the configuration is complete, click Create. After the ingress is created, it is displayed in the ingress list.

    On the ELB console, you can view the ELB automatically created through CCE. The default name is cce-lb-ingress.UID. Click the ELB name to access its details page. On the Listeners tab page, view the route settings of the ingress, including the URL, listener port, and backend server group port.

    -

    After the ingress is created, upgrade and maintain the selected load balancer on the CCE console. Do not maintain the load balancer on the ELB console. Otherwise, the ingress service may be abnormal.

    -
    -

  5. Access the /healthz interface of the workload, for example, workload defaultbackend.

    1. Obtain the access address of the /healthz interface of the workload. The access address consists of the load balancer IP address, external port, and mapping URL, for example, 10.**.**.**:80/healthz.
    2. Enter the URL of the /healthz interface, for example, http://10.**.**.**:80/healthz, in the address box of the browser to access the workload, as shown in Figure 1.
      Figure 1 Accessing the /healthz interface of defaultbackend
      -
    -

-
-

Updating an Ingress

After adding an ingress, you can update its port, domain name, and route configuration. The procedure is as follows:

-

You can modify the load balancer settings, including algorithm, sticky session, and health check configurations, after you select a Service in Forwarding Policies on the CCE console. Do not modify these configurations on the ELB console.

-
-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Network. On the Ingresses tab page, filter ingresses by cluster and namespace, and click Update for the ingress to be updated.
  2. On the Update Ingress page, modify the required parameters.

    The parameters are the same as those set during creation.

    -

  3. Click Submit. The ingress will be updated for the workload.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0252.html b/docs/cce/umn/cce_01_0252.html deleted file mode 100644 index b6e957e3..00000000 --- a/docs/cce/umn/cce_01_0252.html +++ /dev/null @@ -1,595 +0,0 @@ - - -

Using kubectl to Create an ELB Ingress

-

Scenario

This section uses an Nginx workload as an example to describe how to create an ELB ingress using kubectl.

- -
-

Prerequisites

  • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a sample Nginx workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
  • A NodePort Service has been configured for the workload. For details about how to configure the Service, see NodePort.
-
-

Creating an Ingress - Automatically Creating a Load Balancer

The following describes how to run the kubectl command to automatically create a load balancer when creating an ingress.

-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create a YAML file named ingress-test.yaml. The file name can be customized.

    vi ingress-test.yaml

    -
    • For clusters of v1.15 or later, the value of apiVersion is networking.k8s.io/v1beta1.
    • For clusters of v1.13 or earlier, the value of apiVersion is extensions/v1beta1.
    -
    -

    You can create a load balancer as required. The YAML files are as follows:

    -
    Example of using a shared public network load balancer:
    apiVersion: networking.k8s.io/v1beta1
    -kind: Ingress 
    -metadata: 
    -  name: ingress-test
    -  annotations: 
    -    kubernetes.io/elb.class: union
    -    kubernetes.io/ingress.class: cce
    -    kubernetes.io/elb.port: '80'
    -    kubernetes.io/elb.autocreate: 
    -      '{
    -          "type":"public",
    -          "bandwidth_name":"cce-bandwidth-******",
    -          "bandwidth_chargemode":"traffic",
    -          "bandwidth_size":5,
    -          "bandwidth_sharetype":"PER",
    -          "eip_type":"5_bgp"
    -        }'
    -spec:
    -  rules: 
    -  - host: ''
    -    http: 
    -      paths: 
    -      - path: '/'
    -        backend: 
    -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
    -          servicePort: 80
    -        property:
    -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    -
    -

    Example of using a dedicated public network load balancer:

    -
    apiVersion: networking.k8s.io/v1beta1
    -kind: Ingress
    -metadata:
    -  name: ingress-test
    -  namespace: default
    -  annotations:
    -    kubernetes.io/elb.class: performance
    -    kubernetes.io/ingress.class: cce
    -    kubernetes.io/elb.port: '80'
    -    kubernetes.io/elb.autocreate: 
    -      '{
    -          "type": "public",
    -          "bandwidth_name": "cce-bandwidth-******",
    -          "bandwidth_chargemode": "traffic",
    -          "bandwidth_size": 5,
    -          "bandwidth_sharetype": "PER",
    -          "eip_type": "5_bgp",
    -          "available_zone": [
    -              "eu-de-01"
    -          ],
    -          "l7_flavor_name": "L7_flavor.elb.s1.small"
    -       }'
    -spec:
    -  rules:
    -  - host: ''
    -    http:
    -      paths:
    -      - path: '/'
    -        backend: 
    -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
    -          servicePort: 80
    -        property:
    -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    - -
    Table 1 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.class

    -

    No

    -

    String

    -

    Select a proper load balancer type.

    -

    The value can be:

    -
    • union: shared load balancer
    • performance: dedicated load balancer
    -

    The default value is union.

    -

    kubernetes.io/ingress.class

    -

    Yes

    -

    String

    -

    cce: The self-developed ELBIngress is used.

    -

    This parameter is mandatory when an ingress is created by calling the API.

    -

    kubernetes.io/elb.port

    -

    Yes

    -

    Integer

    -

    This parameter indicates the external port registered with the address of the LoadBalancer Service.

    -

    Supported range: 1 to 65535

    -

    kubernetes.io/elb.subnet-id

    -

    -

    -

    String

    -

    ID of the subnet where the cluster is located. The value can contain 1 to 100 characters.

    -
    • Mandatory when a load balancer is to be automatically created for a cluster of v1.11.7-r0 or earlier.
    • Optional for clusters later than v1.11.7-r0. It is left blank by default.
    -

    kubernetes.io/elb.enterpriseID

    -

    No

    -

    String

    -

    Kubernetes clusters of v1.15 and later versions support this field. In Kubernetes clusters earlier than v1.15, load balancers are created in the default project by default.

    -

    ID of the enterprise project in which the load balancer will be created.

    -

    The value contains 1 to 100 characters.

    -

    How to obtain:

    -

    Log in to the management console and choose Enterprise > Project Management on the top menu bar. In the list displayed, click the name of the target enterprise project, and copy the ID on the enterprise project details page.

    -

    kubernetes.io/elb.autocreate

    -

    Yes

    -

    elb.autocreate object

    -

    Whether to automatically create a load balancer associated with an ingress. For details about the field description, see Table 2.

    -

    Example

    -
    • If a public network load balancer will be automatically created, set this parameter to the following value:

      '{"type":"public","bandwidth_name":"cce-bandwidth-******","bandwidth_chargemode":"traffic","bandwidth_size":5,"bandwidth_sharetype":"PER","eip_type":"5_bgp","name":"james"}'

      -
    • If a private network load balancer will be automatically created, set this parameter to the following value (a full example is provided at the end of this procedure):

      {"type":"inner","name":"A-location-d-test"}

      -
    -

    host

    -

    No

    -

    String

    -

    Domain name for accessing the Service. By default, this parameter is left blank, and the domain name needs to be fully matched.

    -

    path

    -

    Yes

    -

    String

    -

    User-defined route path. All external access requests must match host and path.

    -

    serviceName

    -

    Yes

    -

    String

    -

    Name of the target Service bound to the ingress.

    -

    servicePort

    -

    Yes

    -

    Integer

    -

    Access port of the target Service.

    -

    ingress.beta.kubernetes.io/url-match-mode

    -

    No

    -

    String

    -

    Route matching policy.

    -

    Default: STARTS_WITH (prefix match)

    -

    Options:

    -
    • EQUAL_TO: exact match
    • STARTS_WITH: prefix match
    • REGEX: regular expression match
    -
    -
    - -
    Table 2 Data structure of the elb.autocreate field

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    type

    -

    No

    -

    String

    -

    Network type of the load balancer.

    -
    • public: public network load balancer
    • inner: private network load balancer
    -

    The default value is inner.

    -

    bandwidth_name

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth name. The default value is cce-bandwidth-******.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    bandwidth_chargemode

    -

    Yes

    -

    String

    -

    Bandwidth billing mode.

    -
    • traffic: billed by traffic
    -

    bandwidth_size

    -

    Yes for public network load balancers

    -

    Integer

    -

    Bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. The actual range varies depending on the configuration in each region.

    -
    • The minimum increment for bandwidth adjustment varies depending on the bandwidth range. The details are as follows:
      • The minimum increment is 1 Mbit/s if the allowed bandwidth ranges from 0 Mbit/s to 300 Mbit/s (with 300 Mbit/s included).
      • The minimum increment is 50 Mbit/s if the allowed bandwidth ranges from 300 Mbit/s to 1000 Mbit/s.
      • The minimum increment is 500 Mbit/s if the allowed bandwidth is greater than 1000 Mbit/s.
      -
    -

    bandwidth_sharetype

    -

    Yes for public network load balancers

    -

    String

    -

    Bandwidth type.

    -

    PER: dedicated bandwidth

    -

    eip_type

    -

    Yes for public network load balancers

    -

    String

    -

    EIP type, which may vary depending on sites. For details, see the type parameter specified when creating an EIP.

    -
    • 5_bgp: dynamic BGP
    • 5_gray: dedicated load balancer
    -

    name

    -

    No

    -

    String

    -

    Name of the automatically created load balancer.

    -

    Value range: a string of 1 to 64 characters, including lowercase letters, digits, and underscores (_). The value must start with a lowercase letter and end with a lowercase letter or digit.

    -

    Default value: cce-lb+ingress.UID

    -
    -
    -

  3. Create an ingress.

    kubectl create -f ingress-test.yaml

    -

    If information similar to the following is displayed, the ingress has been created.

    -
    ingress/ingress-test created
    -

    kubectl get ingress

    -

    If information similar to the following is displayed, the ingress has been created successfully and the workload is accessible.

    -
    NAME             HOSTS     ADDRESS          PORTS   AGE
    -ingress-test     *         121.**.**.**     80      10s
    -

  4. Enter http://121.**.**.**:80 in the address box of the browser to access the workload (for example, Nginx workload).

    121.**.**.** indicates the IP address of the load balancer.

    -

-
-
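For reference, the following minimal sketch combines the structure of the preceding examples with the private network autocreate value from Table 2 to create an ingress backed by an automatically created private network (inner) shared load balancer. The load balancer name is only illustrative.

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: ingress-test
  annotations:
    kubernetes.io/elb.class: union
    kubernetes.io/ingress.class: cce
    kubernetes.io/elb.port: '80'
    kubernetes.io/elb.autocreate: '{"type":"inner","name":"cce-lb-ingress-private"}'   # Illustrative name
spec:
  rules:
  - host: ''
    http:
      paths:
      - path: '/'
        backend:
          serviceName: <your_service_name>  # Replace it with the name of your target Service.
          servicePort: 80
        property:
          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH

Because the load balancer is private, the workload is reachable only from clients in the same VPC. For clusters of v1.11.7-r0 or earlier, the kubernetes.io/elb.subnet-id annotation described in Table 1 is also required.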

Creating an Ingress - Interconnecting with an Existing Load Balancer

CCE allows you to connect to an existing load balancer when creating an ingress.
  • For clusters of v1.15 or later, the value of apiVersion is networking.k8s.io/v1beta1.
  • For clusters of v1.13 or earlier, the value of apiVersion is extensions/v1beta1.
  • To interconnect with an existing dedicated load balancer, ensure that HTTP is supported and the network type supports private networks.
-
-
-

If the cluster version is 1.15 or later, the YAML file configuration is as follows:

-
apiVersion: networking.k8s.io/v1beta1
-kind: Ingress 
-metadata: 
-  name: ingress-test
-  annotations: 
-    kubernetes.io/elb.class: performance                               # Load balancer type
-    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
-    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with your existing load balancer IP.
-    kubernetes.io/elb.port: '80'
-    kubernetes.io/ingress.class: cce
-spec:
-  rules: 
-  - host: ''
-    http: 
-      paths: 
-      - path: '/'
-        backend: 
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
- -
Table 3 Key parameters

Parameter

-

Mandatory

-

Type

-

Description

-

kubernetes.io/elb.class

-

No

-

String

-

Select a proper load balancer type.

-

The value can be:

-
  • union: shared load balancer
  • performance: dedicated load balancer
-

Defaults to union.

-

kubernetes.io/elb.id

-

Yes

-

String

-

This parameter indicates the ID of a load balancer. The value can contain 1 to 100 characters.

-

How to obtain:

-

On the management console, click Service List, and choose Networking > Elastic Load Balance. Click the name of the target load balancer. On the Summary tab page, find and copy the ID.

-

kubernetes.io/elb.ip

-

Yes

-

String

-

This parameter indicates the service address of a load balancer. The value can be the public IP address of a public network load balancer or the private IP address of a private network load balancer.

-
-
-
-

Configuring HTTPS Certificates

Ingress supports TLS certificate configuration and provides security services in HTTPS mode.

-
  • If a Service needs to be exposed using HTTPS, you must configure the TLS certificate in the ingress. For details on how to create a secret, see Creating a Secret.
  • If HTTPS is used for the same port of the same load balancer of multiple ingresses, you must select the same certificate.
-
-
  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following command to create a YAML file named ingress-test-secret.yaml (the file name can be customized):

    vi ingress-test-secret.yaml

    -
    The YAML file is configured as follows:
    apiVersion: v1
    -data:
    -  tls.crt: LS0******tLS0tCg==
    -  tls.key: LS0tL******0tLS0K
    -kind: Secret
    -metadata:
    -  annotations:
    -    description: test for ingressTLS secrets
    -  name: ingress-test-secret
    -  namespace: default
    -type: IngressTLS
    -
    -

    In the preceding information, tls.crt and tls.key are only examples. Replace them with your actual files. The values of tls.crt and tls.key must be the Base64-encoded content of the certificate and private key files (an encoding example is provided after this procedure).

    -
    -

  3. Create a secret.

    kubectl create -f ingress-test-secret.yaml

    -

    If information similar to the following is displayed, the secret is being created:

    -
    secret/ingress-test-secret created
    -

    View the created secrets.

    -

    kubectl get secrets

    -

    If information similar to the following is displayed, the secret has been created successfully:

    -
    NAME                         TYPE                                  DATA      AGE
    -ingress-test-secret          IngressTLS                            2         13s
    -

  4. Create a YAML file named ingress-test.yaml. The file name can be customized.

    vi ingress-test.yaml

    -

    Security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.11 or later.

    -
    -

    Example YAML file to associate an existing load balancer:

    -
    apiVersion: networking.k8s.io/v1beta1
    -kind: Ingress 
    -metadata: 
    -  name: ingress-test
    -  annotations: 
    -    kubernetes.io/elb.class: performance                               # Load balancer type
    -    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
    -    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
    -    kubernetes.io/ingress.class: cce
    -    kubernetes.io/elb.port: '443'
    -    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
    -spec:
    -  tls: 
    -  - secretName: ingress-test-secret
    -  rules: 
    -  - host: ''
    -    http: 
    -      paths: 
    -      - path: '/'
    -        backend: 
    -          serviceName: <your_service_name>  # Replace it with the name of your target Service.
    -          servicePort: 80
    -        property:
    -          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    - -
    Table 4 Key parameters

    Parameter

    -

    Mandatory

    -

    Type

    -

    Description

    -

    kubernetes.io/elb.tls-ciphers-policy

    -

    No

    -

    String

    -

    The default value is tls-1-2, which is the security policy used by the listener and takes effect only when the HTTPS protocol is used.

    -

    Options:

    -
    • tls-1-0
    • tls-1-1
    • tls-1-2
    • tls-1-2-strict
    -

    For details of cipher suites for each security policy, see Table 5.

    -

    tls

    -

    No

    -

    Array of strings

    -

    This parameter is mandatory if HTTPS is used. Multiple independent domain names and certificates can be added to this parameter. For details, see Configuring the Server Name Indication (SNI).

    -

    secretName

    -

    No

    -

    String

    -

    This parameter is mandatory if HTTPS is used. Set this parameter to the name of the created secret.

    -
    -
    - -
    Table 5 tls_ciphers_policy parameter description

    Security Policy

    -

    TLS Version

    -

    Cipher Suite

    -

    tls-1-0

    -

    TLS 1.2

    -

    TLS 1.1

    -

    TLS 1.0

    -

    ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES128-SHA256:AES256-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-SHA:AES256-SHA

    -

    tls-1-1

    -

    TLS 1.2

    -

    TLS 1.1

    -

    tls-1-2

    -

    TLS 1.2

    -

    tls-1-2-strict

    -

    TLS 1.2

    -

    ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES128-SHA256:AES256-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384

    -
    -
    -

  5. Create an ingress.

    kubectl create -f ingress-test.yaml

    -

    If information similar to the following is displayed, the ingress has been created.

    -
    ingress/ingress-test created
    -

    View the created ingress.

    -

    kubectl get ingress

    -

    If information similar to the following is displayed, the ingress has been created successfully and the workload is accessible.

    -
    NAME             HOSTS     ADDRESS          PORTS   AGE
    -ingress-test     *         121.**.**.**     80      10s
    -

  6. Enter https://121.**.**.**:443 in the address box of the browser to access the workload (for example, Nginx workload).

    121.**.**.** indicates the IP address of the load balancer.

    -

-
-
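If you start from certificate and private key files, the Base64-encoded values referenced in step 2 can be generated as shown below. This is a minimal sketch; the file names are only illustrative, and -w 0 (disable line wrapping) is a GNU coreutils option.

base64 -w 0 ./server.crt    # Output becomes the value of tls.crt in the secret
base64 -w 0 ./server.key    # Output becomes the value of tls.key in the secret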

Configuring the Server Name Indication (SNI)

SNI allows multiple TLS-based access domain names to be provided for external systems using the same IP address and port number. Different domain names can use different security certificates.
  • Only one domain name can be specified for each SNI certificate. Wildcard-domain certificates are supported.
  • Security policy (kubernetes.io/elb.tls-ciphers-policy) is supported only in clusters of v1.17.11 or later.
-
-
-
You can enable SNI when the preceding conditions are met. The following example interconnects with an existing load balancer. In this example, sni-test-secret-1 and sni-test-secret-2 are SNI certificates, and the domain names specified in the hosts fields must be the same as those in the corresponding certificates.
apiVersion: networking.k8s.io/v1beta1
-kind: Ingress 
-metadata: 
-  name: ingress-test
-  annotations: 
-    kubernetes.io/elb.class: performance                               # Load balancer type
-    kubernetes.io/elb.id: <your_elb_id>  # Replace it with the ID of your existing load balancer.
-    kubernetes.io/elb.ip: <your_elb_ip>  # Replace it with the IP of your existing load balancer.
-    kubernetes.io/ingress.class: cce
-    kubernetes.io/elb.port: '443'
-    kubernetes.io/elb.tls-ciphers-policy: tls-1-2
-spec:
-  tls: 
-  - secretName: ingress-test-secret
-  - hosts:
-      - example.top  # Domain name for which the certificate is issued
-    secretName: sni-test-secret-1  
-  - hosts:
-      - example.com  # Domain name for which the certificate is issued
-    secretName: sni-test-secret-2
-  rules: 
-  - host: ''
-    http: 
-      paths: 
-      - path: '/'
-        backend: 
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-
-
-

Accessing Multiple Services

Ingresses can route requests to multiple backend Services based on different matching policies. The spec field in the YAML file is set as below. You can access www.example.com/foo, www.example.com/bar, and foo.example.com/ to route to three different backend Services.

-

The URL registered in an ingress forwarding policy must be the same as the URL exposed by the backend Service. Otherwise, a 404 error will be returned.

-
-
spec:
-  rules: 
-  - host: 'www.example.com'
-    http: 
-      paths: 
-      - path: '/foo'
-        backend: 
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-      - path: '/bar'
-        backend:
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-  - host: 'foo.example.com'
-    http:
-      paths:
-      - path: '/'
-        backend:
-          serviceName: <your_service_name>  # Replace it with the name of your target Service.
-          servicePort: 80
-        property:
-          ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0254.html b/docs/cce/umn/cce_01_0254.html deleted file mode 100644 index b806cd0a..00000000 --- a/docs/cce/umn/cce_01_0254.html +++ /dev/null @@ -1,151 +0,0 @@ - - -

Using EVS Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
  • Data in a shared disk cannot be shared between nodes in a CCE cluster. If the same EVS disk is attached to multiple nodes, read and write conflicts and data cache conflicts may occur. When creating a Deployment, you are advised to create only one pod if you want to use EVS disks.
  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

    -
  • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
  • EVS disks that have partitions or have non-ext4 file systems cannot be imported.
  • Container storage in CCE clusters of Kubernetes 1.13 or later version supports encryption. Currently, E2E encryption is supported only in certain regions.
  • EVS volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Creating an EVS Disk

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. Click Create EVS Disk.
  2. Configure basic disk information. Table 1 describes the parameters.

    -

    Table 1 Configuring basic disk information

    Parameter

    -

    Description

    -

    * PVC Name

    -

    New PVC Name: name of the PVC to be created. A storage volume is automatically created when a PVC is created. One PVC corresponds to one storage volume. The storage volume name is automatically generated when the PVC is created.

    -

    Cluster Name

    -

    Cluster where the EVS disk is deployed.

    -

    Namespace

    -

    Select the namespace where the EVS disk is deployed. If you do not need to select a namespace, retain the default value.

    -

    Volume Capacity (GB)

    -

    Size of the storage to be created.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node, and data reading and writing are supported based on a non-shared EVS volume. EVS volumes in RWO mode are supported since v1.13.10-r1.
    -

    Primary AZ

    -

    AZ to which the volume belongs.

    -

    Type

    -

    Type of the new EVS disk.

    -
    • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
    • High I/O: uses serial attached SCSI (SAS) drives to store data.
    • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
    -

    Storage Format

    -

    The default value is CSI and cannot be changed.

    -

    Encryption

    -

    KMS Encryption is deselected by default.

    -

    After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure cloud service for your keys, will be used for EVS disks. If no agency has been created, click Create Agency and set the following parameters:

    -
    • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS disks.
    • Key Name: After a key is created, it can be loaded and used in containerized applications.
    • Key ID: generated by default.
    -

    This function is supported only for clusters of v1.13.10 and later in certain regions.

    -
    -
    -

  3. Review your order, click Submit, and wait until the creation is successful.

    The volume is displayed in the list. When its status becomes Normal, the volume has been created successfully.

    -

  4. Click the volume name to view detailed information about the volume.
-
-

Adding an EVS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage volume type to EVS.

    -

    Table 2 Parameters required for mounting an EVS volume

    Parameter

    -

    Description

    -

    Type

    -

    EVS: You can use EVS disks the same way you use traditional hard disks on servers. EVS disks deliver higher data reliability and I/O throughput and are easy to use. They can be used for file systems, databases, or other system software and applications that require block storage resources.

    -
    CAUTION:
    • To attach an EVS disk to a workload, you must set the number of pods to 1 when creating the workload. If multiple pods are configured, you cannot attach EVS disks.
    • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
    • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
    -
    -

    Allocation Mode

    -

    Manual

    -

    Select a created disk. If no disk is available, follow the prompts to create one.

    -

    For the same cluster and namespace, you can use an existing storage volume when creating a Deployment (with Allocation Mode set to Manual).

    -

    When creating a StatefulSet, you can only use a volume automatically allocated by the system (only Automatic is available for Allocation Mode).

    -

    Automatic

    -

    If you select Automatic, you need to configure the following items:

    -
    1. Access Mode: permissions of user applications on storage resources (PVs).
      • ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node, and data reading and writing are supported based on a non-shared EVS volume. EVS volumes in RWO mode are supported since v1.13.10-r1.
      -
    2. Availability Zone: AZ where the storage volume is located. Only the AZ where the node is located can be selected.
    3. Sub-Type: Select a storage subtype.
      • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
      • High I/O: uses serial attached SCSI (SAS) drives to store data.
      • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
      -
    4. Storage Capacity: Enter the storage capacity in the unit of GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    5. Storage Format: The default value is CSI.

      The container storage interface (CSI) is used to establish a set of standard storage management interfaces between Kubernetes and external storage systems to provide storage services for containers.

      -
    6. After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for EVS disks. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS disks.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -
    1. Click Add Container Path.
    2. Container Path: Enter the container path to which the data volume is mounted.
      NOTICE:
      • Do not mount a data volume to a system directory such as / or /var/run; this action may cause a container error to occur. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      • If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause a data loss.
      -
    -
    -
    -

  3. Click OK.
-
-

Importing an EVS Disk

CCE allows you to import existing EVS disks.

-

An EVS disk can be imported into only one namespace. If an EVS disk has already been imported into a namespace, it is invisible in other namespaces and cannot be imported again. If you want to import an EVS disk that has been formatted with an ext4 file system, ensure that no partitions have been created on the disk. Otherwise, data may be lost.

-
-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the EVS tab page, click Import.
  2. Select one or more EVS disks that you want to import. Then, click OK.
-
-

Unbinding an EVS Disk

After an EVS volume is successfully created or imported, the EVS volume is automatically bound to the current cluster and cannot be used by other clusters. When the volume is unbound from the cluster, other clusters can still use the volume.

-

If the EVS volume has been mounted to a workload, it cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the EVS disk list, click Unbind next to the target EVS disk.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an EVS volume is created, you can perform operations described in Table 3.

Table 3 Other operations

  • Deleting an EVS volume:
    1. Select the EVS volume to be deleted and click Delete in the Operation column.
    2. Follow the prompts to delete the EVS volume.
- -
- diff --git a/docs/cce/umn/cce_01_0257.html b/docs/cce/umn/cce_01_0257.html deleted file mode 100644 index e0fa479a..00000000 --- a/docs/cce/umn/cce_01_0257.html +++ /dev/null @@ -1,207 +0,0 @@ - - -

Creating a Pod Mounted with an EVS Volume

-

Scenario

After an EVS volume is created or imported to CCE, you can mount it to a workload.

-

EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubectl get pvc command to query the available PVCs in the AZ where the current cluster is located.

-
-
-
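For example, assuming the workload runs in the default namespace (an assumption; adjust the namespace as needed), the following command lists the PVCs that can be mounted there:

  kubectl get pvc -n default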

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Using EVS Volumes for Deployments

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the evs-deployment-example.yaml file, which is used to create a Deployment.

    touch evs-deployment-example.yaml

    -

    vi evs-deployment-example.yaml

    -
    Example of mounting an EVS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: evs-deployment-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: evs-deployment-example
      template:
        metadata:
          labels:
            app: evs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp
              name: pvc-evs-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: pvc-evs-example
            persistentVolumeClaim:
              claimName: pvc-evs-auto-example
    Table 1 Key parameters

      • spec.template.spec.containers.volumeMounts.name: Name of the volume mounted to the container.
      • spec.template.spec.containers.volumeMounts.mountPath: Mount path of the container. In this example, the volume is mounted to the /tmp directory.
      • spec.template.spec.volumes.name: Name of the volume.
      • spec.template.spec.volumes.persistentVolumeClaim.claimName: Name of an existing PVC.

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f evs-deployment-example.yaml

    -
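    As an optional check (following the same pattern used in the verification section later in this document), confirm that the Deployment's pod is running:

    kubectl get pod | grep evs-deployment-example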

-
-

Using EVS Volumes for StatefulSets

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the evs-statefulset-example.yaml file, which is used to create a StatefulSet.

    touch evs-statefulset-example.yaml

    -

    vi evs-statefulset-example.yaml

    -

    Mounting an EVS volume to a StatefulSet (PVC template-based, non-shared volume):

    -
    Example YAML:
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: evs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: evs-statefulset-example
      template:
        metadata:
          labels:
            app: evs-statefulset-example
        spec:
          containers:
            - name: container-0
              image: 'nginx:latest'
              volumeMounts:
                - name: pvc-evs-auto-example
                  mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: pvc-evs-auto-example
            namespace: default
            labels:
              failure-domain.beta.kubernetes.io/region: eu-de
              failure-domain.beta.kubernetes.io/zone: eu-de-01
            annotations:
              everest.io/disk-volume-type: SAS
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 10Gi
            storageClassName: csi-disk
      serviceName: evs-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    Table 2 Key parameters

      • metadata.name: Name of the created workload.
      • spec.template.spec.containers.image: Image of the workload.
      • spec.template.spec.containers.volumeMounts.mountPath: Mount path of the container. In this example, the volume is mounted to the /tmp directory.
      • spec.serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -
    -

  3. Run the following command to create the workload:

    kubectl create -f evs-statefulset-example.yaml

    -

-
-

Verifying Persistent Storage of an EVS Volume

  1. Query the pod and EVS files of the deployed workload (for example, evs-statefulset-example).

    1. Run the following command to query the pod name of the workload:
      kubectl get po | grep evs-statefulset-example
      -

      Expected outputs:

      -
      evs-statefulset-example-0   1/1     Running   0          22h
      -
    2. Run the following command to check whether an EVS volume is mounted to the /tmp directory:
      kubectl exec evs-statefulset-example-0 -- df tmp
      -

      Expected outputs:

      -
      /dev/sda        10255636 36888  10202364   1% /tmp
      -
    -

  2. Run the following command to create a file named test in the /tmp directory:

    kubectl exec evs-statefulset-example-0 -- touch /tmp/test
    -

  3. Run the following command to view the file in the /tmp directory:

    kubectl exec evs-statefulset-example-0 -- ls -l /tmp
    -

    Expected outputs:

    -
    -rw-r--r-- 1 root root     0 Jun  1 02:50 test
    -

  4. Run the following command to delete the pod named evs-statefulset-example-0:

    kubectl delete po evs-statefulset-example-0
    -

  5. Check whether the file still exists after the pod is rebuilt.

    1. Run the following command to query the name of the rebuilt pod:
      kubectl get po
      -

      Expected outputs:

      -
      evs-statefulset-example-0   1/1     Running   0          2m
      -
    2. Run the following command to view the file in the /tmp directory:
      kubectl exec evs-statefulset-example-0 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root     0 Jun  1 02:50 test
      -
    3. The test file still exists after the pod is rebuilt, indicating that the data in the EVS volume can be persistently stored.
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0259.html b/docs/cce/umn/cce_01_0259.html deleted file mode 100644 index 214b986b..00000000 --- a/docs/cce/umn/cce_01_0259.html +++ /dev/null @@ -1,141 +0,0 @@ - - -

Using SFS Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • SFS volumes are available only in certain regions.
  • Container storage in CCE clusters of Kubernetes v1.13 or later supports encryption. Currently, E2E encryption is supported only in certain regions.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Creating an SFS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the SFS tab, click Create SFS File System.
  3. Configure basic information, as shown in Table 1.

    -

    Table 1 Parameters for creating an SFS volume

      • * PVC Name: Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.
      • Cluster Name: Cluster to which the file system volume belongs.
      • Namespace: Namespace in which the volume is created.
      • Total Capacity: Capacity of a single volume. Fees are charged by actual usage.
      • Access Mode: Access permissions of user applications on storage resources (PVs). ReadWriteMany (RWX): The SFS volume can be mounted as read-write by multiple nodes.
      • Storage Format: The default value is CSI and cannot be changed.
      • Encryption: KMS Encryption is deselected by default. After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure key service, will be used for SFS file systems. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
        • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized, it can obtain KMS keys to encrypt and decrypt file systems.
        • Key Name: After a key is created, it can be loaded and used in containerized applications.
        • Key ID: generated by default.

  4. Click Create.

    The volume is displayed in the list. When PVC Status becomes Bound, the volume is created successfully.

    -

  5. Click the volume name to view detailed information about the volume.
-
-
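Alternatively, an SFS volume can be requested with kubectl by creating a PVC such as the sketch below. The PVC name, namespace, and capacity are placeholders; the csi-nas storage class and ReadWriteMany access mode are the same ones used in the StatefulSet example later in this document.

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: pvc-sfs-auto-example        # Placeholder PVC name
    namespace: default
  spec:
    accessModes:
      - ReadWriteMany                 # SFS volumes are shared (RWX)
    resources:
      requests:
        storage: 10Gi
    storageClassName: csi-nas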

Adding an SFS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage class to SFS.

    -

    Table 2 Parameters for mounting an SFS volume

      • Type: File Storage (NFS). This type applies to a wide range of scenarios, including media processing, content management, big data, and application analysis.
      • Allocation Mode:
        • Manual:
          • Name: Select a created file system. You need to create a file system in advance. For details about how to create a file system, see Creating an SFS Volume.
          • Sub-Type: subtype of the created file storage.
          • Storage Capacity: This field is one of the PVC attributes. If the storage capacity has been expanded on the IaaS side, it is normal that the capacity values are inconsistent. The PVC capacity is the same as the storage entity capacity only after end-to-end container storage capacity expansion is supported for CCE clusters of v1.13.
        • Automatic: An SFS volume is created automatically. You need to enter the storage capacity.
          • Sub-Type: Select NFS.
          • Storage Capacity: Specify the total storage capacity, in GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
          • Storage Format: The default value is CSI. The container storage interface (CSI) is used to establish a set of standard storage management interfaces between Kubernetes and external storage systems to provide storage services for containers.
          • KMS Encryption: After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for file systems. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
            • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized, it can obtain KMS keys to encrypt and decrypt file systems.
            • Key Name: After a key is created, it can be loaded and used in containerized applications.
            • Key ID: generated by default.
      • Add Container Path: Configure the following parameters:
        1. subPath: Enter the subpath of the file storage, for example, /tmp. If this parameter is not specified, the root path of the data volume is used by default. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.
        2. Container Path: Enter the path of the container, for example, /tmp. The container path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
          NOTICE: If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
        3. Set permissions.
          • Read-only: You can only read the data volumes mounted to the path.
          • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause data loss.
        Click Add Container Path to add multiple settings. Then, click OK.

  3. Click OK.
-
-

Importing an SFS Volume

CCE allows you to import existing SFS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace. Then, click OK.
-
-

Unbinding an SFS Volume

When an SFS volume is successfully created or imported, the volume is automatically bound to the current cluster. Other clusters can also use the volume. When the SFS volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS volume has been attached to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS volume list, click Unbind next to the target volume.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an SFS volume is created, you can perform the operations described in Table 3.

Table 3 Other operations

  • Deleting an SFS volume:
    1. Select the SFS volume to be deleted and click Delete in the Operation column.
    2. Follow the prompts to delete the SFS volume.
  • Importing an SFS volume: CCE allows you to import existing SFS volumes.
    1. On the SFS tab page, click Import.
    2. Select one or more SFS volumes that you want to attach.
    3. Select the target cluster and namespace.
    4. Click Yes.
-
- -
- diff --git a/docs/cce/umn/cce_01_0262.html b/docs/cce/umn/cce_01_0262.html deleted file mode 100644 index 8b44b14b..00000000 --- a/docs/cce/umn/cce_01_0262.html +++ /dev/null @@ -1,149 +0,0 @@ - - -

Creating a StatefulSet Mounted with an SFS Volume

-

Scenario

CCE allows you to use an existing SFS volume to create a StatefulSet (by using a PVC).

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Create an SFS volume by referring to PersistentVolumeClaims (PVCs) and record the volume name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is sfs-statefulset-example.yaml.

    touch sfs-statefulset-example.yaml

    -

    vi sfs-statefulset-example.yaml

    -

    Configuration example:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-statefulset-example
      template:
        metadata:
          labels:
            app: sfs-statefulset-example
        spec:
          volumes:
          - name: pvc-sfs-example
            persistentVolumeClaim:
              claimName: pvc-sfs-example
          containers:
          - name: container-0
            image: 'nginx:latest'
            volumeMounts:
              - name: pvc-sfs-example
                mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
          - name: default-secret
      serviceName: sfs-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    Table 1 Key parameters

      • spec.replicas: Number of pods.
      • metadata.name: Name of the new workload.
      • spec.template.spec.containers.image: Image used by the workload.
      • spec.template.spec.containers.volumeMounts.mountPath: Mount path of a container.
      • spec.serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.
      • spec.template.spec.volumes.persistentVolumeClaim.claimName: Name of an existing PVC.

    Example of mounting an SFS volume to a StatefulSet (PVC template-based, dedicated volume):

    -
    Example YAML file:
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-statefulset-example
      template:
        metadata:
          labels:
            app: sfs-statefulset-example
        spec:
          containers:
            - name: container-0
              image: 'nginx:latest'
              volumeMounts:
                - name: pvc-sfs-auto-example
                  mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: pvc-sfs-auto-example
            namespace: default
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 10Gi
            storageClassName: csi-nas
      serviceName: sfs-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    -

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -

  4. Create a StatefulSet.

    kubectl create -f sfs-statefulset-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0263.html b/docs/cce/umn/cce_01_0263.html deleted file mode 100644 index 9899c4ac..00000000 --- a/docs/cce/umn/cce_01_0263.html +++ /dev/null @@ -1,52 +0,0 @@ - - -

Creating a Deployment Mounted with an SFS Volume

-

Scenario

After an SFS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the sfs-deployment-example.yaml file, which is used to create a pod.

    touch sfs-deployment-example.yaml

    -

    vi sfs-deployment-example.yaml

    -
    Example of mounting an SFS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: sfs-deployment-example                    # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-deployment-example
      template:
        metadata:
          labels:
            app: sfs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                         # Mount path
              name: pvc-sfs-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: pvc-sfs-example
            persistentVolumeClaim:
              claimName: pvc-sfs-auto-example         # PVC name
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f sfs-deployment-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0265.html b/docs/cce/umn/cce_01_0265.html deleted file mode 100644 index 3c53fe2b..00000000 --- a/docs/cce/umn/cce_01_0265.html +++ /dev/null @@ -1,158 +0,0 @@ - - -

Using OBS Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • CCE clusters of v1.7.3-r8 and earlier do not support OBS volumes. You need to upgrade these clusters or create clusters of a later version that supports OBS.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Preparations

To ensure that OBS buckets can be mounted as reliable and stable volumes, configure the AK/SK before creating OBS volumes.

-

The procedure for configuring the AK/SK is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the OBS tab page, click AK/SK in the notice.
    Figure 1 Configuring the AK/SK
    -
  3. Click the upload icon, select a key file, and click Upload to upload the key file.
  4. Select the corresponding workload and click Restart.
-

When creating an OBS volume, the AK/SK is required. If the key file is not uploaded, pods will fail to start or OBS data access will be abnormal because the volume cannot be mounted.

-
-
-

Creating an OBS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. Click the OBS tab and click Create OBS Bucket.
  3. Configure basic information, as shown in Table 1.

    -

    Table 1 Parameters for creating an OBS volume

      • * PVC Name: Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC. The name contains 3 to 55 characters (excluding the prefix). It must contain lowercase letters, digits, and hyphens (-), and cannot start or end with a hyphen (-).
      • Cluster Name: Cluster to which the OBS volume belongs.
      • Namespace: Namespace to which the volume belongs. The default value is default.
      • Instance Type: Type of the storage instance created on OBS.
        • Parallel file system: If the cluster version is v1.15 or later and the everest add-on version is 1.0.2 or later, parallel file systems that can be mounted by obsfs can be created.
        • Object bucket: A bucket is a container for storing objects in OBS. OBS provides flat storage in the form of buckets and objects. Unlike the conventional multi-layer directory structure of file systems, all objects in a bucket are stored at the same logical layer.
        NOTE: Parallel file systems are optimized OBS objects. You are advised to use parallel file systems instead of object buckets to mount OBS volumes to containers.
      • Storage Class: This parameter is displayed when you select Object bucket for Instance Type. It indicates the storage classes supported by OBS.
        • Standard: applicable to scenarios where a large number of hotspot files or small-sized files need to be accessed frequently (multiple times per month on average) and require fast access response.
        • Infrequent access: applicable to scenarios where data is not frequently accessed (less than 12 times per year on average) but requires fast access response.
      • Storage Policy: Private: Only the bucket owner has full control over the bucket. Unauthorized users do not have permissions to access the bucket.
      • Access Mode: Access permissions of user applications on storage resources (PVs). ReadWriteMany (RWX): The volume is mounted as read-write by multiple nodes.
      • Storage Format: The default type is CSI. The container storage interface (CSI) is used to establish a set of standard storage management interfaces between Kubernetes and external storage systems to provide storage services for containers.

  4. Click Create.

    After the OBS volume is successfully created, it is displayed in the OBS volume list. Click the PVC name to view detailed information about the OBS volume.

    -

-
-
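As a kubectl alternative to the console, an OBS volume can be requested through a PVC such as the sketch below. The PVC name, namespace, and size are placeholders; the csi-obs storage class and the everest.io/obs-volume-type annotation are the same ones used in the StatefulSet example later in this document.

  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: pvc-obs-auto-example                  # Placeholder PVC name
    namespace: default
    annotations:
      everest.io/obs-volume-type: STANDARD      # OBS bucket class (STANDARD in the later example)
  spec:
    accessModes:
      - ReadWriteMany
    resources:
      requests:
        storage: 1Gi
    storageClassName: csi-obs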

Adding an OBS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set Type to OBS.

    -

    Table 2 OBS volume parameters

      • Type: Select OBS. Standard and Infrequent Access OBS buckets are supported. OBS buckets are commonly used for big data analytics, cloud native applications, static website hosting, and backup/active archiving.
      • Allocation Mode:
        • Manual:
          • Name: Select a created OBS volume.
          • Sub-Type: class of the selected volume. The value can be Standard or Infrequent access, and you do not need to set this parameter.
        • Automatic: Select the type of the storage instance to be created on OBS.
          • Parallel file system: If the cluster version is v1.15 or later and the everest add-on version is 1.0.2 or later, parallel file systems that can be mounted by obsfs can be created. Storage Format: The default value is CSI.
          • Object bucket: A bucket is a container for storing objects in OBS. Sub-Type: Select Standard or Infrequent access. Storage Format: The default value is CSI.
          NOTE: Parallel file systems are optimized OBS objects. You are advised to use parallel file systems instead of object buckets to mount OBS volumes to containers.
      • Add Container Path: Configure the following parameters:
        1. Container Path: Enter the mount path in the container, for example, /tmp. The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
          NOTICE: If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
        2. Set permissions.
          • Read-only: You can only read the data in the mounted volumes.
          • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which may cause data loss.
        Click Add Container Path to add multiple settings. Then, click OK.

  3. Click OK.
-
-

Importing an OBS Volume

CCE allows you to import existing OBS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the OBS tab page, click Import.
  2. Select one or more OBS volumes that you want to import.

    Parallel file systems are optimized OBS objects. You are advised to use parallel file systems instead of object buckets to mount OBS volumes to containers.

    -
    -

  3. Select the target cluster and namespace.
  4. Click OK.
-
-

Unbinding an OBS Volume

When an OBS volume is successfully created, the OBS volume is automatically bound to the current cluster. Other clusters can also use the OBS volume. When the volume is unbound from the cluster, other clusters can still use the volume.

-

If the volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the OBS volume list, click Unbind next to the target OBS volume.
  2. In the dialog box displayed, click Yes.
-
-

Related Operations

After an OBS volume is created, you can perform the operation described in Table 3.

Table 3 Other operations

  • Deleting an OBS volume:
    1. Select the OBS volume to be deleted and click Delete in the Operation column.
    2. Follow the prompts to delete the volume.
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0268.html b/docs/cce/umn/cce_01_0268.html deleted file mode 100644 index 2dc8d80c..00000000 --- a/docs/cce/umn/cce_01_0268.html +++ /dev/null @@ -1,152 +0,0 @@ - - -

Creating a StatefulSet Mounted with an OBS Volume

-

Scenario

CCE allows you to use an existing OBS volume to create a StatefulSet through a PVC.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Create an OBS volume by referring to PersistentVolumeClaims (PVCs) and obtain the PVC name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is obs-statefulset-example.yaml.

    touch obs-statefulset-example.yaml

    -

    vi obs-statefulset-example.yaml

    -

    Configuration example:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: obs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-statefulset-example
      template:
        metadata:
          labels:
            app: obs-statefulset-example
        spec:
          volumes:
          - name: pvc-obs-example
            persistentVolumeClaim:
              claimName: pvc-obs-example
          containers:
          - name: container-0
            image: 'nginx:latest'
            volumeMounts:
              - name: pvc-obs-example
                mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
          - name: default-secret
      serviceName: obs-statefulset-example-headless    # Name of the headless Service
    Table 1 Key parameters

      • replicas: Number of pods.
      • name: Name of the new workload.
      • image: Image used by the workload.
      • mountPath: Mount path of a container.
      • serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.
      • claimName: Name of an existing PVC.

    Example of mounting an OBS volume to a StatefulSet (PVC template-based, dedicated volume):

    -

    Example YAML:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: obs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-statefulset-example
      template:
        metadata:
          labels:
            app: obs-statefulset-example
        spec:
          containers:
            - name: container-0
              image: 'nginx:latest'
              volumeMounts:
                - name: pvc-obs-auto-example
                  mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: pvc-obs-auto-example
            namespace: default
            annotations:
              everest.io/obs-volume-type: STANDARD
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 1Gi
            storageClassName: csi-obs
      serviceName: obs-statefulset-example-headless

  4. Create a StatefulSet.

    kubectl create -f obs-statefulset-example.yaml

    -

-
-

Verifying Persistent Storage of an OBS Volume

  1. Query the pod and OBS volume of the deployed workload (for example, obs-statefulset-example).

    1. Run the following command to query the pod name of the workload:
      kubectl get po | grep obs-statefulset-example
      -

      Expected outputs:

      -
      obs-statefulset-example-0   1/1     Running   0          2m5s
      -
    2. Run the following command to check whether an OBS volume is mounted to the /tmp directory:
      kubectl exec obs-statefulset-example-0 -- mount|grep /tmp
      -

      Expected outputs:

      -
      s3fs on /tmp type fuse.s3fs (rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other)
      -
    -

  2. Run the following command to create a file named test in the /tmp directory:

    kubectl exec obs-statefulset-example-0 -- touch /tmp/test
    -

  3. Run the following command to view the file in the /tmp directory:

    kubectl exec obs-statefulset-example-0 -- ls -l /tmp
    -

    Expected outputs:

    -
    -rw-r--r-- 1 root root     0 Jun  1 02:50 test
    -

  4. Run the following command to delete the pod named obs-statefulset-example-0:

    kubectl delete po obs-statefulset-example-0
    -

  5. Check whether the file still exists after the pod is rebuilt.

    1. Run the following command to query the name of the rebuilt pod:
      kubectl get po
      -

      Expected outputs:

      -
      obs-statefulset-example-0   1/1     Running   0          2m
      -
    2. Run the following command to view the file in the /tmp directory:
      kubectl exec obs-statefulset-example-0 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root     0 Jun  1 02:50 test
      -
    3. The test file still exists after the pod is rebuilt, indicating that the data in the OBS volume can be persistently stored.
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0269.html b/docs/cce/umn/cce_01_0269.html deleted file mode 100644 index 51b68e11..00000000 --- a/docs/cce/umn/cce_01_0269.html +++ /dev/null @@ -1,52 +0,0 @@ - - -

Creating a Deployment Mounted with an OBS Volume

-

Scenario

After an OBS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the obs-deployment-example.yaml file, which is used to create a pod.

    touch obs-deployment-example.yaml

    -

    vi obs-deployment-example.yaml

    -
    Example of mounting an OBS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: obs-deployment-example                    # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-deployment-example
      template:
        metadata:
          labels:
            app: obs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                         # Mount path
              name: pvc-obs-example
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
          volumes:
          - name: pvc-obs-example
            persistentVolumeClaim:
              claimName: pvc-obs-auto-example         # PVC name
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f obs-deployment-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0271.html b/docs/cce/umn/cce_01_0271.html deleted file mode 100644 index 8d473d86..00000000 --- a/docs/cce/umn/cce_01_0271.html +++ /dev/null @@ -1,58 +0,0 @@ - - -

Using SFS Turbo Volumes

-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

  • SFS Turbo volumes are available only in certain regions.
  • Currently, SFS Turbo file systems cannot be directly created on CCE.
  • Only an SFS Turbo file system in the same VPC as the cluster and in the same subnet as the node can be imported.
  • Inbound ports (111, 445, 2049, 2051, and 20048) must be enabled for the security group to which the SFS Turbo file system belongs.
-
-

Importing an SFS Turbo Volume

CCE allows you to import existing SFS Turbo volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS Turbo tab page, click Import.
  2. Select one or more SFS Turbo volumes that you want to import.
  3. Select the cluster and namespace to which you want to import the volumes.
  4. Click Next. The volumes are displayed in the list. When PVC Status becomes Bound, the volumes are imported successfully.
-
-

Adding an SFS Turbo Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set the storage volume type to SFS Turbo.

    -

    Table 1 Parameters for configuring an SFS Turbo volume

      • Type: SFS Turbo. Applicable to DevOps, containerized microservices, and enterprise office applications.
      • Allocation Mode: Manual. Select an existing SFS Turbo volume. You need to import SFS Turbo volumes in advance. For details, see Importing an SFS Turbo Volume.
      • Add Container Path: Configure the following parameters:
        1. subPath: Enter the subpath of the file storage, for example, /tmp. This parameter specifies a subpath inside the referenced volume instead of its root. If this parameter is not specified, the root path is used. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.
        2. Container Path: Enter the mount path in the container, for example, /tmp. The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
          NOTICE: If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
        3. Set permissions.
          • Read-only: You can only read the data in the mounted volumes.
          • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which may cause data loss.
        Click Add Container Path to add multiple settings. Then, click OK.

  3. Click OK.
-
-

Unbinding an SFS Turbo Volume

When an SFS Turbo volume is successfully imported to a cluster, the volume is bound to the cluster. The volume can also be imported to other clusters. When the volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS Turbo volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS Turbo volume list, click Unbind next to the target volume.
  2. In the dialog box displayed, click OK.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0273.html b/docs/cce/umn/cce_01_0273.html deleted file mode 100644 index 9b64e95b..00000000 --- a/docs/cce/umn/cce_01_0273.html +++ /dev/null @@ -1,117 +0,0 @@ - - -

Creating a StatefulSet Mounted with an SFS Turbo Volume

-

Scenario

CCE allows you to use an existing SFS Turbo volume to create a StatefulSet.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Create an SFS Turbo volume and record the volume name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is sfsturbo-statefulset-example.yaml.

    touch sfsturbo-statefulset-example.yaml

    -

    vi sfsturbo-statefulset-example.yaml

    -

    Configuration example:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfsturbo-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfsturbo-statefulset-example
      template:
        metadata:
          labels:
            app: sfsturbo-statefulset-example
        spec:
          volumes:
          - name: pvc-sfsturbo-example
            persistentVolumeClaim:
              claimName: pvc-sfsturbo-example
          containers:
          - name: container-0
            image: 'nginx:latest'
            volumeMounts:
              - name: pvc-sfsturbo-example
                mountPath: /tmp
          restartPolicy: Always
          imagePullSecrets:
          - name: default-secret
      serviceName: sfsturbo-statefulset-example-headless
      updateStrategy:
        type: RollingUpdate
    Table 1 Key parameters

      • replicas: Number of pods.
      • name: Name of the new workload.
      • image: Image used by the workload.
      • mountPath: Mount path of a container.
      • serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.
      • claimName: Name of an existing PVC.

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  4. Create the StatefulSet.

    kubectl create -f sfsturbo-statefulset-example.yaml

    -

-
-

Verifying Persistent Storage of an SFS Turbo Volume

  1. Query the pod and SFS Turbo volume of the deployed workload (for example, sfsturbo-statefulset-example).

    1. Run the following command to query the pod name of the workload:
      kubectl get po | grep sfsturbo-statefulset-example
      -

      Expected outputs:

      -
      sfsturbo-statefulset-example-0   1/1     Running   0          2m5s
      -
    2. Run the following command to check whether an SFS Turbo volume is mounted to the /tmp directory:
      kubectl exec sfsturbo-statefulset-example-0 -- mount|grep /tmp
      -

      Expected outputs:

      -
      192.168.0.108:/ on /tmp type nfs (rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,nolock,noresvport,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=192.168.0.108,mountvers=3,mountport=20048,mountproto=tcp,local_lock=all,addr=192.168.0.108)
      -
    -

  2. Run the following command to create a file named test in the /tmp directory:

    kubectl exec sfsturbo-statefulset-example-0 -- touch /tmp/test
    -

  3. Run the following command to view the file in the /tmp directory:

    kubectl exec sfsturbo-statefulset-example-0 -- ls -l /tmp
    -

    Expected outputs:

    -
    -rw-r--r-- 1 root root     0 Jun  1 02:50 test
    -

  4. Run the following command to delete the pod named sfsturbo-statefulset-example-0:

    kubectl delete po sfsturbo-statefulset-example-0
    -

  5. Check whether the file still exists after the pod is rebuilt.

    1. Run the following command to query the name of the rebuilt pod:
      kubectl get po
      -

      Expected outputs:

      -
      sfsturbo-statefulset-example-0   1/1     Running   0          2m
      -
    2. Run the following command to view the file in the /tmp directory:
      kubectl exec sfsturbo-statefulset-example-0 -- ls -l /tmp
      -

      Expected outputs:

      -
      -rw-r--r-- 1 root root     0 Jun  1 02:50 test
      -

      The test file still exists after the pod is rebuilt, indicating that the data in the SFS Turbo volume can be persistently stored.

      -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0274.html b/docs/cce/umn/cce_01_0274.html deleted file mode 100644 index 2cd8428e..00000000 --- a/docs/cce/umn/cce_01_0274.html +++ /dev/null @@ -1,77 +0,0 @@ - - -

Creating a Deployment Mounted with an SFS Turbo Volume

-

Scenario

After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the CSI plug-in (everest) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.15 or later.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the sfsturbo-deployment-example.yaml file, which is used to create a Deployment:

    touch sfsturbo-deployment-example.yaml

    -

    vi sfsturbo-deployment-example.yaml

    -

    Example of mounting an SFS Turbo volume to a Deployment (PVC-based, shared volume):

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: sfsturbo-deployment-example               # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfsturbo-deployment-example
      template:
        metadata:
          labels:
            app: sfsturbo-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                         # Mount path
              name: pvc-sfsturbo-example
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
          volumes:
          - name: pvc-sfsturbo-example
            persistentVolumeClaim:
              claimName: pvc-sfsturbo-example         # PVC name
    Table 1 Key parameters

      • name: Name of the created Deployment.
      • app: Name of the Deployment.
      • mountPath: Mount path of the container. In this example, the mount path is /tmp.

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the workload:

    kubectl create -f sfsturbo-deployment-example.yaml

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0276.html b/docs/cce/umn/cce_01_0276.html deleted file mode 100644 index 2712ee4a..00000000 --- a/docs/cce/umn/cce_01_0276.html +++ /dev/null @@ -1,51 +0,0 @@ - - -

Performing Rolling Upgrade for Nodes

-

Scenario

In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.

-
Figure 1 Workload migration
-
-

Notes and Constraints

  • The original node and the target node to which the workload is to be migrated must be in the same cluster.
  • The cluster must be of v1.13.10 or later.
  • The default node pool DefaultPool does not support this configuration.
-
-

Scenario 1: The Original Node Is in DefaultPool

  1. Create a node.

    1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
    2. Select the cluster to which the original node belongs.
    3. Click Create Node Pool, set the following parameters, and modify other parameters as required. For details about the parameters, see Creating a Node Pool.
      1. Name: Enter the name of the new node pool, for example, nodepool-demo.
      2. Nodes: In this example, add one node.
      3. Specifications: Select node specifications that best suit your needs.
      4. OS: Select the operating system (OS) of the nodes to be created.
      5. Login Mode:
        • If the login mode is Key pair, select a key pair for logging in to the node and select the check box to acknowledge that you have obtained the key file and that without this file you will not be able to log in to the node.

          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

          -
        -
      -
    4. Click Next: Confirm. Confirm the node pool configuration and click Submit.

      Go back to the node pool list. In the node list, you can view that the new node pool has been created and is in the Normal state.

      -
    -

  2. Click the name of the node pool. The IP address of the new node is displayed in the node list.
  3. Install and configure kubectl.

    1. In the navigation pane of the CCE console, choose Resource Management > Clusters, and click Command Line Tool > Kubectl under the cluster where the original node is located.
    2. On the Kubectl tab page of the cluster details page, connect to the cluster as prompted.
    -

  4. Migrate the workload.

    1. Add a taint to the node where the workload needs to be migrated out.

      kubectl taint node [node] key=value:[effect]

      -

      In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located. The value of [effect] can be NoSchedule, PreferNoSchedule, or NoExecute. In this example, set this parameter to NoSchedule.

      -
      • NoSchedule: Pods that do not tolerate this taint are not scheduled on the node; existing pods are not evicted from the node.
      • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node.
      • NoExecute: A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node.
      -

      To reset a taint, run the kubectl taint node [node] key:[effect]- command to remove the taint.

      -
      -
    2. Safely evict the workloads on the node (see the command sketch at the end of this scenario).

      kubectl drain [node]

      -

      In the preceding command, [node] indicates the IP address of the node where the workload to be migrated is located.

      -
    3. In the navigation pane of the CCE console, choose Workloads > Deployments. In the workload list, the status of the workload to be migrated changes from Running to Unready. If the workload status changes to Running again, the migration is successful.
    -

    During workload migration, if node affinity is configured for the workload, the workload keeps displaying a message indicating that the workload is not ready. In this case, click the workload name to go to the workload details page. On the Scheduling Policies tab page, delete the affinity configuration of the original node and click Add Simple Scheduling Policy to configure the affinity and anti-affinity policies of the new node. For details, see Simple Scheduling Policies.

    -
    -

    After the workload is successfully migrated, you can view that the workload is migrated to the node created in 1 on the Pods tab page of the workload details page.

    -

  5. Delete the original node.

    After the workload is successfully migrated and is running properly, choose Resource Management > Nodes to delete the original node.

    -

-
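As a concrete illustration of the taint and drain steps above, the commands might look like the following. The node name and the migrate=true taint are placeholders, and the --ignore-daemonsets flag is an assumption that is typically needed when DaemonSet-managed add-on pods run on the node.

  # Taint the original node so that no new pods are scheduled onto it.
  kubectl taint node 192.168.0.10 migrate=true:NoSchedule

  # Safely evict the workloads running on the node.
  kubectl drain 192.168.0.10 --ignore-daemonsets

  # Remove the taint later if the node is kept.
  kubectl taint node 192.168.0.10 migrate:NoSchedule-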
-

Scenario 2: The Original Node Is Not in DefaultPool

  1. Copy the node pool and add nodes to it.

    1. Log in to the CCE console. In the navigation pane, choose Resource Management > Node Pools.
    2. Select the cluster to which the original node belongs.

      In the node pool list, locate the node pool to which the original node belongs.

      -
    3. Click More > Copy next to the node pool name. On the Create Node Pool page, set the following parameters and modify other parameters as required. For details about the parameters, see Creating a Node Pool.
      • Name: Enter the name of the new node pool, for example, nodepool-demo.
      • Nodes: In this example, add one node.
      • Specifications: Select node specifications that best suit your needs.
      • OS: Select the operating system (OS) of the nodes to be created.
      • Login Mode:
        • If the login mode is Key pair, select a key pair for logging in to the node and select the check box to acknowledge that you have obtained the key file and that without this file you will not be able to log in to the node.

          A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

          -
        -
      -
    4. Click Next: Confirm. Confirm the node pool configuration and click Submit.

      Go back to the node pool list. In the node list, you can view that the new node pool has been created and is in the Normal state.

      -
    -

  2. Click the name of the node pool. The IP address of the new node is displayed in the node list.
  1. Migrate the workload.

    1. Click Edit on the right of nodepool-demo and set Taints.
    2. Click Add Taint, set Key and Value, and set Effect to NoExecute. The options of Effect are NoSchedule, PreferNoSchedule, and NoExecute. (A kubectl check of the taint is sketched after this procedure.)
      • NoSchedule: Pods that do not tolerate this taint are not scheduled on the node; existing pods are not evicted from the node.
      • PreferNoSchedule: Kubernetes tries to avoid scheduling pods that do not tolerate this taint onto the node.
      • NoExecute: A pod is evicted from the node if it is already running on the node, and is not scheduled onto the node if it is not yet running on the node.
      -

      If you need to reset the taint, enter the new values or click Delete.

      -
      -
    3. Click Save.
    4. In the navigation pane of the CCE console, choose Workloads > Deployments. In the workload list, the status of the workload to be migrated changes from Running to Unready. If the workload status changes to Running again, the migration is successful.
    -

    During workload migration, if node affinity is configured for the workload, the workload keeps displaying a message indicating that the workload is not ready. In this case, click the workload name to go to the workload details page. On the Scheduling Policies tab page, delete the affinity configuration of the original node and click Add Simple Scheduling Policy to configure the affinity and anti-affinity policies of the new node. For details, see Simple Scheduling Policies.

    -
    -

    After the workload is successfully migrated, open the Pods tab page of the workload details page to confirm that the workload has been migrated to the node created in 1.

    -

  1. Delete the original node.

    After the workload is successfully migrated and is running properly, choose Resource Management > Node Pools to delete the original node.

    -
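
If you want to double-check the taint and the rescheduling from the command line, a quick sketch is shown below. The node name 192.168.0.20 is an assumption for illustration; use the name shown in your node list.

# Show the taints configured on the node.
kubectl describe node 192.168.0.20 | grep -A 3 Taints

# Watch the pods of the migrated workload being rescheduled.
kubectl get pod -o wide -w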

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0277.html b/docs/cce/umn/cce_01_0277.html deleted file mode 100644 index 9ab00308..00000000 --- a/docs/cce/umn/cce_01_0277.html +++ /dev/null @@ -1,51 +0,0 @@ - - -

Overview

-

CCE provides multiple types of add-ons to extend cluster functions and meet feature requirements. You can install add-ons as required.

- -
Table 1 Add-on list

  • coredns: a DNS server that provides domain name resolution services for Kubernetes clusters. coredns chains plug-ins to provide additional features.
  • storage-driver: a FlexVolume driver used to support IaaS storage services such as EVS, SFS, and OBS.
  • everest: a cloud native container storage system. Based on CSI, clusters of Kubernetes v1.15.6 and later can connect to storage services such as EVS, OBS, SFS, and SFS Turbo.
  • autoscaler: resizes a cluster based on pod scheduling status and resource usage.
  • metrics-server: an aggregator for monitoring data of core cluster resources.
  • gpu-beta: a device management add-on that supports GPUs in containers. It supports only NVIDIA drivers.

-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0278.html b/docs/cce/umn/cce_01_0278.html deleted file mode 100644 index 0001142d..00000000 --- a/docs/cce/umn/cce_01_0278.html +++ /dev/null @@ -1,66 +0,0 @@ - - -

Creating a Namespace

-

When to Use Namespaces

A namespace is a collection of resources and objects. Multiple namespaces can be created inside a cluster and isolated from each other. This enables namespaces to share the same cluster Services without affecting each other.

-

For example, you can deploy workloads in a development environment into one namespace, and deploy workloads in a testing environment into another namespace.

-
-

Prerequisites

At least one cluster has been created. For details, see Creating a CCE Cluster.

-
-

Notes and Constraints

A maximum of 6,000 Services can be created in each namespace. The Services mentioned here indicate the Kubernetes Service resources added for workloads.

-
-

Namespace Types

Namespaces can be created in either of the following ways:

-
  • Created automatically: When a cluster is up, the default, kube-public, kube-system, and kube-node-lease namespaces are created by default.
    • default: All objects for which no namespace is specified are allocated to this namespace.
    • kube-public: Resources in this namespace can be accessed by all users (including unauthenticated users), such as public add-ons and container charts.
    • kube-system: All resources created by Kubernetes are in this namespace.
    • kube-node-lease: Each node has an associated Lease object in this namespace. The object is periodically updated by the node. Both NodeStatus and NodeLease are considered as heartbeats from a node. In versions earlier than v1.13, only NodeStatus is available. The NodeLease feature is introduced in v1.13. NodeLease is more lightweight than NodeStatus. This feature significantly improves the cluster scalability and performance.
    -
  • Created manually: You can create namespaces to serve separate purposes. For example, you can create three namespaces: one for a development environment, one for a joint debugging environment, and one for a test environment. You can also create one namespace for login services and one for game services.
-
-

Creating a Namespace

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces. Click Create Namespace.
  2. Set the parameters listed in Table 1. The parameters marked with an asterisk (*) are mandatory.

    -

    - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for creating a namespace

    Parameter

    -

    Description

    -

    * Namespace

    -

    Unique name of the created namespace.

    -

    * Cluster

    -

    Cluster to which the namespace belongs.

    -

    Node Affinity

    -

    If this parameter is set to on, workloads in this namespace will be scheduled only to nodes with specified labels. To add labels to a node, choose Resource Management > Nodes > Manage Labels.

    -

    This parameter is displayed only for clusters of v1.13.10-r0 and later.

    -

    Description

    -

    Description about the namespace.

    -

    Set Resource Quotas

    -

    Resource quotas can limit the amount of resources available in namespaces, achieving resource allocation by namespace.

    -
    NOTICE:

    You are advised to set resource quotas in the namespace as required to prevent cluster or node exceptions caused by resource overload.

    -

    For example, the default number of pods that can be created on each node in a cluster is 110. If you create a cluster with 50 nodes, you can create a maximum of 5,500 pods. Therefore, you can set a resource quota to ensure that the total number of pods in all namespaces does not exceed 5,500.

    -
    -

    Quotas can be configured for the following resources:

    -
    • CPU (cores)
    • Memory (MiB)
    • StatefulSet
    • Deployment
    • Job
    • Cron job
    • Pod
    • Service
    -

    Enter an integer. If the quota of a resource is set to 0, no limit is imposed on the resource.

    -

    If you want to limit the CPU or memory quota, you must specify the CPU or memory request value when creating a workload.

    -
    -
    -

  3. When the configuration is complete, click OK. A kubectl equivalent is sketched after this procedure.
-
-
-
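
For reference, the namespace and resource quota configured in the preceding procedure can also be created with kubectl. The following is a minimal sketch; the namespace name dev, the quota name default-quota, and all quota values are examples only. Save the content to a file and run kubectl apply -f <file> against the cluster.

apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: ResourceQuota
metadata:
  name: default-quota
  namespace: dev
spec:
  hard:
    requests.cpu: "10"        # Total CPU cores that pods in the namespace may request
    requests.memory: 20Gi     # Total memory that pods in the namespace may request
    pods: "500"               # Maximum number of pods in the namespace
    services: "100"           # Maximum number of Services in the namespace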
- -
- diff --git a/docs/cce/umn/cce_01_0281.html b/docs/cce/umn/cce_01_0281.html deleted file mode 100644 index f606b2a9..00000000 --- a/docs/cce/umn/cce_01_0281.html +++ /dev/null @@ -1,105 +0,0 @@ - - -

Overview

-

The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:

-
  • Container tunnel network
  • VPC network
  • Cloud Native Network 2.0
-

Network Model Comparison

Table 1 describes the differences of network models supported by CCE.

-

After a cluster is created, the network model cannot be changed.

-
-
- -
Table 1 Network model comparison

  • Core technology
    • Tunnel Network: OVS
    • VPC Network: IPvlan and VPC route
    • Cloud Native Network 2.0: VPC ENI/sub-ENI
  • Applicable clusters
    • Tunnel Network: CCE cluster
    • VPC Network: CCE cluster
    • Cloud Native Network 2.0: CCE Turbo cluster
  • Network isolation
    • Tunnel Network: Yes. For details, see Network Policies.
    • VPC Network: No
    • Cloud Native Network 2.0: Yes. For details, see SecurityGroups.
  • Passthrough networking
    • Tunnel Network: No
    • VPC Network: No
    • Cloud Native Network 2.0: Yes
  • IP address management
    • Tunnel Network: The container CIDR block is allocated separately. CIDR blocks are divided by node and can be dynamically allocated (CIDR blocks can be dynamically added after being allocated).
    • VPC Network: The container CIDR block is allocated separately. CIDR blocks are divided by node and statically allocated (the CIDR block cannot be changed after a node is created).
    • Cloud Native Network 2.0: The container CIDR block is divided from the VPC subnet and does not need to be allocated separately.
  • Network performance
    • Tunnel Network: Performance loss due to VXLAN encapsulation
    • VPC Network: No tunnel encapsulation. Cross-node packets are forwarded through VPC routers, delivering performance equivalent to that of the host network.
    • Cloud Native Network 2.0: The container network is integrated with the VPC network, eliminating performance loss.
  • Networking scale
    • Tunnel Network: A maximum of 2,000 nodes are supported.
    • VPC Network: By default, 200 nodes are supported. Each time a node is added to the cluster, a route is added to the VPC routing table. Therefore, the cluster scale is limited by the VPC route table.
    • Cloud Native Network 2.0: A maximum of 2,000 nodes are supported.
  • Application scenarios
    • Tunnel Network: Common container service scenarios; scenarios that do not have high requirements on network latency and bandwidth
    • VPC Network: Scenarios that have high requirements on network latency and bandwidth; containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
    • Cloud Native Network 2.0: Scenarios that have high requirements on network latency, bandwidth, and performance; containers can communicate with VMs using a microservice registration framework, such as Dubbo and CSE.
-
-
-
  1. The scale of a cluster that uses the VPC network model is limited by the custom routes of the VPC. Therefore, you need to estimate the number of required nodes before creating a cluster.
  2. The scale of a cluster that uses the Cloud Native Network 2.0 model depends on the size of the VPC subnet CIDR block selected for the network attachment definition. Before creating a cluster, evaluate the scale of your cluster.
  3. By default, VPC routing network supports direct communication between containers and hosts in the same VPC. If a peering connection policy is configured between the VPC and another VPC, the containers can directly communicate with hosts on the peer VPC. In addition, in hybrid networking scenarios such as Direct Connect and VPN, communication between containers and hosts on the peer end can also be achieved with proper planning.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0284.html b/docs/cce/umn/cce_01_0284.html deleted file mode 100644 index 30b11480..00000000 --- a/docs/cce/umn/cce_01_0284.html +++ /dev/null @@ -1,79 +0,0 @@ - - -

Cloud Native Network 2.0

-

Model Definition

Developed by CCE, Cloud Native Network 2.0 deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and elastic IPs (EIPs) are bound to deliver high performance.

-
Figure 1 Cloud Native Network 2.0
-

Pod-to-pod communication

-
  • On the same node: Packets are forwarded through the VPC ENI or sub-ENI.
  • Across nodes: Packets are forwarded through the VPC ENI or sub-ENI.
-
-

Notes and Constraints

This network model is available only to CCE Turbo clusters.

-
-

Advantages and Disadvantages

Advantages

-
  • As the container network directly uses the VPC, network problems are easy to locate and the highest performance is delivered.
  • External networks in a VPC can be directly connected to container IP addresses.
  • The load balancing, security group, and EIP capabilities provided by VPC can be used directly.
-

Disadvantages

-

The container network directly uses VPC, which occupies the VPC address space. Therefore, you must properly plan the container CIDR block before creating a cluster.

-
-

Application Scenarios

  • High performance requirements and use of other VPC network capabilities: Cloud Native Network 2.0 directly uses VPC, which delivers almost the same performance as the VPC network. Therefore, it is applicable to scenarios that have high requirements on bandwidth and latency, such as online live broadcast and e-commerce seckill.
  • Large-scale networking: Cloud Native Network 2.0 supports a maximum of 2000 ECS nodes and 100,000 containers.
-
-

Container IP Address Management

In the Cloud Native Network 2.0 model, BMS nodes use ENIs and ECS nodes use sub-ENIs. The following figure shows how IP addresses are managed on these nodes.

-
Figure 2 IP address management in Cloud Native Network 2.0
-
  • Pod IP addresses are allocated from Pod Subnet you configure from the VPC.
  • ENIs and sub-ENIs bound to an ECS node = Number of ENIs used to bear sub-ENIs + Number of sub-ENIs currently used by pods + Number of sub-ENIs to be bound
  • ENIs bound to a BMS node = Number of ENIs currently used by pods + Number of pre-bound ENIs
  • Pre-binding policy: The system periodically (every 2 minutes by default) checks the total number of ENIs bound to the node. If the number is below the low threshold, the system pre-binds ENIs. If the number exceeds the high threshold, the system releases ENIs. A worked example follows this list.
  • On an ECS node, when the number of pre-bound sub-ENIs plus the number of sub-ENIs currently used by the pods is smaller than the number of sub-ENIs at the low threshold (sub-ENI quota on the node x low threshold), the system pre-binds sub-ENIs to make the numbers equal.
  • On an ECS node, when the number of pre-bound sub-ENIs plus the number of sub-ENIs currently used by the pods is larger than the number of sub-ENIs at the high threshold (sub-ENI quota on the node x high threshold), the system releases sub-ENIs to make the numbers equal.
  • On a BMS node, when the number of pre-bound ENIs plus the number of ENIs currently used by the pods is smaller than the number of ENIs at the low threshold (ENI quota on the node x low threshold), the system pre-binds ENIs to make the numbers equal.
  • On a BMS node, when the number of pre-bound ENIs plus the number of ENIs currently used by the pods is larger than the number of ENIs at the high threshold (ENI quota on the node x high threshold), the system releases ENIs to make the numbers equal.
-
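
Worked example (all numbers are assumptions for illustration): suppose an ECS node has a sub-ENI quota of 128, a low threshold of 0.3, and a high threshold of 0.6. The low watermark is 128 x 0.3 = 38.4 (about 38 sub-ENIs) and the high watermark is 128 x 0.6 = 76.8 (about 76 sub-ENIs). If 10 sub-ENIs are in use by pods and 20 are pre-bound (30 in total), the total is below the low watermark, so the system pre-binds roughly 8 more sub-ENIs. If 70 are in use and 20 are pre-bound (90 in total), the total exceeds the high watermark, so the system releases roughly 14 pre-bound sub-ENIs.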
-

Recommendation for CIDR Block Planning

As described in Cluster Network Structure, network addresses in a cluster can be divided into three parts: node network, container network, and service network. When planning network addresses, consider the following aspects:

-
  • The three CIDR blocks cannot overlap. Otherwise, a conflict occurs. All subnets (including those created from the secondary CIDR block) in the VPC where the cluster resides cannot conflict with the container and Service CIDR blocks.
  • Ensure that each CIDR block has sufficient IP addresses.
    • The IP addresses in the node CIDR block must match the cluster scale. Otherwise, nodes cannot be created due to insufficient IP addresses.
    • The IP addresses in the container CIDR block must match the service scale. Otherwise, pods cannot be created due to insufficient IP addresses.
    -
-

In the Cloud Native Network 2.0 model, the container CIDR block and node CIDR block share the network addresses in a VPC. It is recommended that the container subnet and node subnet not use the same subnet. Otherwise, containers or nodes may fail to be created due to insufficient IP resources.

-

In addition, a subnet can be added to the container CIDR block after a cluster is created to increase the number of available IP addresses. In this case, ensure that the added subnet does not conflict with other subnets in the container CIDR block.

-
Figure 3 Configuring CIDR blocks
-
-

Example of Cloud Native Network 2.0 Access

Create a CCE Turbo cluster, which contains three ECS nodes.

-
Figure 4 Cluster network
-

Access the details page of one node. You can see that the node has one primary NIC and one extended NIC, and both of them are ENIs. The extended NIC belongs to the container CIDR block and is used to mount a sub-ENI to the pod.

-

Create a Deployment on the cluster.

-
kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: example
-  namespace: default
-spec:
-  replicas: 6
-  selector:
-    matchLabels:
-      app: example
-  template:
-    metadata:
-      labels:
-        app: example
-    spec:
-      containers:
-        - name: container-0
-          image: 'nginx:perl'
-          resources:
-            limits:
-              cpu: 250m
-              memory: 512Mi
-            requests:
-              cpu: 250m
-              memory: 512Mi
-      imagePullSecrets:
-        - name: default-secret
-

View the created pod.

-
$ kubectl get pod -owide
-NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
-example-5bdc5699b7-54v7g   1/1     Running   0          7s    10.1.18.2     10.1.0.167   <none>           <none>
-example-5bdc5699b7-6dzx5   1/1     Running   0          7s    10.1.18.216   10.1.0.186   <none>           <none>
-example-5bdc5699b7-gq7xs   1/1     Running   0          7s    10.1.16.63    10.1.0.144   <none>           <none>
-example-5bdc5699b7-h9rvb   1/1     Running   0          7s    10.1.16.125   10.1.0.167   <none>           <none>
-example-5bdc5699b7-s9fts   1/1     Running   0          7s    10.1.16.89    10.1.0.144   <none>           <none>
-example-5bdc5699b7-swq6q   1/1     Running   0          7s    10.1.17.111   10.1.0.167   <none>           <none>
-

The IP addresses of all pods are sub-ENIs, which are mounted to the ENI (extended NIC) of the node.

-

For example, the extended NIC of node 10.1.0.167 is 10.1.17.172. On the Network Interfaces page of the Network Console, you can see that three sub-ENIs are mounted to the extended NIC 10.1.17.172, and their IP addresses are the IP addresses of the pods.

-

In the VPC, the IP address of the pod can be successfully accessed.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0285.html b/docs/cce/umn/cce_01_0285.html deleted file mode 100644 index 58f52f6f..00000000 --- a/docs/cce/umn/cce_01_0285.html +++ /dev/null @@ -1,26 +0,0 @@ - - -

Managing Namespaces

-

Selecting a Namespace

  • When creating a workload, you can select a namespace to isolate resources or users.
  • When querying workloads, you can select a namespace to view all workloads in the namespace.
-
-

Isolating Namespaces

  • Isolating namespaces by environment

    An application generally goes through the development, joint debugging, and testing stages before it is launched. In this process, the workloads deployed in each environment (stage) are the same, but are logically defined. There are two ways to define them:

    -
    • Group them in different clusters for different environments.

      Resources cannot be shared among different clusters. In addition, services in different environments can access each other only through load balancing.

      -
    • Group them in different namespaces for different environments.

      Workloads in the same namespace can access each other by using the Service name. Cross-namespace access is implemented by using the Service name together with the namespace name (see the DNS name example after this list).

      -

      The following figure shows namespaces created for the development, joint debugging, and testing environments, respectively.

      -
      Figure 1 One namespace for one environment
      -
    -
  • Isolating namespaces by application

    You are advised to use this method if a large number of workloads are deployed in the same environment. For example, in the following figure, different namespaces (APP1 and APP2) are created to logically manage workloads as different groups. Workloads in the same namespace access each other using the Service name, and workloads in different namespaces access each other using the Service name or namespace name.

    -
    Figure 2 Grouping workloads into different namespaces
    -
-
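
A concrete illustration of the naming rules above. The Service name backend and the namespaces app1 and app2 are assumptions for the example.

# From a pod in namespace app1 (same namespace), the Service name alone is enough:
curl http://backend

# From a pod in namespace app2 (cross-namespace), append the namespace or use the full cluster DNS name:
curl http://backend.app1
curl http://backend.app1.svc.cluster.local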
-

Deleting a Namespace

If a namespace is deleted, all resources (such as workloads, jobs, and ConfigMaps) in this namespace will also be deleted. Exercise caution when deleting a namespace.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces.
  2. Select the cluster to which the namespace belongs from the Clusters drop-down list.
  3. Select the namespace to be deleted and click Delete.

    Follow the prompts to delete the namespace. The default namespaces cannot be deleted.

    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0286.html b/docs/cce/umn/cce_01_0286.html deleted file mode 100644 index 1a1f9de3..00000000 --- a/docs/cce/umn/cce_01_0286.html +++ /dev/null @@ -1,34 +0,0 @@ - - -

Configuring a Namespace-level Network Policy

-

You can configure a namespace-level network policy after enabling network isolation.

-

By default, Network Isolation is disabled for namespaces. For example, if network isolation is off for namespace default, all workloads in the current cluster can access the workloads in namespace default.

-

To prevent other workloads from accessing the workloads in namespace default, perform the following steps:

-

Only clusters that use the tunnel network model support network isolation.

-
-

Prerequisites

-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces.
  2. Select the cluster to which the namespace belongs from the Clusters drop-down list.
  3. At the row of a namespace (for example, default), switch on Network Isolation.

    After network isolation is enabled, workloads in namespace default can access each other but they cannot be accessed by workloads in other namespaces.

    -
    Figure 1 Namespace-level network policy
    -

-
-

Network Isolation Description

Enabling network isolation creates a network policy in the namespace. The network policy selects all pods in the namespace and denies access from pods in other namespaces.

-
kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
-    name: deny-default
-    namespace: default
-spec:
-    ingress:
-        - from:
-          - podSelector: {}
-    podSelector: {}                     # {} indicates that all pods are selected.
-

You can also customize a network policy. For details, see Network Policies.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0287.html b/docs/cce/umn/cce_01_0287.html deleted file mode 100644 index 92504987..00000000 --- a/docs/cce/umn/cce_01_0287.html +++ /dev/null @@ -1,127 +0,0 @@ - - -

Setting a Resource Quota

-

Namespace-level resource quotas limit the amount of resources available to teams or users when these teams or users use the same cluster. The quotas include the total number of a type of objects and the total amount of compute resources (CPU and memory) consumed by the objects.

-

Quotas can be set only in clusters of v1.9 or later.

-
-

Prerequisites

-
-

Usage

By default, running pods can use the CPUs and memory of a node without restrictions. This means the pods in a namespace may exhaust all resources of the cluster.

-

Kubernetes provides namespaces for you to group workloads in a cluster. By setting resource quotas for each namespace, you can prevent resource exhaustion and ensure cluster reliability.

-

You can configure quotas for resources such as CPU, memory, and the number of pods in a namespace. For more information, see Resource Quotas.

-

The following shows the recommended maximum number of pods for clusters of different sizes.

- -
  • 50 nodes: 2,500 pods
  • 200 nodes: 10,000 pods
  • 1,000 nodes: 30,000 pods
  • 2,000 nodes: 50,000 pods

-
-
-

For clusters of v1.21 and later, default resource quotas are created when a namespace is created. Table 1 lists these quotas based on cluster specifications. You can modify them according to your service requirements.

- -
Table 1 Default resource quotas

  • 50 nodes: 2000 pods, 1000 Deployments, 1000 Secrets, 1000 ConfigMaps, 1000 Services
  • 200 nodes: 2000 pods, 1000 Deployments, 1000 Secrets, 1000 ConfigMaps, 1000 Services
  • 1,000 nodes: 5000 pods, 2000 Deployments, 2000 Secrets, 2000 ConfigMaps, 2000 Services
  • 2,000 nodes: 5000 pods, 2000 Deployments, 2000 Secrets, 2000 ConfigMaps, 2000 Services

-
-
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Namespaces.
  2. Select the cluster to which the namespace belongs from the Clusters drop-down list.
  3. In the Operation column of a namespace, click Manage Quota.

    This operation cannot be performed on system namespaces kube-system and kube-public.

    -

  4. Set the resource quotas and click OK.

    • CPU (cores): maximum number of CPU cores that can be allocated to workload pods in the namespace.
    • Memory (MiB): maximum amount of memory that can be allocated to workload pods in the namespace.
    • StatefulSet: maximum number of StatefulSets that can be created in the namespace.
    • Deployment: maximum number of Deployments that can be created in the namespace.
    • Job: maximum number of one-off jobs that can be created in the namespace.
    • Cron Job: maximum number of cron jobs that can be created in the namespace.
    • Pod: maximum number of pods that can be created in the namespace.
    • Service: maximum number of Services that can be created in the namespace.
    -
    • After setting CPU and memory quotas for a namespace, you must specify the request and limit values of CPU and memory resources when creating a workload. Otherwise, the workload cannot be created. If the quota of a resource is set to 0, the resource usage is not limited.
    • Accumulated quota usage includes the resources used by CCE to create default components, such as the Kubernetes Services (which can be viewed using kubectl) created under the default namespace. Therefore, you are advised to set a resource quota greater than expected to reserve resource for creating default components.
    -
    -

-
-
-
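
After setting the quotas, you can verify them from the command line. A minimal sketch, assuming the namespace is named dev:

# List the ResourceQuota objects in the namespace.
kubectl get resourcequota -n dev

# Show the configured hard limits and the amounts currently used.
kubectl describe resourcequota -n dev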
- -
- diff --git a/docs/cce/umn/cce_01_0288.html b/docs/cce/umn/cce_01_0288.html deleted file mode 100644 index 047271c2..00000000 --- a/docs/cce/umn/cce_01_0288.html +++ /dev/null @@ -1,196 +0,0 @@ - - -

SecurityGroups

-

When the Cloud Native Network 2.0 model is used, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. CCE provides a custom resource object named SecurityGroup for you to associate security groups with pods in CCE. You can customize workloads with specific security isolation requirements using SecurityGroups.

-

Notes and Constraints

  • This function is supported for CCE Turbo clusters of v1.19 and later. Upgrade your CCE Turbo clusters if their versions are earlier than v1.19.
  • A workload can be bound to a maximum of five security groups.
-
-

Using the Console

  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, select the target cluster in the upper right corner and click Create.
  3. Set the parameters as described in Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Configuration parameters

    Parameter

    -

    Description

    -

    Example Value

    -

    SecurityGroup Name

    -

    Enter a SecurityGroup name.

    -

    Enter 4 to 63 characters. The value must start with a lowercase letter and cannot end with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    security-group

    -

    Cluster Name

    -

    Select a cluster.

    -

    cce-turbo

    -

    Namespace

    -

    Select a namespace. If the namespace is not created, click Create Namespace.

    -

    default

    -

    Workload

    -

    Select a workload.

    -

    nginx

    -

    Security Group

    -

    The selected security group will be bound to the ENI or supplementary ENI of the selected workload. A maximum of five security groups can be selected from the drop-down list. You must select one or multiple security groups to create a SecurityGroup.

    -

    If no security group has been created, click Create Security Group. After the security group is created, click the refresh button.

    -
    NOTICE:
    • A maximum of 5 security groups can be selected.
    • Hover the cursor on the security group name, and you can view details about the security group.
    -
    -

    64566556-bd6f-48fb-b2c6-df8f44617953

    -

    5451f1b0-bd6f-48fb-b2c6-df8f44617953

    -
    -
    -

  4. After setting the parameters, click Create.

    After the SecurityGroup is created, the system automatically returns to the SecurityGroup list page. You can see that the newly added SecurityGroup is in the list.

    -

-
-

Using kubectl

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Create a description file named securitygroup-demo.yaml.

    vi securitygroup-demo.yaml

    -

    For example, create the following SecurityGroup to bind all nginx workloads with two security groups 64566556-bd6f-48fb-b2c6-df8f44617953 and 5451f1b0-bd6f-48fb-b2c6-df8f44617953 that have been created in advance. An example is as follows:

    -
    apiVersion: crd.yangtse.cni/v1
    -kind: SecurityGroup
    -metadata:
    -  name: demo
    -  namespace: default
    -spec:
    -  podSelector:
    -    matchLabels:
    -      app: nginx    
    -  securityGroups:
    -  - id: 64566556-bd6f-48fb-b2c6-df8f44617953
    -  - id: 5451f1b0-bd6f-48fb-b2c6-df8f44617953
    -
    Table 2 describes the parameters in the YAML file. -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 2 Description

    Field

    -

    Description

    -

    Mandatory

    -

    apiVersion

    -

    API version. The value is crd.yangtse.cni/v1.

    -

    Yes

    -

    kind

    -

    Type of the object to be created.

    -

    Yes

    -

    metadata

    -

    Metadata definition of the resource object.

    -

    Yes

    -

    name

    -

    Name of the SecurityGroup.

    -

    Yes

    -

    namespace

    -

    Name of the namespace.

    -

    Yes

    -

    Spec

    -

    Detailed description of the SecurityGroup.

    -

    Yes

    -

    podSelector

    -

    Used to define the workload to be associated with security groups in the SecurityGroup.

    -

    Yes

    -

    securityGroups

    -

    Security group ID.

    -

    Yes

    -
    -
    -
    -

  3. Run the following command to create the SecurityGroup:

    kubectl create -f securitygroup-demo.yaml

    -

    If the following information is displayed, the SecurityGroup is being created.

    -
    securitygroup.crd.yangtse.cni/demo created
    -

  4. Run the following command to view the SecurityGroup:

    kubectl get sg

    -

    If the created SecurityGroup named demo appears in the command output, the SecurityGroup is created successfully. (A YAML inspection sketch follows this procedure.)

    -
    NAME                       POD-SELECTOR                      AGE
    -all-no                     map[matchLabels:map[app:nginx]]   4h1m
    -s001test                   map[matchLabels:map[app:nginx]]   19m
    -demo                       map[matchLabels:map[app:nginx]]   2m9s
    -

-
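
To inspect the full definition of the SecurityGroup created above, you can also print its YAML (the object name demo and the namespace default come from the example):

kubectl get sg demo -n default -o yaml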
-

Other Operations

-
- - - - - - - - - - - - - - - - -
Table 3 Other operations

Operation

-

Procedure

-

Deletion

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, select the target SecurityGroup.
  3. Click Delete to delete the selected SecurityGroup.
-

Update

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, click Update at the same row as the SecurityGroup.

    You can update the SecurityGroup ID and associated workload.

    -
-

Viewing the YAML file

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, click View YAML at the same row as the SecurityGroup.

    You can view, copy, and download the YAML file.

    -
-

Viewing events

-
  1. In the navigation pane of the CCE console, choose Resource Management > Network.
  2. On the SecurityGroup tab page, click View Event.

    You can query the event information.

    -
-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0291.html b/docs/cce/umn/cce_01_0291.html deleted file mode 100644 index e26e81aa..00000000 --- a/docs/cce/umn/cce_01_0291.html +++ /dev/null @@ -1,20 +0,0 @@ - - -

Scaling a Cluster/Node

-

-
- - diff --git a/docs/cce/umn/cce_01_0293.html b/docs/cce/umn/cce_01_0293.html deleted file mode 100644 index 08e78262..00000000 --- a/docs/cce/umn/cce_01_0293.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Scaling a Workload

-

-
- - diff --git a/docs/cce/umn/cce_01_0296.html b/docs/cce/umn/cce_01_0296.html deleted file mode 100644 index a148d81d..00000000 --- a/docs/cce/umn/cce_01_0296.html +++ /dev/null @@ -1,27 +0,0 @@ - - -

Node Scaling Mechanisms

-

Kubernetes HPA is designed for pods. However, if cluster resources are insufficient, the only option is to add nodes. Manually scaling cluster nodes is laborious, but in the cloud you can add or delete nodes by simply calling APIs.

-

autoscaler is a component provided by Kubernetes for auto scaling of cluster nodes based on the pod scheduling status and resource usage.

-

Prerequisites

Before using the node scaling function, you must install the autoscaler add-on of v1.13.8 or later.

-
-

How autoscaler Works

The cluster autoscaler (CA) goes through two processes.

-
  • Scale-out: The CA checks all unschedulable pods every 10 seconds and selects a node group that meets the requirements for scale-out based on the policy you set.
  • Scale-in: The CA scans all nodes every 10 seconds. If the number of pod requests on a node is less than the user-defined percentage for scale-in, the CA simulates whether the pods on the node can be migrated to other nodes. If yes, the node will be removed after an idle time window.
-

As described above, if a cluster node is idle for a period of time (10 minutes by default), scale-in is triggered, and the idle node is removed.

-

However, a node cannot be removed from a cluster if the following pods exist:

-
  1. Pods that do not meet specific requirements set in PodDisruptionBudget
  2. Pods that cannot be scheduled to other nodes due to constraints such as affinity and anti-affinity policies
  3. Pods that have the "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" annotation (an example manifest is sketched after this list)
  4. Pods (except those created by kube-system DaemonSet) that exist in the kube-system namespace on the node
  5. Pods that are not managed by a controller (Deployment, ReplicaSet, Job, or StatefulSet)
-
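
A minimal sketch of the annotation referenced in item 3 above. The pod name and image are illustrative; in practice the annotation is usually set in a workload's pod template.

apiVersion: v1
kind: Pod
metadata:
  name: keep-me
  annotations:
    # Prevents the cluster autoscaler from evicting this pod during a scale-in.
    cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
spec:
  containers:
  - name: app
    image: nginx:alpine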
-

autoscaler Architecture

Figure 1 shows the autoscaler architecture and its core modules:

-
Figure 1 autoscaler architecture
-

Description

-
  • Estimator: Evaluates the number of nodes to be added to each node pool to host unschedulable pods.
  • Simulator: Finds the nodes that meet the scale-in conditions in the scale-in scenario.
  • Expander: Selects an optimal node from the node pool picked out by the Estimator based on the user-defined policy in the scale-out scenario. Currently, the Expander has the following policies:
    • Random: Selects a node pool randomly. If you have not specified a policy, Random is set by default.
    • most-pods: Selects the node pool that can host the largest number of unschedulable pods after the scale-out. If multiple node pools meet the requirement, a random node pool will be selected.
    • least-waste: Selects the node pool that has the least CPU or memory resource waste after scale-out.
    • price: Selects the node pool in which the to-be-added nodes cost least for scale-out.
    • priority: Selects the node pool with the highest weight. The weights are user-defined.
    -
-

Currently, CCE supports all policies except price. By default, CCE add-ons use the least-waste policy.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0298.html b/docs/cce/umn/cce_01_0298.html deleted file mode 100644 index b9862b21..00000000 --- a/docs/cce/umn/cce_01_0298.html +++ /dev/null @@ -1,170 +0,0 @@ - - -

Creating a CCE Turbo Cluster

-

CCE Turbo clusters run on a cloud native infrastructure that features software-hardware synergy to support passthrough networking, high security and reliability, and intelligent scheduling.

-

CCE Turbo clusters are paired with the Cloud Native Network 2.0 model for large-scale, high-performance container deployment. Containers are assigned IP addresses from the VPC CIDR block. Containers and nodes can belong to different subnets. Access requests from external networks in a VPC can be directly routed to container IP addresses, which greatly improves networking performance. It is recommended that you go through Cloud Native Network 2.0 to understand the features and network planning of each CIDR block of Cloud Native Network 2.0.

-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • You can create a maximum of 50 clusters in a single region.
  • CCE Turbo clusters support only Cloud Native Network 2.0. For details about this network model, see Cloud Native Network 2.0.
  • Nodes in a CCE Turbo cluster must be the models developed on the QingTian architecture that features software-hardware synergy.
  • CCE Turbo clusters are available only in certain regions.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. Click Create next to CCE Turbo Cluster.

    Figure 1 Creating a CCE Turbo cluster
    -

  2. On the page displayed, set the following parameters:

    Basic configuration

    -
    Specify the basic cluster configuration. -
    - - - - - - - - - - - - - -
    Table 1 Basic parameters for creating a cluster

    Parameter

    -

    Description

    -

    Cluster Name

    -

    Name of the cluster to be created. The cluster name must be unique under the same account and cannot be changed after the cluster is created.

    -

    A cluster name contains 4 to 128 characters, starting with a letter and not ending with a hyphen (-). Only lowercase letters, digits, and hyphens (-) are allowed.

    -

    Version

    -

    Version of Kubernetes to use for the cluster.

    -

    Management Scale

    -

    Maximum number of worker nodes that can be managed by the master nodes of the cluster. You can select 200 nodes, 1,000 nodes, or 2,000 nodes for your cluster.

    -

    Master node specifications change with the cluster management scale you choose, and you will be charged accordingly.

    -
    -
    -
    -

    Networking configuration

    -
    Select the CIDR blocks used by nodes and containers in the cluster. If IP resources in the CIDR blocks are insufficient, nodes and containers cannot be created. -
    - - - - - - - - - - - - - - - - -
    Table 2 Networking parameters

    Parameter

    -

    Description

    -

    Network Model

    -

    Cloud Native Network 2.0: This network model deeply integrates the native elastic network interfaces (ENIs) of VPC, uses the VPC CIDR block to allocate container addresses, and supports direct traffic distribution to containers through a load balancer to deliver high performance.

    -

    VPC

    -

    Select the VPC used by nodes and containers in the cluster. The VPC cannot be changed after the cluster is created.

    -

    A VPC provides a secure and logically isolated network environment.

    -

    If no VPC is available, create one on the VPC console. After the VPC is created, click the refresh icon.

    -

    Node Subnet

    -

    This parameter is available after you select a VPC.

    -

    The subnet you select is used by nodes in the cluster and determines the maximum number of nodes in the cluster. This subnet will be the default subnet where your nodes are created. When creating a node, you can select other subnets in the same VPC.

    -

    A node subnet provides dedicated network resources that are logically isolated from other networks for higher security.

    -

    If no node subnet is available, click Create Subnet to create a subnet. After the subnet is created, click the refresh icon. For details about the relationship between VPCs, subnets, and clusters, see Cluster Overview.

    -

    During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.

    -

    The selected subnet cannot be changed after the cluster is created.

    -

    Pod Subnet

    -

    This parameter is available after you select a VPC.

    -

    The subnet you select is used by pods in the cluster and determines the maximum number of pods in the cluster. The subnet cannot be changed after the cluster is created.

    -

    IP addresses used by pods will be allocated from this subnet.

    -
    NOTE:

    If the pod subnet is the same as the node subnet, pods and nodes share the remaining IP addresses in the subnet. As a result, pods or nodes may fail to be created due to insufficient IP addresses.

    -
    -
    -
    -
    -

    Advanced Settings

    -
    Configure enhanced capabilities for your CCE Turbo cluster. -
    - - - - - - - - - - - - - -
    Table 3 Networking parameters

    Parameter

    -

    Description

    -

    Service Network Segment

    -

    An IP range from which IP addresses are allocated to Kubernetes Services. After the cluster is created, the CIDR block cannot be changed. The Service CIDR block cannot conflict with the created routes. If they conflict, select another CIDR block.

    -

    The default value is 10.247.0.0/16. You can change the CIDR block and mask according to your service requirements. The mask determines the maximum number of Service IP addresses available in the cluster.

    -

    After you set the mask, the console will provide an estimated maximum number of Services you can create in this CIDR block.

    -

    kube-proxy Mode

    -

    Load balancing between Services and their backend pods. The value cannot be changed after the cluster is created.

    -
    • IPVS: optimized kube-proxy mode to achieve higher throughput and faster speed, ideal for large-sized clusters. This mode supports incremental updates and can keep connections uninterrupted during Service updates.

      In this mode, when the ingress and Service use the same ELB instance, the ingress cannot be accessed from the nodes and containers in the cluster.

      -
    • iptables: Use iptables rules to implement Service load balancing. In this mode, too many iptables rules will be generated when many Services are deployed. In addition, non-incremental updates will cause a latency and even tangible performance issues in the case of service traffic spikes.
    -
    NOTE:
    • IPVS provides better scalability and performance for large clusters.
    • Compared with iptables, IPVS supports more complex load balancing algorithms such as least load first (LLF) and weighted least connections (WLC).
    • IPVS supports server health check and connection retries.
    -
    -

    CPU Policy

    -
    • On: Exclusive CPU cores can be allocated to workload pods. Select On if your workload is sensitive to latency in CPU cache and scheduling.
    • Off: Exclusive CPU cores will not be allocated to workload pods. Select Off if you want a large pool of shareable CPU cores.
    -
    -
    -
    -

  3. Click Next: Confirm to review the configurations and change them if required.
  4. Click Submit.

    It takes about 6 to 10 minutes to create a cluster. You can click Back to Cluster List to perform other operations on the cluster or click Go to Cluster Events to view the cluster details.

    -

  5. If the cluster status is Available, the CCE Turbo cluster is successfully created, and Turbo is displayed next to the cluster name.

    -

-
-

Related Operations

-
  • Creating a namespace: You can create multiple namespaces in a cluster and organize resources in the cluster into different namespaces. These namespaces serve as logical groups and can be managed separately. For details about how to create a namespace for a cluster, see Namespaces.
  • Creating a workload: Once the cluster is created, you can use an image to create an application that can be accessed from public networks. For details, see Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
  • Viewing cluster details: Click the cluster name to view cluster details. -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 4 Details about the created cluster

    Tab

    -

    Description

    -

    Basic Information

    -

    You can view the details and running status of the cluster.

    -

    Monitoring

    -

    You can view the CPU and memory allocation rates of all nodes in the cluster (that is, the maximum allocated amount), as well as the CPU usage, memory usage, and specifications of the master node(s).

    -

    Events

    -
    • View cluster events.
    • Set search criteria, such as the event name or the time segment during which an event is generated, to filter events.
    -

    Auto Scaling

    -

    You can configure auto scaling to add or reduce worker nodes in a cluster to meet service requirements. For details, see Setting Cluster Auto Scaling.

    -

    Clusters of v1.17 do not support auto scaling using AOM. You can use node pools for auto scaling. For details, see Node Pool Overview.

    -

    kubectl

    -

    To access a Kubernetes cluster from a PC, you need to use the Kubernetes command line tool kubectl. For details, see Connecting to a Cluster Using kubectl.

    -

    Resource Tags

    -

    Resource tags can be added to classify resources.

    -

    You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use predefined tags to improve tag creation and resource migration efficiency.

    -

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag. A maximum of 5 tags can be added.

    -
    -
    -
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0300.html b/docs/cce/umn/cce_01_0300.html index c81c8574..a1798429 100644 --- a/docs/cce/umn/cce_01_0300.html +++ b/docs/cce/umn/cce_01_0300.html @@ -8,14 +8,29 @@ -

2022-08-27

+

2023-02-10

+ + + + +

2022-12-20

+ + + + +

2022-11-21

+ +

Added Best Practice.

+ + +

2022-08-27

EulerOS 2.9 is supported. For details, see OS Patch Notes for Cluster Nodes.

2022-07-13

-

Supported egress rules. For details, see Network Policies.

+

Supported egress rules. For details, see Network Policies.

2022-05-24

@@ -30,33 +45,33 @@

2022-04-14

-

Allowed cluster upgrade from v1.19 to v1.21. For details, see Performing In-place Upgrade.

+

Allowed cluster upgrade from v1.19 to v1.21. For details, see Performing In-place Upgrade.

2022-03-24

-
  • Supported the creation of clusters of v1.21.
  • Two-way authentication is supported for domain name access. For details, see Two-Way Authentication for Domain Names.
  • The Docker storage mode of nodes running CentOS 7 in CCE clusters is changed from Device Mapper to OverlayFS. For details, see Node Overview.
+
  • Supported the creation of clusters of v1.21.
  • Two-way authentication is supported for domain name access. For details, see Two-Way Authentication for Domain Names.
  • The Docker storage mode of nodes running CentOS 7 in CCE clusters is changed from Device Mapper to OverlayFS. For details, see Node Overview.

2022-02-17

Supported the creation of CCE Turbo Cluster.

- +

2021-12-14

-

The validity period of the certificate of cluster can be configured. For details, see Obtaining a Cluster Certificate.

+

The validity period of the certificate of cluster can be configured. For details, see Obtaining a Cluster Certificate.

2021-11-30

- +

2021-11-15

- +

2021-06-23

@@ -72,7 +87,7 @@

2021-01-30

- +

2020-11-02

@@ -97,7 +112,7 @@

2020-02-21

-

Updated Namespaces.

+

Updated Namespaces.

2019-10-30

@@ -122,7 +137,7 @@

2019-07-30

-
  • Allows users to modify Maximum Number of Unavailable Pods after creating an application.
  • Allows users to add pod scheduling policies after creating an application. For details, see Affinity and Anti-Affinity Scheduling.
+
  • Allows users to modify Maximum Number of Unavailable Pods after creating an application.
  • Allows users to add pod scheduling policies after creating an application. For details, see Affinity and Anti-Affinity Scheduling.

2019-07-29

@@ -237,7 +252,7 @@

2018-09-15

-
  • Added a step to the procedure of adding a node to a BMS cluster. For details, see 3.4-Adding Existing Nodes to a BMS Cluster.
  • Deleted the EVS and ELB related constraints. For details, see 3.4-Constraints.
  • Added the description of DeH in 3.7-Table Parameters for creating a node.
+
  • Added a step to the procedure of adding a node to a BMS cluster. For details, see 3.4-Adding Existing Nodes to a BMS Cluster.
  • Deleted the EVS and ELB related constraints. For details, see 3.4-Constraints.

2018-09-05

diff --git a/docs/cce/umn/cce_01_0301.html b/docs/cce/umn/cce_01_0301.html deleted file mode 100644 index 11d41108..00000000 --- a/docs/cce/umn/cce_01_0301.html +++ /dev/null @@ -1,39 +0,0 @@ - - -

Performing In-place Upgrade (v1.15 and Later)

-

Scenario

On the CCE console, you can perform an in-place cluster upgrade to use new cluster features.

-

Before the upgrade, learn about the target version to which each CCE cluster can be upgraded in what ways, and the upgrade impacts. For details, see Overview and Before You Start.

-
-

Description

  • An in-place upgrade updates the Kubernetes components on cluster nodes, without changing their OS version.
  • Data plane nodes are upgraded in batches. By default, they are prioritized based on their CPU, memory, and PodDisruptionBudgets (PDBs). You can also set the priorities according to your service requirements.
-
-

Precautions

  • During the cluster upgrade, the system will automatically upgrade add-ons to a version compatible with the target cluster version. Do not uninstall or reinstall add-ons during the cluster upgrade.
  • Before the upgrade, ensure that all add-ons are running. If an add-on fails to be upgraded, rectify the fault and try again.
  • During the upgrade, CCE checks the add-on running status. Some add-ons (such as coredns) require at least two nodes to run normally. In this case, at least two nodes must be available for the upgrade.
-

For more information, see Before You Start.

-
-

Procedure

This section describes how to upgrade a CCE cluster of v1.15 or later. For other versions, see Performing Replace/Rolling Upgrade (v1.13 and Earlier).

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Clusters. In the cluster list, view the cluster version.
  2. Click More for the cluster you want to upgrade, and select Upgrade from the drop-down menu.

    Figure 1 Upgrading a cluster
    -
    • If your cluster version is up-to-date, the Upgrade button is grayed out.
    • If the cluster status is Unavailable, the upgrade flag in the upper right corner of the cluster card view will be grayed out. Check the cluster status by referring to Before You Start.
    -
    -

  3. (Optional) On the cluster upgrade confirmation page, click Backup to back up the entire master node. This backup mode is recommended.

    A manual confirmation is required for backing up the entire master node. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged. You are advised to back up the master node.

    -
    Figure 2 Determining whether to back up the entire master node
    -

  4. Check the version information, last update/upgrade time, available upgrade version, and upgrade history of the current cluster.

    The cluster upgrade goes through pre-upgrade check, add-on upgrade/uninstallation, master node upgrade, worker node upgrade, and post-upgrade processing.

    -
    Figure 3 Cluster upgrade page
    -

  5. Click Upgrade on the right. Set the upgrade parameters.

    • Available Versions: Select v1.19 in this example.
    • Cluster Backup: A manual confirmation is required for backing up the entire master node. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged.
    • Add-on Upgrade Configuration: Add-ons that have been installed in your cluster are listed. During the cluster upgrade, the system automatically upgrades the add-ons to be compatible with the target cluster version. You can click Set to re-define the add-on parameters.

      If a red dot is displayed on the right of an add-on, the add-on is incompatible with the target cluster version. During the upgrade, the add-on will be uninstalled and then re-installed. Ensure that the add-on parameters are correctly configured.

      -
      -
    • Node Upgrade Configuration: Before setting the node upgrade priority, you need to select a node pool. Nodes and node pools will be upgraded according to the priorities you specify. You can set the maximum number of nodes to be upgraded in batch, or set priorities for nodes to be upgraded. If you do not set this parameter, the system will determine the nodes to upgrade in batches based on specific conditions.
      • Add Upgrade Priority: Add upgrade priorities for node pools.
      • Add Node Priority: After adding a node pool priority, you can set the upgrade sequence of nodes in the node pool. The system upgrades nodes in the sequence you specify. If you skip this setting, the system upgrades nodes based on the default policy.
      -
    -
    Figure 4 Configuring upgrade parameters
    -

  6. Read the upgrade instructions carefully, and select I have read the upgrade instructions. Click Upgrade.

    Figure 5 Final step before upgrade
    -

  7. After you click Upgrade, the cluster upgrade starts. You can view the upgrade process in the lower part of the page.

    During the upgrade, you can click Suspend on the right to suspend the cluster upgrade. To continue the upgrade, click Continue.

    -

    -
    Figure 6 Cluster upgrade in process
    -

  8. When the upgrade progress reaches 100%, the cluster is upgraded. The version information will be properly displayed, and no upgrade is required.

    Figure 7 Upgrade completed
    -

  9. After the upgrade is complete, verify the cluster Kubernetes version on the Clusters page.

    Figure 8 Verifying the upgrade success
    -

-
-
-
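If you prefer the CLI, a quick check such as the following also confirms the upgrade (a sketch; the exact version strings depend on your cluster):

    kubectl version
    kubectl get nodes        # the VERSION column should show the target Kubernetes version for every node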
- -
- diff --git a/docs/cce/umn/cce_01_0302.html b/docs/cce/umn/cce_01_0302.html deleted file mode 100644 index cbbc3e0c..00000000 --- a/docs/cce/umn/cce_01_0302.html +++ /dev/null @@ -1,191 +0,0 @@ - - -

Before You Start

-

Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Overview.

-

Precautions

  • Upgraded clusters cannot be rolled back. Therefore, perform the upgrade during off-peak hours to minimize the impact on your services.
  • Do not shut down or restart nodes during cluster upgrade. Otherwise, the upgrade fails.
  • Before upgrading a cluster, disable auto scaling policies to prevent node scaling during the upgrade. Otherwise, the upgrade fails. (A quick CLI check is sketched after this list.)
  • If you locally modify the configuration of a cluster node, the cluster upgrade may fail or the configuration may be lost after the upgrade. Therefore, modify the configurations on the CCE console (cluster or node pool list page) so that they will be automatically inherited during the upgrade.
  • During the cluster upgrade, the running workload services will not be interrupted, but access to the API server will be temporarily interrupted.
  • Before upgrading the cluster, check whether the cluster is healthy.
  • To ensure data security, you are advised to back up data before upgrading the cluster. During the upgrade, you are not advised to perform any operations on the cluster.
  • CCE 1.17 and later versions do not support workload scaling using the AOM service. Before and after the upgrade, switch scaling policies by referring to Switching from AOM to HPA for Auto Scaling.
-
-
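For example, the following commands (a sketch; add-on names and namespaces are assumptions and may differ in your environment) list HPA policies and check whether a cluster autoscaler add-on is running before you start the upgrade:

    kubectl get hpa --all-namespaces                      # HPA policies that may scale workloads during the upgrade
    kubectl get deploy -n kube-system | grep -i autoscaler  # check whether a cluster autoscaler add-on is deployed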

Notes and Constraints

  • Currently, only CCE clusters consisting of VM nodes can be upgraded.
  • If initContainers or Istio are used in a cluster of v1.15 that is upgraded in place, pay attention to the following restrictions:

    In kubelet 1.16 and later versions, QoS classes are calculated differently from those in earlier versions. In kubelet 1.15 and earlier versions, only containers in spec.containers are counted. In kubelet 1.16 and later versions, containers in both spec.containers and spec.initContainers are counted. As a result, the QoS class of a pod may change after the upgrade, causing the containers in the pod to restart. You are advised to adjust the QoS class of the service containers before the upgrade to avoid this problem (a minimal example follows the table). For details, see Table 1.

    Table 1 QoS class changes before and after the upgrade

    | Init Container (Calculated Based on spec.initContainers) | Service Container (Calculated Based on spec.containers) | Pod (Calculated Based on spec.containers and spec.initContainers) | Impacted or Not |
    |---|---|---|---|
    | Guaranteed | Besteffort | Burstable  | Yes |
    | Guaranteed | Burstable  | Burstable  | No  |
    | Guaranteed | Guaranteed | Guaranteed | No  |
    | Besteffort | Besteffort | Besteffort | No  |
    | Besteffort | Burstable  | Burstable  | No  |
    | Besteffort | Guaranteed | Burstable  | Yes |
    | Burstable  | Besteffort | Burstable  | Yes |
    | Burstable  | Burstable  | Burstable  | No  |
    | Burstable  | Guaranteed | Burstable  | Yes |
-
-
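The following minimal pod spec is an illustrative sketch (image names and resource values are placeholders, not taken from the product documentation). It shows one way to keep the pod QoS class unchanged across the upgrade: both the init container and the service container set requests equal to limits for CPU and memory, so the pod is Guaranteed whether or not init containers are counted.

    apiVersion: v1
    kind: Pod
    metadata:
      name: qos-guaranteed-example
    spec:
      initContainers:
      - name: init
        image: busybox
        command: ["sh", "-c", "echo init done"]
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:          # limits equal to requests keeps the init container Guaranteed
            cpu: 100m
            memory: 128Mi
      containers:
      - name: app
        image: nginx
        resources:
          requests:
            cpu: 250m
            memory: 256Mi
          limits:          # limits equal to requests keeps the service container, and therefore the pod, Guaranteed
            cpu: 250m
            memory: 256Mi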

Performing Pre-upgrade Check

Before upgrading a cluster, check the health status of the cluster and nodes and ensure that they are available.

-

Method 1: Use the console.

-

On the CCE console, click Resource Management in the navigation pane, and click Clusters and Nodes separately to check whether the cluster and nodes are normal.

-

Method 2: Run kubectl commands.

-
  1. Run the following command to verify that all cluster modules are in the Healthy state:

    kubectl get cs

    -
    Information similar to the following is displayed:
     NAME                 STATUS    MESSAGE              ERROR
    - scheduler            Healthy   ok
    - controller-manager   Healthy   ok
    - etcd-0               Healthy   {"health": "true"}
    - etcd-1               Healthy   {"health": "true"}
    - etcd-2               Healthy   {"health": "true"}
    -
    -

    In the command output, the value of STATUS must be Healthy for all items.

    -
    -

  2. Run the following command to verify that all nodes are in the Ready state:

    kubectl get nodes

    All nodes must be in the Ready state.

    -
    -
     NAME                   STATUS    ROLES     AGE       VERSION
    - xxx.xxx.xx.xx   Ready     <none>    38d       v1.9.7-r1
    - xxx.xxx.xx.xx   Ready     <none>    38d       v1.9.7-r1
    - xxx.xxx.xx.xx   Ready     <none>    38d       v1.9.7-r1
    -
    -

-
-

Pre-upgrade Checklist

Before upgrading a cluster, follow the pre-upgrade checklist to identify risks and problems in advance. A command sketch for recording the current cluster state follows the table.

Table 2 Cluster upgrade check items

  • Cluster
    • Check whether the node IP addresses (including EIPs) of the current cluster are used in other configurations or whitelists.
    • Perform the pre-upgrade check.
  • Workload
    • Record the number and status of workloads for comparison after the upgrade.
    • For the databases you use (such as Direct Connect, Redis, and MongoDB), you need to consider the changes in their whitelists, routes, or security group policies in advance.
  • Storage
    • Record the storage status to check whether storage resources are lost after the upgrade.
  • Networking
    • Check and back up the load balancing services and ingresses.
    • If Direct Connect is used, check whether the upgrade causes changes in the IP addresses of nodes or pods where services are deployed. To handle changes, you need to enable routes on Direct Connect in advance.
  • Add-on
    • When Kubernetes 1.9 is upgraded to 1.11, the kube-dns of the cluster is uninstalled and replaced with CoreDNS. Back up the DNS address configured in kube-dns so that you can use it in CoreDNS when the domain name resolution is abnormal.
  • O&M
    • Private configurations: Check whether data plane passwords, certificates, and environment variables are configured for nodes or containers in the cluster before the upgrade. If a container is restarted (for example, the node is abnormal and the pod is re-scheduled), the configurations will be lost and your service will be abnormal.
    • Check and back up kernel parameters or system configurations.
-
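To record the workload, storage, and networking status listed in the checklist, a snapshot such as the following can be saved before the upgrade (a sketch; the file names are placeholders, and -A requires a recent kubectl where it is shorthand for --all-namespaces):

    kubectl get deploy,sts,ds,job -A > workloads-before-upgrade.txt
    kubectl get pvc -A > pvc-before-upgrade.txt
    kubectl get pv > pv-before-upgrade.txt
    kubectl get svc,ingress -A > networking-before-upgrade.txt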

Upgrade Backup

Currently, there are two backup modes for cluster upgrade:

-
  • etcd database backup: CCE automatically backs up the etcd database during the cluster upgrade.
  • Master node backup (recommended, manual confirmation required): On the upgrade confirmation page, click Backup to back up the entire master node of the cluster. The backup process uses the Cloud Backup and Recovery (CBR) service and takes about 20 minutes. If there are many cloud backup tasks at the current site, the backup time may be prolonged.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0305.html b/docs/cce/umn/cce_01_0305.html deleted file mode 100644 index cdbdbd86..00000000 --- a/docs/cce/umn/cce_01_0305.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Storage (FlexVolume)

-

-
- - diff --git a/docs/cce/umn/cce_01_0306.html b/docs/cce/umn/cce_01_0306.html deleted file mode 100644 index 9fb116a9..00000000 --- a/docs/cce/umn/cce_01_0306.html +++ /dev/null @@ -1,60 +0,0 @@ - - -

FlexVolume Overview

-

In container storage, you can mount as many volumes of different types as needed to the containers in a pod.

-

In CCE, container storage is backed both by Kubernetes-native objects, such as emptyDir/hostPath volumes, secrets, and ConfigMaps, and by storage services.

-

CCE clusters of v1.13 and earlier versions use the storage-driver add-on to connect to storage services, providing container storage through the Kubernetes FlexVolume driver. The FlexVolume driver has been deprecated in favor of the Container Storage Interface (CSI). The everest add-on for CSI is installed in CCE clusters of v1.15 and later versions by default. For details, see Overview.

-
  • In CCE clusters earlier than Kubernetes 1.13, end-to-end capacity expansion of container storage is not supported, and the PVC capacity is inconsistent with the storage capacity.
  • In a cluster of v1.13 or earlier, when an upgrade or bug fix is available for storage functionalities, you only need to install or upgrade the storage-driver add-on. Upgrading the cluster or creating a cluster is not required.
-
-

Notes and Constraints

  • For clusters created in CCE, Kubernetes v1.15.11 is a transitional version in which the FlexVolume plug-in (storage-driver) is compatible with the CSI plug-in (everest). Clusters of v1.17 and later versions do not support FlexVolume any more. You need to use the everest add-on. For details about CSI and FlexVolume, see Differences Between CSI and FlexVolume Plug-ins.
  • The FlexVolume plug-in will be maintained by Kubernetes developers, but new functionality will only be added to CSI. You are advised not to create storage that connects to the FlexVolume plug-in (storage-driver) in CCE any more. Otherwise, the storage resources may not function normally.
-
-

Differences Between CSI and FlexVolume Plug-ins

-
Table 1 CSI and FlexVolume

  • CSI (CCE add-on: everest)
    • Feature: CSI was developed as a standard for exposing arbitrary block and file storage systems to containerized workloads. Using CSI, third-party storage providers can deploy plugins exposing new storage systems in Kubernetes without having to touch the core Kubernetes code. In CCE, the everest add-on is installed by default in clusters of Kubernetes v1.15 and later to connect to storage services (EVS, OBS, SFS, and SFS Turbo).
      The everest add-on consists of two parts:
      • everest-csi-controller for storage volume creation, deletion, capacity expansion, and cloud disk snapshots
      • everest-csi-driver for mounting, unmounting, and formatting storage volumes on nodes
      For details, see everest.
    • Usage: The everest add-on is installed by default in clusters of v1.15 and later. CCE will mirror the Kubernetes community by providing continuous support for updated CSI capabilities.
  • FlexVolume (CCE add-on: storage-driver)
    • Feature: FlexVolume is an out-of-tree plugin interface that has existed in Kubernetes since version 1.2 (before CSI). CCE provided FlexVolume volumes through the storage-driver add-on installed in clusters of Kubernetes v1.13 and earlier versions. This add-on connects clusters to storage services (EVS, OBS, SFS, and SFS Turbo).
      For details, see storage-driver.
    • Usage: For clusters of v1.13 or earlier that have been created, the installed FlexVolume plug-in (the storage-driver add-on in CCE) can still be used. CCE stops providing update support for this add-on, and you are advised to upgrade these clusters.
-
-
  • A cluster can use only one type of storage plug-in.
  • The FlexVolume plug-in cannot be replaced by the CSI plug-in in clusters of v1.13 or earlier. You can only upgrade these clusters. For details, see Cluster Upgrade Between Major Versions.
-
-
-

Notice on Using Add-ons

  • To use the CSI plug-in (the everest add-on in CCE), your cluster must be using Kubernetes 1.15 or later. This add-on is installed by default when you create a cluster of v1.15 or later. The FlexVolume plug-in (the storage-driver add-on in CCE) is installed by default when you create a cluster of v1.13 or earlier.
  • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
  • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
-
-

Checking Storage Add-ons

  1. Log in to the CCE console.
  2. In the navigation tree on the left, click Add-ons.
  3. Click the Add-on Instance tab.
  4. Select a cluster in the upper right corner. The default storage add-on installed during cluster creation is displayed. (A kubectl-based check is sketched after this procedure.)
-
-
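If kubectl access is available, a rough check like the following also shows which storage add-on is running (a sketch; pod names are assumptions and may differ in your cluster):

    kubectl get pods -n kube-system | grep -E 'everest|storage-driver'
    kubectl get csidrivers    # lists registered CSI drivers on clusters where the everest add-on (CSI) is used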
-
- -
- diff --git a/docs/cce/umn/cce_01_0307.html b/docs/cce/umn/cce_01_0307.html deleted file mode 100644 index 55f4274d..00000000 --- a/docs/cce/umn/cce_01_0307.html +++ /dev/null @@ -1,235 +0,0 @@ - - -

Overview

-

Volume

On-disk files in a container are ephemeral: they are lost when the container crashes and are difficult to share between containers running together in a pod. The Kubernetes volume abstraction solves both of these problems. Volumes cannot be created independently; they are defined in the pod spec.

-

All containers in a pod can access its volumes, but the volumes must have been mounted. Volumes can be mounted to any directory in a container.

-

The following figure shows how a storage volume is used between containers in a pod.

-

-

A volume will no longer exist if the pod to which it is mounted does not exist. However, files in the volume may outlive the volume, depending on the volume type.

-
-

Volume Types

Volumes can be classified into local volumes and cloud volumes.

-
  • Local volumes
    CCE supports the following types of local volumes. For details about how to use them, see Using Local Disks as Storage Volumes. (A minimal emptyDir example is sketched after this list.)
    • emptyDir: an empty volume used for temporary storage
    • hostPath: mounts a directory on a host (node) to your container for reading data from the host.
    • ConfigMap: references the data stored in a ConfigMap for use by containers.
    • Secret: references the data stored in a secret for use by containers.
    -
    -
  • Cloud volumes

    CCE supports the following types of cloud volumes:

    -
    • EVS
    • SFS Turbo
    • OBS
    • SFS
    -
-
-
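As referenced above, a minimal emptyDir example (an illustrative sketch; the image name is a placeholder) mounts a temporary volume into a container at /cache. The data lives only as long as the pod does:

    apiVersion: v1
    kind: Pod
    metadata:
      name: emptydir-example
    spec:
      containers:
      - name: app
        image: nginx
        volumeMounts:
        - name: cache-volume
          mountPath: /cache        # any directory in the container can be used as the mount path
      volumes:
      - name: cache-volume
        emptyDir: {}               # temporary storage that is deleted together with the pod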

CSI

You can use Kubernetes Container Storage Interface (CSI) to develop plug-ins to support specific storage volumes.

-

CCE developed the storage add-on everest for you to use cloud storage services, such as EVS and OBS. You can install this add-on when creating a cluster.

-
-

PV and PVC

Kubernetes provides PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs) to abstract details of how storage is provided from how it is consumed. You can request a specific size of storage when needed, just as pods can request specific amounts of resources (CPU and memory).

-
  • PV: A PV is a persistent storage volume in a cluster. Same as a node, a PV is a cluster-level resource.
  • PVC: A PVC describes a workload's request for storage resources. This request consumes existing PVs in the cluster. If there is no PV available, underlying storage and PVs are dynamically created. When creating a PVC, you need to describe the attributes of the requested persistent storage, such as the size of the volume and the read/write permissions.
-

You can bind PVCs to PVs in a pod so that the pod can use storage resources. The following figure shows the relationship between PVs and PVCs.

-
Figure 1 PVC-to-PV binding
-

PVs describe storage resources in the cluster. PVCs are requests for those resources. The following sections describe how to use kubectl to connect to storage resources.

-

If you do not want to create storage resources or PVs manually, you can use StorageClasses.

-
-

StorageClass

StorageClass describes the storage class used in the cluster. You need to specify StorageClass when creating a PVC or PV. As of now, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use a StorageClassName to create a PV of the corresponding type and automatically create underlying storage resources.

-

You can run the following command to query the storage classes that CCE supports. You can use the CSI plug-in provided by CCE to customize a storage class, which functions similarly to the default storage classes in CCE.

-
# kubectl get sc
-NAME                PROVISIONER                     AGE
-csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
-csi-disk-topology   everest-csi-provisioner         17d          # Storage class for EVS disks with delayed binding
-csi-nas             everest-csi-provisioner         17d          # Storage class for SFS file systems
-csi-obs             everest-csi-provisioner         17d          # Storage class for OBS buckets
-

After a StorageClass is set, PVs can be automatically created and maintained. You only need to specify the StorageClass when creating a PVC, which greatly reduces the workload.

-
-
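For example, a PVC like the following (a sketch; the PVC name is a placeholder, and the csi-disk storage class shown by kubectl get sc above is assumed to be available in your cluster) lets the everest add-on dynamically create an EVS disk and the corresponding PV:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc-csi-evs-example
    spec:
      accessModes:
      - ReadWriteOnce              # EVS disks are non-shared storage
      resources:
        requests:
          storage: 10Gi
      storageClassName: csi-disk   # storage class for EVS disks provided by the everest add-on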

Cloud Services for Container Storage

CCE allows you to mount local and cloud storage volumes listed in Volume Types to your pods. Their features are described below.

-
Figure 2 Volume types supported by CCE
- -
Table 1 Detailed description of cloud storage services

  • EVS
    • Definition: EVS offers scalable block storage for cloud servers. With high reliability, high performance, and rich specifications, EVS disks can be used for distributed file systems, dev/test environments, data warehouses, and high-performance computing (HPC) applications.
    • Data storage logic: Stores binary data and cannot directly store files. To store files, you need to format the file system first.
    • Services: Accessible only after being mounted to ECSs or BMSs and initialized.
    • Static provisioning: Supported
    • Dynamic provisioning: Supported
    • Features: Non-shared storage. Each volume can be mounted to only one node.
    • Usage: HPC, enterprise core cluster applications, enterprise application systems, and dev/test (HPC apps here require high-speed and high-IOPS storage, such as industrial design and energy exploration.)
    • Capacity: TB
    • Latency: 1-2 ms
    • IOPS/TPS: 33,000 for a single disk
    • Bandwidth: MB/s
  • SFS
    • Definition: Expandable to petabytes, SFS provides fully hosted shared file storage, highly available and stable to handle data- and bandwidth-intensive applications in HPC, media processing, file sharing, content management, and web services.
    • Data storage logic: Stores files and sorts and displays data in the hierarchy of files and folders.
    • Services: Mounted to ECSs or BMSs using network protocols. A network address must be specified or mapped to a local directory for access.
    • Static provisioning: Supported
    • Dynamic provisioning: Supported
    • Features: Shared storage featuring high performance and throughput
    • Usage: HPC, media processing, content management, web services, big data, and analysis applications (HPC apps here require high bandwidth and shared file storage, such as gene sequencing and image rendering.)
    • Capacity: PB
    • Latency: 3-10 ms
    • IOPS/TPS: 10,000 for a single file system
    • Bandwidth: GB/s
  • OBS
    • Definition: OBS is a stable, secure, and easy-to-use object storage service that lets you inexpensively store data of any format and size. You can use it in enterprise backup/archiving, video on demand (VoD), video surveillance, and many other scenarios.
    • Data storage logic: Stores objects. Files directly stored automatically generate the system metadata, which can also be customized by users.
    • Services: Accessible through the Internet or Direct Connect (DC). You need to specify the bucket address and use transmission protocols such as HTTP and HTTPS.
    • Static provisioning: Supported
    • Dynamic provisioning: Supported
    • Features: Shared, user-mode file system
    • Usage: Big data analysis, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks)
    • Capacity: EB
    • Latency: 10 ms
    • IOPS/TPS: Tens of millions
    • Bandwidth: TB/s
  • SFS Turbo
    • Definition: Expandable to 320 TB, SFS Turbo provides fully hosted shared file storage, highly available and stable to support small files and applications requiring low latency and high IOPS. You can use SFS Turbo in high-traffic websites, log storage, compression/decompression, DevOps, enterprise OA, and containerized applications.
    • Data storage logic: Stores files and sorts and displays data in the hierarchy of files and folders.
    • Services: Supports the Network File System (NFS) protocol (NFSv3 only). You can seamlessly integrate existing applications and tools with SFS Turbo.
    • Static provisioning: Supported
    • Dynamic provisioning: Not supported
    • Features: Shared storage featuring high performance and bandwidth
    • Usage: High-traffic websites, log storage, DevOps, and enterprise OA
    • Capacity: TB
    • Latency: 1-2 ms
    • IOPS/TPS: 100K
    • Bandwidth: GB/s
-
-

Notes and Constraints

Secure containers do not support OBS volumes.

-
  • A single user can create a maximum of 100 OBS buckets on the console. If you have a large number of CCE workloads and you want to mount an OBS bucket to every workload, you may easily run out of buckets. In this scenario, you are advised to use OBS through the OBS API or SDK and do not mount OBS buckets to the workload on the console.
  • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

    For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volume mounted, a new pod cannot be started because EVS disks cannot be attached.

    -
  • When a subPath volume is unmounted in a cluster of v1.19 or earlier, all folders in the subpath are traversed. If there are a large number of folders, the traversal takes a long time, and so does the volume unmount. You are advised not to create too many folders in the subpath.
  • The maximum size of a single file in OBS mounted to a CCE cluster is far smaller than that defined by obsfs.
-
-

Notice on Using Add-ons

  • To use the CSI plug-in (the everest add-on in CCE), your cluster must be using Kubernetes 1.15 or later. This add-on is installed by default when you create a cluster of v1.15 or later. The FlexVolume plug-in (the storage-driver add-on in CCE) is installed by default when you create a cluster of v1.13 or earlier.
  • If your cluster is upgraded from v1.13 to v1.15, storage-driver is replaced by everest (v1.1.6 or later) for container storage. The takeover does not affect the original storage functions.
  • In version 1.2.0 of the everest add-on, key authentication is optimized when OBS is used. After the everest add-on is upgraded from a version earlier than 1.2.0, you need to restart all workloads that use OBS in the cluster. Otherwise, workloads may not be able to use OBS.
-
-

Differences Between CSI and FlexVolume Plug-ins

-
Table 2 CSI and FlexVolume

  • CSI (CCE add-on: everest)
    • Feature: CSI was developed as a standard for exposing arbitrary block and file storage systems to containerized workloads. Using CSI, third-party storage providers can deploy plugins exposing new storage systems in Kubernetes without having to touch the core Kubernetes code. In CCE, the everest add-on is installed by default in clusters of Kubernetes v1.15 and later to connect to storage services (EVS, OBS, SFS, and SFS Turbo).
      The everest add-on consists of two parts:
      • everest-csi-controller for storage volume creation, deletion, capacity expansion, and cloud disk snapshots
      • everest-csi-driver for mounting, unmounting, and formatting storage volumes on nodes
      For details, see everest.
    • Recommendation: The everest add-on is installed by default in clusters of v1.15 and later. CCE will mirror the Kubernetes community by providing continuous support for updated CSI capabilities.
  • FlexVolume (CCE add-on: storage-driver)
    • Feature: FlexVolume is an out-of-tree plugin interface that has existed in Kubernetes since version 1.2 (before CSI). CCE provided FlexVolume volumes through the storage-driver add-on installed in clusters of Kubernetes v1.13 and earlier versions. This add-on connects clusters to storage services (EVS, OBS, SFS, and SFS Turbo).
      For details, see storage-driver.
    • Recommendation: For clusters of v1.13 or earlier that have been created, the installed FlexVolume plug-in (the storage-driver add-on in CCE) can still be used. CCE stops providing update support for this add-on, and you are advised to upgrade these clusters.

-
-
-
  • A cluster can use only one type of storage plug-in.
  • The FlexVolume plug-in cannot be replaced by the CSI plug-in in clusters of v1.13 or earlier. You can only upgrade these clusters. For details, see Cluster Upgrade Between Major Versions.
-
-
-

Checking Storage Add-ons

  1. Log in to the CCE console.
  2. In the navigation tree on the left, click Add-ons.
  3. Click the Add-on Instance tab.
  4. Select a cluster in the upper right corner. The default storage add-on installed during cluster creation is displayed.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0310.html b/docs/cce/umn/cce_01_0310.html deleted file mode 100644 index e76e197b..00000000 --- a/docs/cce/umn/cce_01_0310.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Overview

-

To achieve persistent storage, CCE allows you to mount storage volumes created from Elastic Volume Service (EVS) disks to a path in a container. When the container is migrated, the mounted EVS volumes are migrated with it. Because the data resides on a remote EVS disk rather than on the node, it is preserved even when the container is deleted.

-
Figure 1 Mounting EVS volumes to CCE
-

Description

  • User-friendly: Similar to formatting disks for on-site servers in traditional layouts, you can format block storage (disks) mounted to cloud servers, and create file systems on them.
  • Data isolation: Each server uses an independent block storage device (disk).
  • Private network: Users can access data only over the private networks of data centers.
  • Capacity and performance: The capacity of a single volume is limited (TB-level), but the performance is excellent (ms-level read/write I/O latency).
  • Restriction: EVS disks that have partitions or have non-ext4 file systems cannot be imported.
  • Applications: HPC, enterprise core applications running in clusters, enterprise application systems, and development and testing. These volumes are often used by single-pod Deployments and jobs, or exclusively by each pod in a StatefulSet. EVS disks are non-shared storage and cannot be attached to multiple nodes at the same time. If two pods are configured to use the same EVS disk and the two pods are scheduled to different nodes, one pod cannot be started because the EVS disk cannot be attached to it.
-
-
- - diff --git a/docs/cce/umn/cce_01_0311.html b/docs/cce/umn/cce_01_0311.html deleted file mode 100644 index c4f20439..00000000 --- a/docs/cce/umn/cce_01_0311.html +++ /dev/null @@ -1,144 +0,0 @@ - - -

Using EVS Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
  • Data in a shared disk cannot be shared between nodes in a CCE cluster. If the same EVS disk is attached to multiple nodes, read and write conflicts and data cache conflicts may occur. When creating a Deployment, you are advised to create only one pod if you want to use EVS disks.
  • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
  • EVS disks that have partitions or have non-ext4 file systems cannot be imported.
  • Container storage in CCE clusters of Kubernetes 1.13 or later versions supports encryption. Currently, E2E encryption is supported only in certain regions.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
  • The following operations apply to clusters of Kubernetes 1.13 or earlier.
-
-

Buying an EVS Disk

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. Click Create EVS Disk.
  2. Configure basic disk information. Table 1 describes the parameters.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Configuring basic disk information

    Parameter

    -

    Description

    -

    * PVC Name

    -

    New PVC Name: name of the PVC to be created. A storage volume is automatically created when a PVC is created. One PVC corresponds to one storage volume. The storage volume name is automatically generated when the PVC is created.

    -

    Cluster Name

    -

    Cluster where the EVS disk is deployed.

    -

    Namespace

    -

    Namespace where the EVS disk is deployed. You can retain the default value or specify one.

    -

    Volume Capacity (GB)

    -

    Size of the storage to be created.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteOnce (RWO): The volume can be mounted as read-write by a single node, and data reading and writing are supported based on a non-shared EVS volume. EVS volumes in RWO mode are supported since v1.13.10-r1.
    -

    AZ

    -

    AZ to which the disk belongs.

    -

    Type

    -

    Type of the new EVS disk.

    -
    • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
    • High I/O: uses serial attached SCSI (SAS) drives to store data.
    • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
    -

    Encryption

    -

    KMS Encryption is deselected by default.

    -

    After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure cloud service for your keys, will be used for EVS disks. If no agency has been created, click Create Agency and set the following parameters:

    -
    • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS systems.
    • Key Name: After a key is created, it can be loaded and used in containerized applications.
    • Key ID: generated by default.
    -

    This function is supported only for clusters of v1.13.10 and later in certain regions.

    -
    -
    -

  3. Review your order, click Submit, and wait until the creation is successful.

    The created EVS disk is displayed in the list. When its status becomes Normal, the disk has been created successfully.

    -

  4. Click the volume name to view detailed information about the volume.
-
-

Adding an EVS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage volume type to EVS.

    -

    - - - - - - - - - - - - - - - - - - -
    Table 2 Parameters required for mounting an EVS volume

    Parameter

    -

    Description

    -

    Type

    -

    EVS: You can use EVS disks the same way you use traditional hard disks on servers. EVS disks deliver higher data reliability and I/O throughput and are easy to use. They can be used for file systems, databases, or other system software and applications that require block storage resources.

    -
    CAUTION:
    • To attach an EVS disk to a workload, you must set the number of pods to 1 when creating the workload. If multiple pods are created, you cannot attach EVS disks.
    • When you create a StatefulSet and add a cloud storage volume, existing EVS volumes cannot be used.
    • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple jobs.
    -
    -

    Allocation Mode

    -

    Manual

    -

    Select a created disk. If no disk is available, follow the prompts to create one.

    -

    For the same cluster and namespace, you can use an existing storage volume when creating a Deployment (with Allocation Mode set to Manual).

    -

    When creating a StatefulSet, you can only use a volume automatically allocated by the system (only Automatic is available for Allocation Mode).

    -

    Automatic

    -

    If you select Automatic, you need to configure the following items:

    -
    1. Access Mode: permissions of user applications on storage resources (PVs).
      • ReadWriteOnce (RWO): A non-shared EVS volume is mounted as read-write to a pod by a single node. EVS volumes in RWO mode are supported since v1.13.10-r1.
      -
    2. Availability Zone: AZ where the storage volume is located. Only the AZ where the worker node is located can be selected.
    3. Sub-Type: Select a storage subtype.
      • Common I/O: uses Serial Advanced Technology Attachment (SATA) drives to store data.
      • High I/O: uses serial attached SCSI (SAS) drives to store data.
      • Ultra-high I/O: uses solid state disk (SSD) drives to store data.
      -
    4. Storage Capacity: Enter the storage capacity in the unit of GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    5. After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for EVS disks. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name EVSAccessKMS indicates that EVS is granted the permission to access KMS. After EVS is authorized successfully, it can obtain KMS keys to encrypt and decrypt EVS systems.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -
    1. Click Add Container Path.
    2. Container Path: Enter the container path to which the volume is mounted.
      NOTICE:
      • Do not mount the volume to a system directory such as / or /var/run; this action may cause container errors. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      • If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which causes a data loss.
      -
    -
    -
    -

  3. Click OK.
-
-

Importing an EVS Disk

CCE allows you to import existing EVS disks.

-

An EVS disk can be imported into only one namespace. If an EVS disk has been imported into a namespace, it is invisible in other namespaces and cannot be imported again. If you want to import an EVS disk that has been formatted with an ext4 file system, ensure that no partition has been created on the disk. Otherwise, data may be lost.

-
-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the EVS tab page, click Import.
  2. Select one or more EVS disks that you want to import. Then, click OK.
-
-

Unbinding an EVS Disk

After an EVS volume is successfully created or imported, it is automatically bound to the current cluster and cannot be used by other clusters. After the volume is unbound from the cluster, other clusters can use it.

-

If the EVS volume has been mounted to a workload, it cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the EVS disk list, click Unbind next to the target EVS disk.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an EVS volume is created, you can perform the operations described in Table 3.
- - - - - - - -
Table 3 Other operations

Operation

-

Description

-

Deleting an EVS volume

-
  1. Select the EVS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the EVS volume.
-
-
-
-
-
- - diff --git a/docs/cce/umn/cce_01_0312.html b/docs/cce/umn/cce_01_0312.html deleted file mode 100644 index 36d6b457..00000000 --- a/docs/cce/umn/cce_01_0312.html +++ /dev/null @@ -1,78 +0,0 @@ - - -

(kubectl) Automatically Creating an EVS Disk

-

Scenario

CCE supports creating EVS volumes through PersistentVolumeClaims (PVCs).

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the pvc-evs-auto-example.yaml file, which is used to create a PVC.

    touch pvc-evs-auto-example.yaml

    -

    vi pvc-evs-auto-example.yaml

    -
    Example YAML file for clusters of v1.9, v1.11, and v1.13:
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name: pvc-evs-auto-example
    -  namespace: default
    -  annotations:
    -    volume.beta.kubernetes.io/storage-class: sas
    -  labels:
    -    failure-domain.beta.kubernetes.io/region: eu-de
    -    failure-domain.beta.kubernetes.io/zone: eu-de-01
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    - -
    - - - - - - - - - - - - - - - - - - - -
    Table 1 Key parameters

    Parameter

    -

    Description

    -

    volume.beta.kubernetes.io/storage-class

    -

    EVS disk type. The value is in lowercase.

    -

    Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

    -

    failure-domain.beta.kubernetes.io/region

    -

    Region where the cluster is located.

    -

    For details about the value of region, see Regions and Endpoints.

    -

    failure-domain.beta.kubernetes.io/zone

    -

    AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

    -

    For details about the value of zone, see Regions and Endpoints.

    -

    storage

    -

    Storage capacity in the unit of Gi.

    -

    accessModes

    -

    Read/write mode of the volume.

    -

    You can set this parameter to ReadWriteMany (shared volume) or ReadWriteOnce (non-shared volume).

    -
    -
    -
    -

  3. Run the following command to create a PVC.

    kubectl create -f pvc-evs-auto-example.yaml

    -

    After the command is executed, an EVS disk is created in the partition where the cluster is located. Choose Storage > EVS to view the EVS disk. Alternatively, you can view the EVS disk based on the volume name on the EVS console. A kubectl-based check is also sketched after this procedure.

    -

-
-
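As noted above, the result can also be verified with kubectl (a sketch; output columns may vary by cluster version):

    kubectl get pvc pvc-evs-auto-example -n default
    # The PVC should reach the Bound status once the EVS disk has been created and the PV has been provisioned.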
- - diff --git a/docs/cce/umn/cce_01_0313.html b/docs/cce/umn/cce_01_0313.html deleted file mode 100644 index 76e4d343..00000000 --- a/docs/cce/umn/cce_01_0313.html +++ /dev/null @@ -1,555 +0,0 @@ - - -

(kubectl) Creating a PV from an Existing EVS Disk

-

Scenario

CCE allows you to create a PersistentVolume (PV) using an existing EVS disk. After the PV is created, you can create a PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the EVS console, create an EVS disk, and record the volume ID, capacity, and disk type of the EVS disk.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PersistentVolume (PV) and PersistentVolumeClaim (PVC). Assume that the file names are pv-evs-example.yaml and pvc-evs-example.yaml.

    touch pv-evs-example.yaml pvc-evs-example.yaml

    - -
    - - - - - - - - - - - - - - - - - -

    Kubernetes Version

    -

    Description

    -

    YAML Example

    -

    1.11.7 ≤ K8s version ≤ 1.13

    -

    Clusters from v1.11.7 to v1.13

    -

    Example YAML

    -

    1.11 ≤ K8s version < 1.11.7

    -

    Clusters from v1.11 to v1.11.7

    -

    Example YAML

    -

    K8s version = 1.9

    -

    Clusters of v1.9

    -

    Example YAML

    -
    -
    -

    Clusters from v1.11.7 to v1.13

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone:  eu-de-01
      -  annotations:
      -    pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxivol
      -  name: pv-evs-example 
      -spec: 
      -  accessModes: 
      -  - ReadWriteOnce 
      -  capacity: 
      -    storage: 10Gi 
      -  claimRef:
      -    apiVersion: v1
      -    kind: PersistentVolumeClaim
      -    name: pvc-evs-example
      -    namespace: default
      -  flexVolume: 
      -    driver: huawei.com/fuxivol 
      -    fsType: ext4 
      -    options:
      -      disk-mode: SCSI
      -      fsType: ext4 
      -      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: sas
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 1 Key parameters

      Parameter

      -

      Description

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      EVS volume capacity in the unit of Gi.

      -

      storageClassName

      -

      EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

      -

      driver

      -

      Storage driver.

      -

      For EVS disks, set this parameter to huawei.com/fuxivol.

      -

      volumeID

      -

      Volume ID of the EVS disk.

      -

      To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

      -

      disk-mode

      -

      Device type of the EVS disk. The value is VBD or SCSI.

      -

      For CCE clusters earlier than v1.11.7, you do not need to set this field. The value defaults to VBD.

      -

      This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

      -

      spec.claimRef.apiVersion

      -

      The value is fixed at v1.

      -

      spec.claimRef.kind

      -

      The value is fixed at PersistentVolumeClaim.

      -

      spec.claimRef.name

      -

      PVC name. The value is the same as the name of the PVC created in the next step.

      -

      spec.claimRef.namespace

      -

      Namespace of the PVC. The value is the same as the namespace of the PVC created in the next step.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1  
      -kind: PersistentVolumeClaim  
      -metadata:  
      -  annotations:  
      -    volume.beta.kubernetes.io/storage-class: sas
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone: eu-de-01     
      -  name: pvc-evs-example 
      -  namespace: default  
      -spec:  
      -  accessModes:  
      -  - ReadWriteOnce  
      -  resources:  
      -    requests:  
      -      storage: 10Gi
      -  volumeName: pv-evs-example
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 2 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class, which must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxivol.

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      Requested capacity in the PVC, in Gi.

      -

      The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    Clusters from v1.11 to v1.11.7

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone:  
      -  name: pv-evs-example 
      -spec: 
      -  accessModes: 
      -  - ReadWriteOnce
      -  capacity: 
      -    storage: 10Gi 
      -  flexVolume: 
      -    driver: huawei.com/fuxivol 
      -    fsType: ext4 
      -    options:
      -      fsType: ext4 
      -      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: sas
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 3 Key parameters

      Parameter

      -

      Description

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      EVS volume capacity in the unit of Gi.

      -

      storageClassName

      -

      EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

      -

      driver

      -

      Storage driver.

      -

      For EVS disks, set this parameter to huawei.com/fuxivol.

      -

      volumeID

      -

      Volume ID of the EVS disk.

      -

      To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

      -

      disk-mode

      -

      Device type of the EVS disk. The value is VBD or SCSI.

      -

      For CCE clusters earlier than v1.11.7, you do not need to set this field. The default value is VBD.

      -

      This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1  
      -kind: PersistentVolumeClaim  
      -metadata:  
      -  annotations:  
      -    volume.beta.kubernetes.io/storage-class: sas
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone: eu-de-01     
      -  name: pvc-evs-example 
      -  namespace: default  
      -spec:  
      -  accessModes:  
      -  - ReadWriteOnce
      -  resources:  
      -    requests:  
      -      storage: 10Gi
      -  volumeName: pv-evs-example
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 4 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class. The value can be sas or ssd. The value must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxivol.

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      Requested capacity in the PVC, in Gi.

      -

      The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    Clusters of v1.9

    -
    • Example YAML file for the PV:
      apiVersion: v1 
      -kind: PersistentVolume 
      -metadata: 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone:  
      -  name: pv-evs-example 
      -  namespace: default 
      -spec: 
      -  accessModes: 
      -  - ReadWriteOnce
      -  capacity: 
      -    storage: 10Gi 
      -  flexVolume: 
      -    driver: huawei.com/fuxivol 
      -    fsType: ext4 
      -    options: 
      -      fsType: ext4 
      -      kubernetes.io/namespace: default 
      -      volumeID: 0992dbda-6340-470e-a74e-4f0db288ed82 
      -  persistentVolumeReclaimPolicy: Delete 
      -  storageClassName: sas
      - -
      - - - - - - - - - - - - - - - - - - - - - - - - - -
      Table 5 Key parameters

      Parameter

      -

      Description

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      EVS volume capacity in the unit of Gi.

      -

      storageClassName

      -

      EVS disk type. Supported values: Common I/O (SATA), High I/O (SAS), and Ultra-high I/O (SSD)

      -

      driver

      -

      Storage driver.

      -

      For EVS disks, set this parameter to huawei.com/fuxivol.

      -

      volumeID

      -

      Volume ID of the EVS disk.

      -

      To obtain the volume ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the EVS tab page, and copy the PVC ID on the PVC details page.

      -

      disk-mode

      -

      Device type of the EVS disk. The value is VBD or SCSI.

      -

      For CCE clusters earlier than v1.11.7, you do not need to set this field. The default value is VBD.

      -

      This field is mandatory for CCE clusters from v1.11.7 to v1.13 that use Linux x86. As the EVS volumes dynamically provisioned by a PVC are created from SCSI EVS disks, you are advised to choose SCSI when manually creating volumes (static PVs). Volumes in the VBD mode can still be used after cluster upgrades.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1  
      -kind: PersistentVolumeClaim  
      -metadata:  
      -  annotations:  
      -    volume.beta.kubernetes.io/storage-class: sas
      -    volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol 
      -  labels: 
      -    failure-domain.beta.kubernetes.io/region: eu-de
      -    failure-domain.beta.kubernetes.io/zone: 
      -  name: pvc-evs-example 
      -  namespace: default  
      -spec:  
      -  accessModes:  
      -  - ReadWriteOnce 
      -  resources:  
      -    requests:  
      -      storage: 10Gi
      -  volumeName: pv-evs-example
      -  volumeNamespace: default
      - -
      - - - - - - - - - - - - - - - - - - - - - - -
      Table 6 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class, which must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxivol.

      -

      failure-domain.beta.kubernetes.io/region

      -

      Region where the cluster is located.

      -

      For details about the value of region, see Regions and Endpoints.

      -

      failure-domain.beta.kubernetes.io/zone

      -

      AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

      -

      For details about the value of zone, see Regions and Endpoints.

      -

      storage

      -

      Requested capacity in the PVC, in Gi.

      -

      The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

  4. Create the PV.

    kubectl create -f pv-evs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-evs-example.yaml

    -

    After the operation is successful, choose Resource Management > Storage to view the created PVC. You can also view the EVS disk by name on the EVS console. A kubectl-based check is also sketched at the end of this section.

    -

  6. (Optional) Add the metadata associated with the cluster to ensure that the EVS disk associated with the mounted static PV is not deleted when the node or cluster is deleted.

    If you skip this step in this example or when creating a static PV or PVC, ensure that the EVS disk associated with the static PV has been unbound from the node before you delete the node.

    -
    -
    1. Obtain the tenant token. For details, see Obtaining a User Token.
    2. Obtain the EVS access address EVS_ENDPOINT. For details, see Regions and Endpoints.

      -
    3. Add the metadata associated with the cluster to the EVS disk backing the static PV.
      curl -X POST ${EVS_ENDPOINT}/v2/${project_id}/volumes/${volume_id}/metadata --insecure \
          -d '{"metadata":{"cluster_id": "${cluster_id}", "namespace": "${pvc_namespace}"}}' \
          -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' \
          -H 'X-Auth-Token:${TOKEN}'

      Table 7 Key parameters

      • EVS_ENDPOINT: EVS access address. Set this parameter to the value obtained in 6.b.
      • project_id: Project ID. You can click the login user in the upper right corner of the console page, select My Credentials from the drop-down list, and view the project ID on the Projects tab page.
      • volume_id: ID of the associated EVS disk. Set this parameter to volume_id of the static PV to be created. You can also log in to the EVS console, click the name of the EVS disk to be imported, and obtain the ID from Summary on the disk details page.
      • cluster_id: ID of the cluster where the EVS PV is to be created. On the CCE console, choose Resource Management > Clusters. Click the name of the cluster to be associated. On the cluster details page, obtain the cluster ID.
      • pvc_namespace: Namespace where the PVC is to be bound.
      • TOKEN: User token. Set this parameter to the value obtained in 6.a.

      For example, run the following commands:

      curl -X POST https://evs.eu-de.otc.t-systems.com:443/v2/060576866680d5762f52c0150e726aa7/volumes/69c9619d-174c-4c41-837e-31b892604e14/metadata --insecure \
          -d '{"metadata":{"cluster_id": "71e8277e-80c7-11ea-925c-0255ac100442", "namespace": "default"}}' \
          -H 'Accept:application/json' -H 'Content-Type:application/json;charset=utf8' \
          -H 'X-Auth-Token:MIIPe******IsIm1ldG'

      After the request is executed, run the following commands to check whether the EVS disk has been associated with the metadata of the cluster:

      curl -X GET ${EVS_ENDPOINT}/v2/${project_id}/volumes/${volume_id}/metadata --insecure \
          -H 'X-Auth-Token:${TOKEN}'

      For example, run the following commands:

      curl -X GET https://evs.eu-de.otc.t-systems.com/v2/060576866680d5762f52c0150e726aa7/volumes/69c9619d-174c-4c41-837e-31b892604e14/metadata --insecure \
          -H 'X-Auth-Token:MIIPeAYJ***9t1c31ASaQ=='

      The command output displays the current metadata of the EVS disk.

      {
          "metadata": {
              "namespace": "default",
              "cluster_id": "71e8277e-80c7-11ea-925c-0255ac100442",
              "hw:passthrough": "true"
          }
      }
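      The placeholders in the commands above must hold real values before the requests are sent. A minimal sketch, assuming a Linux shell; the values shown only reuse the examples from this section:

      EVS_ENDPOINT=https://evs.eu-de.otc.t-systems.com      # EVS access address from Regions and Endpoints
      project_id=060576866680d5762f52c0150e726aa7           # your project ID
      volume_id=69c9619d-174c-4c41-837e-31b892604e14        # ID of the EVS disk backing the static PV
      TOKEN=<token obtained in 6.a>

      Note that ${cluster_id} and ${pvc_namespace} appear inside single quotes in the request body, so the shell does not expand them; replace them manually with the cluster ID and PVC namespace.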
    -

-
-
diff --git a/docs/cce/umn/cce_01_0314.html b/docs/cce/umn/cce_01_0314.html
deleted file mode 100644
index ba971b3a..00000000
--- a/docs/cce/umn/cce_01_0314.html
+++ /dev/null
@@ -1,176 +0,0 @@

(kubectl) Creating a Pod Mounted with an EVS Volume

-

Scenario

After an EVS volume is created or imported to CCE, you can mount it to a workload.

-

EVS disks cannot be attached across AZs. Before mounting a volume, you can run the kubectl get pvc command to query the available PVCs in the AZ where the current cluster is located.
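For example, a quick way to list the PVCs that can be mounted (a sketch; the namespace is an assumption):

kubectl get pvc -n default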

-
-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the evs-deployment-example.yaml file, which is used to create a Deployment.

    touch evs-deployment-example.yaml

    -

    vi evs-deployment-example.yaml

    -
    Example of mounting an EVS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: evs-deployment-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: evs-deployment-example
      template:
        metadata:
          labels:
            app: evs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp
              name: pvc-evs-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: pvc-evs-example
            persistentVolumeClaim:
              claimName: pvc-evs-auto-example

    Table 1 Key parameters

    • spec.template.spec.containers.volumeMounts.name: Name of the volume mounted to the container.
    • spec.template.spec.containers.volumeMounts.mountPath: Mount path of the container. In this example, the volume is mounted to the /tmp directory.
    • spec.template.spec.volumes.name: Name of the volume.
    • spec.template.spec.volumes.persistentVolumeClaim.claimName: Name of an existing PVC.

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

    Mounting an EVS volume to a StatefulSet (PVC template-based, non-shared volume):

    -
    Example YAML:
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: deploy-evs-sas-in
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: deploy-evs-sata-in
      template:
        metadata:
          labels:
            app: deploy-evs-sata-in
            failure-domain.beta.kubernetes.io/region: eu-de
            failure-domain.beta.kubernetes.io/zone: eu-de-01
        spec:
          containers:
            - name: container-0
              image: 'nginx:1.12-alpine-perl'
              volumeMounts:
                - name: bs-sas-mountoptionpvc
                  mountPath: /tmp
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: bs-sas-mountoptionpvc
            annotations:
              volume.beta.kubernetes.io/storage-class: sas
              volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxivol
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 10Gi
      serviceName: wwww

    Table 2 Key parameters

    • metadata.name: Name of the created workload.
    • spec.template.spec.containers.image: Image of the workload.
    • spec.template.spec.containers.volumeMounts.mountPath: Mount path of the container. In this example, the volume is mounted to the /tmp directory.
    • spec.serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -
    -

  3. Run the following command to create the pod:

    kubectl create -f evs-deployment-example.yaml

    -

    After the creation is complete, log in to the CCE console. In the navigation pane, choose Resource Management > Storage > EVS. Then, click the PVC name. On the PVC details page, you can view the binding relationship between the EVS volume and the PVC.

    -
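    You can also verify the result from kubectl. A minimal sketch, assuming the example names used in the Deployment above:

    kubectl get deployment evs-deployment-example -n default
    kubectl get pod -n default -l app=evs-deployment-example
    kubectl get pvc pvc-evs-auto-example -n default    # the PVC used by the Deployment should be Bound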

-
-
diff --git a/docs/cce/umn/cce_01_0316.html b/docs/cce/umn/cce_01_0316.html
deleted file mode 100644
index d86bfefa..00000000
--- a/docs/cce/umn/cce_01_0316.html
+++ /dev/null
@@ -1,14 +0,0 @@

Overview

-

CCE allows you to mount a volume created from a Scalable File Service (SFS) file system to a container to store data persistently. SFS volumes are commonly used in ReadWriteMany scenarios, such as media processing, content management, big data analysis, and workload process analysis.

-
Figure 1 Mounting SFS volumes to CCE
-

Description

  • Standard file protocols: You can mount file systems as volumes to servers, the same as using local directories.
  • Data sharing: The same file system can be mounted to multiple servers, so that data can be shared.
  • Private network: Users can access data only over private networks in data centers.
  • Capacity and performance: The capacity of a single file system is high (PB level) and the performance is excellent (ms-level read/write I/O latency).
  • Use cases: Deployments/StatefulSets in the ReadWriteMany mode and jobs created for high-performance computing (HPC), media processing, content management, web services, big data analysis, and workload process analysis
-
-
diff --git a/docs/cce/umn/cce_01_0317.html b/docs/cce/umn/cce_01_0317.html
deleted file mode 100644
index 0340e06d..00000000
--- a/docs/cce/umn/cce_01_0317.html
+++ /dev/null
@@ -1,135 +0,0 @@

Using SFS Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • SFS volumes are available only in certain regions.
  • Container storage in CCE clusters of Kubernetes 1.13 or later version supports encryption. Currently, E2E encryption is supported only in certain regions.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
-
-

Creating an SFS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the SFS tab, click Create SFS File System.
  3. Configure basic information, as shown in Table 1.

    -

    Table 1 Parameters for Creating a File System Volume

    • * PVC Name: Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.
    • Cluster Name: Cluster to which the file system volume belongs.
    • Namespace: Namespace with which the volume is associated.
    • Total Capacity: The total capacity is the capacity of a single volume. Fees are charged by actual usage.
    • Access Mode: Access permissions of user applications on storage resources (PVs).
      • ReadWriteMany (RWX): The SFS volume can be mounted as read-write by multiple nodes.
    • Encryption: KMS Encryption is deselected by default. After KMS Encryption is selected, Key Management Service (KMS), an easy-to-use and highly secure key service, will be used for SFS file systems. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized successfully, it can obtain KMS keys to encrypt and decrypt file systems.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      This function is supported only for clusters of v1.13.10 and later in certain regions.

  4. Click Create.

    The volume is displayed in the list. When the PVC status becomes Bound, the volume is created successfully.

    -

  5. Click the volume name to view detailed information about the volume.
-
-

Adding an SFS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. During creation, expand Data Storage after adding a container. On the Cloud Volume tab page, click Add Cloud Volume.
  2. Set the storage class to SFS.

    -

    - - - - - - - - - - - - - - - - - - -
    Table 2 Parameters for mounting a file system

    Parameter

    -

    Parameter Description

    -

    Type

    -

    File Storage (NFS): This type applies to a wide range of scenarios, including media processing, content management, big data, and application analysis.

    -

    Allocation Mode

    -

    Manual

    -
    • Name: Select a created file system. You need to create a file system in advance. For details about how to create a file system, see Creating an SFS Volume.
    • Sub-Type: subtype of the created file storage.
    • Storage Capacity: This field is one of the PVC attributes. If the storage capacity has been expanded on the IaaS side, it is normal that the capacity values are inconsistent. The PVC capacity is the same as the storage entity capacity only after end-to-end container storage capacity expansion is supported for CCE clusters of v1.13.
    -

    Automatic

    -

    An SFS volume is created automatically. You need to enter the storage capacity.

    -
    • Sub-Type: Select NFS.
    • Storage Capacity: Specify the total storage capacity, in GB. Ensure that the storage capacity quota is not exceeded; otherwise, creation will fail.
    • After you select KMS Encryption, Key Management Service (KMS), an easy-to-use and highly secure service, will be enabled for file systems. This function is supported only for clusters of v1.13.10 and later in certain regions. If no agency has been created, click Create Agency and set the following parameters:
      • Agency Name: Agencies can be used to assign permissions to trusted accounts or cloud services for a specific period of time. If no agency is created, click Create Agency. The agency name SFSAccessKMS indicates that SFS is granted the permission to access KMS. After SFS is authorized successfully, it can obtain KMS keys to encrypt and decrypt file systems.
      • Key Name: After a key is created, it can be loaded and used in containerized applications.
      • Key ID: generated by default.
      -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter the subpath of the file storage, for example, /tmp.

      If this parameter is not specified, the root path of the data volume is used by default. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.

      -
    2. Container Path: Enter the path of the container, for example, /tmp.
      The container path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data volumes mounted to the path.
      • Read/Write: You can modify the data volumes mounted to the path. Newly written data is not migrated if the container is migrated, which may cause a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Importing an SFS Volume

CCE allows you to import existing SFS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace. Then, click OK.
-
-

Unbinding an SFS Volume

When an SFS volume is successfully created or imported, the volume is automatically bound to the current cluster. Other clusters can also use the volume. When the SFS volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS volume has been attached to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS volume list, click Unbind next to the target volume.
  2. Confirm the unbinding, and click OK.
-
-

Related Operations

After an SFS volume is created, you can perform the operations described in Table 3.

Table 3 Other operations

• Deleting an SFS volume
  1. Select the SFS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the file system.
• Importing an SFS volume
  CCE allows you to import existing SFS volumes.
  1. On the SFS tab page, click Import.
  2. Select one or more SFS volumes that you want to attach.
  3. Select the target cluster and namespace.
  4. Click Yes.
-
-
diff --git a/docs/cce/umn/cce_01_0318.html b/docs/cce/umn/cce_01_0318.html
deleted file mode 100644
index 8bcd3db1..00000000
--- a/docs/cce/umn/cce_01_0318.html
+++ /dev/null
@@ -1,66 +0,0 @@

(kubectl) Automatically Creating an SFS Volume

-

Scenario

CCE supports creating SFS volumes through PersistentVolumeClaims (PVCs).

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the pvc-sfs-auto-example.yaml file, which is used to create a PVC.

    touch pvc-sfs-auto-example.yaml

    -

    vi pvc-sfs-auto-example.yaml

    -
    Example YAML file:
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      annotations:
        volume.beta.kubernetes.io/storage-class: nfs-rw
      name: pvc-sfs-auto-example
      namespace: default
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 10Gi

    Table 1 Key parameters

    • volume.beta.kubernetes.io/storage-class: File storage class. Currently, the standard file protocol type (nfs-rw) is supported.
    • name: Name of the PVC to be created.
    • accessModes: Only ReadWriteMany is supported. ReadWriteOnce is not supported.
    • storage: Storage capacity, in Gi.
    -

  3. Run the following command to create the PVC.

    kubectl create -f pvc-sfs-auto-example.yaml

    -

    After the command is executed, a file system is created in the VPC to which the cluster belongs. Choose Storage > SFS on the CCE console or log in to the SFS console to view the file system.

    -
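    From kubectl, a minimal check, assuming the example PVC name and the default namespace:

    kubectl get pvc pvc-sfs-auto-example -n default    # STATUS changes to Bound after the file system is provisioned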

-
-
diff --git a/docs/cce/umn/cce_01_0319.html b/docs/cce/umn/cce_01_0319.html
deleted file mode 100644
index 2c484bff..00000000
--- a/docs/cce/umn/cce_01_0319.html
+++ /dev/null
@@ -1,283 +0,0 @@

(kubectl) Creating a PV from an Existing SFS File System

-

Scenario

CCE allows you to use an existing file system to create a PersistentVolume (PV). After the creation is successful, create the corresponding PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the SFS console, create a file system, and record the file system ID, shared path, and capacity.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-sfs-example.yaml and pvc-sfs-example.yaml.

    touch pv-sfs-example.yaml pvc-sfs-example.yaml

    The example YAML depends on the cluster version:

    • Clusters from v1.11 to v1.13 (1.11 ≤ K8s version ≤ 1.13): use the first example YAML below.
    • Clusters of v1.9 (K8s version = 1.9): use the second example YAML below.

    Clusters from v1.11 to v1.13

    -
    • Example YAML file for the PV:
      apiVersion: v1
      kind: PersistentVolume
      metadata:
        name: pv-sfs-example
        annotations:
          pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxinfs
      spec:
        accessModes:
        - ReadWriteMany
        capacity:
          storage: 10Gi
        claimRef:
          apiVersion: v1
          kind: PersistentVolumeClaim
          name: pvc-sfs-example
          namespace: default
        flexVolume:
          driver: huawei.com/fuxinfs
          fsType: nfs
          options:
            deviceMountPath: <your_deviceMountPath>  # Shared storage path of your file system
            fsType: nfs
            volumeID: f6976f9e-2493-419b-97ca-d7816008d91c
        persistentVolumeReclaimPolicy: Delete
        storageClassName: nfs-rw

      Table 1 Key parameters

      • driver: Storage driver used to mount the volume. Set the driver to huawei.com/fuxinfs for the file system.
      • deviceMountPath: Shared path of the file system. On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.
      • volumeID: File system ID. To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name on the SFS tab page, and copy the PVC ID on the PVC details page.
      • storage: File system size.
      • storageClassName: Read/write mode supported by the file system. Currently, nfs-rw and nfs-ro are supported.
      • spec.claimRef.apiVersion: The value is fixed at v1.
      • spec.claimRef.kind: The value is fixed at PersistentVolumeClaim.
      • spec.claimRef.name: The value is the same as the name of the PVC created in the next step.
      • spec.claimRef.namespace: Namespace of the PVC. The value is the same as the namespace of the PVC created in the next step.
    • Example YAML file for the PVC:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        annotations:
          volume.beta.kubernetes.io/storage-class: nfs-rw
          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
        name: pvc-sfs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        resources:
          requests:
            storage: 10Gi
        volumeName: pv-sfs-example

      Table 2 Key parameters

      • volume.beta.kubernetes.io/storage-class: Read/write mode supported by the file system. nfs-rw and nfs-ro are supported. The value must be the same as that of the existing PV.
      • volume.beta.kubernetes.io/storage-provisioner: Must be set to flexvolume-huawei.com/fuxinfs.
      • storage: Storage capacity, in Gi. The value must be the same as the storage size of the existing PV.
      • volumeName: Name of the PV.
    -

    Clusters of v1.9

    -
    • Example YAML file for the PV:
      apiVersion: v1
      kind: PersistentVolume
      metadata:
        name: pv-sfs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        capacity:
          storage: 10Gi
        flexVolume:
          driver: huawei.com/fuxinfs
          fsType: nfs
          options:
            deviceMountPath: <your_deviceMountPath>  # Shared storage path of your file system
            fsType: nfs
            kubernetes.io/namespace: default
            volumeID: f6976f9e-2493-419b-97ca-d7816008d91c
        persistentVolumeReclaimPolicy: Delete
        storageClassName: nfs-rw

      Table 3 Key parameters

      • driver: Storage driver used to mount the volume. Set the driver to huawei.com/fuxinfs for the file system.
      • deviceMountPath: Shared path of the file system. On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.
      • volumeID: File system ID. To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name on the SFS tab page, and copy the PVC ID on the PVC details page.
      • storage: File system size.
      • storageClassName: Read/write mode supported by the file system. Currently, nfs-rw and nfs-ro are supported.
    • Example YAML file for the PVC:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        annotations:
          volume.beta.kubernetes.io/storage-class: nfs-rw
          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
        name: pvc-sfs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        resources:
          requests:
            storage: 10Gi
        volumeName: pv-sfs-example
        volumeNamespace: default

      Table 4 Key parameters

      • volume.beta.kubernetes.io/storage-class: Read/write mode supported by the file system. nfs-rw and nfs-ro are supported. The value must be the same as that of the existing PV.
      • volume.beta.kubernetes.io/storage-provisioner: The field must be set to flexvolume-huawei.com/fuxinfs.
      • storage: Storage capacity, in Gi. The value must be the same as the storage size of the existing PV.
      • volumeName: Name of the PV.
    -

    The VPC to which the file system belongs must be the same as the VPC of the ECS nodes to which the workload is scheduled.

    -
    -

  4. Create the PV.

    kubectl create -f pv-sfs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-sfs-example.yaml

    -
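    A minimal verification sketch, assuming the example names above:

    kubectl get pv pv-sfs-example
    kubectl get pvc pvc-sfs-example -n default    # both should report the Bound status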

-
-
diff --git a/docs/cce/umn/cce_01_0320.html b/docs/cce/umn/cce_01_0320.html
deleted file mode 100644
index fb2affab..00000000
--- a/docs/cce/umn/cce_01_0320.html
+++ /dev/null
@@ -1,168 +0,0 @@

(kubectl) Creating a Deployment Mounted with an SFS Volume

-

Scenario

After an SFS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the sfs-deployment-example.yaml file, which is used to create a pod.

    touch sfs-deployment-example.yaml

    -

    vi sfs-deployment-example.yaml

    -
    Example of mounting an SFS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: sfs-deployment-example                          # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: sfs-deployment-example
      template:
        metadata:
          labels:
            app: sfs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                                # Mount path
              name: pvc-sfs-example
          imagePullSecrets:
            - name: default-secret
          restartPolicy: Always
          volumes:
          - name: pvc-sfs-example
            persistentVolumeClaim:
              claimName: pvc-sfs-auto-example                # PVC name

    Table 1 Key parameters

    • metadata.name: Name of the pod to be created.
    • spec.template.spec.containers.volumeMounts.mountPath: Mount path in the container. In this example, the mount path is /tmp.
    • spec.template.spec.volumes.persistentVolumeClaim.claimName: Name of an existing PVC.

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

    Example of mounting an SFS volume to a StatefulSet (PVC template-based, dedicated volume):

    -
    Example YAML:
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: deploy-sfs-nfs-rw-in
      namespace: default
      labels:
        appgroup: ''
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: deploy-sfs-nfs-rw-in
      template:
        metadata:
          labels:
            app: deploy-sfs-nfs-rw-in
        spec:
          containers:
            - name: container-0
              image: 'nginx:1.12-alpine-perl'
              volumeMounts:
                - name: bs-nfs-rw-mountoptionpvc
                  mountPath: /aaa
          imagePullSecrets:
            - name: default-secret
      volumeClaimTemplates:
        - metadata:
            name: bs-nfs-rw-mountoptionpvc
            annotations:
              volume.beta.kubernetes.io/storage-class: nfs-rw
              volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxinfs
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 1Gi
      serviceName: wwww

    Table 2 Key parameters

    • metadata.name: Name of the created workload.
    • spec.template.spec.containers.image: Image of the workload.
    • spec.template.spec.containers.volumeMounts.mountPath: Mount path in the container. In this example, the mount path is /aaa.
    • spec.serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f sfs-deployment-example.yaml

    -

    After the creation is complete, log in to the CCE console. In the navigation pane, choose Resource Management > Storage > SFS. Click the PVC name. On the PVC details page, you can view the binding relationship between SFS and PVC.

    -
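    You can also verify the result from kubectl. A minimal sketch, assuming the example names used above:

    kubectl get pod -n default -l app=sfs-deployment-example
    kubectl get pvc pvc-sfs-auto-example -n default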

-
-
diff --git a/docs/cce/umn/cce_01_0321.html b/docs/cce/umn/cce_01_0321.html
deleted file mode 100644
index f2dfa354..00000000
--- a/docs/cce/umn/cce_01_0321.html
+++ /dev/null
@@ -1,110 +0,0 @@

(kubectl) Creating a StatefulSet Mounted with an SFS Volume

-

Scenario

CCE allows you to use an existing SFS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Create an SFS volume by referring to Creating an SFS Volume and record the volume name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is sfs-statefulset-example.yaml.

    touch sfs-statefulset-example.yaml

    -

    vi sfs-statefulset-example.yaml

    -

    Example YAML:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: sfs-statefulset-example
      namespace: default
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: sfs-statefulset-example
      serviceName: qwqq
      template:
        metadata:
          annotations:
            metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
            pod.alpha.kubernetes.io/initialized: "true"
          labels:
            app: sfs-statefulset-example
        spec:
          affinity: {}
          containers:
          - image: nginx:latest
            name: container-0
            volumeMounts:
            - mountPath: /tmp
              name: pvc-sfs-example
          imagePullSecrets:
          - name: default-secret
          volumes:
            - name: pvc-sfs-example
              persistentVolumeClaim:
                claimName: cce-sfs-demo

    Table 1 Key parameters

    • spec.replicas: Number of pods.
    • metadata.name: Name of the created workload.
    • spec.template.spec.containers.image: Image used by the workload.
    • spec.template.spec.containers.volumeMounts.mountPath: Mount path in the container.
    • spec.serviceName: Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.
    • spec.template.spec.volumes.persistentVolumeClaim.claimName: Name of an existing PVC.

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  4. Create the StatefulSet.

    kubectl create -f sfs-statefulset-example.yaml

    -
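    You can then check the result with kubectl. A minimal sketch, assuming the example names above:

    kubectl get statefulset sfs-statefulset-example -n default
    kubectl get pod -n default -l app=sfs-statefulset-example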

-
-
diff --git a/docs/cce/umn/cce_01_0323.html b/docs/cce/umn/cce_01_0323.html
deleted file mode 100644
index 43903118..00000000
--- a/docs/cce/umn/cce_01_0323.html
+++ /dev/null
@@ -1,19 +0,0 @@

Overview

-

CCE allows you to mount a volume created from an Object Storage Service (OBS) bucket to a container to store data persistently. Object storage is commonly used for cloud workload data, data analysis, content analysis, and hotspot objects.

-
Figure 1 Mounting OBS volumes to CCE
-

Storage Class

Object storage offers three storage classes, Standard, Infrequent Access, and Archive, to satisfy different requirements for storage performance and costs.

-
  • The Standard storage class features low access latency and high throughput. It is therefore applicable to storing a large number of hot files (frequently accessed every month) or small files (less than 1 MB). The application scenarios include big data analytics, mobile apps, hot videos, and picture processing on social media.
  • The Infrequent Access storage class is ideal for storing data that is accessed infrequently (less than 12 times a year) but requires a quick response when accessed. The application scenarios include file synchronization or sharing, and enterprise-level backup. It provides the same durability, access latency, and throughput as the Standard storage class but at a lower cost. However, the Infrequent Access storage class has lower availability than the Standard storage class.
  • The Archive storage class is suitable for archiving data that is rarely accessed (about once a year on average). The application scenarios include data archiving and long-term data backup. The Archive storage class is secure and durable at an affordable low cost, which can be used to replace tape libraries. However, it may take hours to restore data from the Archive storage class.
-
-

Description

  • Standard APIs: With HTTP RESTful APIs, OBS allows you to use client tools or third-party tools to access object storage.
  • Data sharing: Servers, embedded devices, and IoT devices can use the same path to access shared object data in OBS.
  • Public/Private networks: OBS allows data to be accessed from public networks to meet Internet application requirements.
  • Capacity and performance: No capacity limit; high performance (read/write I/O latency within 10 ms).
  • Use cases: Deployments/StatefulSets in the ReadOnlyMany mode and jobs created for big data analysis, static website hosting, online video on demand (VoD), gene sequencing, intelligent video surveillance, backup and archiving, and enterprise cloud boxes (web disks). You can create object storage by using the OBS console, tools, and SDKs.
-
-

Reference

CCE clusters can also be mounted with OBS buckets of third-party tenants, including OBS parallel file systems (preferred) and OBS object buckets.

-
-
diff --git a/docs/cce/umn/cce_01_0324.html b/docs/cce/umn/cce_01_0324.html
deleted file mode 100644
index 03ad5eec..00000000
--- a/docs/cce/umn/cce_01_0324.html
+++ /dev/null
@@ -1,144 +0,0 @@

Using OBS Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • CCE clusters of v1.7.3-r8 and earlier do not support OBS volumes. You need to upgrade these clusters or create clusters of a later version that supports OBS.
  • Volumes cannot be created in specified enterprise projects. Only the default enterprise project is supported.
  • The following operations apply to clusters of Kubernetes 1.13 or earlier.
-
-

Preparations

To ensure reliable and stable access to OBS buckets mounted as volumes, you must configure the AK/SK before you create OBS volumes.

-

The procedure for configuring the AK/SK is as follows:

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. On the OBS tab page, click AK/SK in the notice.
    Figure 1 Configuring the AK/SK
    -
  3. Click , select a key file, and click Upload to upload the key file.
  4. Select the corresponding workload and click Restart.
-

When creating an OBS volume, you must use the AK/SK. If the key file is not uploaded, the pod will fail to start, or OBS data access will be abnormal because the volume cannot be mounted.

-
-
-

Creating an OBS Volume

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage.
  2. Click the OBS tab and click Create OBS Bucket.
  3. Configure basic information, as shown in Table 1.

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Parameters for creating an OBS volume

    Parameter

    -

    Description

    -

    * PVC Name

    -

    Name of the new PVC, which is different from the volume name. The actual volume name is automatically generated when the PV is created by the PVC.

    -

    The name contains 3 to 55 characters (excluding the prefix). It must contain lowercase letters, digits, and hyphens (-), and cannot start or end with a hyphen (-).

    -

    Cluster Name

    -

    Cluster to which the OBS volume belongs.

    -

    Namespace

    -

    Namespace to which the volume belongs. The default value is default.

    -

    Instance Type

    -

    Type of the storage instance created on OBS.

    -
    • Parallel file system: supported when the cluster version is 1.15 or later and the everest add-on version is 1.0.2 or later.
    • Object bucket: A bucket is a container for storing objects in OBS. OBS provides flat storage in the form of buckets and objects. Unlike the conventional multi-layer directory structure of file systems, all objects in a bucket are stored at the same logical layer.
    -

    Storage Class

    -

    This parameter is displayed when you select Object bucket for Instance Type.

    -

    This parameter indicates the storage classes supported by OBS.

    -
    • Standard: applicable to scenarios where a large number of hotspot files or small-sized files need to be accessed frequently (multiple times per month on average) and require fast access response.
    • Infrequent access: applicable to scenarios where data is not frequently accessed (less than 12 times per year on average) but requires fast access response.
    -

    Storage Policy

    -

    Object storage has the following policies:

    -

    Private: Only the bucket owner has full control over the bucket. Unauthorized users do not have permissions to access the bucket.

    -

    Access Mode

    -

    Access permissions of user applications on storage resources (PVs).

    -
    • ReadWriteMany (RWX): The volume is mounted as read-write by multiple nodes.
    -
    -
    -

  4. Click Create.

    After the OBS volume is successfully created, it is displayed in the OBS volume list. Click the PVC name to view detailed information about the OBS volume.

    -

-
-

Adding an OBS Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set Type to OBS.

    -

    - - - - - - - - - - - - - - - - - - -
    Table 2 OBS volume parameters

    Parameter

    -

    Description

    -

    Type

    -

    Select OBS.

    -

    OBS: Standard and Infrequent Access OBS buckets are supported. OBS buckets are commonly used for big data analytics, cloud native applications, static website hosting, and backup/active archiving.

    -

    Allocation Mode

    -

    Manual

    -

    Name: Select a created OBS volume.

    -

    Sub-Type: class of the selected volume. The value can be Standard or Infrequent access, and you do not need to set this parameter.

    -

    Automatic

    -

    Type of the storage instance created on OBS.

    -
    • Parallel file system: supported when the cluster version is 1.15 or later and the everest add-on version is 1.0.2 or later.
    • Object bucket: A bucket is a container for storing objects in OBS.

      Sub-Type: Select Standard or Infrequent access.

      -
    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. Container Path: Enter the mount path in the container, for example, /tmp.
      The mount path must not be a system directory, such as / and /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that there are no files affecting container startup in the directory. Otherwise, such files will be replaced, resulting in failures to start the container and create the workload.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    2. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which causes a data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Importing an OBS Volume

CCE allows you to import existing OBS volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the OBS tab page, click Import.
  2. Select one or more OBS volumes that you want to import.
  3. Select the target cluster and namespace.
  4. Click OK.
-
-

Unbinding an OBS Volume

When an OBS volume is successfully created, the OBS volume is automatically bound to the current cluster. Other clusters can also use the OBS volume. When the volume is unbound from the cluster, other clusters can still use the volume.

-

If the volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the OBS volume list, click Unbind next to the target OBS volume.
  2. In the dialog box displayed, click Yes.
-
-

Related Operations

After an OBS volume is created, you can perform the operation described in Table 3. -
- - - - - - - -
Table 3 Other operations

Operation

-

Description

-

Deleting an OBS volume

-
  1. Select the OBS volume to be deleted and click Delete in the Operation column.
  2. Follow the prompts to delete the volume.
-
-
-
-
-
diff --git a/docs/cce/umn/cce_01_0325.html b/docs/cce/umn/cce_01_0325.html
deleted file mode 100644
index d3b48fa4..00000000
--- a/docs/cce/umn/cce_01_0325.html
+++ /dev/null
@@ -1,66 +0,0 @@

(kubectl) Automatically Creating an OBS Volume

-

Scenario

During the use of OBS, expected OBS buckets can be automatically created and mounted as volumes. Currently, standard and infrequent access OBS buckets are supported, which correspond to obs-standard and obs-standard-ia, respectively.

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the pvc-obs-auto-example.yaml file, which is used to create a PVC.

    touch pvc-obs-auto-example.yaml

    -

    vi pvc-obs-auto-example.yaml

    -

    Example YAML:

    -
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      annotations:
        volume.beta.kubernetes.io/storage-class: obs-standard  # OBS bucket type. The value can be obs-standard (standard) or obs-standard-ia (infrequent access).
      name: pvc-obs-auto-example  # PVC name
      namespace: default
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi   # Storage capacity, in Gi. For OBS buckets, this field is used only for verification (fixed at 1; cannot be empty or 0). Any value you set does not take effect for OBS buckets.

    Table 1 Key parameters

    • volume.beta.kubernetes.io/storage-class: Bucket type. Currently, obs-standard and obs-standard-ia are supported.
    • name: Name of the PVC to be created.
    • accessModes: Only ReadWriteMany is supported. ReadWriteOnce is not supported.
    • storage: Storage capacity, in Gi. For OBS buckets, this field is used only for verification (cannot be empty or 0). Its value is fixed at 1, and any value you set does not take effect for OBS buckets.

  3. Run the following command to create the PVC.

    kubectl create -f pvc-obs-auto-example.yaml

    -

    After the command is executed, an OBS bucket is created in the VPC to which the cluster belongs. You can click the bucket name in Storage > OBS to view the bucket or view it on the OBS console.

    -
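    From kubectl, a minimal check, assuming the example PVC name and the default namespace:

    kubectl get pvc pvc-obs-auto-example -n default    # STATUS should be Bound once the bucket is created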

-
-
diff --git a/docs/cce/umn/cce_01_0326.html b/docs/cce/umn/cce_01_0326.html
deleted file mode 100644
index 57f78493..00000000
--- a/docs/cce/umn/cce_01_0326.html
+++ /dev/null
@@ -1,291 +0,0 @@

(kubectl) Creating a PV from an Existing OBS Bucket

-

Scenario

CCE allows you to use an existing OBS bucket to create a PersistentVolume (PV). You can create a PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the OBS console, create an OBS bucket, and record the bucket name and storage class.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-obs-example.yaml and pvc-obs-example.yaml.

    touch pv-obs-example.yaml pvc-obs-example.yaml

    The example YAML depends on the cluster version:

    • Clusters from v1.11 to v1.13 (1.11 ≤ K8s version ≤ 1.13): use the first example YAML below.
    • Clusters of v1.9 (K8s version = 1.9): use the second example YAML below.

    -
    • Example YAML file for the PV:
      apiVersion: v1
      kind: PersistentVolume
      metadata:
        name: pv-obs-example
        annotations:
          pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxiobs
      spec:
        accessModes:
        - ReadWriteMany
        capacity:
          storage: 1Gi
        claimRef:
          apiVersion: v1
          kind: PersistentVolumeClaim
          name: pvc-obs-example
          namespace: default
        flexVolume:
          driver: huawei.com/fuxiobs
          fsType: obs
          options:
            fsType: obs
            region: eu-de
            storage_class: STANDARD
            volumeID: test-obs
        persistentVolumeReclaimPolicy: Delete
        storageClassName: obs-standard

      Table 1 Key parameters

      • driver: Storage driver used to mount the volume. Set the driver to huawei.com/fuxiobs for the OBS volume.
      • storage_class: Storage class, including STANDARD (standard bucket) and STANDARD_IA (infrequent access bucket).
      • region: For details about the value of region, see Regions and Endpoints.
      • volumeID: OBS bucket name. To obtain the name, log in to the CCE console, choose Resource Management > Storage, click the PVC name on the OBS tab page, and copy the PV name on the PV Details tab page.
      • storage: Storage capacity, in Gi. The value is fixed at 1Gi.
      • storageClassName: Storage class supported by OBS, including obs-standard (standard bucket) and obs-standard-ia (infrequent access bucket).
      • spec.claimRef.apiVersion: The value is fixed at v1.
      • spec.claimRef.kind: The value is fixed at PersistentVolumeClaim.
      • spec.claimRef.name: The value is the same as the name of the PVC created in the next step.
      • spec.claimRef.namespace: The value is the same as the namespace of the PVC created in the next step.
    • Example YAML file for the PVC:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        annotations:
          volume.beta.kubernetes.io/storage-class: obs-standard
          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
        name: pvc-obs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        resources:
          requests:
            storage: 1Gi
        volumeName: pv-obs-example

      Table 2 Key parameters

      • volume.beta.kubernetes.io/storage-class: Storage class supported by OBS, including obs-standard and obs-standard-ia.
      • volume.beta.kubernetes.io/storage-provisioner: Must be set to flexvolume-huawei.com/fuxiobs.
      • volumeName: Name of the PV.
      • storage: Storage capacity, in Gi. The value is fixed at 1Gi.
    -

    Clusters of v1.9

    -
    • Example YAML file for the PV:
      apiVersion: v1
      kind: PersistentVolume
      metadata:
        name: pv-obs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        capacity:
          storage: 1Gi
        flexVolume:
          driver: huawei.com/fuxiobs
          fsType: obs
          options:
            fsType: obs
            kubernetes.io/namespace: default
            region: eu-de
            storage_class: STANDARD
            volumeID: test-obs
        persistentVolumeReclaimPolicy: Delete
        storageClassName: obs-standard

      Table 3 Key parameters

      • driver: Storage driver used to mount the volume. Set the driver to huawei.com/fuxiobs for the OBS volume.
      • storage_class: Storage class, including STANDARD (standard bucket) and STANDARD_IA (infrequent access bucket).
      • region: For details about the value of region, see Regions and Endpoints.
      • volumeID: OBS bucket name. To obtain the name, log in to the CCE console, choose Resource Management > Storage, click the PVC name on the OBS tab page, and copy the PV name on the PV Details tab page.
      • storage: Storage capacity, in Gi. The value is fixed at 1Gi.
      • storageClassName: Storage class supported by OBS, including obs-standard (standard bucket) and obs-standard-ia (infrequent access bucket).
    • Example YAML file for the PVC:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        annotations:
          volume.beta.kubernetes.io/storage-class: obs-standard
          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
        name: pvc-obs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        resources:
          requests:
            storage: 1Gi
        volumeName: pv-obs-example
        volumeNamespace: default

      Table 4 Key parameters

      • volume.beta.kubernetes.io/storage-class: Storage class supported by OBS, including obs-standard and obs-standard-ia.
      • volume.beta.kubernetes.io/storage-provisioner: Must be set to flexvolume-huawei.com/fuxiobs.
      • volumeName: Name of the PV.
      • storage: Storage capacity, in Gi. The value is fixed at 1Gi.
    -

  4. Create the PV.

    kubectl create -f pv-obs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-obs-example.yaml

    -
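    A minimal verification sketch, assuming the example names above:

    kubectl get pv pv-obs-example
    kubectl get pvc pvc-obs-example -n default    # both should report the Bound status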

-
-
diff --git a/docs/cce/umn/cce_01_0327.html b/docs/cce/umn/cce_01_0327.html
deleted file mode 100644
index 9a4136ac..00000000
--- a/docs/cce/umn/cce_01_0327.html
+++ /dev/null
@@ -1,175 +0,0 @@

(kubectl) Creating a Deployment Mounted with an OBS Volume

-

Scenario

After an OBS volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the obs-deployment-example.yaml file, which is used to create a Deployment.

    touch obs-deployment-example.yaml

    -

    vi obs-deployment-example.yaml

    -
    Example of mounting an OBS volume to a Deployment (PVC-based, shared volume):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: obs-deployment-example                       # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-deployment-example
      template:
        metadata:
          labels:
            app: obs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                       # Mount path
              name: pvc-obs-example
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
          volumes:
          - name: pvc-obs-example
            persistentVolumeClaim:
              claimName: pvc-obs-auto-example       # PVC name

    Table 1 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the pod to be created.

    -

    app

    -

    Name of the application running in the pod.

    -

    mountPath

    -

    Mount path in the container.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

    Example of mounting an OBS volume to a StatefulSet (PVC template-based, dedicated volume):

    -
    Example YAML:
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: deploy-obs-standard-in
      namespace: default
      generation: 1
      labels:
        appgroup: ''
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: deploy-obs-standard-in
      template:
        metadata:
          labels:
            app: deploy-obs-standard-in
          annotations:
            metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
            pod.alpha.kubernetes.io/initialized: 'true'
        spec:
          containers:
            - name: container-0
              image: 'nginx:1.12-alpine-perl'
              env:
                - name: PAAS_APP_NAME
                  value: deploy-obs-standard-in
                - name: PAAS_NAMESPACE
                  value: default
                - name: PAAS_PROJECT_ID
                  value: a2cd8e998dca42e98a41f596c636dbda
              resources: {}
              volumeMounts:
                - name: obs-bs-standard-mountoptionpvc
                  mountPath: /tmp
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: File
              imagePullPolicy: IfNotPresent
          restartPolicy: Always
          terminationGracePeriodSeconds: 30
          dnsPolicy: ClusterFirst
          securityContext: {}
          imagePullSecrets:
            - name: default-secret
          affinity: {}
          schedulerName: default-scheduler
      volumeClaimTemplates:
        - metadata:
            name: obs-bs-standard-mountoptionpvc
            annotations:
              volume.beta.kubernetes.io/storage-class: obs-standard
              volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiobs
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 1Gi
      serviceName: wwww
      podManagementPolicy: OrderedReady
      updateStrategy:
        type: RollingUpdate
      revisionHistoryLimit: 10

    Table 2 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the created workload.

    -

    image

    -

    Image of the workload.

    -

    mountPath

    -

    Mount path in the container. In this example, the volume is mounted to the /tmp directory.

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.volumeClaimTemplates.metadata.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f obs-deployment-example.yaml

    -

    After the creation is complete, choose Storage > OBS on the CCE console and click the PVC name. On the PVC details page, you can view the binding relationship between the OBS service and the PVC.

    -
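
    Alternatively, you can verify the result from the CLI. This is an optional check using the example names above (adjust them if you changed the manifests):

    kubectl get deployment obs-deployment-example -n default

    kubectl get pvc pvc-obs-auto-example -n default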

-
-
diff --git a/docs/cce/umn/cce_01_0328.html b/docs/cce/umn/cce_01_0328.html
deleted file mode 100644
index bba3314d..00000000
--- a/docs/cce/umn/cce_01_0328.html
+++ /dev/null
@@ -1,96 +0,0 @@

(kubectl) Creating a StatefulSet Mounted with an OBS Volume

-

Scenario

CCE allows you to use an existing OBS volume to create a StatefulSet through a PersistentVolumeClaim (PVC).

-
-

Prerequisites

  • You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.
  • The AK/SK has been uploaded. For details, see Preparations.
-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Create an OBS volume by referring to Creating an OBS Volume and obtain the PVC name.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create a YAML file for creating the workload. Assume that the file name is obs-statefulset-example.yaml.

    touch obs-statefulset-example.yaml

    -

    vi obs-statefulset-example.yaml

    -

    Example YAML:

    -
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: obs-statefulset-example
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-statefulset-example
      serviceName: qwqq
      template:
        metadata:
          annotations:
            metrics.alpha.kubernetes.io/custom-endpoints: '[{"api":"","path":"","port":"","names":""}]'
            pod.alpha.kubernetes.io/initialized: "true"
          creationTimestamp: null
          labels:
            app: obs-statefulset-example
        spec:
          affinity: {}
          containers:
          - image: nginx:latest
            imagePullPolicy: Always
            name: container-0
            volumeMounts:
            - mountPath: /tmp
              name: pvc-obs-example
          imagePullSecrets:
          - name: default-secret
          volumes:
            - name: pvc-obs-example
              persistentVolumeClaim:
                claimName: cce-obs-demo

    Table 1 Key parameters

    Parameter

    -

    Description

    -

    replicas

    -

    Number of pods.

    -

    name

    -

    Name of the created workload.

    -

    image

    -

    Image used by the workload.

    -

    mountPath

    -

    Mount path in the container.

    -

    serviceName

    -

    Service corresponding to the workload. For details about how to create a Service, see Creating a StatefulSet.

    -

    claimName

    -

    Name of an existing PVC.

    -
    -
    -

  4. Create the StatefulSet.

    kubectl create -f obs-statefulset-example.yaml

    -
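
    Optionally, verify that the StatefulSet and its pod are running. This is a minimal check using the example names above:

    kubectl get statefulset obs-statefulset-example -n default

    kubectl get pod -l app=obs-statefulset-example -n default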

-
-
diff --git a/docs/cce/umn/cce_01_0330.html b/docs/cce/umn/cce_01_0330.html
deleted file mode 100644
index aa21cdb6..00000000
--- a/docs/cce/umn/cce_01_0330.html
+++ /dev/null
@@ -1,14 +0,0 @@

Overview

-

CCE allows you to mount a volume created from an SFS Turbo file system to a container to store data persistently. Provisioned on demand and fast, SFS Turbo is suitable for DevOps, container microservices, and enterprise OA scenarios.

-
Figure 1 Mounting SFS Turbo volumes to CCE
-

Description

  • Standard file protocols: You can mount file systems as volumes to servers and use them in the same way as local directories.
  • Data sharing: The same file system can be mounted to multiple servers so that data can be shared.
  • Private network: Users can access data only over private networks in data centers.
  • Data isolation: The on-cloud storage service provides exclusive cloud file storage, which delivers data isolation and ensures IOPS performance.
  • Use cases: Deployments/StatefulSets in ReadWriteMany mode, DaemonSets, and jobs created for high-traffic websites, log storage, DevOps, and enterprise OA applications.
-
-
diff --git a/docs/cce/umn/cce_01_0331.html b/docs/cce/umn/cce_01_0331.html
deleted file mode 100644
index b74d2295..00000000
--- a/docs/cce/umn/cce_01_0331.html
+++ /dev/null
@@ -1,58 +0,0 @@

Using SFS Turbo Volumes

-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

  • SFS Turbo volumes are available only in certain regions.
  • Currently, SFS Turbo file systems cannot be directly created on CCE.
  • The following operations apply to clusters of Kubernetes 1.13 or earlier.
-
-

Importing an SFS Turbo Volume

CCE allows you to import existing SFS Turbo volumes.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. On the SFS Turbo tab page, click Import.
  2. Select one or more SFS Turbo volumes that you want to import.
  3. Select the cluster and namespace to which you want to import the volumes.
  4. Click OK. The volumes are displayed in the list. When the PVC status becomes Bound, the volumes are imported successfully.
-
-

Adding an SFS Turbo Volume

  1. Create a workload or job by referring to Creating a Deployment, Creating a StatefulSet, Creating a DaemonSet, or Creating a Job. After you have added a container, choose Data Storage > Cloud Volume, and then click Add Cloud Volume.
  2. Set the storage volume type to SFS Turbo.

    -

    Table 1 Parameters for configuring an SFS Turbo volume

    Parameter

    -

    Description

    -

    Type

    -

    SFS Turbo: applicable to DevOps, containerized microservices, and enterprise OA applications.

    -

    Allocation Mode

    -

    Manual

    -

    Select an existing SFS Turbo volume. You need to import SFS Turbo volumes in advance. For details, see Importing an SFS Turbo Volume.

    -

    Add Container Path

    -

    Configure the following parameters:

    -
    1. subPath: Enter the subpath of the file storage, for example, /tmp.

      This parameter specifies a subpath inside the referenced volume instead of its root. If this parameter is not specified, the root path is used. Currently, only file storage is supported. The value must be a relative path and cannot start with a slash (/) or ../.

      -
    2. Container Path: Enter the mount path in the container, for example, /tmp.
      The mount path must not be a system directory, such as / or /var/run. Otherwise, an exception occurs. You are advised to mount the volume to an empty directory. If the directory is not empty, ensure that it contains no files that affect container startup. Otherwise, such files will be replaced, causing the container startup and workload creation to fail.
      NOTICE:

      If the volume is mounted to a high-risk directory, you are advised to use an account with minimum permissions to start the container; otherwise, high-risk files on the host machine may be damaged.

      -
      -
      -
    3. Set permissions.
      • Read-only: You can only read the data in the mounted volumes.
      • Read/Write: You can modify the data in the mounted volumes. Newly written data is not migrated if the container is migrated, which may cause data loss.
      -
    -

    Click Add Container Path to add multiple settings. Then, click OK.

    -
    -
    -

  3. Click OK.
-
-

Unbinding an SFS Turbo Volume

When an SFS Turbo volume is successfully imported to a cluster, the volume is bound to the cluster. The volume can also be imported to other clusters. When the volume is unbound from the cluster, other clusters can still import and use the volume.

-

If the SFS Turbo volume has been mounted to a workload, the volume cannot be unbound from the cluster.

-
  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Storage. In the SFS Turbo volume list, click Unbind next to the target volume.
  2. In the dialog box displayed, click OK.
-
-
diff --git a/docs/cce/umn/cce_01_0332.html b/docs/cce/umn/cce_01_0332.html
deleted file mode 100644
index 4fa2cec8..00000000
--- a/docs/cce/umn/cce_01_0332.html
+++ /dev/null
@@ -1,150 +0,0 @@

(kubectl) Creating a PV from an Existing SFS Turbo File System

-

Scenario

CCE allows you to use an existing SFS Turbo file system to create a PersistentVolume (PV). After the creation is successful, you can create a PersistentVolumeClaim (PVC) and bind it to the PV.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Log in to the SFS console, create a file system, and record the file system ID, shared path, and capacity.
  2. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  3. Create two YAML files for creating the PV and PVC. Assume that the file names are pv-efs-example.yaml and pvc-efs-example.yaml.

    touch pv-efs-example.yaml pvc-efs-example.yaml

    -
    • Example YAML file for the PV:
      apiVersion: v1
      kind: PersistentVolume
      metadata:
        name: pv-efs-example
        annotations:
          pv.kubernetes.io/provisioned-by: flexvolume-huawei.com/fuxiefs
      spec:
        accessModes:
        - ReadWriteMany
        capacity:
          storage: 100Gi
        claimRef:
          apiVersion: v1
          kind: PersistentVolumeClaim
          name: pvc-efs-example
          namespace: default
        flexVolume:
          driver: huawei.com/fuxiefs
          fsType: efs
          options:
            deviceMountPath: <your_deviceMountPath>  # Shared storage path of your SFS Turbo file system.
            fsType: efs
            volumeID: 8962a2a2-a583-4b7f-bb74-fe76712d8414
        persistentVolumeReclaimPolicy: Delete
        storageClassName: efs-standard

      Table 1 Key parameters

      Parameter

      -

      Description

      -

      driver

      -

      Storage driver used to mount the volume. Set it to huawei.com/fuxiefs.

      -

      deviceMountPath

      -

      Shared path of the SFS Turbo volume.

      -

      volumeID

      -

      SFS Turbo volume ID.

      -

      To obtain the ID, log in to the CCE console, choose Resource Management > Storage, click the PVC name in the SFS Turbo tab page, and copy the PVC ID on the PVC details page.

      -

      storage

      -

      File system size.

      -

      storageClassName

      -

      Volume type supported by SFS Turbo. The value can be efs-standard or efs-performance. Currently, SFS Turbo does not support dynamic creation, so this parameter is not used for now.

      -

      spec.claimRef.apiVersion

      -

      The value is fixed at v1.

      -

      spec.claimRef.kind

      -

      The value is fixed at PersistentVolumeClaim.

      -

      spec.claimRef.name

      -

      The value is the same as the name of the PVC created in the next step.

      -

      spec.claimRef.namespace

      -

      The value is the same as the namespace of the PVC created in the next step.

      -
      -
      -
    • Example YAML file for the PVC:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        annotations:
          volume.beta.kubernetes.io/storage-class: efs-standard
          volume.beta.kubernetes.io/storage-provisioner: flexvolume-huawei.com/fuxiefs
        name: pvc-efs-example
        namespace: default
      spec:
        accessModes:
        - ReadWriteMany
        resources:
          requests:
            storage: 100Gi
        volumeName: pv-efs-example

      Table 2 Key parameters

      Parameter

      -

      Description

      -

      volume.beta.kubernetes.io/storage-class

      -

      Storage class supported by SFS Turbo. The value can be efs-standard or efs-performance. The value must be the same as that of the existing PV.

      -

      volume.beta.kubernetes.io/storage-provisioner

      -

      The field must be set to flexvolume-huawei.com/fuxiefs.

      -

      storage

      -

      Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

      -

      volumeName

      -

      Name of the PV.

      -
      -
      -
    -

    The VPC to which the SFS Turbo file system belongs must be the same as the VPC of the ECS VM planned for the workload. Ports 111, 445, 2049, 2051, and 20048 must be enabled in the security groups.

    -
    -

  4. Create the PV.

    kubectl create -f pv-efs-example.yaml

    -

  5. Create the PVC.

    kubectl create -f pvc-efs-example.yaml

    -
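
    Optionally, confirm that the PVC has been bound to the PV. This is a minimal check using the example names above:

    kubectl get pv pv-efs-example

    kubectl get pvc pvc-efs-example -n default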

-
-
diff --git a/docs/cce/umn/cce_01_0333.html b/docs/cce/umn/cce_01_0333.html
deleted file mode 100644
index b398c4cf..00000000
--- a/docs/cce/umn/cce_01_0333.html
+++ /dev/null
@@ -1,78 +0,0 @@

(kubectl) Creating a Deployment Mounted with an SFS Turbo Volume

-

Scenario

After an SFS Turbo volume is created or imported to CCE, you can mount the volume to a workload.

-
-

Prerequisites

You have created a CCE cluster and installed the FlexVolume plug-in (storage-driver) in the cluster.

-
-

Notes and Constraints

The following configuration example applies to clusters of Kubernetes 1.13 or earlier.

-
-

Procedure

  1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
  2. Run the following commands to configure the efs-deployment-example.yaml file, which is used to create a Deployment:

    touch efs-deployment-example.yaml

    -

    vi efs-deployment-example.yaml

    -

    Example of mounting an SFS Turbo volume to a Deployment (PVC-based, shared volume):

    -
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: efs-deployment-example                                # Workload name
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: efs-deployment-example
      template:
        metadata:
          labels:
            app: efs-deployment-example
        spec:
          containers:
          - image: nginx
            name: container-0
            volumeMounts:
            - mountPath: /tmp                                # Mount path
              name: pvc-efs-example
          restartPolicy: Always
          imagePullSecrets:
            - name: default-secret
          volumes:
          - name: pvc-efs-example
            persistentVolumeClaim:
              claimName: pvc-sfs-auto-example                # PVC name

    Table 1 Key parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the created Deployment.

    -

    app

    -

    Name of the application running in the Deployment.

    -

    mountPath

    -

    Mount path in the container. In this example, the mount path is /tmp.

    -
    -
    -

    spec.template.spec.containers.volumeMounts.name and spec.template.spec.volumes.name must be consistent because they have a mapping relationship.

    -
    -

  3. Run the following command to create the pod:

    kubectl create -f efs-deployment-example.yaml

    -

    After the creation is complete, choose Storage > SFS Turbo on the CCE console and click the PVC name. On the PVC details page, you can view the binding relationship between SFS Turbo and PVC.

    -
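
    Alternatively, verify the result from the CLI. This is an optional check using the example names above (adjust them if you changed the manifest):

    kubectl get deployment efs-deployment-example -n default

    kubectl get pvc pvc-sfs-auto-example -n default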

-
-
diff --git a/docs/cce/umn/cce_01_0336.html b/docs/cce/umn/cce_01_0336.html
deleted file mode 100644
index 6cf954f5..00000000
--- a/docs/cce/umn/cce_01_0336.html
+++ /dev/null
@@ -1,242 +0,0 @@

Using a Custom AK/SK to Mount an OBS Volume

-

Scenario

By default, all IAM users under your account use the same uploaded key to mount OBS volumes, so their permissions on buckets cannot be differentiated. With everest 1.2.8 or later, you can configure custom access keys for different IAM users to solve this issue.

-
-

Prerequisites

  • The everest add-on version must be 1.2.8 or later.
  • The cluster version must be 1.15.11 or later.
-
-

Notes and Constraints

Custom access keys cannot be configured for secure containers.

-
-

Disabling Auto Key Mounting

The key you uploaded is used by default when mounting an OBS volume. That is, all IAM users under your account will use the same key to mount OBS buckets, and they have the same permissions on buckets. This setting does not allow you to configure differentiated permissions for different IAM users.

-

If you have uploaded an AK/SK, you are advised to disable the automatic mounting of access keys by setting the disable-auto-mount-secret parameter of the everest add-on to true. This prevents IAM users from performing unauthorized operations, because the access keys uploaded on the console will no longer be used when OBS volumes are created.

-
  • When enabling disable-auto-mount-secret, ensure that no OBS volume exists in the cluster. A workload mounted with an OBS volume, when scaled or restarted, will fail to remount the OBS volume because it needs to specify the access key but is prohibited by disable-auto-mount-secret.
  • If disable-auto-mount-secret is set to true, an access key must be specified when a PV or PVC is created. Otherwise, the OBS volume fails to be mounted.
-
-

kubectl edit ds everest-csi-driver -nkube-system

-

Search for disable-auto-mount-secret and set it to true.

-

-

Run :wq to save the settings and exit. Wait until the pod is restarted.

-
-

Creating a Secret Using an Access Key

  1. Obtain an access key.

    For details, see Creating Access Keys (AK and SK).

    -

  2. Encode the keys using Base64. (Assume that the AK is xxx and the SK is yyy.)

    echo -n xxx|base64

    -

    echo -n yyy|base64

    -

    Record the encoded AK and SK.

    -

  3. Create a YAML file for the secret, for example, test-user.yaml.

    apiVersion: v1
    data:
      access.key: WE5WWVhVNU*****
      secret.key: Nnk4emJyZ0*****
    kind: Secret
    metadata:
      name: test-user
      namespace: default
      labels:
        secret.kubernetes.io/used-by: csi
    type: cfe/secure-opaque

    Specifically:


    Parameter

    -

    Description

    -

    access.key

    -

    Base64-encoded AK.

    -

    secret.key

    -

    Base64-encoded SK.

    -

    name

    -

    Secret name.

    -

    namespace

    -

    Namespace of the secret.

    -

    secret.kubernetes.io/used-by: csi

    -

    You need to add this label in the YAML file if you want to make it available on the CCE console when you create an OBS PV/PVC.

    -

    type

    -

    Secret type. The value must be cfe/secure-opaque.

    -

    When this type is used, the data entered by users is automatically encrypted.

    -
    -
    -

  4. Create the secret.

    kubectl create -f test-user.yaml

    -
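
    Optionally, confirm that the secret exists and carries the expected label. This is a minimal check using the example names above:

    kubectl get secret test-user -n default --show-labels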

-
-

Mounting a Secret When Statically Creating an OBS Volume

After a secret is created using the AK/SK, you can associate the secret with the PV to be created and then use the AK/SK in the secret to mount an OBS volume.

-
  1. Log in to the OBS console, create an OBS bucket, and record the bucket name and storage class. The parallel file system is used as an example.
  2. Create a YAML file for the PV, for example, pv-example.yaml.

    -
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv-obs-example
      annotations:
        pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    spec:
      accessModes:
      - ReadWriteMany
      capacity:
        storage: 1Gi
      csi:
        nodePublishSecretRef:
          name: test-user
          namespace: default
        driver: obs.csi.everest.io
        fsType: obsfs
        volumeAttributes:
          everest.io/obs-volume-type: STANDARD
          everest.io/region: eu-de
          storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
        volumeHandle: obs-normal-static-pv
      persistentVolumeReclaimPolicy: Delete
      storageClassName: csi-obs


    Parameter

    -

    Description

    -

    nodePublishSecretRef

    -

    Secret specified during the mounting.

    -
    • name: name of the secret
    • namespace: namespace of the secret
    -

    fsType

    -

    File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. You are advised to set this field to obsfs.

    -

    volumeHandle

    -

    OBS bucket name.

    -
    -
    -

  3. Create the PV.

    kubectl create -f pv-example.yaml

    -

    After a PV is created, you can create a PVC and associate it with the PV.

    -

  4. Create a YAML file for the PVC, for example, pvc-example.yaml.

    Example YAML file for the PVC:

    -
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      annotations:
        csi.storage.k8s.io/node-publish-secret-name: test-user
        csi.storage.k8s.io/node-publish-secret-namespace: default
        volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
        everest.io/obs-volume-type: STANDARD
        csi.storage.k8s.io/fstype: obsfs
      name: obs-secret
      namespace: default
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi
      storageClassName: csi-obs
      volumeName: pv-obs-example


    Parameter

    -

    Description

    -

    csi.storage.k8s.io/node-publish-secret-name

    -

    Name of the secret

    -

    csi.storage.k8s.io/node-publish-secret-namespace

    -

    Namespace of the secret

    -
    -
    -

  5. Create the PVC.

    kubectl create -f pvc-example.yaml

    -

    After the PVC is created, you can create a workload and associate it with the PVC to create volumes.

    -

-
-

Mounting a Secret When Dynamically Creating an OBS Volume

When dynamically creating an OBS volume, you can use the following method to specify a secret:

-
  1. Create a YAML file for the PVC, for example, pvc-example.yaml.

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      annotations:
        csi.storage.k8s.io/node-publish-secret-name: test-user
        csi.storage.k8s.io/node-publish-secret-namespace: default
        everest.io/obs-volume-type: STANDARD
        csi.storage.k8s.io/fstype: obsfs
      name: obs-secret
      namespace: default
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi
      storageClassName: csi-obs


    Parameter

    -

    Description

    -

    csi.storage.k8s.io/node-publish-secret-name

    -

    Name of the secret

    -

    csi.storage.k8s.io/node-publish-secret-namespace

    -

    Namespace of the secret

    -
    -
    -

  2. Create the PVC.

    kubectl create -f pvc-example.yaml

    -

    After the PVC is created, you can create a workload and associate it with the PVC to create volumes.

    -
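
    For reference, the following is a minimal sketch of a Deployment that mounts the PVC created above. The workload name obs-secret and the mount path /temp match the objects used in the Verification section below; the nginx image, the app label, and the other fields are illustrative assumptions and can be adapted as needed:

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: obs-secret                      # Workload name (assumed for illustration)
      namespace: default
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: obs-secret
      template:
        metadata:
          labels:
            app: obs-secret
        spec:
          containers:
          - name: container-0
            image: nginx                    # Illustrative image
            volumeMounts:
            - mountPath: /temp              # Mount path used in the Verification section
              name: obs-volume
          imagePullSecrets:
          - name: default-secret
          volumes:
          - name: obs-volume
            persistentVolumeClaim:
              claimName: obs-secret         # PVC created in the previous step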

-
-

Verification

You can use a secret of an IAM user to mount an OBS volume. Assume that a workload named obs-secret is created, the mount path in the container is /temp, and the IAM user has the CCE ReadOnlyAccess and Tenant Guest permissions.
  1. Query the name of the workload pod.

    kubectl get po | grep obs-secret

    -

    Expected outputs:

    -
    obs-secret-5cd558f76f-vxslv          1/1     Running   0          3m22s
    -
  2. Query the objects in the mount path. In this example, the query is successful.

    kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

    -
  3. Write data into the mount path. In this example, the write operation fails.

    kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

    -

    Expected outputs:

    -
    touch: setting times of '/temp/test': No such file or directory
    command terminated with exit code 1
    -
  4. Set the read/write permissions for the IAM user who mounted the OBS volume by referring to the bucket policy configuration.

    -

    -
  5. Write data into the mount path again. In this example, the write operation succeeds.

    kubectl exec obs-secret-5cd558f76f-vxslv -- touch /temp/test

    -
  6. Check the mount path in the container to see whether the data is successfully written.

    kubectl exec obs-secret-5cd558f76f-vxslv -- ls -l /temp/

    -

    Expected outputs:

    -
    -rwxrwxrwx 1 root root 0 Jun  7 01:52 test
    -
-
-
-
-
diff --git a/docs/cce/umn/cce_01_0337.html b/docs/cce/umn/cce_01_0337.html
deleted file mode 100644
index b6bb6888..00000000
--- a/docs/cce/umn/cce_01_0337.html
+++ /dev/null
@@ -1,184 +0,0 @@

Setting Mount Options

-

Scenario

You can mount cloud storage volumes to your containers and use these volumes as local directories.

-

This section describes how to set mount options when mounting SFS and OBS volumes. You can set mount options in a PV and bind the PV to a PVC. Alternatively, set mount options in a StorageClass and use the StorageClass to create a PVC. In this way, PVs can be dynamically created and inherit mount options configured in the StorageClass by default.

-
-

SFS Volume Mount Options

The everest add-on in CCE presets the options described in Table 1 for mounting SFS volumes. You can set other mount options if needed. For details, see Mounting an NFS File System to ECSs (Linux).

Table 1 Preset mount options for SFS volumes

Option

-

Description

-

vers=3

-

File system version. Currently, only NFSv3 is supported. Value: 3

-

nolock

-

Whether to lock files on the server using the NLM protocol. If nolock is selected, the lock is valid for applications on one host. For applications on another host, the lock is invalid.

-

timeo=600

-

Waiting time before the NFS client retransmits a request. The unit is 0.1 seconds. Recommended value: 600

-

hard/soft

-

Mounting mode.

-
  • hard: If the NFS request times out, the client keeps resending the request until the request is successful.
  • soft: If the NFS request times out, the client returns an error to the invoking program.
-

The default value is hard.

-
-
-
-

OBS Volume Mount Options

When mounting an OBS volume, the everest add-on presets the options described in Table 2 and Table 3 by default. The options in Table 2 are mandatory.

Table 2 Mandatory mount options configured by default

Option

-

Description

-

use_ino

-

If enabled, obsfs allocates the inode number. Enabled by default in read/write mode.

-

big_writes

-

If configured, the maximum size of the cache can be modified.

-

nonempty

-

Allows non-empty mount paths.

-

allow_other

-

Allows other users to access the parallel file system.

-

no_check_certificate

-

Disables server certificate verification.

-

enable_noobj_cache

-

Enables cache entries for objects that do not exist, which can improve performance. Enabled by default in object bucket read/write mode.

-

This option is no longer set by default since everest 1.2.40.

-

sigv2

-

Specifies the signature version. Used by default in object buckets.

-
-
Table 3 Optional mount options configured by default

Option

-

Description

-

max_write=131072

-

Maximum size of a single write request, in bytes (131072 = 128 KB in this example).

-

ssl_verify_hostname=0

-

Disables verifying the SSL certificate based on the host name.

-

max_background=100

-

Allows setting the maximum number of waiting requests in the background. Used by default in parallel file systems.

-

public_bucket=1

-

If set to 1, public buckets are mounted anonymously. Enabled by default in object bucket read/write mode.

-
-
-

You can log in to the node to which the pod is scheduled and view all mount options used for mounting the OBS volume in the process details.

-
  • Object bucket: ps -ef | grep s3fs
    root     22142     1  0 Jun03 ?        00:00:00 /usr/bin/s3fs pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d /mnt/paas/kubernetes/kubelet/pods/0b13ff68-4c8e-4a1c-b15c-724fd4d64389/volumes/kubernetes.io~csi/pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d/mount -o url=https://{{endpoint}}:443 -o endpoint=xxxxxx -o passwd_file=/opt/everest-host-connector/1622707954357702943_obstmpcred/pvc-82fe2cbe-3838-43a2-8afb-f994e402fb9d -o nonempty -o big_writes -o enable_noobj_cache -o sigv2 -o allow_other -o no_check_certificate -o ssl_verify_hostname=0 -o max_write=131072 -o multipart_size=20 -o umask=0
    -
  • Parallel file system: ps -ef | grep obsfs
    root      1355     1  0 Jun03 ?        00:03:16 /usr/bin/obsfs pvc-86720bb9-5aa8-4cde-9231-5253994f8468 /mnt/paas/kubernetes/kubelet/pods/c959a91d-eced-4b41-91c6-96cbd65324f9/volumes/kubernetes.io~csi/pvc-86720bb9-5aa8-4cde-9231-5253994f8468/mount -o url=https://{{endpoint}}:443 -o endpoint=xxxxxx -o passwd_file=/opt/everest-host-connector/1622714415305160399_obstmpcred/pvc-86720bb9-5aa8-4cde-9231-5253994f8468 -o allow_other -o nonempty -o big_writes -o use_ino -o no_check_certificate -o ssl_verify_hostname=0 -o umask=0027 -o max_write=131072 -o max_background=100 -o uid=10000 -o gid=10000
    -
-
-

Prerequisites

  • The everest add-on version must be 1.2.8 or later.
  • The add-on identifies the mount options and transfers them to the underlying storage resources, which determine whether the specified options are valid.
-
-

Notes and Constraints

Mount options cannot be configured for secure containers.

-
-

Setting Mount Options in a PV

You can use the mountOptions field to set mount options in a PV. The options you can configure in mountOptions are listed in SFS Volume Mount Options and OBS Volume Mount Options.

-
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-obs-example
  annotations:
    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
spec:
  mountOptions:
  - umask=0027
  - uid=10000
  - gid=10000
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1Gi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: pvc-obs-example
    namespace: default
  csi:
    driver: obs.csi.everest.io
    fsType: obsfs
    volumeAttributes:
      everest.io/obs-volume-type: STANDARD
      everest.io/region: eu-de
      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    volumeHandle: obs-normal-static-pv
  persistentVolumeReclaimPolicy: Delete
  storageClassName: csi-obs

After a PV is created, you can create a PVC and bind it to the PV, and then mount the PV to the container in the workload.

-
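
The following is a minimal sketch of such a PVC, assuming the PV above. The names, capacity, and storage class are taken from the preceding example and can be adapted:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-obs-example            # Must match spec.claimRef.name in the PV above
  namespace: default
  annotations:
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs
  volumeName: pv-obs-example       # Binds the PVC to the PV defined above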
-

Setting Mount Options in a StorageClass

You can use the mountOptions field to set mount options in a StorageClass. The options you can configure in mountOptions are listed in SFS Volume Mount Options and OBS Volume Mount Options.

-
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-obs-mount-option
mountOptions:
- umask=0027
- uid=10000
- gid=10000
parameters:
  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
  csi.storage.k8s.io/fstype: s3fs
  everest.io/obs-volume-type: STANDARD
provisioner: everest-csi-provisioner
reclaimPolicy: Delete
volumeBindingMode: Immediate

After the StorageClass is configured, you can use it to create a PVC. By default, the dynamically created PVs inherit the mount options set in the StorageClass.

-
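
A minimal sketch of a PVC that uses this StorageClass is shown below. The PVC name is an illustrative assumption; the storage class name comes from the example above:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-obs-mount-option       # Assumed name for illustration
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-obs-mount-option   # StorageClass defined above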
-
-
diff --git a/docs/cce/umn/cce_01_0338.html b/docs/cce/umn/cce_01_0338.html
deleted file mode 100644
index 47f62342..00000000
--- a/docs/cce/umn/cce_01_0338.html
+++ /dev/null
@@ -1,45 +0,0 @@

Removing a Node

-

Scenario

Removing a node from a cluster in CCE will re-install the node OS and clear CCE components on the node.

-

Removing a node will not delete the server (ECS) corresponding to the node. You are advised to remove nodes at off-peak hours to avoid impacts on your services.

-

After a node is removed from the cluster, the node is still running and incurs fees.

-
-

Notes and Constraints

  • Nodes can be removed only when the cluster is in the Available or Unavailable state.
  • A CCE node can be removed only when it is in the Active, Abnormal, or Error state.
  • A CCE node in the Active state can have its OS re-installed and CCE components cleared after it is removed.
  • If the OS fails to be re-installed after the node is removed, manually re-install the OS. After the re-installation, log in to the node and run the clearance script to clear CCE components. For details, see Handling Failed OS Reinstallation.
-
-

Precautions

  • Removing a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
  • Unexpected risks may occur during the operation. Back up data in advance.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
  • After you remove the node and re-install the OS, the original LVM partitions will be cleared and the data managed by LVM will be cleared. Therefore, back up data in advance.
-
-

Procedure

  1. Log in to the CCE console. In the navigation pane, choose Resource Management > Nodes. In the same row as the target node, choose More > Remove.
  2. In the dialog box displayed, enter REMOVE, configure the login information required for re-installing the OS, and click Yes. Wait until the node is removed.

    After the node is removed, workload pods on the node are automatically migrated to other available nodes.

    -

-
-

Handling Failed OS Reinstallation

You can perform the following steps to re-install the OS and clear the CCE components on the node if previous attempts fail:

-
  1. Log in to the management console of the server and re-install the OS.
  2. Log in to the server and run the following commands to clear the CCE components and LVM data:

    Write the following script to the clean.sh file:

    -
    lsblk
    vgs --noheadings | awk '{print $1}' | xargs vgremove -f
    pvs --noheadings | awk '{print $1}' | xargs pvremove -f
    lvs --noheadings | awk '{print $1}' | xargs -i lvremove -f --select {}
    function init_data_disk() {
        all_devices=$(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}')
        for device in ${all_devices[@]}; do
            isRootDisk=$(lsblk -o KNAME,MOUNTPOINT $device 2>/dev/null| grep -E '[[:space:]]/$' | wc -l )
            if [[ ${isRootDisk} != 0 ]]; then
                continue
            fi
            dd if=/dev/urandom of=${device} bs=512 count=64
            return
        done
        exit 1
    }
    init_data_disk
    lsblk

    Run the following command:

    -

    bash clean.sh

    -

-
-
-
diff --git a/docs/cce/umn/cce_01_0341.html b/docs/cce/umn/cce_01_0341.html
deleted file mode 100644
index 062c0931..00000000
--- a/docs/cce/umn/cce_01_0341.html
+++ /dev/null
@@ -1,45 +0,0 @@

Data Disk Space Allocation

-

When creating a node, you need to configure data disks for the node.

-

-

The data disk is divided into Kubernetes space and user space. The user space defines the space that is not allocated to Kubernetes in the local disk. The Kubernetes space consists of the following two parts:

-
  • Docker space (90% by default): stores Docker working directories, Docker image data, and image metadata.
  • kubelet space (10% by default): stores pod configuration files, secrets, and mounted storage such as emptyDir volumes.
-

The Docker space size affects image download and container startup and running. This section describes how the Docker space is used so that you can configure the Docker space accordingly.

-

Docker Space Description

By default, a data disk, 100 GB for example, is divided as follows (depending on the container storage Rootfs):

-
  • Rootfs (Device Mapper)
    • The /var/lib/docker directory is used as the Docker working directory and occupies 20% of the Docker space by default. (Space size of the /var/lib/docker directory = Data disk space x 90% x 20%)
    • The thin pool is used to store Docker image data, image metadata, and container data, and occupies 80% of the Docker space by default. (Thin pool space = Data disk space x 90% x 80%)

      The thin pool is dynamically mounted. You can view it by running the lsblk command on a node, but not the df -h command.

      -
    -

    -
-
  • Rootfs (OverlayFS): No separate thin pool. The entire Docker space is in the /var/lib/docker directory.

    -
-

Using rootfs for container storage in CCE

-
  • CCE cluster: EulerOS 2.9 nodes use OverlayFS, and EulerOS 2.5 nodes use Device Mapper. CentOS 7.6 nodes use Device Mapper in clusters earlier than v1.21 and OverlayFS in clusters of v1.21 and later.
-

You can log in to the node and run the docker info command to view the storage engine type.

-
# docker info
Containers: 20
 Running: 17
 Paused: 0
 Stopped: 3
Images: 16
Server Version: 18.09.0
Storage Driver: devicemapper
-

Docker Space and Containers

The number of pods and the space configured for each container determine whether the Docker space of a node is sufficient.

-

-

The Docker space should be greater than the total disk space used by containers. Formula: Docker space > Number of containers x Available data space for a single container (basesize)

-
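
For example, with a 100 GB data disk and the default split described above, the thin pool provides about 100 GB x 90% x 80% = 72 GB, so with the default basesize of 10 GB roughly seven containers can write at full capacity before the thin pool is exhausted. This is only an illustrative calculation based on the default percentages.

-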

When device mapper is used, although you can limit the size of the /home directory of a single container (to 10 GB by default), all containers on the node still share the thin pool of the node for storage. They are not completely isolated. When the sum of the thin pool space used by certain containers reaches the upper limit, other containers cannot run properly.

-

In addition, after a file is deleted from the /home directory of a container, the thin pool space occupied by the file is not released immediately. Therefore, even if basesize is set to 10 GB, the thin pool space occupied by files keeps growing toward 10 GB as files are created in the container. The space released after file deletion is reused, but only after a delay. If the number of containers on the node multiplied by basesize is greater than the thin pool size of the node, the thin pool may be exhausted.

-
-

Garbage Collection Policies for Container Images

When the Docker space is insufficient, image garbage collection is triggered.

-

The policy for garbage collecting images takes two factors into consideration: HighThresholdPercent and LowThresholdPercent. Disk usage above the high threshold (default: 85%) will trigger garbage collection. The garbage collection will delete least recently used images until the low threshold (default: 80%) has been met.

-
-

Docker Space Configuration Suggestions

  • The Docker space should be greater than the total disk space used by containers. Formula: Docker space > Number of containers x Available data space for a single container (basesize)
  • You are advised to create and delete files of containerized services in local storage volumes (such as emptyDir and hostPath volumes) or cloud storage directories mounted to the containers. In this way, the thin pool space is not occupied. emptyDir volumes occupy the kubelet space. Therefore, properly plan the size of the kubelet space.
  • Docker uses the OverlayFS storage mode. This mode is used by Ubuntu 18.04 nodes in CCE clusters by default. You can deploy services on these nodes to avoid the issue that disk space occupied by files created or deleted in containers is not released immediately.
-
-
-
diff --git a/docs/cce/umn/cce_01_0342.html b/docs/cce/umn/cce_01_0342.html
deleted file mode 100644
index 5f86ea80..00000000
--- a/docs/cce/umn/cce_01_0342.html
+++ /dev/null
@@ -1,79 +0,0 @@

CCE Turbo Clusters and CCE Clusters

-

Comparison Between CCE Turbo Clusters and CCE Clusters

The following table lists the differences between CCE Turbo clusters and CCE clusters:

Table 1 Cluster types

Dimensions

-

Sub-dimension

-

CCE Turbo Cluster

-

CCE Cluster

-

Cluster

-

Positioning

-

Next-generation container cluster for Cloud Native 2.0 with accelerated computing, networking, and scheduling

-

Standard cluster for common commercial use

-

Node type

-

Hybrid deployment of VMs and bare-metal servers

-

Hybrid deployment of VMs

-

Network

-

Network model

-

Cloud Native Network 2.0: applies to large-scale and high-performance scenarios.

-

Networking scale: 2000 nodes

-

Cloud-native network 1.0 for scenarios that do not require high performance or involve large-scale deployment.

-
  • Tunnel network model
  • VPC network model
-

Network performance

-

The VPC network and container network are flattened into one, achieving zero performance loss.

-

The VPC network is overlaid with the container network, causing certain performance loss.

-

Container network isolation

-

Pods can be directly associated with security groups to configure isolation policies for resources inside and outside a cluster.

-
  • Tunnel network model: Network isolation policies are supported for intra-cluster communication (by configuring network policies).
  • VPC network model: Isolation is not supported.
-

Security

-

Isolation

-
  • Bare-metal server: You can select secure containers for VM-level isolation.
  • VM: Common containers are deployed.
-

Common containers are deployed and isolated by Cgroups.

-
-
-
-

QingTian Architecture

-

The QingTian architecture consists of a data plane (software-hardware synergy) and a management plane (Alkaid Smart Cloud Brain). The data plane innovates in five dimensions: simplified data center, diversified computing power, QingTian cards, ultra-fast engines, and simplified virtualization, to fully offload and accelerate compute, storage, networking, and security components. VMs, bare metal servers, and containers can run together. As a distributed operating system, the Alkaid Smart Cloud Brain focuses on the cloud, AI, and 5G, and provides all-domain scheduling to achieve cloud-edge-device collaboration and governance.

-
-
-
diff --git a/docs/cce/umn/cce_01_0343.html b/docs/cce/umn/cce_01_0343.html
deleted file mode 100644
index c2ebe74b..00000000
--- a/docs/cce/umn/cce_01_0343.html
+++ /dev/null
@@ -1,645 +0,0 @@

How Do I Change the Storage Class Used by a Cluster of v1.15 from FlexVolume to CSI Everest?

-

In clusters later than v1.15.11-r1, CSI (the everest add-on) has taken over all functions of fuxi FlexVolume (the storage-driver add-on) for managing container storage. You are advised to use CSI Everest.

-

To migrate your storage volumes, create a static PV to associate with the original underlying storage, and then create a PVC to associate with this static PV. When you upgrade your application, mount the new PVC to the original mounting path to migrate the storage volumes.

-

Services will be interrupted during the migration. Therefore, properly plan the migration and back up data.

-
-

Procedure

  1. (Optional) Back up data to prevent data loss in case of exceptions.
  2. Configure a YAML file of the PV in the CSI format according to the PV in the FlexVolume format and associate the PV with the existing storage.

    To be specific, run the following commands to configure the pv-example.yaml file, which is used to create a PV.

    -

    touch pv-example.yaml

    -

    vi pv-example.yaml

    -
    Configuration example of a PV for an EVS volume:
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      labels:
        failure-domain.beta.kubernetes.io/region: eu-de
        failure-domain.beta.kubernetes.io/zone: <zone name>
      annotations:
        pv.kubernetes.io/provisioned-by: everest-csi-provisioner
      name: pv-evs-example
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 10Gi
      csi:
        driver: disk.csi.everest.io
        fsType: ext4
        volumeAttributes:
          everest.io/disk-mode: SCSI
          everest.io/disk-volume-type: SAS
          storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
        volumeHandle: 0992dbda-6340-470e-a74e-4f0db288ed82
      persistentVolumeReclaimPolicy: Delete
      storageClassName: csi-disk


    -

    The key parameters are described as follows:

    Table 1 EVS volume configuration parameters

    Parameter

    -

    Description

    -

    failure-domain.beta.kubernetes.io/region

    -

    Region where the EVS disk is located. Use the same value as that of the FlexVolume PV.

    -

    failure-domain.beta.kubernetes.io/zone

    -

    AZ where the EVS disk is located. Use the same value as that of the FlexVolume PV.

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    EVS volume capacity in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.

    -

    driver

    -

    Storage driver used to attach the volume. Set the driver to disk.csi.everest.io for the EVS volume.

    -

    volumeHandle

    -

    Volume ID of the EVS disk. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    everest.io/disk-mode

    -

    EVS disk mode. Use the value of spec.flexVolume.options.disk-mode of the FlexVolume PV.

    -

    everest.io/disk-volume-type

    -

    EVS disk type. Use the value of kubernetes.io/volumetype in the storage class corresponding to spec.storageClassName of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class associated with the storage volume. Set this field to csi-disk for EVS disks.

    -
    -
    -

    Configuration example of a PV for an SFS volume:

    -
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv-sfs-example
      annotations:
        pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    spec:
      accessModes:
      - ReadWriteMany
      capacity:
        storage: 10Gi
      csi:
        driver: nas.csi.everest.io
        fsType: nfs
        volumeAttributes:
          everest.io/share-export-location:  # Shared path of the file storage
          storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
        volumeHandle: 682f00bb-ace0-41d8-9b3e-913c9aa6b695
      persistentVolumeReclaimPolicy: Delete
      storageClassName: csi-nas

    -

    The key parameters are described as follows:

    Table 2 SFS volume configuration parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    File storage size in the unit of Gi. Use the value of spec.capacity.storage of the FlexVolume PV.

    -

    driver

    -

    Storage driver used to attach the volume. Set the driver to nas.csi.everest.io for the file system.

    -

    everest.io/share-export-location

    -

    Shared path of the file system. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

    -

    volumeHandle

    -

    File system ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-nas.

    -
    -
    -

    Configuration example of a PV for an OBS volume:

    -
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv-obs-example
      annotations:
        pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    spec:
      accessModes:
      - ReadWriteMany
      capacity:
        storage: 1Gi
      csi:
        driver: obs.csi.everest.io
        fsType: s3fs
        volumeAttributes:
          everest.io/obs-volume-type: STANDARD
          everest.io/region: eu-de
          storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
        volumeHandle: obs-normal-static-pv
      persistentVolumeReclaimPolicy: Delete
      storageClassName: csi-obs

    -

    The key parameters are described as follows:

    Table 3 OBS volume configuration parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

    -

    driver

    -

    Storage driver used to attach the volume. Set the driver to obs.csi.everest.io for the OBS volume.

    -

    fsType

    -

    File type. Value options are obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. Set this parameter according to the value of spec.flexVolume.options.posix of the FlexVolume PV. If the value of spec.flexVolume.options.posix is true, set this parameter to obsfs. If the value is false, set this parameter to s3fs.

    -

    everest.io/obs-volume-type

    -

    Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter according to the value of spec.flexVolume.options.storage_class of the FlexVolume PV. If the value of spec.flexVolume.options.storage_class is standard, set this parameter to STANDARD. If the value is standard_ia, set this parameter to WARM.

    -

    everest.io/region

    -

    Region where the OBS bucket is located. Use the value of spec.flexVolume.options.region of the FlexVolume PV.

    -

    volumeHandle

    -

    OBS bucket name. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-obs.

    -
    -
    -

    Configuration example of a PV for an SFS Turbo volume:

    -
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv-efs-example
      annotations:
        pv.kubernetes.io/provisioned-by: everest-csi-provisioner
    spec:
      accessModes:
      - ReadWriteMany
      capacity:
        storage: 10Gi
      csi:
        driver: sfsturbo.csi.everest.io
        fsType: nfs
        volumeAttributes:
          everest.io/share-export-location: 192.168.0.169:/
          storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
        volumeHandle: 8962a2a2-a583-4b7f-bb74-fe76712d8414
      persistentVolumeReclaimPolicy: Delete
      storageClassName: csi-sfsturbo

    -

    The key parameters are described as follows:

    Table 4 SFS Turbo volume configuration parameters

    Parameter

    -

    Description

    -

    name

    -

    Name of the PV, which must be unique in the cluster.

    -

    storage

    -

    File system size. Use the value of spec.capacity.storage of the FlexVolume PV.

    -

    driver

    -

    Storage driver used to attach the volume. Set it to sfsturbo.csi.everest.io.

    -

    everest.io/share-export-location

    -

    Shared path of the SFS Turbo volume. Use the value of spec.flexVolume.options.deviceMountPath of the FlexVolume PV.

    -

    volumeHandle

    -

    SFS Turbo volume ID. Use the value of spec.flexVolume.options.volumeID of the FlexVolume PV.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-sfsturbo for SFS Turbo volumes.

    -
    -
    -

  3. Configure a YAML file of the PVC in the CSI format according to the PVC in the FlexVolume format and associate the PVC with the PV created in 2.

    To be specific, run the following commands to configure the pvc-example.yaml file, which is used to create a PVC.

    -

    touch pvc-example.yaml

    -

    vi pvc-example.yaml

    -

    Configuration example of a PVC for an EVS volume:

    -
    apiVersion: v1  
    -kind: PersistentVolumeClaim
    -metadata:
    -  labels:
    -    failure-domain.beta.kubernetes.io/region: eu-de
    -    failure-domain.beta.kubernetes.io/zone: <zone name>
    -  annotations:
    -    everest.io/disk-volume-type: SAS
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -  name: pvc-evs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  volumeName:  pv-evs-example
    -  storageClassName: csi-disk
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 5 PVC configuration parameters for an EVS volume

    Parameter

    -

    Description

    -

    failure-domain.beta.kubernetes.io/region

    -

    Region where the cluster is located. Use the same value as that of the FlexVolume PVC.

    -

    failure-domain.beta.kubernetes.io/zone

    -

    AZ where the EVS disk is deployed. Use the same value as that of the FlexVolume PVC.

    -

    everest.io/disk-volume-type

    -

    Storage class of the EVS disk. The value can be SAS or SSD. Set this parameter to the same value as that of the PV created in 2.

    -

    name

    -

    PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storage

    -

    Requested capacity of the PVC, which must be the same as the storage size of the existing PV.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV in 2.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-disk for EVS disks.

    -
    -
    -

    Configuration example of a PVC for an SFS volume:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -  name: pvc-sfs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  storageClassName: csi-nas
    -  volumeName: pv-sfs-example
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - -
    Table 6 PVC configuration parameters for an SFS volume

    Parameter

    -

    Description

    -

    name

    -

    PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storage

    -

    Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

    -

    storageClassName

    -

    Set this field to csi-nas.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV in 2.

    -
    -
    -

    Configuration example of a PVC for an OBS volume:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -    everest.io/obs-volume-type: STANDARD
    -    csi.storage.k8s.io/fstype: s3fs
    -  name: pvc-obs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 1Gi
    -  storageClassName: csi-obs
    -  volumeName: pv-obs-example
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 7 PVC configuration parameters for an OBS volume

    Parameter

    -

    Description

    -

    everest.io/obs-volume-type

    -

    OBS volume type, which can be STANDARD (standard bucket) and WARM (infrequent access bucket). Set this parameter to the same value as that of the PV created in 2.

    -

    csi.storage.k8s.io/fstype

    -

    File type, which can be obsfs or s3fs. The value must be the same as that of fsType of the static OBS volume PV.

    -

    name

    -

    PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storage

    -

    Storage capacity, in the unit of Gi. Set this parameter to the fixed value 1Gi.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-obs.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV created in 2.

    -
    -
    -

    Configuration example of a PVC for an SFS Turbo volume:

    -
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  annotations:
    -    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    -  name: pvc-efs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteMany
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  storageClassName: csi-sfsturbo
    -  volumeName: pv-efs-example
    -

    -

    Pay attention to the fields in bold and red. The parameters are described as follows:

    - -
    - - - - - - - - - - - - - - - - - - - -
    Table 8 PVC configuration parameters for an SFS Turbo volume

    Parameter

    -

    Description

    -

    name

    -

    PVC name, which must be unique in the namespace. (If the PVC is dynamically created by a stateful application, the value of this parameter must be the same as the name of the FlexVolume PVC.)

    -

    namespace

    -

    Namespace to which the PVC belongs. Use the same value as that of the FlexVolume PVC.

    -

    storageClassName

    -

    Name of the Kubernetes storage class. Set this field to csi-sfsturbo.

    -

    storage

    -

    Storage capacity, in the unit of Gi. The value must be the same as the storage size of the existing PV.

    -

    volumeName

    -

    Name of the PV. Set this parameter to the name of the static PV created in 2.

    -
    -
    -

  4. Upgrade the workload to use a new PVC.

    For Deployments
    1. Run the kubectl create -f commands to create a PV and PVC.

      kubectl create -f pv-example.yaml

      -

      kubectl create -f pvc-example.yaml

      -

      Replace the example file names pv-example.yaml and pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3. (A verification sketch follows this procedure.)

      -
      -
    2. Go to the CCE console. On the workload upgrade page, click Upgrade > Advanced Settings > Data Storage > Cloud Storage.

      -
    3. Uninstall the old storage and add the PVC in the CSI format. Retain the original mounting path in the container.
    4. Click Submit.
    5. Wait until the pods are running.
    -
    -
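    Optionally, after creating the PV and PVC in 1, confirm that they are bound to each other before upgrading the workload. A minimal check, assuming the example object names used above:

    kubectl get pv pv-evs-example
    kubectl get pvc pvc-evs-example -n default    # STATUS should be Bound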

    For StatefulSets that use existing storage

    -
    1. Run the kubectl create -f commands to create a PV and PVC.

      kubectl create -f pv-example.yaml

      -

      kubectl create -f pvc-example.yaml

      -

      Replace the example file names pv-example.yaml and pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

      -
      -
    2. Run the kubectl edit command to edit the StatefulSet and use the newly created PVC.

      kubectl edit sts sts-example -n xxx

      -

      -

      Replace sts-example in the preceding command with the actual name of the StatefulSet to upgrade. xxx indicates the namespace to which the StatefulSet belongs.

      -
      -
    3. Wait until the pods are running.
    -

    The current console does not support the operation of adding new cloud storage for StatefulSets. Use the kubectl commands to replace the storage with the newly created PVC.
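
    For reference, a minimal sketch of the change made in the kubectl edit step above, assuming the pod template mounts a volume named data and the CSI PVC created in 3 is pvc-evs-example:

      template:
        spec:
          volumes:
          - name: data                      # volume name referenced by volumeMounts (assumed)
            persistentVolumeClaim:
              claimName: pvc-evs-example    # point to the new CSI PVC instead of the FlexVolume PVC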

    -
    -

    For StatefulSets that use dynamically allocated storage

    -
    1. Back up the PV and PVC in the flexVolume format used by the StatefulSet.

      kubectl get pvc xxx -n {namespaces} -oyaml > pvc-backup.yaml

      -

      kubectl get pv xxx -n {namespaces} -oyaml > pv-backup.yaml

      -
    2. Change the number of pods to 0.
    3. On the storage page, disassociate the flexVolume PVC used by the StatefulSet.
    4. Run the kubectl create -f commands to create a PV and PVC.

      kubectl create -f pv-example.yaml

      -

      kubectl create -f pvc-example.yaml

      -

      Replace the example file names pv-example.yaml and pvc-example.yaml in the preceding commands with the names of the YAML files configured in 2 and 3.

      -
      -
    5. Change the number of pods back to the original value and wait until the pods are running.
    -

    The dynamic allocation of storage for StatefulSets is achieved by using volumeClaimTemplates. This field cannot be modified by Kubernetes. Therefore, data cannot be migrated by using a new PVC.

    -

    The PVC naming rule of the volumeClaimTemplates is fixed. When a PVC that meets the naming rule exists, this PVC is used.

    -

    Therefore, disassociate the original PVC first, and then create a PVC with the same name in the CSI format.
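
    For reference, the PVC name expected by volumeClaimTemplates follows the pattern <template name>-<StatefulSet name>-<ordinal>. A quick check, assuming a StatefulSet named sts-example in the default namespace with a claim template named data:

    kubectl get pvc -n default | grep sts-example    # for example, data-sts-example-0 must exist as a CSI PVC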

    -
    -

    6. (Optional) Recreate the stateful application to ensure that a CSI PVC is used when the application is scaled out. Otherwise, FlexVolume PVCs are used in scaling out.

    -
    • Run the following command to obtain the YAML file of the StatefulSet:
    -

    kubectl get sts xxx -n {namespaces} -oyaml > sts.yaml

    -
    • Run the following command to back up the YAML file of the StatefulSet:
    -

    cp sts.yaml sts-backup.yaml

    -
    • Modify the definition of volumeClaimTemplates in the YAML file of the StatefulSet.
    -

    vi sts.yaml

    -

    Configuration example of volumeClaimTemplates for an EVS volume:

    -
      volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-161070049798261342
    -        namespace: default
    -        creationTimestamp: null
    -        annotations:
    -          everest.io/disk-volume-type: SAS
    -      spec:
    -        accessModes:
    -          - ReadWriteOnce
    -        resources:
    -          requests:
    -            storage: 10Gi
    -        storageClassName: csi-disk
    -

    The parameter value must be the same as the PVC of the EVS volume created in 3.

    -

    Configuration example of volumeClaimTemplates for an SFS volume:

    -
      volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-161063441560279697
    -        namespace: default
    -        creationTimestamp: null
    -      spec:
    -        accessModes:
    -          - ReadWriteMany
    -        resources:
    -          requests:
    -            storage: 10Gi
    -        storageClassName: csi-nas
    -

    The parameter value must be the same as the PVC of the SFS volume created in 3.

    -

    Configuration example of volumeClaimTemplates for an OBS volume:

    -
      volumeClaimTemplates:
    -    - metadata:
    -        name: pvc-161070100417416148
    -        namespace: default
    -        creationTimestamp: null
    -        annotations:
    -          csi.storage.k8s.io/fstype: s3fs
    -          everest.io/obs-volume-type: STANDARD
    -      spec:
    -        accessModes:
    -          - ReadWriteMany
    -        resources:
    -          requests:
    -            storage: 1Gi
    -        storageClassName: csi-obs
    -

    The parameter value must be the same as the PVC of the OBS volume created in 3.

    -
    • Delete the StatefulSet.
    -

    kubectl delete sts xxx -n {namespaces}

    -
    • Create the StatefulSet.
    -

    kubectl create -f sts.yaml

    -

  5. Check service functions.

    1. Check whether the application is running properly.
    2. Check whether the data storage is normal.
    -

    If a rollback is required, perform 4 again, but select the PVC in the FlexVolume format when upgrading the application.

    -
    -

  6. Uninstall the PVC in the FlexVolume format.

    If the application functions normally, unbind the PVC in the FlexVolume format on the storage management page.

    -

    You can also run the kubectl command to delete the PVC and PV of the FlexVolume format.

    -

    Before deleting a PV, change the persistentVolumeReclaimPolicy of the PV to Retain. Otherwise, the underlying storage will be reclaimed after the PV is deleted.
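
    For example, the reclaim policy can be changed with a patch before the PV is deleted:

    kubectl patch pv {pv_name} -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'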

    -

    If the cluster has been upgraded before the storage migration, PVs may fail to be deleted. You can remove the PV protection field finalizers to delete PVs.

    -

    kubectl patch pv {pv_name} -p '{"metadata":{"finalizers":null}}'

    -
    -

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0344.html b/docs/cce/umn/cce_01_0344.html deleted file mode 100644 index 2866f65c..00000000 --- a/docs/cce/umn/cce_01_0344.html +++ /dev/null @@ -1,88 +0,0 @@ - - -

Adding a Second Data Disk to a Node in a CCE Cluster

-

You can use the pre-installation script feature to configure CCE cluster nodes (ECSs).

-
  • When creating a node in a cluster of v1.13.10 or later, if a data disk is not managed by LVM, follow instructions in this section to format the data disk before adding the disk. Otherwise, the data disk will still be managed by LVM.
  • When creating a node in a cluster earlier than v1.13.10, you must format the data disks that are not managed by LVM. Otherwise, either these data disks or the first data disk will be managed by LVM.
-
-

Before using this feature, write a script that can format data disks and save it to your OBS bucket. This script must be executed by user root.

-

Input Parameters

-
  1. Set the script name to formatdisk.sh, save the script to your OBS bucket, and obtain the address of the script in OBS.
  2. You need to specify the size of the Docker data disk (the data disk managed by LVM is called the Docker data disk). The size of the Docker disk must be different from that of the second disk. For example, the Docker data disk is 100 GB and the new disk is 110 GB.
  3. Set the mount path of the second data disk, for example, /data/code.
-

Run the following command in the pre-installation script to format the disk:

-
cd /tmp;curl -k -X GET OBS bucket address /formatdisk.sh -1 -O;fdisk -l;sleep 30;bash -x formatdisk.sh 100 /data/code;fdisk -l
-

Example script (formatdisk.sh):

-
dockerdisksize=$1
-mountdir=$2
-systemdisksize=40
-i=0
-while [ 20 -gt $i ]; do 
-    echo $i; 
-    if [ $(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}' |wc -l) -ge 3 ]; then 
-        break 
-    else 
-        sleep 5 
-    fi; 
-    i=$[i+1] 
-done 
-all_devices=$(lsblk -o KNAME,TYPE | grep disk | grep -v nvme | awk '{print $1}' | awk '{ print "/dev/"$1}')
-for device in ${all_devices[@]}; do
-    isRawDisk=$(lsblk -n $device 2>/dev/null | grep disk | wc -l)
-    if [[ ${isRawDisk} > 0 ]]; then
-        # is it partitioned ?
-        match=$(lsblk -n $device 2>/dev/null | grep -v disk | wc -l)
-        if [[ ${match} > 0 ]]; then
-            # already partitioned
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Raw disk ${device} has been partitioned, will skip this device"
-            continue
-        fi
-    else
-        isPart=$(lsblk -n $device 2>/dev/null | grep part | wc -l)
-        if [[ ${isPart} -ne 1 ]]; then
-            # not partitioned
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has not been partitioned, will skip this device"
-            continue
-        fi
-        # is used ?
-        match=$(lsblk -n $device 2>/dev/null | grep -v part | wc -l)
-        if [[ ${match} > 0 ]]; then
-            # already used
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has been used, will skip this device"
-            continue
-        fi
-        isMount=$(lsblk -n -o MOUNTPOINT $device 2>/dev/null)
-        if [[ -n ${isMount} ]]; then
-            # already used
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} has been used, will skip this device"
-            continue
-        fi
-        isLvm=$(sfdisk -lqL 2>>/dev/null | grep $device | grep "8e.*Linux LVM")
-        if [[ ! -n ${isLvm} ]]; then
-            # part system type is not Linux LVM
-            [[ -n "${DOCKER_BLOCK_DEVICES}" ]] && echo "Disk ${device} system type is not Linux LVM, will skip this device"
-            continue
-        fi
-    fi
-    block_devices_size=$(lsblk -n -o SIZE $device 2>/dev/null | awk '{ print $1}')
-    if [[ ${block_devices_size}"x" != "${dockerdisksize}Gx" ]] && [[ ${block_devices_size}"x" != "${systemdisksize}Gx" ]]; then
-echo "n
-p
-1
-
-
-w
-" | fdisk $device
-        mkfs -t ext4 ${device}1
-        mkdir -p $mountdir
-	uuid=$(blkid ${device}1 |awk '{print $2}')
-	echo "${uuid}  $mountdir ext4  noatime  0 0" | tee -a /etc/fstab >/dev/null
-        mount $mountdir
-    fi
-done
-

If the preceding example cannot be executed, use the dos2unix tool to convert the format.
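
For example, assuming the dos2unix tool is installed on the node:

dos2unix formatdisk.sh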

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0347.html b/docs/cce/umn/cce_01_0347.html deleted file mode 100644 index 8e37b697..00000000 --- a/docs/cce/umn/cce_01_0347.html +++ /dev/null @@ -1,19 +0,0 @@ - - - -

Cluster Parameters

- -

-
- - - diff --git a/docs/cce/umn/cce_01_0348.html b/docs/cce/umn/cce_01_0348.html deleted file mode 100644 index f8f25cb3..00000000 --- a/docs/cce/umn/cce_01_0348.html +++ /dev/null @@ -1,27 +0,0 @@ - - -

Maximum Number of Pods That Can Be Created on a Node

-

The maximum number of pods that can be created on a node is determined by the following parameters:

-
  • Number of container IP addresses that can be allocated on a node (alpha.cce/fixPoolMask): Set this parameter when creating a CCE cluster. This parameter is available only when Network Model is VPC network.
  • Maximum number of pods of a node (maxPods): Set this parameter when creating a node. It is a configuration item of kubelet.
  • Number of ENIs of a CCE Turbo cluster node: In a CCE Turbo cluster, ECS nodes use sub-ENIs and BMS nodes use ENIs. The maximum number of pods that can be created on a node depends on the number of ENIs that can be used by the node.
-

The maximum number of pods that can be created on a node is the smallest of these values.
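
As a quick check after a node is created, the pod capacity that kubelet actually reports can be read from the node object. A minimal example, assuming kubectl access to the cluster ({node_name} is a placeholder):

kubectl get node {node_name} -o jsonpath='{.status.allocatable.pods}'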

- -

Container Network vs. Host Network

When creating a pod, you can select the container network or host network for the pod.

-
  • Container network (default): Each pod is assigned an IP address by the cluster networking add-ons, which occupies the IP addresses of the container network.
  • Host network: The pod uses the host network (hostNetwork: true must be configured for the pod) and occupies host ports. The pod IP address is the host IP address, and the pod does not consume IP addresses of the container network. Before using the host network, confirm that the container ports do not conflict with the host ports. Do not use the host network unless you know exactly which host port is used by which container (see the sketch after this list).
-
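A minimal sketch of a pod that uses the host network described above (the pod name and nginx image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: nginx-hostnetwork     # illustrative name
spec:
  hostNetwork: true           # the pod shares the node's network namespace and uses the node IP address
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80       # listens on port 80 of the node; make sure this port is free on the host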
-

Number of Container IP Addresses That Can Be Allocated on a Node

If you select VPC network for Network Model when creating a CCE cluster, you also need to set the number of container IP addresses that can be allocated to each node.

-

This parameter affects the maximum number of pods that can be created on a node. Each pod occupies an IP address (when the container network is used). If the number of available IP addresses is insufficient, pods cannot be created.

-

-

By default, a node occupies three container IP addresses (network address, gateway address, and broadcast address). Therefore, the number of container IP addresses that can be allocated to a node equals the number of selected container IP addresses minus 3. For example, if 128 container IP addresses are selected, 125 (128 – 3) of them can be allocated to the node.

-
-

Maximum Number of Pods on a Node

When creating a node, you can configure the maximum number of pods that can be created on the node. This parameter is a configuration item of kubelet and determines the maximum number of pods that can be created by kubelet.
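
For reference, this parameter maps to the maxPods field of the kubelet configuration; a minimal sketch, shown only to illustrate the underlying setting (on CCE you set Max Pods on the console instead of editing kubelet directly):

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
maxPods: 110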

-

-
-

Number of NICs on a CCE Turbo Cluster Node

For details about the number of NICs on a CCE Turbo cluster node, see Cloud Native Network 2.0.

-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0352.html b/docs/cce/umn/cce_01_0352.html deleted file mode 100644 index 87d1c14f..00000000 --- a/docs/cce/umn/cce_01_0352.html +++ /dev/null @@ -1,82 +0,0 @@ - - -

Configuring Node Scheduling (Tainting)

-

Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.

-

Taints

A taint is a key-value pair associated with an effect. The following effects are available:

-
  • NoSchedule: No new pod can be scheduled onto the node unless it has a matching toleration. Pods already running on the node are not evicted.
  • PreferNoSchedule: A soft version of NoSchedule. Kubernetes tries to avoid scheduling pods that cannot tolerate this taint onto the node, but this is not guaranteed.
  • NoExecute: Pods that cannot tolerate this taint are not scheduled onto the node, and pods that are already running on the node and cannot tolerate the taint are evicted.
-

To add a taint to a node, run the kubectl taint node nodename command as follows:

-
$ kubectl get node
-NAME             STATUS   ROLES    AGE    VERSION
-192.168.10.170   Ready    <none>   73d    v1.19.8-r1-CCE21.4.1.B003
-192.168.10.240   Ready    <none>   4h8m   v1.19.8-r1-CCE21.6.1.2.B001
-$ kubectl taint node 192.168.10.240 key1=value1:NoSchedule
-node/192.168.10.240 tainted
-

To view the taint configuration, run the describe and get commands as follows:

-
$ kubectl describe node 192.168.10.240
-Name:               192.168.10.240
-...
-Taints:             key1=value1:NoSchedule
-...
-$ kubectl get node 192.168.10.240 -oyaml
-apiVersion: v1
-...
-spec:
-  providerID: 06a5ea3a-0482-11ec-8e1a-0255ac101dc2
-  taints:
-  - effect: NoSchedule
-    key: key1
-    value: value1
-...
-

To remove a taint, run the following command with a hyphen (-) added after NoSchedule:

-
$ kubectl taint node 192.168.10.240 key1=value1:NoSchedule-
-node/192.168.10.240 untainted
-$ kubectl describe node 192.168.10.240
-Name:               192.168.10.240
-...
-Taints:             <none>
-...
-

To configure scheduling settings, log in to the CCE console, choose Resource Management > Nodes in the navigation pane, and choose More > Scheduling settings in the Operation column of a node in the node list.

-

-

In the dialog box that is displayed, click OK to set the node to be unschedulable.

-

-

This operation will add a taint to the node. You can use kubectl to view the content of the taint.

-
$ kubectl describe node 192.168.10.240
-...
-Taints:             node.kubernetes.io/unschedulable:NoSchedule
-...
-

On the CCE console, perform the same operations again to remove the taint and set the node to be schedulable.

-

-
-

Tolerations

Tolerations are applied to pods and allow (but do not require) the pods to be scheduled onto nodes with matching taints.

-

Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node. This marks that the node should not accept any pods that do not tolerate the taints.

-

Here's an example of a pod that uses tolerations:

-
apiVersion: v1
-kind: Pod
-metadata:
-  name: nginx
-  labels:
-    env: test
-spec:
-  containers:
-  - name: nginx
-    image: nginx
-    imagePullPolicy: IfNotPresent
-  tolerations:
-  - key: "key1"
-    operator: "Equal"
-    value: "value1"
-    effect: "NoSchedule"  
-

In the preceding example, the pod tolerates the taint key1=value1 with the NoSchedule effect. Therefore, the pod can be scheduled onto a node that carries this taint.

-

You can also configure tolerations similar to the following information, which indicates that the pod can be scheduled onto a node when the node has the taint key1:

-
tolerations:
-- key: "key1"
-  operator: "Exists"
-  effect: "NoSchedule"
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0363.html b/docs/cce/umn/cce_01_0363.html deleted file mode 100644 index 5ceb035a..00000000 --- a/docs/cce/umn/cce_01_0363.html +++ /dev/null @@ -1,169 +0,0 @@ - - -

Creating a Node in a CCE Turbo Cluster

-

Prerequisites

  • At least one CCE Turbo cluster is available. For details on how to create a cluster, see Creating a CCE Turbo Cluster.
  • A key pair has been created for identity authentication upon remote node login.
-
-

Notes and Constraints

  • During the node creation, software packages are downloaded from OBS using the domain name. You need to use a private DNS server to resolve the OBS domain name, and configure the subnet where the node resides with a private DNS server address. When you create a subnet, the private DNS server is used by default. If you change the subnet DNS, ensure that the DNS server in use can resolve the OBS domain name.
  • Nodes in a CCE Turbo cluster must be the models developed on the QingTian architecture that features software-hardware synergy.
  • CCE Turbo clusters are available only in certain regions.
-
-

Procedure for Creating a Node

After a CCE Turbo cluster is created, you can create nodes for the cluster.

-
  1. Click Create Node in the card view of the created CCE Turbo cluster. In the Node Configuration step, set node parameters by referring to the following tables.

    Computing configurations:

    -
    You can configure the specifications and OS of a cloud server, on which your containerized applications run. -
    - - - - - - - - - - - - - - - - - - - - - - -
    Table 1 Configuration parameters

    Parameter

    -

    Description

    -

    AZ

    -

    AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after creation.

    -

    You are advised to select Random to deploy your node in a random AZ based on the selected node flavor.

    -

    An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

    -

    Container runtime

    -

    Container runtime used on the node. Different container runtimes support different node specifications and cannot be changed after the node is created.

    -
    • runc: The runC runtime is used. By default, Docker is selected as the container engine when you create a container on the console.
    • kata: The Kata runtime is used. If you select this type for both nodes and workloads, the workloads run only on the nodes that use the Kata runtime. containerd is used by default.
    -

    For details about common containers and secure containers, see Secure Containers and Common Containers.

    -

    Specifications

    -

    Select node specifications that best fit your business needs.

    -

    Nodes in a CCE Turbo cluster must be the models developed on the QingTian architecture that features software-hardware synergy.

    -

    OS

    -

    Public image: Select an OS for the node.

    -

    Node Name

    -

    Name of the node, which must be unique. When nodes (ECSs) are created in batches, the value of this parameter is used as the name prefix for each ECS.

    -

    The system generates a default name for you, which can be modified.

    -

    A node name must start with a lowercase letter and cannot end with a hyphen (-). Only digits, lowercase letters, and hyphens (-) are allowed.

    -

    Login Mode

    -
    • Key pair: Select the key pair used to log in to the node. You can select a shared key.

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create a key pair.

      -
    -
    -
    -
    -

    Storage configuration

    -
    Configure storage resources on a node for the containers running on it. Set the disk size according to site requirements. -
    - - - - - - - - - - -
    Table 2 Configuration parameters

    Parameter

    -

    Description

    -

    System Disk

    -

    System disk used by the node OS. The value ranges from 40 GB to 1,024 GB. The default value is 50 GB.

    -

    Data Disk

    -

    Data disk used by the container runtime and kubelet on the node. The value ranges from 100 GB to 32,768 GB. The default value is 100 GB. The EVS disk types provided for the data disk are the same as those for the system disk.

    -
    CAUTION:

    If the data disk is detached or damaged, the Docker service becomes abnormal and the node becomes unavailable. You are advised not to delete the data disk.

    -
    -

    Click Expand to set the following parameters:

    -
    • Custom space allocation: Select this option to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata.
    • Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption function.
      • Encryption is not selected by default.
      • After you select Encryption, you can select an existing key in the displayed Encryption Setting dialog box. If no key is available, click the link next to the drop-down box to create a key. After the key is created, click the refresh icon.
      -
    -
    -
    -
    -

    Networking configuration

    -
    Configure networking resources to allow node and containerized application access. -
    - - - - - - - -
    Table 3 Configuration parameters

    Parameter

    -

    Description

    -

    Node Subnet

    -

    The node subnet selected during cluster creation is used by default. You can choose another subnet instead. The value cannot be changed after creation.

    -
    -
    -
    -

    Advanced Settings

    -
    Configure advanced node capabilities such as labels, taints, and startup command. -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    Table 4 Advanced configuration parameters

    Parameter

    -

    Description

    -

    Kubernetes Label

    -

    Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 10 labels can be added.

    -

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    -

    Resource Tags

    -

    You can add resource tags to classify resources.

    -

    You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

    -

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

    -

    Taints

    -
    This parameter is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    -
    NOTICE:
    • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-out may fail or pods cannot be scheduled onto the added nodes.
    • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
    -
    -
    -

    Max Pods

    -

    Maximum number of pods that can run on the node, including the default system pods.

    -

    This limit prevents the node from being overloaded with pods. For details, see Maximum Number of Pods That Can Be Created on a Node.

    -

    Pre-installation Script

    -

    Enter commands. A maximum of 1,000 characters are allowed.

    -

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed. For example, you can use the commands to format data disks.

    -

    Post-installation Script

    -

    Enter commands. A maximum of 1,000 characters are allowed.

    -

    The script will be executed after Kubernetes software is installed and will not affect the installation. For example, you can use the commands to modify Docker parameters.

    -

    Agency

    -

    An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

    -

    If no agency is available, click Create Agency on the right to create one.

    -
    -
    -
    -

  2. Click Next: Confirm to review the configurations.
  3. Click Submit.

    The node list page is displayed. If the node status is Available, the node is created successfully. It takes about 6 to 10 minutes to create a node.

    -

  4. Click Back to Node List. The node is created successfully if it changes to the Available state.
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0378.html b/docs/cce/umn/cce_01_0378.html deleted file mode 100644 index 883ed839..00000000 --- a/docs/cce/umn/cce_01_0378.html +++ /dev/null @@ -1,284 +0,0 @@ - - -

PersistentVolumeClaims (PVCs)

-

A PVC describes a workload's request for storage resources. This request consumes existing PVs in the cluster. If there is no PV available, underlying storage and PVs are dynamically created. When creating a PVC, you need to describe the attributes of the requested persistent storage, such as the size of the volume and the read/write permissions.

-

Notes and Constraints

When a PVC is created, the system checks whether there is an available PV with the same configuration in the cluster. If yes, the PVC is bound to that PV. If no PV meets the matching conditions, the system dynamically creates a storage volume.

- -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Description

-

PVC Field

-

PV Field

-

Matching Logic

-

region

-

pvc.metadata.labels (failure-domain.beta.kubernetes.io/region or topology.kubernetes.io/region)

-

pv.metadata.labels (failure-domain.beta.kubernetes.io/region or topology.kubernetes.io/region)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

zone

-

pvc.metadata.labels (failure-domain.beta.kubernetes.io/zone or topology.kubernetes.io/zone)

-

pv.metadata.labels (failure-domain.beta.kubernetes.io/zone or topology.kubernetes.io/zone)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

EVS disk type

-

pvc.metadata.annotations (everest.io/disk-volume-type)

-

pv.spec.csi.volumeAttributes (everest.io/disk-volume-type)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

Key ID

-

pvc.metadata.annotations (everest.io/crypt-key-id)

-

pv.spec.csi.volumeAttributes (everest.io/crypt-key-id)

-

Defined or not defined at the same time. If defined, the settings must be consistent.

-

accessMode

-

accessMode

-

accessMode

-

The settings must be consistent.

-

Storage class

-

storageclass

-

storageclass

-

The settings must be consistent.

-
-
-
-

Volume Access Modes

PVs can be mounted to the host system only in the mode supported by underlying storage resources. For example, a file storage system can be read and written by multiple nodes, but an EVS disk can be read and written by only one node.

-
  • ReadWriteOnce: A volume can be mounted as read-write by a single node. This access mode is supported by EVS.
  • ReadWriteMany: A volume can be mounted as read-write by multiple nodes. This access mode is supported by SFS, SFS Turbo, and OBS.
- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Supported access modes

Storage Type

-

ReadWriteOnce

-

ReadWriteMany

-

EVS

-

√

-

×

-

SFS

-

×

-

√

-

OBS

-

×

-

√

-

SFS Turbo

-

×

-

√

-
-
-
-

Using a Storage Class to Create a PVC

StorageClass describes the storage class used in the cluster. You need to specify StorageClass to dynamically create PVs and underlying storage resources when creating a PVC.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Method: Select Storage class.
    • PVC Name: Enter a PVC name.
    • Storage Class: Select the required storage class. The following storage resources can be dynamically provisioned:
      • csi-disk: EVS disk.
      • csi-obs: OBS bucket.
      -
    • AZ (supported only by EVS): Select the AZ where the EVS disk is located.
    • Disk Type: Select an EVS disk type. EVS disk types vary in different regions.
      • Common I/O
      • High I/O
      • Ultra-high I/O
      -
    • Access Mode: ReadWriteOnce and ReadWriteMany are supported. For details, see Volume Access Modes.
    • Capacity (GiB) (supported only by EVS and SFS): storage capacity. This parameter is not available for OBS.
    • Encryption (supported only for EVS and SFS): Select Encryption. After selecting this option, you need to select a key.
    • Secret (supported only for OBS): Select an access key for OBS. For details, see Using a Custom AK/SK to Mount an OBS Volume.
    -

  4. Click Create.
-

Using YAML

-

Example YAML for EVS

-
  • failure-domain.beta.kubernetes.io/region: region where the cluster is located.

    For details about the value of region, see Regions and Endpoints.

    -
  • failure-domain.beta.kubernetes.io/zone: AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

    For details about the value of zone, see Regions and Endpoints.

    -
-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-evs-auto-example
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SSD    # EVS disk type.
-    everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82  # (Optional) Key ID. The key is used to encrypt EVS disks.
-    
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-  - ReadWriteOnce               # The value must be ReadWriteOnce for EVS.
-  resources:
-    requests:
-      storage: 10Gi             # EVS disk capacity, ranging from 1 to 32768.
-  storageClassName: csi-disk    # The storage class type is EVS.
-

Example YAML for OBS:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: obs-warm-provision-pvc
-  namespace: default
-  annotations:
-    everest.io/obs-volume-type: STANDARD      # OBS bucket type. Currently, standard (STANDARD) and infrequent access (WARM) are supported.
-    csi.storage.k8s.io/fstype: obsfs          # File type. obsfs creates a parallel file system (recommended); s3fs creates an OBS bucket.
-    
-spec:
-  accessModes:
-  - ReadWriteMany             # The value must be ReadWriteMany for OBS.
-  resources:
-    requests:
-      storage: 1Gi                 # This field is valid only for verification (fixed to 1, cannot be empty or 0). The value setting does not take effect for OBS buckets.
-  storageClassName: csi-obs        # The storage class type is OBS.
-
-

Using a PV to Create a PVC

If a PV has been created, you can create a PVC to apply for PV resources.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Method: Select Existing volume.
    • PVC Name: Enter a PVC name.
    • Volume Type: Select your required volume type.
      • EVS
      • SFS
      • OBS
      • SFS Turbo
      -
    • Associate Volume: Select the volume to be associated, that is, the PV.
    -

  4. Click Create.
-

Using YAML

-

Example YAML for EVS

-
  • failure-domain.beta.kubernetes.io/region: region where the cluster is located.

    For details about the value of region, see Regions and Endpoints.

    -
  • failure-domain.beta.kubernetes.io/zone: AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

    For details about the value of zone, see Regions and Endpoints.

    -
-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SAS                                # EVS disk type.
-    everest.io/crypt-key-id: fe0757de-104c-4b32-99c5-ee832b3bcaa3   # (Optional) Key ID. The key is used to encrypt EVS disks.
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-    
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-  - ReadWriteOnce               # The value must be ReadWriteOnce for EVS.
-  resources:
-    requests:
-      storage: 10Gi              
-  storageClassName: csi-disk     # Storage class name. The value is csi-disk for EVS.
-  volumeName: cce-evs-test       # PV name.
-

Example YAML for SFS:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-sfs-test
-  namespace: default
-  annotations:
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-spec:
-  accessModes:
-  - ReadWriteMany              # The value must be ReadWriteMany for SFS.
-  resources:
-    requests:
-      storage: 100Gi           # Requested PVC capacity.
-  storageClassName: csi-nas    # Storage class name. The value is csi-nas for SFS.
-  volumeName: cce-sfs-test     # PV name.
-

Example YAML for OBS:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-obs-test
-  namespace: default
-  annotations:
-    everest.io/obs-volume-type: STANDARD                         # OBS bucket type. Currently, standard (STANDARD) and infrequent access (WARM) are supported.
-    csi.storage.k8s.io/fstype: s3fs                              # File type. obsfs creates a parallel file system (recommended); s3fs creates an OBS bucket.
-    csi.storage.k8s.io/node-publish-secret-name: test-user
-    csi.storage.k8s.io/node-publish-secret-namespace: default
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-    
-spec:
-  accessModes:
-  - ReadWriteMany             # The value must be ReadWriteMany for OBS.
-  resources:
-    requests:
-      storage: 1Gi            # Requested PVC capacity. This field is valid only for verification (fixed to 1, cannot be empty or 0). The value setting does not take effect for OBS buckets.
-  storageClassName: csi-obs   # Storage class name. The value is csi-obs for OBS.
-  volumeName: cce-obs-test    # PV name.
-

Example YAML for SFS Turbo:

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
-spec:
-  accessModes:
-    - ReadWriteMany               # The value must be ReadWriteMany for SFS Turbo.
-  resources:
-    requests:
-      storage: 100Gi              # Requested PVC capacity.
-  storageClassName: csi-sfsturbo  # Storage class name. The value is csi-sfsturbo for SFS Turbo.
-  volumeName: pv-sfsturbo-test         # PV name.
-
-

Using a Snapshot to Create a PVC

The disk type, encryption setting, and disk mode of the created EVS PVC are consistent with those of the snapshot's source EVS disk.

-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Go to the cluster details page, choose Storage from the navigation pane, and click the PersistentVolumeClaims (PVCs) tab.
  3. Click Create PVC in the upper right corner. In the dialog box displayed, set the PVC parameters.

    • Creation Mode: Select Snapshot.
    • PVC Name: name of a PVC.
    • Snapshot: Select the snapshot to be used.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-test
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SSD     # EVS disk type, which must be the same as that of the source EVS disk of the snapshot.
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: 
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: '10'
-  storageClassName: csi-disk
-  dataSource:
-    name: cce-disksnap-test             # Snapshot name
-    kind: VolumeSnapshot
-    apiGroup: snapshot.storage.k8s.io
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0379.html b/docs/cce/umn/cce_01_0379.html deleted file mode 100644 index 5725bca6..00000000 --- a/docs/cce/umn/cce_01_0379.html +++ /dev/null @@ -1,395 +0,0 @@ - - -

PersistentVolumes (PVs)

-

A PV is a persistent storage volume in a cluster. Same as a node, a PV is a cluster-level resource.

-

Notes and Constraints

  • On the new CCE console, you can manage PVs directly (the cluster must be upgraded to v1.19.10 or later and the everest add-on to v1.2.10 or later). On the old CCE console, PVs can only be imported or dynamically created, and you cannot manage the PV lifecycle on the console.
  • Multiple PVs can use the same SFS or SFS Turbo file system with the following restrictions:
    • An error may occur if multiple PVCs/PVs that use the same underlying SFS or SFS Turbo file system are mounted to the same pod.
    • The persistentVolumeReclaimPolicy parameter in the PVs must be set to Retain. Otherwise, when a PV is deleted, the associated underlying volume may be deleted. In this case, other PVs associated with the underlying volume may be abnormal.
    • When the underlying volume is repeatedly used, it is recommended that ReadWriteMany be implemented at the application layer to prevent data overwriting and loss.
    -
-
-

Volume Access Modes

PVs can be mounted to the host system only in the mode supported by underlying storage resources. For example, a file storage system can be read and written by multiple nodes, but an EVS disk can be read and written by only one node.

-
  • ReadWriteOnce: A volume can be mounted as read-write by a single node. This access mode is supported by EVS.
  • ReadWriteMany: A volume can be mounted as read-write by multiple nodes. This access mode is supported by SFS, SFS Turbo, and OBS.
- -
- - - - - - - - - - - - - - - - - - - - - -
Table 1 Access modes supported by cloud storage

Storage Type

-

ReadWriteOnce

-

ReadWriteMany

-

EVS

-

√

-

×

-

SFS

-

×

-

√

-

OBS

-

×

-

√

-

SFS Turbo

-

×

-

√

-
-
-
-

PV Reclaim Policy

A PV reclaim policy is used to delete or reclaim underlying volumes when a PVC is deleted. The value can be Delete or Retain.

-
  • Delete: When a PVC is deleted, the PV and underlying storage resources are deleted.
  • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After a PVC is deleted, the PV resource is in the Released state and cannot be bound to the PVC again.
-

Everest also allows you to delete a PVC without deleting the underlying storage resources. This can be achieved only by using a YAML file: set the PV reclaim policy to Delete and add the annotation "everest.io/reclaim-policy: retain-volume-only". In this way, when the PVC is deleted, the PV resource is deleted, but the underlying storage resources are retained.
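
A minimal illustration of this combination in a PV manifest (the same fields appear in the full examples later in this section):

metadata:
  annotations:
    everest.io/reclaim-policy: retain-volume-only   # the PV object is deleted, the underlying volume is kept
spec:
  persistentVolumeReclaimPolicy: Delete             # the reclaim policy must be Delete for this to apply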

-
-

Creating an EVS Volume

The requirements for creating an EVS volume are as follows:

-
  • System disks, DSS disks, and shared disks cannot be used.
  • The EVS disk is one of the supported types (common I/O, high I/O, and ultra-high I/O), and the EVS disk device type is SCSI.
  • The EVS disk is not frozen or used, and the status is available.
  • If the EVS disk is encrypted, the key must be available.
-
-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Access the cluster details page, choose Storage from the navigation pane, and click the Volumes tab.
  3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

    • Volume Type: Select EVS.
    • EVS:
    • PV Name: Enter a PV name.
    • Access Mode: ReadWriteOnce
    • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only         # (Optional) The PV is deleted while the underlying volume is retained.
-  name: cce-evs-test
-  labels:
-    failure-domain.beta.kubernetes.io/region: eu-de
-    failure-domain.beta.kubernetes.io/zone: eu-de-01
-spec:
-  accessModes:
-    - ReadWriteOnce     # Access mode. The value is fixed to ReadWriteOnce for EVS.
-  capacity:
-    storage: 10Gi       #  EVS disk capacity, in the unit of Gi. The value ranges from 1 to 32768.
-  csi:
-    driver: disk.csi.everest.io     # Dependent storage driver for the mounting.
-    fsType: ext4
-    volumeHandle: 459581af-e78c-4356-9e78-eaf9cd8525eb   # Volume ID of the EVS disk.
-    volumeAttributes:
-      everest.io/disk-mode: SCSI           # Device type of the EVS disk. Only SCSI is supported.
-      everest.io/disk-volume-type: SAS     # EVS disk type.
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-      everest.io/crypt-key-id: 0992dbda-6340-470e-a74e-4f0db288ed82    # (Optional) Encryption key ID. Mandatory for an encrypted disk.
-  persistentVolumeReclaimPolicy: Delete    # Reclaim policy.
-  storageClassName: csi-disk               # Storage class name. The value must be csi-disk.
- -
- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 2 Key parameters

Parameter

-

Description

-

everest.io/reclaim-policy: retain-volume-only

-

This field is optional.

-

Currently, only retain-volume-only is supported.

-

This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and this value is retain-volume-only, then when a PVC is deleted, the associated PV is deleted but the underlying storage volume is retained.

-

failure-domain.beta.kubernetes.io/region

-

Region where the cluster is located.

-

For details about the value of region, see Regions and Endpoints.

-

failure-domain.beta.kubernetes.io/zone

-

AZ where the EVS volume is created. It must be the same as the AZ planned for the workload.

-

For details about the value of zone, see Regions and Endpoints.

-

volumeHandle

-

Volume ID of the EVS disk.

-

To obtain the volume ID, log in to the Cloud Server Console. In the navigation pane, choose Elastic Volume Service > Disks. Click the name of the target EVS disk to go to its details page. On the Summary tab page, click the copy button after ID.

-

everest.io/disk-volume-type

-

EVS disk type. All letters are in uppercase.

-
  • SATA: common I/O
  • SAS: high I/O
  • SSD: ultra-high I/O
-

everest.io/crypt-key-id

-

Encryption key ID. This field is mandatory when the volume is an encrypted volume.

-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The Delete and Retain policies are supported.

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and EVS disk are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the EVS resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-

Creating an SFS Volume

  • The SFS file system and the cluster must be in the same VPC.
-
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only      # (Optional) The PV is deleted while the underlying volume is retained.
-  name: cce-sfs-test
-spec:
-  accessModes:
-  - ReadWriteMany      # Access mode. The value must be ReadWriteMany for SFS.
-  capacity:
-    storage: 1Gi       # File storage capacity.
-  csi:
-    driver: nas.csi.everest.io    # Dependent storage driver for the mounting. The value must be nas.csi.everest.io for SFS.
-    fsType: nfs
-    volumeHandle: 30b3d92a-0bc7-4610-b484-534660db81be   # SFS file system ID.
-    volumeAttributes:
-      everest.io/share-export-location:   # Shared path of the file storage
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-  persistentVolumeReclaimPolicy: Retain    # Reclaim policy.
-  storageClassName: csi-nas                # Storage class name. The value must be csi-nas for SFS.
-  mountOptions: []                         # Mount options
- -
- - - - - - - - - - - - - - - - - - - - - - -
Table 3 Key parameters

Parameter

-

Description

-

everest.io/reclaim-policy: retain-volume-only

-

This field is optional.

-

Currently, only retain-volume-only is supported.

-

This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and this value is retain-volume-only, then when a PVC is deleted, the associated PV is deleted but the underlying storage volume is retained.

-

volumeHandle

-

File system ID.

-

On the management console, choose Service List > Storage > Scalable File Service. In the SFS file system list, click the name of the target file system and copy the content following ID on the page displayed.

-

everest.io/share-export-location

-

Shared path of the file system.

-

On the management console, choose Service List > Storage > Scalable File Service. You can obtain the shared path of the file system from the Mount Address column.

-

mountOptions

-

Mount options.

-

If not specified, the following configurations are used by default. For details, see SFS Volume Mount Options.

-
mountOptions:
-- vers=3
-- timeo=600
-- nolock
-- hard
-

everest.io/crypt-key-id

-

Encryption key ID. This field is mandatory when the volume is an encrypted volume.

-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The options are as follows:

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and SFS volume are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the file storage resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-

Creating an OBS Volume

Secure containers do not support OBS volumes.

-

A single user can create a maximum of 100 OBS buckets on the console. If you have a large number of CCE workloads and want to mount an OBS bucket to each of them, you may quickly run out of buckets. In this scenario, you are advised to access OBS through the OBS API or SDK instead of mounting OBS buckets to workloads on the console.

-
-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Access the cluster details page, choose Storage from the navigation pane, and click the Volumes tab.
  3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

    • Volume Type: Select OBS.
    • Select OBS resources.
    • PV Name: Enter a PV name.
    • Access Mode: ReadWriteMany
    • Reclaim Policy: Select Delete or Retain as required. For details, see PV Reclaim Policy.
    • Key: You can customize the access key (AK/SK) for mounting an OBS volume. You can use the AK/SK to create a secret and mount the secret to the PV. For details, see Using a Custom AK/SK to Mount an OBS Volume.
    • Mount Options: mount options. For details about the options, see Setting Mount Options.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only         # (Optional) The PV is deleted while the underlying volume is retained.
-  name: cce-obs-test
-spec:
-  accessModes:
-  - ReadWriteMany                      # Access mode. The value must be ReadWriteMany for OBS.
-  capacity:
-    storage: 1Gi      # Storage capacity. This parameter is set only to meet the PV format requirements. It can be set to any value. The actual OBS space size is not limited by this value.
-  csi:
-    driver: obs.csi.everest.io        # Dependent storage driver for the mounting.
-    fsType: obsfs                      # OBS file type.
-    volumeHandle: cce-obs-bucket       # OBS bucket name.
-    volumeAttributes:
-      everest.io/obs-volume-type: STANDARD
-      everest.io/region: eu-de
-      
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-    nodePublishSecretRef:
-      name: test-user
-      namespace: default
-  persistentVolumeReclaimPolicy: Retain       # Reclaim policy.
-  storageClassName: csi-obs                   # Storage class name. The value must be csi-obs for OBS.
-  mountOptions: []                            # Mount options.
- -
Table 4 Key parameters

Parameter

-

Description

-

everest.io/reclaim-policy: retain-volume-only

-

This field is optional.

-

Currently, only retain-volume-only is supported.

-

This field is valid only when the everest version is 1.2.9 or later and the reclaim policy is Delete. If the reclaim policy is Delete and the current value is retain-volume-only, deleting a PVC deletes the associated PV but retains the underlying storage volume.

-

fsType

-

File type. The value can be obsfs or s3fs. If the value is s3fs, an OBS bucket is created and mounted using s3fs. If the value is obsfs, an OBS parallel file system is created and mounted using obsfs. You are advised to set this field to obsfs.

-

volumeHandle

-

OBS bucket name.

-

everest.io/obs-volume-type

-

Storage class, including STANDARD (standard bucket) and WARM (infrequent access bucket).

-

everest.io/region

-

Region where the OBS bucket is deployed.

-

For details about the value of region, see Regions and Endpoints.

-

nodePublishSecretRef

-

Access key (AK/SK) used for mounting the object storage volume. You can use the AK/SK to create a secret and mount it to the PV. For details, see Using a Custom AK/SK to Mount an OBS Volume.

-

mountOptions

-

Mount options. For details, see OBS Volume Mount Options.

-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The Delete and Retain policies are supported.

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and OBS volume are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the object storage resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-
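The secret referenced by nodePublishSecretRef (test-user in the example above) stores the AK/SK. A rough sketch of creating it with kubectl follows; the data field names access.key and secret.key are assumptions here, so follow Using a Custom AK/SK to Mount an OBS Volume for the authoritative secret format.

# Assumption: everest reads the AK/SK from the access.key and secret.key data fields.
kubectl create secret generic test-user \
  --from-literal=access.key=<your-AK> \
  --from-literal=secret.key=<your-SK> \
  -n default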

Creating an SFS Turbo Volume

SFS Turbo and the cluster must be in the same VPC.

-
-

Using the CCE Console

-
  1. Log in to the CCE console.
  2. Access the cluster details page, choose Storage from the navigation pane, and click the Volumes tab.
  3. Click Create Volume in the upper right corner. In the dialog box displayed, set the volume parameters.

    • Volume Type: Select SFS Turbo.
    • SFS Turbo: Select SFS Turbo resources.
    • PV Name: Enter a PV name.
    • Access Mode: ReadWriteMany
    • Reclaim Policy: Select Retain. For details, see PV Reclaim Policy.
    • Mount Options: mount options. For details about the options, see Setting Mount Options.
    -

  4. Click Create.
-

Using YAML

-
apiVersion: v1
-kind: PersistentVolume
-metadata:
-  annotations:
-    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-  name: cce-sfsturbo-test
-spec:
-  accessModes:
-    - ReadWriteMany       # Access mode. The value must be ReadWriteMany for SFS Turbo.
-  capacity:
-    storage: 100.00Gi     # SFS Turbo volume capacity.
-  csi:
-    driver: sfsturbo.csi.everest.io    # Dependent storage driver for the mounting.
-    fsType: nfs
-    volumeHandle: 6674bd0a-d760-49de-bb9e-805c7883f047      # SFS Turbo volume ID.
-    volumeAttributes:
-      everest.io/share-export-location: 192.168.0.85:/      # Shared path of the SFS Turbo volume.
-      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-  persistentVolumeReclaimPolicy: Retain     # Reclaim policy.
-  storageClassName: csi-sfsturbo            # Storage class name. The value must be csi-sfsturbo for SFS Turbo.
-  mountOptions: []                          # Mount options.
- -
Table 5 Key parameters

Parameter

-

Description

-

volumeHandle

-

SFS Turbo volume ID.

-

You can obtain the ID on the SFS Turbo storage instance details page on the SFS console.

-

everest.io/share-export-location

-

Shared path of the SFS Turbo volume.

-

mountOptions

-

Mount options.

-

If not specified, the following configurations are used by default. For details, see SFS Volume Mount Options.

-
mountOptions:
-- vers=3
-- timeo=600
-- nolock
-- hard
-

persistentVolumeReclaimPolicy

-

A reclaim policy is supported when the cluster version is equal to or later than 1.19.10 and the everest version is equal to or later than 1.2.9.

-

The Delete and Retain policies are supported.

-

Delete:

-
  • If everest.io/reclaim-policy is not specified, both the PV and SFS Turbo volume are deleted when a PVC is deleted.
  • If everest.io/reclaim-policy is set to retain-volume-only, when a PVC is deleted, the PV is deleted but the SFS Turbo resources are retained.
-

Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.

-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-
-
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0380.html b/docs/cce/umn/cce_01_0380.html deleted file mode 100644 index 50546fc5..00000000 --- a/docs/cce/umn/cce_01_0380.html +++ /dev/null @@ -1,209 +0,0 @@ - - -

StorageClass

-

StorageClass describes the storage classes used in a cluster. You need to specify a StorageClass when creating a PVC or PV. Currently, CCE provides storage classes such as csi-disk, csi-nas, and csi-obs by default. When defining a PVC, you can use storageClassName to automatically create a PV of the corresponding type and the underlying storage resources.

-

You can run the following command to query the storage classes that CCE supports. You can use the CSI plug-in provided by CCE to customize a storage class, which functions similarly to the default storage classes in CCE.

-
# kubectl get sc
-NAME                PROVISIONER                     AGE
-csi-disk            everest-csi-provisioner         17d          # Storage class for EVS disks
-csi-nas             everest-csi-provisioner         17d          # Storage class for SFS file systems
-csi-obs             everest-csi-provisioner         17d          # Storage class for OBS buckets
-

After a StorageClass is set, PVs can be automatically created and maintained. You only need to specify the StorageClass when creating a PVC, which greatly reduces the workload.

-

In addition to the predefined storage classes provided by CCE, you can also customize storage classes. The following sections describe the challenges, the solution, and how to customize storage classes and set a default one.

-

Challenges

When using storage resources in CCE, the most common method is to specify storageClassName when creating a PVC to define the type of storage resources to be created. The following configuration shows how to use a PVC to apply for a SAS (high I/O) EVS disk (block storage).

-
apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-evs-example
-  namespace: default
-  annotations:
-    everest.io/disk-volume-type: SAS
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 10Gi
-  storageClassName: csi-disk
-

If you need to specify the EVS disk type, set the everest.io/disk-volume-type field. SAS is used as an example here, indicating the high I/O EVS disk type. Alternatively, you can choose SATA (common I/O) or SSD (ultra-high I/O).

-

This configuration method may not work if you want to:

-
  • Set storageClassName only, which is simpler than specifying the EVS disk type by using everest.io/disk-volume-type.
  • Avoid modifying YAML files or Helm charts. Some users switch from self-built or other Kubernetes services to CCE and have written YAML files of many applications. In these YAML files, different types of storage resources are specified by different StorageClassNames. When using CCE, they need to modify a large number of YAML files or Helm charts to use storage resources, which is labor-consuming and error-prone.
  • Set the default storageClassName for all applications to use the default storage class. In this way, you can create storage resources of the default type without needing to specify storageClassName in the YAML file.
-
-

Solution

This section describes how to set a custom storage class in CCE and how to set the default storage class. You can specify different types of storage resources by setting storageClassName.

-
  • For the first scenario, you can define custom storageClassNames for SAS and SSD EVS disks. For example, define a storage class named csi-disk-sas for creating SAS disks. The following figure shows the differences before and after you use a custom storage class.

    -
  • For the second scenario, you can define a storage class with the same name as that in the existing YAML file without needing to modify storageClassName in the YAML file.
  • For the third scenario, you can set the default storage class as described below to create storage resources without specifying storageClassName in YAML files.
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name: pvc-evs-example
    -  namespace: default
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -
-
-

Custom Storage Classes

You can customize a high I/O storage class in a YAML file. For example, the name csi-disk-sas indicates that the disk type is SAS (high I/O).

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-disk-sas                          # Name of the high I/O storage class, which can be customized.
-parameters:
-  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
-  csi.storage.k8s.io/fstype: ext4
-  everest.io/disk-volume-type: SAS            # High I/O EVS disk type, which cannot be customized.
-  everest.io/passthrough: "true"
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true                    # true indicates that capacity expansion is allowed.
-

For an ultra-high I/O storage class, you can set the class name to csi-disk-ssd to create SSD EVS disk (ultra-high I/O).

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-disk-ssd                       # Name of the ultra-high I/O storage class, which can be customized.
-parameters:
-  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
-  csi.storage.k8s.io/fstype: ext4
-  everest.io/disk-volume-type: SSD         # Ultra-high I/O EVS disk type, which cannot be customized.
-  everest.io/passthrough: "true"
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true
-

reclaimPolicy: indicates the reclaim policy of the underlying cloud storage. The value can be Delete or Retain.

-
  • Delete: When a PVC is deleted, both the PV and the EVS disk are deleted.
  • Retain: When a PVC is deleted, the PV and underlying storage resources are not deleted. Instead, you must manually delete these resources. After that, the PV resource is in the Released state and cannot be bound to the PVC again.
-

If high data security is required, you are advised to select Retain to prevent data from being deleted by mistake.

-

After the definition is complete, run the following kubectl create commands to create the storage classes.

-
# kubectl create -f sas.yaml
-storageclass.storage.k8s.io/csi-disk-sas created
-# kubectl create -f ssd.yaml
-storageclass.storage.k8s.io/csi-disk-ssd created
-

Query the storage class again. Two more types of storage classes are displayed in the command output, as shown below.

-
# kubectl get sc
-NAME                PROVISIONER                     AGE
-csi-disk            everest-csi-provisioner         17d
-csi-disk-sas        everest-csi-provisioner         2m28s
-csi-disk-ssd        everest-csi-provisioner         16s
-csi-disk-topology   everest-csi-provisioner         17d
-csi-nas             everest-csi-provisioner         17d
-csi-obs             everest-csi-provisioner         17d
-csi-sfsturbo        everest-csi-provisioner         17d
-

Other types of storage resources can be defined in a similar way. You can use kubectl to obtain the YAML file and modify it as required.

-
  • File storage
    # kubectl get sc csi-nas -oyaml
    -kind: StorageClass
    -apiVersion: storage.k8s.io/v1
    -metadata:
    -  name: csi-nas
    -provisioner: everest-csi-provisioner
    -parameters:
    -  csi.storage.k8s.io/csi-driver-name: nas.csi.everest.io
    -  csi.storage.k8s.io/fstype: nfs
    -  everest.io/share-access-level: rw
    -  everest.io/share-access-to: 5e3864c6-e78d-4d00-b6fd-de09d432c632   # ID of the VPC to which the cluster belongs
    -  everest.io/share-is-public: 'false'
    -  everest.io/zone: xxxxx          # AZ
    -reclaimPolicy: Delete
    -allowVolumeExpansion: true
    -volumeBindingMode: Immediate
    -
  • Object storage
    # kubectl get sc csi-obs -oyaml
    -kind: StorageClass
    -apiVersion: storage.k8s.io/v1
    -metadata:
    -  name: csi-obs
    -provisioner: everest-csi-provisioner
    -parameters:
    -  csi.storage.k8s.io/csi-driver-name: obs.csi.everest.io
    -  csi.storage.k8s.io/fstype: s3fs           # Object storage type. s3fs indicates an object bucket, and obsfs indicates a parallel file system.
    -  everest.io/obs-volume-type: STANDARD      # Storage class of the OBS bucket
    -reclaimPolicy: Delete
    -volumeBindingMode: Immediate
    -
-
-

Setting a Default Storage Class

You can specify a storage class as the default class. In this way, if you do not specify storageClassName when creating a PVC, the PVC is created using the default storage class.

-

For example, to specify csi-disk-ssd as the default storage class, edit your YAML file as follows:

-
apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: csi-disk-ssd
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "true"   # Specifies the default storage class in a cluster. A cluster can have only one default storage class.
-parameters:
-  csi.storage.k8s.io/csi-driver-name: disk.csi.everest.io
-  csi.storage.k8s.io/fstype: ext4
-  everest.io/disk-volume-type: SSD
-  everest.io/passthrough: "true"
-provisioner: everest-csi-provisioner
-reclaimPolicy: Delete
-volumeBindingMode: Immediate
-allowVolumeExpansion: true
-

Delete the existing csi-disk-ssd storage class, run the kubectl create command to create it again (now with the default annotation), and then query the storage classes. The following information is displayed.

-
# kubectl delete sc csi-disk-ssd
-storageclass.storage.k8s.io "csi-disk-ssd" deleted
-# kubectl create -f ssd.yaml
-storageclass.storage.k8s.io/csi-disk-ssd created
-# kubectl get sc
-NAME                     PROVISIONER                     AGE
-csi-disk                 everest-csi-provisioner         17d
-csi-disk-sas             everest-csi-provisioner         114m
-csi-disk-ssd (default)   everest-csi-provisioner         9s
-csi-disk-topology        everest-csi-provisioner         17d
-csi-nas                  everest-csi-provisioner         17d
-csi-obs                  everest-csi-provisioner         17d
-csi-sfsturbo             everest-csi-provisioner         17d
-
-

Verification

  • Use csi-disk-sas to create a PVC.
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name:  sas-disk
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -  storageClassName: csi-disk-sas
    -

    Create the PVC and view its details. As shown below, the PVC is created and bound, and the value of STORAGECLASS is csi-disk-sas.

    -
    # kubectl create -f sas-disk.yaml 
    -persistentvolumeclaim/sas-disk created
    -# kubectl get pvc
    -NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    -sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   24s
    -# kubectl get pv
    -NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
    -pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            30s
    -

    View the PVC details on the CCE console. On the PV details page, you can see that the disk type is high I/O.

    -

    -
  • If storageClassName is not specified, the default configuration is used, as shown below.
    apiVersion: v1
    -kind: PersistentVolumeClaim
    -metadata:
    -  name:  ssd-disk
    -spec:
    -  accessModes:
    -  - ReadWriteOnce
    -  resources:
    -    requests:
    -      storage: 10Gi
    -

    Create and view the storage resource. You can see that the storage class of PVC ssd-disk is csi-disk-ssd, indicating that csi-disk-ssd is used by default.

    -
    # kubectl create -f ssd-disk.yaml 
    -persistentvolumeclaim/ssd-disk created
    -# kubectl get pvc
    -NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    -sas-disk   Bound    pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            csi-disk-sas   16m
    -ssd-disk   Bound    pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            csi-disk-ssd   10s
    -# kubectl get pv
    -NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
    -pvc-4d2b059c-0d6c-44af-9994-f74d01c78731   10Gi       RWO            Delete           Bound       default/ssd-disk          csi-disk-ssd            15s
    -pvc-6e2f37f9-7346-4419-82f7-b42e79f7964c   10Gi       RWO            Delete           Bound       default/sas-disk          csi-disk-sas            17m
    -

    View the PVC details on the CCE console. On the PV details page, you can see that the disk type is ultra-high I/O.

    -

    -
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0388.html b/docs/cce/umn/cce_01_0388.html deleted file mode 100644 index 905fd44d..00000000 --- a/docs/cce/umn/cce_01_0388.html +++ /dev/null @@ -1,64 +0,0 @@ - - -

Cluster Secrets

-

By default, CCE creates the following secrets in each namespace:

-
  • default-secret
  • paas.elb
  • default-token-xxxxx (xxxxx is a random number.)
-

-

The functions of these secrets are described as follows.

-

default-secret

The type of default-secret is kubernetes.io/dockerconfigjson. The data is the credential for logging in to the SWR image repository and is used to pull images from SWR. If you need to pull an image from SWR when creating a workload on CCE, set imagePullSecrets to default-secret.

-
apiVersion: v1                      
-kind: Pod                          
-metadata:
-  name: nginx                      
-spec:                            
-  containers:
-  - image: nginx:alpine            
-    name: container-0               
-    resources:                      
-      limits:
-        cpu: 100m
-        memory: 200Mi
-      requests:
-        cpu: 100m
-        memory: 200Mi
-  imagePullSecrets:
-  - name: default-secret
-

The data of default-secret is updated periodically, and the current data will expire after a certain period of time. You can run the describe command to view the expiration time of default-secret.

-

Use default-secret directly instead of copying its content to create a new secret. The credential in a copied secret will expire, and then images cannot be pulled.

-
-
$ kubectl describe secret default-secret
-Name:         default-secret
-Namespace:    default
-Labels:       secret-generated-by=cce
-Annotations:  temporary-ak-sk-expires-at: 2021-11-26 20:55:31.380909 +0000 UTC
-
-Type:  kubernetes.io/dockerconfigjson
-
-Data
-====
-.dockerconfigjson:  347 bytes
-
-

paas.elb

The data of paas.elb is the temporary AK/SK data, which is used to create ELB load balancers during Service and ingress creation. The data of paas.elb is periodically updated and expires after a certain period of time.

-

In practice, you will not directly use paas.elb. However, do not delete it. Otherwise, ELB load balancers will fail to be created.

-
-

default-token-xxxxx

By default, Kubernetes creates a service account named default for each namespace. default-token-xxxxx is the key of the service account, and xxxxx is a random number.

-
$ kubectl get sa
-NAME     SECRETS   AGE
-default  1         30d
-$ kubectl describe sa default
-Name:                default
-Namespace:           default
-Labels:              <none>
-Annotations:         <none>
-Image pull secrets:  <none>
-Mountable secrets:   default-token-vssmw
-Tokens:              default-token-vssmw
-Events:              <none>
-
-
-
- -
- diff --git a/docs/cce/umn/cce_01_0393.html b/docs/cce/umn/cce_01_0393.html deleted file mode 100644 index 4b713370..00000000 --- a/docs/cce/umn/cce_01_0393.html +++ /dev/null @@ -1,22 +0,0 @@ - - -

Deployment Examples

-

-
-
- - - -
- diff --git a/docs/cce/umn/cce_01_0395.html b/docs/cce/umn/cce_01_0395.html deleted file mode 100644 index 4799eddc..00000000 --- a/docs/cce/umn/cce_01_0395.html +++ /dev/null @@ -1,14 +0,0 @@ - - -

Switching from AOM to HPA for Auto Scaling

-

CCE clusters of v1.15 or earlier support workload scaling based on AOM monitoring data. This function is no longer supported in CCE clusters of v1.17 or later.

-

If you have configured auto scaling based on AOM, you can switch to HPA policies after your cluster is upgraded to v1.17. Note the following differences during the switchover:

-
  • In AOM-based auto scaling, the resource usage rate is calculated against the limit of a workload and ranges from 0% to 100%. In HPA-based scaling, the usage rate is calculated against the resource request instead.
-

For example, if the memory request of a workload is 2 GB and the memory limit is 16 GB, a scale-out is triggered as long as the memory utilization reaches 50% of the limit (8 GB) in AOM-based auto scaling. In HPA-based scaling, you need to set the memory usage rate to 400% (16 x 50%/2) to trigger the same scaling.
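The equivalent HPA policy can be expressed as follows. This is a minimal sketch using the autoscaling/v2beta2 API (available in v1.17 clusters); the Deployment name example-deployment and the replica bounds are hypothetical.

apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: example-hpa
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-deployment      # Hypothetical workload to scale.
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 400   # 400% of the 2 GiB request equals 8 GiB, that is, 50% of the 16 GiB limit.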

-
-
- -
- diff --git a/docs/cce/umn/cce_10_0002.html b/docs/cce/umn/cce_10_0002.html new file mode 100644 index 00000000..c0d8561b --- /dev/null +++ b/docs/cce/umn/cce_10_0002.html @@ -0,0 +1,21 @@ + + +

Cluster Overview

+
+ + diff --git a/docs/cce/umn/cce_10_0003.html b/docs/cce/umn/cce_10_0003.html new file mode 100644 index 00000000..665f5868 --- /dev/null +++ b/docs/cce/umn/cce_10_0003.html @@ -0,0 +1,131 @@ + + +

Resetting a Node

+

Scenario

You can reset a node to modify the node configuration, such as the node OS and login mode.

+

Resetting a node will reinstall the node OS and the Kubernetes software on the node. If a node becomes unavailable because its configuration was modified, you can reset the node to rectify the fault.

+
+

Notes and Constraints

  • For CCE clusters and CCE Turbo clusters, the version must be v1.13 or later to support node resetting.
+
+

Notes

  • Only worker nodes can be reset. If the node is still unavailable after the resetting, delete the node and create a new one.
  • Resetting a node will reinstall the node OS and interrupt workload services running on the node. Therefore, perform this operation during off-peak hours.
  • Data in the system disk and Docker data disks will be cleared. Back up important data before resetting the node.
  • When an extra data disk is mounted to a node, data in this disk will be cleared if the disk has not been unmounted before the node reset. To prevent data loss, back up data in advance and mount the data disk again after the node reset is complete.
  • The IP addresses of the workload pods on the node will change, but the container network access is not affected.
  • Ensure that there is sufficient EVS disk quota remaining.
  • While the node is being deleted, the backend will set the node to the unschedulable state.
+
+

Procedure

The new console allows you to reset nodes in batches. You can also use private images to reset nodes in batches.

+
  1. Log in to the CCE console.
  2. Click the cluster name and access the cluster details page, choose Nodes in the navigation pane, and select one or multiple nodes to be reset in the list on the right. Choose More > Reset.
  3. In the displayed dialog box, click Yes.

    • For nodes in the DefaultPool node pool, the parameter setting page is displayed. Set the parameters by referring to 4.
    • For a node created in a node pool, parameters cannot be configured when you reset the node. The node is reset directly using the node pool's configuration image.
    +

  4. Specify node parameters.

    Compute Settings +
    Table 1 Configuration parameters

    Parameter

    +

    Description

    +

    Specification

    +

    Node specifications cannot be modified when you reset a node.

    +

    Container Engine

    +

    CCE clusters support Docker.

    +

    For a CCE Turbo cluster, both Docker and containerd are supported. For details, see Mapping between Node OSs and Container Engines.

    +

    OS

    +

    Public image: Select an OS for the node.

    +

    Private image: You can use private images.

    +

    Login Mode

    +
    • Key Pair

      Select the key pair used to log in to the node. You can select a shared key.

      +

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

      +
    +
    +
    +
    +

    Storage Settings

    +
    Configure storage resources on a node for the containers running on it. +
    Table 2 Configuration parameters

    Parameter

    +

    Description

    +

    System Disk

    +

    Directly use the system disk of the cloud server.

    +

    Data Disk

    +

    At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

    +

    Click Expand and select Allocate Disk Space to define the disk space occupied by the container runtime to store the working directories, container image data, and image metadata. For details about how to allocate data disk space, see Data Disk Space Allocation.

    +

    For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

    +
    +
    +
    +
    Advanced Settings +
    Table 3 Advanced configuration parameters

    Parameter

    +

    Description

    +

    Kubernetes Label

    +

    Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

    +

    Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    +

    Resource Tag

    +

    You can add resource tags to classify resources.

    +

    You can create predefined tags in Tag Management Service (TMS). Predefined tags are visible to all service resources that support the tagging function. You can use these tags to improve tagging and resource migration efficiency.

    +

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

    +

    Taint

    +
    This field is left blank by default. You can add taints to set anti-affinity for the node. A maximum of 10 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    +
    NOTICE:
    • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
    • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.
    +
    +
    +

    Max. Pods

    +

    Maximum number of pods that can run on the node, including the default system pods.

    +

    This limit prevents the node from being overloaded with pods.

    +

    Pre-installation Command

    +

    Enter commands. A maximum of 1,000 characters are allowed.

    +

    The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

    +

    Post-installation Command

    +

    Enter commands. A maximum of 1,000 characters are allowed.

    +

    The script will be executed after Kubernetes software is installed and will not affect the installation.

    +
    +
    +
    +

  5. Click Next: Confirm.
  6. Click Submit.
+

+
+
+
+ +
+ diff --git a/docs/cce/umn/cce_10_0004.html b/docs/cce/umn/cce_10_0004.html new file mode 100644 index 00000000..bbe9012a --- /dev/null +++ b/docs/cce/umn/cce_10_0004.html @@ -0,0 +1,105 @@ + + +

Managing Node Labels

+

Node Label Usage Scenario

Node labels are mainly used in the following scenarios:

+
  • Node management: Node labels are used to classify nodes.
  • Affinity and anti-affinity between a workload and node:
    • Different workloads have different resource requirements, such as CPU, memory, and I/O. If a workload consumes too many resources in a cluster, other workloads in the same cluster may fail to run properly. In this case, you are advised to add different labels to nodes. When deploying a workload, you can select nodes with the specified labels for affinity deployment to ensure that the system runs properly; otherwise, use node anti-affinity deployment. (A nodeSelector sketch is provided after this list.)
    • A system can be divided into multiple modules. Each module consists of multiple microservices. To ensure efficient O&M, you can add a module label to each node so that each module can be deployed on the corresponding node. In this way, modules do not interfere with each other and microservices can be easily maintained on their nodes.
    +
+
+
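As referenced in the affinity scenario above, a label added to a node can be consumed by a workload through nodeSelector (or nodeAffinity). The following is a minimal sketch; the workload name qa-app is hypothetical, and deploy_qa=true is the example label used later in Adding or Deleting a Node Label.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: qa-app                  # Hypothetical workload name.
spec:
  replicas: 2
  selector:
    matchLabels:
      app: qa-app
  template:
    metadata:
      labels:
        app: qa-app
    spec:
      nodeSelector:
        deploy_qa: "true"       # Schedule pods only onto nodes carrying this label.
      containers:
      - name: container-0
        image: nginx:alpine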

Inherent Label of a Node

After a node is created, some fixed labels exist and cannot be deleted. For details about these labels, see Table 1.

+ +
Table 1 Inherent label of a node

Key

+

Description

+

New: topology.kubernetes.io/region

+

Old: failure-domain.beta.kubernetes.io/region

+

Region where the node is located

+

New: topology.kubernetes.io/zone

+

Old: failure-domain.beta.kubernetes.io/zone

+

AZ where the node is located

+

New: node.kubernetes.io/baremetal

+

Old: failure-domain.beta.kubernetes.io/is-baremetal

+

Whether the node is a bare metal node

+

false indicates that the node is not a bare metal node.

+

node.kubernetes.io/instance-type

+

Node specifications

+

kubernetes.io/arch

+

Node processor architecture

+

kubernetes.io/hostname

+

Node name

+

kubernetes.io/os

+

OS type

+

node.kubernetes.io/subnetid

+

ID of the subnet where the node is located.

+

os.architecture

+

Node processor architecture

+

For example, amd64 indicates an AMD64 (64-bit x86) processor.

+

os.name

+

Node OS name

+

os.version

+

Node OS kernel version

+

node.kubernetes.io/container-engine

+

Container engine used by the node.

+

accelerator

+

GPU node labels.

+

cce.cloud.com/cce-nodepool

+

The dedicated label of a node in a node pool.

+
+
+
+

Adding or Deleting a Node Label

  1. Log in to the CCE console.
  2. Click the cluster name, access the cluster details page, and choose Nodes in the navigation pane. On the page displayed, select a node and click Manage Labels and Taints.
  3. In the displayed dialog box, click Add batch operations under Batch Operation, and then choose Add/Update or Delete.

    Enter the key and value of the label to be added or deleted, and click OK.

    +

    For example, the key is deploy_qa and the value is true, indicating that the node is used to deploy the QA (test) environment.

    +

  4. After the label is added, check the added label in node data.
+
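If you prefer kubectl, the same labels can be managed from the command line. A minimal sketch using the deploy_qa example above (the node name is hypothetical):

# Add or update the label on a node.
kubectl label node 192.168.0.100 deploy_qa=true --overwrite

# Check the labels on the node.
kubectl get node 192.168.0.100 --show-labels

# Delete the label (note the trailing hyphen).
kubectl label node 192.168.0.100 deploy_qa-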
+
+
+ +
+ diff --git a/docs/cce/umn/cce_01_0006.html b/docs/cce/umn/cce_10_0006.html similarity index 63% rename from docs/cce/umn/cce_01_0006.html rename to docs/cce/umn/cce_10_0006.html index 5f5e19e3..3e407221 100644 --- a/docs/cce/umn/cce_01_0006.html +++ b/docs/cce/umn/cce_10_0006.html @@ -1,79 +1,79 @@ - +

Overview

-

CCE provides Kubernetes-native container deployment and management and supports lifecycle management of container workloads, including creation, configuration, monitoring, auto scaling, upgrade, uninstall, service discovery, and load balancing.

-

Pod

A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod encapsulates one or more containers, storage volumes, a unique network IP address, and options that govern how the containers should run.

-

Pods can be used in either of the following ways:

-