From f7b9a8853556a8428d2206e1e4cce2535b0072e1 Mon Sep 17 00:00:00 2001
From: "Dong, Qiu Jian"
Date: Wed, 4 Sep 2024 11:43:54 +0000
Subject: [PATCH] CCE UMN update -20240625 version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-by: Kovács, Zoltán
Co-authored-by: Dong, Qiu Jian
Co-committed-by: Dong, Qiu Jian
---
docs/cce/umn/ALL_META.TXT.json | 11013 +++++++++------- docs/cce/umn/CLASS.TXT.json | 5399 ++++---- docs/cce/umn/cce_01_0300.html | 27 +- docs/cce/umn/cce_10_0003.html | 115 +- docs/cce/umn/cce_10_0004.html | 13 +- docs/cce/umn/cce_10_0006.html | 4 +- docs/cce/umn/cce_10_0007.html | 16 +- docs/cce/umn/cce_10_0009.html | 4 +- docs/cce/umn/cce_10_0010.html | 8 +- docs/cce/umn/cce_10_0011.html | 10 +- docs/cce/umn/cce_10_0012.html | 156 +- docs/cce/umn/cce_10_0014.html | 16 +- docs/cce/umn/cce_10_0015.html | 12 +- docs/cce/umn/cce_10_0016.html | 10 +- docs/cce/umn/cce_10_0018.html | 12 +- docs/cce/umn/cce_10_0020.html | 6 +- docs/cce/umn/cce_10_0024.html | 2 +- docs/cce/umn/cce_10_0025.html | 2 +- docs/cce/umn/cce_10_0026.html | 2 +- docs/cce/umn/cce_10_0028.html | 31 +- docs/cce/umn/cce_10_0031.html | 8 +- docs/cce/umn/cce_10_0035.html | 2 + docs/cce/umn/cce_10_00356.html | 4 +- docs/cce/umn/cce_10_0036.html | 6 +- docs/cce/umn/cce_10_0044.html | 4 + docs/cce/umn/cce_10_0046.html | 8 +- docs/cce/umn/cce_10_0047.html | 10 +- docs/cce/umn/cce_10_0048.html | 14 +- docs/cce/umn/cce_10_0054.html | 8 +- docs/cce/umn/cce_10_0059.html | 31 +- docs/cce/umn/cce_10_0063.html | 7 +- docs/cce/umn/cce_10_0064.html | 18 +- docs/cce/umn/cce_10_0066.html | 273 +- docs/cce/umn/cce_10_0068.html | 4 +- docs/cce/umn/cce_10_0081.html | 16 +- docs/cce/umn/cce_10_0083.html | 90 +- docs/cce/umn/cce_10_0084.html | 2 +- docs/cce/umn/cce_10_0091.html | 4 +- docs/cce/umn/cce_10_0094.html | 8 +- docs/cce/umn/cce_10_0105.html | 6 +- docs/cce/umn/cce_10_0107.html | 14 +- docs/cce/umn/cce_10_0112.html | 10 +- docs/cce/umn/cce_10_0113.html | 4 +- docs/cce/umn/cce_10_0125.html | 4 +- docs/cce/umn/cce_10_0127.html | 4 +- docs/cce/umn/cce_10_0129.html | 194 +- docs/cce/umn/cce_10_0130.html | 10 +- docs/cce/umn/cce_10_0132.html | 114 +- docs/cce/umn/cce_10_0140.html | 4 +- docs/cce/umn/cce_10_0141.html | 135 +- docs/cce/umn/cce_10_0142.html | 12 +- docs/cce/umn/cce_10_0146.html | 10 +- docs/cce/umn/cce_10_0150.html | 4 +- docs/cce/umn/cce_10_0151.html | 4 +- docs/cce/umn/cce_10_0152.html | 6 +- docs/cce/umn/cce_10_0153.html | 12 +- docs/cce/umn/cce_10_0154.html | 534 +- docs/cce/umn/cce_10_0163.html | 8 +- docs/cce/umn/cce_10_0164.html | 2 - docs/cce/umn/cce_10_0175.html | 10 +- docs/cce/umn/cce_10_0178.html | 52 +- docs/cce/umn/cce_10_0183.html | 4 +- docs/cce/umn/cce_10_0184.html | 4 +- docs/cce/umn/cce_10_0185.html | 2 +- docs/cce/umn/cce_10_0186.html | 10 +- docs/cce/umn/cce_10_0187.html | 2 +- docs/cce/umn/cce_10_0188.html | 14 +- docs/cce/umn/cce_10_0189.html | 4 +- docs/cce/umn/cce_10_0191.html | 4 +- docs/cce/umn/cce_10_0193.html | 323 +- docs/cce/umn/cce_10_0196.html | 61 +- docs/cce/umn/cce_10_0197.html | 25 +- docs/cce/umn/cce_10_0198.html | 44 +- docs/cce/umn/cce_10_0201.html | 6 +- docs/cce/umn/cce_10_0205.html | 137 +- docs/cce/umn/cce_10_0208.html | 14 +- docs/cce/umn/cce_10_0209.html | 29 +- docs/cce/umn/cce_10_0210.html | 4 +- docs/cce/umn/cce_10_0212.html | 4 +- docs/cce/umn/cce_10_0213.html | 491 +- docs/cce/umn/cce_10_0214.html | 7 +- docs/cce/umn/cce_10_0215.html | 2 +- docs/cce/umn/cce_10_0216.html | 8 +- docs/cce/umn/cce_10_0222.html | 4 +-
docs/cce/umn/cce_10_0232.html | 72 +- docs/cce/umn/cce_10_0240.html | 87 +- docs/cce/umn/cce_10_0245.html | 2 +- docs/cce/umn/cce_10_0249.html | 4 +- docs/cce/umn/cce_10_0251.html | 62 +- docs/cce/umn/cce_10_0252.html | 37 +- docs/cce/umn/cce_10_0276.html | 4 +- docs/cce/umn/cce_10_0277.html | 7 +- docs/cce/umn/cce_10_0278.html | 6 +- docs/cce/umn/cce_10_0279.html | 30 +- docs/cce/umn/cce_10_0280.html | 10 +- docs/cce/umn/cce_10_0281.html | 38 +- docs/cce/umn/cce_10_0282.html | 51 +- docs/cce/umn/cce_10_0283.html | 63 +- docs/cce/umn/cce_10_0284.html | 79 +- docs/cce/umn/cce_10_0285.html | 8 +- docs/cce/umn/cce_10_0287.html | 2 +- docs/cce/umn/cce_10_0288.html | 10 +- docs/cce/umn/cce_10_0290.html | 8 +- docs/cce/umn/cce_10_0291.html | 2 + docs/cce/umn/cce_10_0293.html | 4 +- docs/cce/umn/cce_10_0296.html | 56 +- docs/cce/umn/cce_10_0300.html | 12 +- docs/cce/umn/cce_10_0302.html | 134 +- docs/cce/umn/cce_10_0307.html | 14 +- docs/cce/umn/cce_10_0336.html | 10 +- docs/cce/umn/cce_10_0337.html | 78 +- docs/cce/umn/cce_10_0338.html | 4 +- docs/cce/umn/cce_10_0341.html | 16 +- docs/cce/umn/cce_10_0342.html | 60 +- docs/cce/umn/cce_10_0345.html | 10 +- docs/cce/umn/cce_10_0348.html | 42 +- docs/cce/umn/cce_10_0349.html | 44 +- docs/cce/umn/cce_10_0351.html | 4 +- docs/cce/umn/cce_10_0352.html | 4 +- docs/cce/umn/cce_10_0353.html | 2 +- docs/cce/umn/cce_10_0354.html | 2 +- docs/cce/umn/cce_10_0355.html | 14 +- docs/cce/umn/cce_10_0360.html | 4 +- docs/cce/umn/cce_10_0361.html | 4 +- docs/cce/umn/cce_10_0363.html | 116 +- docs/cce/umn/cce_10_0365.html | 3 +- docs/cce/umn/cce_10_0367.html | 10 +- docs/cce/umn/cce_10_0373.html | 362 + docs/cce/umn/cce_10_0374.html | 2 +- docs/cce/umn/cce_10_0377.html | 12 +- docs/cce/umn/cce_10_0378.html | 16 +- docs/cce/umn/cce_10_0379.html | 110 +- docs/cce/umn/cce_10_0380.html | 86 +- docs/cce/umn/cce_10_0381.html | 10 +- docs/cce/umn/cce_10_0382.html | 4 +- docs/cce/umn/cce_10_0384.html | 121 +- docs/cce/umn/cce_10_0385.html | 538 +- docs/cce/umn/cce_10_0386.html | 38 +- docs/cce/umn/cce_10_0391.html | 2 +- docs/cce/umn/cce_10_0397.html | 6 +- docs/cce/umn/cce_10_0400.html | 10 +- docs/cce/umn/cce_10_0402.html | 6 +- docs/cce/umn/cce_10_0403.html | 6 +- docs/cce/umn/cce_10_0405.html | 594 +- docs/cce/umn/cce_10_0406.html | 236 + docs/cce/umn/cce_10_0415.html | 19 +- docs/cce/umn/cce_10_0421.html | 2 +- docs/cce/umn/cce_10_0425.html | 13 +- docs/cce/umn/cce_10_0426.html | 18 + docs/cce/umn/cce_10_0430.html | 2 +- docs/cce/umn/cce_10_0431.html | 4 +- docs/cce/umn/cce_10_0436.html | 2 +- docs/cce/umn/cce_10_0437.html | 2 +- docs/cce/umn/cce_10_0439.html | 4 +- docs/cce/umn/cce_10_0441.html | 86 +- docs/cce/umn/cce_10_0442.html | 2 +- docs/cce/umn/cce_10_0448.html | 2 +- docs/cce/umn/cce_10_0450.html | 4 +- docs/cce/umn/cce_10_0456.html | 2 +- docs/cce/umn/cce_10_0460.html | 2 +- docs/cce/umn/cce_10_0462.html | 21 +- docs/cce/umn/cce_10_0463.html | 62 +- docs/cce/umn/cce_10_0465.html | 2 +- docs/cce/umn/cce_10_0476.html | 93 +- docs/cce/umn/cce_10_0477.html | 4 +- docs/cce/umn/cce_10_0488.html | 2 +- docs/cce/umn/cce_10_0489.html | 2 +- docs/cce/umn/cce_10_0493.html | 2 +- docs/cce/umn/cce_10_0494.html | 4 +- docs/cce/umn/cce_10_0495.html | 8 +- docs/cce/umn/cce_10_0497.html | 10 +- docs/cce/umn/cce_10_0498.html | 8 +- docs/cce/umn/cce_10_0500.html | 4 +- docs/cce/umn/cce_10_0504.html | 2 +- docs/cce/umn/cce_10_0505.html | 2 +- docs/cce/umn/cce_10_0510.html | 6 +- docs/cce/umn/cce_10_0511.html | 8 +- docs/cce/umn/cce_10_0512.html | 6 +- 
docs/cce/umn/cce_10_0513.html | 4 +- docs/cce/umn/cce_10_0514.html | 4 +- docs/cce/umn/cce_10_0515.html | 4 +- docs/cce/umn/cce_10_0516.html | 4 +- docs/cce/umn/cce_10_0517.html | 4 +- docs/cce/umn/cce_10_0518.html | 4 +- docs/cce/umn/cce_10_0549.html | 46 +- docs/cce/umn/cce_10_0550.html | 20 +- docs/cce/umn/cce_10_0552.html | 6 +- docs/cce/umn/cce_10_0601.html | 5 +- docs/cce/umn/cce_10_0602.html | 8 +- docs/cce/umn/cce_10_0603.html | 2 +- docs/cce/umn/cce_10_0604.html | 3 +- docs/cce/umn/cce_10_0605.html | 23 +- docs/cce/umn/cce_10_0613.html | 8 +- docs/cce/umn/cce_10_0614.html | 118 +- docs/cce/umn/cce_10_0615.html | 99 +- docs/cce/umn/cce_10_0616.html | 140 +- docs/cce/umn/cce_10_0617.html | 4 +- docs/cce/umn/cce_10_0619.html | 148 +- docs/cce/umn/cce_10_0620.html | 107 +- docs/cce/umn/cce_10_0624.html | 4 +- docs/cce/umn/cce_10_0625.html | 146 +- docs/cce/umn/cce_10_0626.html | 22 +- docs/cce/umn/cce_10_0628.html | 40 +- docs/cce/umn/cce_10_0630.html | 84 +- docs/cce/umn/cce_10_0631.html | 40 +- docs/cce/umn/cce_10_0633.html | 8 +- docs/cce/umn/cce_10_0634.html | 113 +- docs/cce/umn/cce_10_0635.html | 79 +- docs/cce/umn/cce_10_0637.html | 4 +- docs/cce/umn/cce_10_0638.html | 18 +- docs/cce/umn/cce_10_0642.html | 8 +- docs/cce/umn/cce_10_0649.html | 26 + docs/cce/umn/cce_10_0651.html | 262 + docs/cce/umn/cce_10_0652.html | 133 +- docs/cce/umn/cce_10_0653.html | 155 +- docs/cce/umn/cce_10_0654.html | 6 +- docs/cce/umn/cce_10_0655.html | 2 +- docs/cce/umn/cce_10_0657.html | 2 +- docs/cce/umn/cce_10_0658.html | 17 + docs/cce/umn/cce_10_0659.html | 8 +- docs/cce/umn/cce_10_0660.html | 18 +- docs/cce/umn/cce_10_0675.html | 10 +- docs/cce/umn/cce_10_0677.html | 8 +- docs/cce/umn/cce_10_0678.html | 16 +- docs/cce/umn/cce_10_0679.html | 19 - docs/cce/umn/cce_10_0680.html | 10 +- docs/cce/umn/cce_10_0681.html | 111 +- docs/cce/umn/cce_10_0683.html | 25 +- docs/cce/umn/cce_10_0684.html | 8 +- docs/cce/umn/cce_10_0685.html | 2 +- docs/cce/umn/cce_10_0686.html | 8 +- docs/cce/umn/cce_10_0687.html | 4 +- docs/cce/umn/cce_10_0688.html | 2 +- docs/cce/umn/cce_10_0689.html | 6 +- docs/cce/umn/cce_10_0691.html | 4 +- docs/cce/umn/cce_10_0694.html | 6 +- docs/cce/umn/cce_10_0695.html | 324 +- docs/cce/umn/cce_10_0702.html | 4 +- docs/cce/umn/cce_10_0704.html | 4 +- docs/cce/umn/cce_10_0705.html | 4 +- docs/cce/umn/cce_10_0721.html | 2 +- docs/cce/umn/cce_10_0722.html | 4 +- docs/cce/umn/cce_10_0725.html | 6 +- docs/cce/umn/cce_10_0726.html | 20 +- docs/cce/umn/cce_10_0727.html | 16 +- docs/cce/umn/cce_10_0728.html | 10 +- docs/cce/umn/cce_10_0729.html | 112 +- docs/cce/umn/cce_10_0730.html | 8 +- docs/cce/umn/cce_10_0734.html | 316 + docs/cce/umn/cce_10_0766.html | 12 +- docs/cce/umn/cce_10_0767.html | 9 +- docs/cce/umn/cce_10_0768.html | 4 + docs/cce/umn/cce_10_0773.html | 4 +- docs/cce/umn/cce_10_0775.html | 2 +- docs/cce/umn/cce_10_0777.html | 4 +- docs/cce/umn/cce_10_0778.html | 4 +- docs/cce/umn/cce_10_0789.html | 135 + docs/cce/umn/cce_10_0811.html | 2 + docs/cce/umn/cce_10_0813.html | 134 + docs/cce/umn/cce_10_0831.html | 79 + docs/cce/umn/cce_10_0832.html | 84 + docs/cce/umn/cce_10_0839.html | 141 + docs/cce/umn/cce_10_0841.html | 93 + docs/cce/umn/cce_10_0842.html | 96 + docs/cce/umn/cce_10_0859.html | 37 + docs/cce/umn/cce_10_0860.html | 17 + docs/cce/umn/cce_10_0864.html | 22 + docs/cce/umn/cce_10_0883.html | 70 + docs/cce/umn/cce_10_0886.html | 17 + docs/cce/umn/cce_10_0896.html | 90 + docs/cce/umn/cce_10_0897.html | 105 + docs/cce/umn/cce_10_0904.html | 21 + docs/cce/umn/cce_10_0906.html | 
16 + docs/cce/umn/cce_10_0907.html | 23 + docs/cce/umn/cce_10_0908.html | 23 + docs/cce/umn/cce_10_0909.html | 19 + docs/cce/umn/cce_10_0910.html | 19 + docs/cce/umn/cce_10_0911.html | 21 + docs/cce/umn/cce_bestpractice_00004.html | 20 +- docs/cce/umn/cce_bestpractice_0003.html | 2 +- docs/cce/umn/cce_bestpractice_00035.html | 29 +- docs/cce/umn/cce_bestpractice_0004.html | 2 +- docs/cce/umn/cce_bestpractice_0006.html | 8 +- docs/cce/umn/cce_bestpractice_0009.html | 2 +- docs/cce/umn/cce_bestpractice_0010.html | 4 +- docs/cce/umn/cce_bestpractice_00162.html | 38 +- docs/cce/umn/cce_bestpractice_00198.html | 107 +- docs/cce/umn/cce_bestpractice_00199.html | 6 +- docs/cce/umn/cce_bestpractice_00221.html | 18 +- docs/cce/umn/cce_bestpractice_00226.html | 4 +- docs/cce/umn/cce_bestpractice_00228.html | 2 +- docs/cce/umn/cce_bestpractice_00231.html | 6 +- docs/cce/umn/cce_bestpractice_0024.html | 2 +- docs/cce/umn/cce_bestpractice_00253.html | 48 +- docs/cce/umn/cce_bestpractice_00253_0.html | 48 +- docs/cce/umn/cce_bestpractice_00254.html | 79 +- docs/cce/umn/cce_bestpractice_00281.html | 80 +- docs/cce/umn/cce_bestpractice_00282.html | 12 +- docs/cce/umn/cce_bestpractice_00284.html | 6 +- docs/cce/umn/cce_bestpractice_0050.html | 6 +- docs/cce/umn/cce_bestpractice_0051.html | 4 +- docs/cce/umn/cce_bestpractice_0052.html | 4 + docs/cce/umn/cce_bestpractice_0053.html | 8 +- docs/cce/umn/cce_bestpractice_0107.html | 4 +- docs/cce/umn/cce_bestpractice_0307.html | 6 +- docs/cce/umn/cce_bestpractice_0312.html | 2 +- docs/cce/umn/cce_bestpractice_0313.html | 2 +- docs/cce/umn/cce_bestpractice_0315.html | 8 +- docs/cce/umn/cce_bestpractice_0317.html | 4 +- docs/cce/umn/cce_bestpractice_0318.html | 2 +- docs/cce/umn/cce_bestpractice_0319.html | 2 +- docs/cce/umn/cce_bestpractice_0320.html | 2 +- docs/cce/umn/cce_bestpractice_0323.html | 2 +- docs/cce/umn/cce_bestpractice_0324.html | 26 +- docs/cce/umn/cce_bestpractice_0325.html | 2 +- docs/cce/umn/cce_bestpractice_0346.html | 23 + docs/cce/umn/cce_bestpractice_0347.html | 23 + docs/cce/umn/cce_bestpractice_0348.html | 18 + docs/cce/umn/cce_bestpractice_0349.html | 13 + docs/cce/umn/cce_bestpractice_0350.html | 11 + docs/cce/umn/cce_bestpractice_0352.html | 13 + docs/cce/umn/cce_bestpractice_0353.html | 12 + docs/cce/umn/cce_bestpractice_0354.html | 19 + docs/cce/umn/cce_bestpractice_0355.html | 11 + docs/cce/umn/cce_bestpractice_0356.html | 31 + docs/cce/umn/cce_bestpractice_0357.html | 137 + docs/cce/umn/cce_bestpractice_10001.html | 6 +- docs/cce/umn/cce_bestpractice_10002.html | 6 +- docs/cce/umn/cce_bestpractice_10006.html | 20 + docs/cce/umn/cce_bestpractice_10008.html | 2 +- docs/cce/umn/cce_bestpractice_10009.html | 14 +- docs/cce/umn/cce_bestpractice_10010.html | 2 +- docs/cce/umn/cce_bestpractice_10012.html | 66 +- docs/cce/umn/cce_bestpractice_10016.html | 90 +- docs/cce/umn/cce_bestpractice_10017.html | 8 +- docs/cce/umn/cce_bestpractice_10020.html | 14 +- docs/cce/umn/cce_bestpractice_10024.html | 95 + docs/cce/umn/cce_bestpractice_10027.html | 29 +- docs/cce/umn/cce_bestpractice_10041.html | 38 + docs/cce/umn/cce_bulletin_0000.html | 2 +- docs/cce/umn/cce_bulletin_0003.html | 29 - docs/cce/umn/cce_bulletin_0026.html | 6 +- docs/cce/umn/cce_bulletin_0027.html | 2 +- docs/cce/umn/cce_bulletin_0033.html | 168 + docs/cce/umn/cce_bulletin_0058.html | 66 +- docs/cce/umn/cce_bulletin_0059.html | 98 +- docs/cce/umn/cce_bulletin_0061.html | 86 +- docs/cce/umn/cce_bulletin_0068.html | 52 +- docs/cce/umn/cce_bulletin_0089.html | 46 + 
docs/cce/umn/cce_faq_0000.html | 2 - docs/cce/umn/cce_faq_00001.html | 2 +- docs/cce/umn/cce_faq_00006.html | 4 +- docs/cce/umn/cce_faq_00012.html | 2 +- docs/cce/umn/cce_faq_00015.html | 68 +- docs/cce/umn/cce_faq_00018.html | 80 +- docs/cce/umn/cce_faq_00020.html | 2 +- docs/cce/umn/cce_faq_00037.html | 4 +- docs/cce/umn/cce_faq_00039.html | 6 +- docs/cce/umn/cce_faq_00040.html | 2 +- docs/cce/umn/cce_faq_00041.html | 2 +- docs/cce/umn/cce_faq_00089.html | 2 +- docs/cce/umn/cce_faq_00097.html | 6 +- docs/cce/umn/cce_faq_00098.html | 68 +- docs/cce/umn/cce_faq_00107.html | 4 +- docs/cce/umn/cce_faq_00120.html | 10 +- docs/cce/umn/cce_faq_00134.html | 18 +- docs/cce/umn/cce_faq_00140.html | 4 +- docs/cce/umn/cce_faq_00141.html | 2 + docs/cce/umn/cce_faq_00146.html | 2 +- docs/cce/umn/cce_faq_00154.html | 2 +- docs/cce/umn/cce_faq_00163.html | 4 + docs/cce/umn/cce_faq_00186.html | 2 - docs/cce/umn/cce_faq_00192.html | 2 +- docs/cce/umn/cce_faq_00195.html | 4 +- docs/cce/umn/cce_faq_00197.html | 6 +- docs/cce/umn/cce_faq_00200.html | 2 +- docs/cce/umn/cce_faq_00202.html | 2 +- docs/cce/umn/cce_faq_00203.html | 4 +- docs/cce/umn/cce_faq_00204.html | 2 +- docs/cce/umn/cce_faq_00209.html | 2 +- docs/cce/umn/cce_faq_00215.html | 2 +- docs/cce/umn/cce_faq_00218.html | 2 +- docs/cce/umn/cce_faq_00224.html | 4 +- docs/cce/umn/cce_faq_00260.html | 3 +- docs/cce/umn/cce_faq_00262.html | 2 +- docs/cce/umn/cce_faq_00263.html | 2 +- docs/cce/umn/cce_faq_00264.html | 2 + docs/cce/umn/cce_faq_00265.html | 142 +- docs/cce/umn/cce_faq_00266.html | 2 +- docs/cce/umn/cce_faq_00275.html | 19 + docs/cce/umn/cce_faq_00279.html | 2 +- docs/cce/umn/cce_faq_00284.html | 2 + docs/cce/umn/cce_faq_00286.html | 4 +- docs/cce/umn/cce_faq_00292.html | 21 - docs/cce/umn/cce_faq_00293.html | 2 +- docs/cce/umn/cce_faq_00296.html | 4 +- docs/cce/umn/cce_faq_00307.html | 68 +- docs/cce/umn/cce_faq_00314.html | 2 +- docs/cce/umn/cce_faq_00319.html | 4 +- docs/cce/umn/cce_faq_00322.html | 12 +- docs/cce/umn/cce_faq_00325.html | 2 +- docs/cce/umn/cce_faq_00326.html | 2 +- docs/cce/umn/cce_faq_00392.html | 19 + docs/cce/umn/cce_faq_00394.html | 2 +- docs/cce/umn/cce_faq_00406.html | 2 +- docs/cce/umn/cce_faq_00409.html | 2 +- docs/cce/umn/cce_faq_00417.html | 2 +- docs/cce/umn/cce_faq_00429.html | 4 +- docs/cce/umn/cce_faq_00432.html | 15 + docs/cce/umn/cce_faq_00440.html | 60 + docs/cce/umn/cce_faq_00443.html | 64 + docs/cce/umn/cce_productdesc_0000.html | 2 +- docs/cce/umn/cce_productdesc_0001.html | 40 +- docs/cce/umn/cce_productdesc_0002.html | 4 +- docs/cce/umn/cce_productdesc_0003.html | 16 +- docs/cce/umn/cce_productdesc_0005.html | 140 +- docs/cce/umn/cce_productdesc_0007.html | 8 +- docs/cce/umn/cce_productdesc_0008.html | 2 +- docs/cce/umn/cce_productdesc_0012.html | 4 +- docs/cce/umn/cce_productdesc_0017.html | 2 +- docs/cce/umn/cce_productdesc_0018.html | 4 +- docs/cce/umn/cce_productdesc_0021.html | 6 +- docs/cce/umn/cce_qs_0008.html | 4 +- docs/cce/umn/en-us_image_0000001460905374.png | Bin 5102 -> 0 bytes docs/cce/umn/en-us_image_0000001461224886.png | Bin 17878 -> 0 bytes docs/cce/umn/en-us_image_0000001697958210.png | Bin 143802 -> 0 bytes docs/cce/umn/en-us_image_0000001702936020.png | Bin 169693 -> 0 bytes docs/cce/umn/en-us_image_0000001797870921.png | Bin 22353 -> 0 bytes docs/cce/umn/en-us_image_0000001851584036.png | Bin 0 -> 48799 bytes ...5.png => en-us_image_0000001851584048.png} | Bin ...8.png => en-us_image_0000001851742760.png} | Bin docs/cce/umn/en-us_image_0000001851742792.png | Bin 0 -> 26677 bytes 
docs/cce/umn/en-us_image_0000001851744368.png | Bin 111240 -> 0 bytes docs/cce/umn/en-us_image_0000001851745496.png | Bin 1065 -> 0 bytes docs/cce/umn/en-us_image_0000001851745612.png | Bin 284 -> 0 bytes docs/cce/umn/en-us_image_0000001851746504.png | Bin 158 -> 0 bytes docs/cce/umn/en-us_image_0000001851746508.png | Bin 1004 -> 0 bytes ...1.png => en-us_image_0000001897903417.png} | Bin ...3.png => en-us_image_0000001897903549.png} | Bin docs/cce/umn/en-us_image_0000001897906145.png | Bin 44467 -> 0 bytes docs/cce/umn/en-us_image_0000001897907185.png | Bin 1004 -> 0 bytes ...2.png => en-us_image_0000001898022949.png} | Bin ...2.png => en-us_image_0000001898023049.png} | Bin docs/cce/umn/en-us_image_0000001898023829.png | Bin 391 -> 0 bytes docs/cce/umn/en-us_image_0000001898024541.png | Bin 113232 -> 0 bytes docs/cce/umn/en-us_image_0000001898024545.png | Bin 58817 -> 0 bytes docs/cce/umn/en-us_image_0000001898025573.png | Bin 19591 -> 0 bytes docs/cce/umn/en-us_image_0000001911555737.png | Bin 173094 -> 0 bytes ...0.png => en-us_image_0000001950314864.png} | Bin ...0.png => en-us_image_0000001950314996.png} | Bin ...6.png => en-us_image_0000001950315008.png} | Bin ...3.png => en-us_image_0000001950315012.png} | Bin ...8.png => en-us_image_0000001950315060.png} | Bin ...9.png => en-us_image_0000001950315068.png} | Bin ...1.png => en-us_image_0000001950315204.png} | Bin ...6.png => en-us_image_0000001950315240.png} | Bin ...2.png => en-us_image_0000001950315260.png} | Bin ...5.png => en-us_image_0000001950315436.png} | Bin ...4.png => en-us_image_0000001950315440.png} | Bin ...2.png => en-us_image_0000001950315444.png} | Bin ...6.png => en-us_image_0000001950315452.png} | Bin ...6.png => en-us_image_0000001950315456.png} | Bin ...3.png => en-us_image_0000001950315464.png} | Bin ...2.png => en-us_image_0000001950315472.png} | Bin ...6.png => en-us_image_0000001950315604.png} | Bin ...2.jpg => en-us_image_0000001950315632.jpg} | Bin ...2.png => en-us_image_0000001950315720.png} | Bin ...3.png => en-us_image_0000001950315760.png} | Bin ...5.png => en-us_image_0000001950315780.png} | Bin ...2.png => en-us_image_0000001950315800.png} | Bin docs/cce/umn/en-us_image_0000001950315828.png | Bin 0 -> 82316 bytes ...6.png => en-us_image_0000001950315836.png} | Bin ...3.png => en-us_image_0000001950315840.png} | Bin ...8.png => en-us_image_0000001950315844.png} | Bin ...2.png => en-us_image_0000001950315848.png} | Bin ...6.png => en-us_image_0000001950315856.png} | Bin ...3.png => en-us_image_0000001950315912.png} | Bin ...2.png => en-us_image_0000001950315916.png} | Bin ...9.png => en-us_image_0000001950315932.png} | Bin ...3.png => en-us_image_0000001950315972.png} | Bin ...0.png => en-us_image_0000001950315976.png} | Bin ...0.png => en-us_image_0000001950316012.png} | Bin ...8.png => en-us_image_0000001950316024.png} | Bin ...3.png => en-us_image_0000001950316032.png} | Bin ...8.png => en-us_image_0000001950316040.png} | Bin ...7.png => en-us_image_0000001950316048.png} | Bin ...5.png => en-us_image_0000001950316056.png} | Bin ...9.png => en-us_image_0000001950316060.png} | Bin ...8.png => en-us_image_0000001950316068.png} | Bin ...4.png => en-us_image_0000001950316192.png} | Bin ...0.png => en-us_image_0000001950316248.png} | Bin docs/cce/umn/en-us_image_0000001950316540.png | Bin 0 -> 54287 bytes ...0.png => en-us_image_0000001950316596.png} | Bin ...6.png => en-us_image_0000001950316608.png} | Bin ...7.png => en-us_image_0000001950316676.png} | Bin ...1.png => en-us_image_0000001950316688.png} 
| Bin ...7.png => en-us_image_0000001950316840.png} | Bin ...0.png => en-us_image_0000001950316852.png} | Bin ...9.png => en-us_image_0000001950316872.png} | Bin ...2.png => en-us_image_0000001950316876.png} | Bin ...8.png => en-us_image_0000001950316896.png} | Bin ...5.png => en-us_image_0000001950317032.png} | Bin ...0.png => en-us_image_0000001950317048.png} | Bin ...3.png => en-us_image_0000001950317060.png} | Bin ...7.png => en-us_image_0000001950317068.png} | Bin ...0.png => en-us_image_0000001950317072.png} | Bin ...1.png => en-us_image_0000001950317076.png} | Bin ...2.png => en-us_image_0000001950317180.png} | Bin ...8.png => en-us_image_0000001950317192.png} | Bin ...4.png => en-us_image_0000001950317212.png} | Bin docs/cce/umn/en-us_image_0000001950317216.png | Bin 0 -> 15145 bytes ...7.png => en-us_image_0000001950317236.png} | Bin ...8.png => en-us_image_0000001950317252.png} | Bin ...1.png => en-us_image_0000001950317256.png} | Bin ...2.png => en-us_image_0000001950317280.png} | Bin ...4.png => en-us_image_0000001950317284.png} | Bin ...8.png => en-us_image_0000001950317344.png} | Bin ...4.png => en-us_image_0000001950317352.png} | Bin ...7.png => en-us_image_0000001950317380.png} | Bin ...0.png => en-us_image_0000001950317392.png} | Bin ...1.png => en-us_image_0000001950317396.png} | Bin ...6.png => en-us_image_0000001950317428.png} | Bin ...9.png => en-us_image_0000001950317436.png} | Bin ...7.png => en-us_image_0000001950317472.png} | Bin ...3.gif => en-us_image_0000001981274401.gif} | Bin ...0.gif => en-us_image_0000001981274409.gif} | Bin ...3.png => en-us_image_0000001981274417.png} | Bin ...4.png => en-us_image_0000001981274513.png} | Bin ...8.png => en-us_image_0000001981274533.png} | Bin ...8.png => en-us_image_0000001981274553.png} | Bin ...0.png => en-us_image_0000001981274593.png} | Bin ...6.png => en-us_image_0000001981274597.png} | Bin ...1.png => en-us_image_0000001981274613.png} | Bin ...6.png => en-us_image_0000001981274745.png} | Bin ...4.png => en-us_image_0000001981274973.png} | Bin ...7.png => en-us_image_0000001981274977.png} | Bin docs/cce/umn/en-us_image_0000001981274989.png | Bin 0 -> 177659 bytes ...4.png => en-us_image_0000001981275001.png} | Bin ...1.png => en-us_image_0000001981275029.png} | Bin ...0.png => en-us_image_0000001981275053.png} | Bin ...8.png => en-us_image_0000001981275113.png} | Bin ...4.png => en-us_image_0000001981275141.png} | Bin ...4.jpg => en-us_image_0000001981275157.jpg} | Bin ...9.jpg => en-us_image_0000001981275177.jpg} | Bin ...4.png => en-us_image_0000001981275205.png} | Bin ...7.png => en-us_image_0000001981275249.png} | Bin ...2.png => en-us_image_0000001981275269.png} | Bin ...8.png => en-us_image_0000001981275337.png} | Bin ...5.png => en-us_image_0000001981275361.png} | Bin ...5.png => en-us_image_0000001981275365.png} | Bin ...2.jpg => en-us_image_0000001981275417.jpg} | Bin docs/cce/umn/en-us_image_0000001981275425.png | Bin 0 -> 32091 bytes ...8.png => en-us_image_0000001981275437.png} | Bin ...9.png => en-us_image_0000001981275445.png} | Bin ...9.png => en-us_image_0000001981275449.png} | Bin ...2.png => en-us_image_0000001981275457.png} | Bin ...0.png => en-us_image_0000001981275489.png} | Bin ...3.png => en-us_image_0000001981275513.png} | Bin ...8.png => en-us_image_0000001981275577.png} | Bin ...1.png => en-us_image_0000001981275593.png} | Bin ...9.png => en-us_image_0000001981275621.png} | Bin ...7.png => en-us_image_0000001981275677.png} | Bin ...1.png => en-us_image_0000001981276081.png} | Bin ...6.png => 
en-us_image_0000001981276109.png} | Bin ...3.png => en-us_image_0000001981276145.png} | Bin ...1.png => en-us_image_0000001981276157.png} | Bin ...4.png => en-us_image_0000001981276161.png} | Bin ...6.png => en-us_image_0000001981276233.png} | Bin ...6.png => en-us_image_0000001981276257.png} | Bin ...0.png => en-us_image_0000001981276261.png} | Bin ...0.png => en-us_image_0000001981276265.png} | Bin ...1.png => en-us_image_0000001981276269.png} | Bin ...4.png => en-us_image_0000001981276281.png} | Bin ...1.png => en-us_image_0000001981276293.png} | Bin ...0.png => en-us_image_0000001981276309.png} | Bin ...7.png => en-us_image_0000001981276393.png} | Bin ...7.png => en-us_image_0000001981276397.png} | Bin ...9.png => en-us_image_0000001981276405.png} | Bin docs/cce/umn/en-us_image_0000001981276409.png | Bin 0 -> 40629 bytes docs/cce/umn/en-us_image_0000001981276429.png | Bin 0 -> 56985 bytes ...6.png => en-us_image_0000001981276441.png} | Bin ...3.png => en-us_image_0000001981276581.png} | Bin ...9.png => en-us_image_0000001981276601.png} | Bin ...8.png => en-us_image_0000001981276605.png} | Bin ...7.png => en-us_image_0000001981276629.png} | Bin ...4.png => en-us_image_0000001981276633.png} | Bin ...4.png => en-us_image_0000001981276729.png} | Bin ...2.png => en-us_image_0000001981276741.png} | Bin ...5.png => en-us_image_0000001981276749.png} | Bin ...6.png => en-us_image_0000001981276785.png} | Bin ...4.png => en-us_image_0000001981276797.png} | Bin ...5.png => en-us_image_0000001981276817.png} | Bin ...3.png => en-us_image_0000001981276821.png} | Bin ...5.png => en-us_image_0000001981276877.png} | Bin ...6.png => en-us_image_0000001981276889.png} | Bin ...4.png => en-us_image_0000001981276941.png} | Bin ...6.png => en-us_image_0000001981276965.png} | Bin ...0.png => en-us_image_0000001981276973.png} | Bin ...8.png => en-us_image_0000001981276977.png} | Bin ...4.png => en-us_image_0000001981277009.png} | Bin ...4.png => en-us_image_0000001981277029.png} | Bin ...1.png => en-us_image_0000001981277037.png} | Bin ...0.gif => en-us_image_0000001981434241.gif} | Bin ...6.png => en-us_image_0000001981434361.png} | Bin ...8.png => en-us_image_0000001981434369.png} | Bin ...1.png => en-us_image_0000001981434377.png} | Bin ...0.png => en-us_image_0000001981434389.png} | Bin ...9.png => en-us_image_0000001981434421.png} | Bin ...4.png => en-us_image_0000001981434437.png} | Bin ...0.png => en-us_image_0000001981434457.png} | Bin ...3.png => en-us_image_0000001981434473.png} | Bin ...4.png => en-us_image_0000001981434585.png} | Bin ...0.png => en-us_image_0000001981434645.png} | Bin ...3.png => en-us_image_0000001981434829.png} | Bin ...1.png => en-us_image_0000001981434893.png} | Bin ...0.png => en-us_image_0000001981434997.png} | Bin ...4.png => en-us_image_0000001981435093.png} | Bin ...6.png => en-us_image_0000001981435165.png} | Bin ...9.png => en-us_image_0000001981435213.png} | Bin ...0.png => en-us_image_0000001981435221.png} | Bin ...9.png => en-us_image_0000001981435225.png} | Bin ...4.png => en-us_image_0000001981435269.png} | Bin ...5.png => en-us_image_0000001981435329.png} | Bin ...9.png => en-us_image_0000001981435425.png} | Bin ...5.png => en-us_image_0000001981435441.png} | Bin ...6.png => en-us_image_0000001981435457.png} | Bin ...4.png => en-us_image_0000001981435969.png} | Bin ...0.png => en-us_image_0000001981436009.png} | Bin ...5.png => en-us_image_0000001981436029.png} | Bin ...4.png => en-us_image_0000001981436085.png} | Bin ...0.png => en-us_image_0000001981436089.png} | 
Bin ...6.png => en-us_image_0000001981436113.png} | Bin docs/cce/umn/en-us_image_0000001981436137.png | Bin 0 -> 458 bytes ...3.png => en-us_image_0000001981436181.png} | Bin ...4.png => en-us_image_0000001981436201.png} | Bin ...0.png => en-us_image_0000001981436237.png} | Bin docs/cce/umn/en-us_image_0000001981436241.png | Bin 0 -> 19903 bytes ...8.png => en-us_image_0000001981436293.png} | Bin ...9.png => en-us_image_0000001981436297.png} | Bin ...8.png => en-us_image_0000001981436301.png} | Bin ...5.jpg => en-us_image_0000001981436333.jpg} | Bin ...3.png => en-us_image_0000001981436361.png} | Bin ...1.png => en-us_image_0000001981436409.png} | Bin ...1.png => en-us_image_0000001981436441.png} | Bin ...3.png => en-us_image_0000001981436449.png} | Bin ...5.png => en-us_image_0000001981436461.png} | Bin ...5.png => en-us_image_0000001981436605.png} | Bin ...8.png => en-us_image_0000001981436609.png} | Bin ...1.png => en-us_image_0000001981436641.png} | Bin ...4.png => en-us_image_0000001981436649.png} | Bin ...2.png => en-us_image_0000001981436653.png} | Bin ...5.png => en-us_image_0000001981436657.png} | Bin ...9.png => en-us_image_0000001981436665.png} | Bin ...3.png => en-us_image_0000001981436701.png} | Bin ...4.png => en-us_image_0000001981436769.png} | Bin ...9.png => en-us_image_0000001981436773.png} | Bin ...5.png => en-us_image_0000001981436829.png} | Bin ...9.png => en-us_image_0000001981436837.png} | Bin ...6.png => en-us_image_0000001981436845.png} | Bin ...4.png => en-us_image_0000001981436873.png} | Bin ...4.png => en-us_image_0000001981436885.png} | Bin ...5.png => en-us_image_0000001981436901.png} | Bin 662 files changed, 19166 insertions(+), 11583 deletions(-) create mode 100644 docs/cce/umn/cce_10_0373.html create mode 100644 docs/cce/umn/cce_10_0406.html create mode 100644 docs/cce/umn/cce_10_0426.html create mode 100644 docs/cce/umn/cce_10_0649.html create mode 100644 docs/cce/umn/cce_10_0651.html create mode 100644 docs/cce/umn/cce_10_0658.html delete mode 100644 docs/cce/umn/cce_10_0679.html create mode 100644 docs/cce/umn/cce_10_0734.html create mode 100644 docs/cce/umn/cce_10_0789.html create mode 100644 docs/cce/umn/cce_10_0813.html create mode 100644 docs/cce/umn/cce_10_0831.html create mode 100644 docs/cce/umn/cce_10_0832.html create mode 100644 docs/cce/umn/cce_10_0839.html create mode 100644 docs/cce/umn/cce_10_0841.html create mode 100644 docs/cce/umn/cce_10_0842.html create mode 100644 docs/cce/umn/cce_10_0859.html create mode 100644 docs/cce/umn/cce_10_0860.html create mode 100644 docs/cce/umn/cce_10_0864.html create mode 100644 docs/cce/umn/cce_10_0883.html create mode 100644 docs/cce/umn/cce_10_0886.html create mode 100644 docs/cce/umn/cce_10_0896.html create mode 100644 docs/cce/umn/cce_10_0897.html create mode 100644 docs/cce/umn/cce_10_0904.html create mode 100644 docs/cce/umn/cce_10_0906.html create mode 100644 docs/cce/umn/cce_10_0907.html create mode 100644 docs/cce/umn/cce_10_0908.html create mode 100644 docs/cce/umn/cce_10_0909.html create mode 100644 docs/cce/umn/cce_10_0910.html create mode 100644 docs/cce/umn/cce_10_0911.html create mode 100644 docs/cce/umn/cce_bestpractice_0346.html create mode 100644 docs/cce/umn/cce_bestpractice_0347.html create mode 100644 docs/cce/umn/cce_bestpractice_0348.html create mode 100644 docs/cce/umn/cce_bestpractice_0349.html create mode 100644 docs/cce/umn/cce_bestpractice_0350.html create mode 100644 docs/cce/umn/cce_bestpractice_0352.html create mode 100644 docs/cce/umn/cce_bestpractice_0353.html create mode 
100644 docs/cce/umn/cce_bestpractice_0354.html create mode 100644 docs/cce/umn/cce_bestpractice_0355.html create mode 100644 docs/cce/umn/cce_bestpractice_0356.html create mode 100644 docs/cce/umn/cce_bestpractice_0357.html create mode 100644 docs/cce/umn/cce_bestpractice_10006.html create mode 100644 docs/cce/umn/cce_bestpractice_10024.html create mode 100644 docs/cce/umn/cce_bestpractice_10041.html delete mode 100644 docs/cce/umn/cce_bulletin_0003.html create mode 100644 docs/cce/umn/cce_bulletin_0033.html create mode 100644 docs/cce/umn/cce_bulletin_0089.html create mode 100644 docs/cce/umn/cce_faq_00275.html delete mode 100644 docs/cce/umn/cce_faq_00292.html create mode 100644 docs/cce/umn/cce_faq_00392.html create mode 100644 docs/cce/umn/cce_faq_00440.html create mode 100644 docs/cce/umn/cce_faq_00443.html delete mode 100644 docs/cce/umn/en-us_image_0000001460905374.png delete mode 100644 docs/cce/umn/en-us_image_0000001461224886.png delete mode 100644 docs/cce/umn/en-us_image_0000001697958210.png delete mode 100644 docs/cce/umn/en-us_image_0000001702936020.png delete mode 100644 docs/cce/umn/en-us_image_0000001797870921.png create mode 100644 docs/cce/umn/en-us_image_0000001851584036.png rename docs/cce/umn/{en-us_image_0000001725939365.png => en-us_image_0000001851584048.png} (100%) rename docs/cce/umn/{en-us_image_0000001678179938.png => en-us_image_0000001851742760.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001851742792.png delete mode 100644 docs/cce/umn/en-us_image_0000001851744368.png delete mode 100644 docs/cce/umn/en-us_image_0000001851745496.png delete mode 100644 docs/cce/umn/en-us_image_0000001851745612.png delete mode 100644 docs/cce/umn/en-us_image_0000001851746504.png delete mode 100644 docs/cce/umn/en-us_image_0000001851746508.png rename docs/cce/umn/{en-us_image_0000001725939361.png => en-us_image_0000001897903417.png} (100%) rename docs/cce/umn/{en-us_image_0000001726059253.png => en-us_image_0000001897903549.png} (100%) delete mode 100644 docs/cce/umn/en-us_image_0000001897906145.png delete mode 100644 docs/cce/umn/en-us_image_0000001897907185.png rename docs/cce/umn/{en-us_image_0000001678020222.png => en-us_image_0000001898022949.png} (100%) rename docs/cce/umn/{en-us_image_0000001678020242.png => en-us_image_0000001898023049.png} (100%) delete mode 100644 docs/cce/umn/en-us_image_0000001898023829.png delete mode 100644 docs/cce/umn/en-us_image_0000001898024541.png delete mode 100644 docs/cce/umn/en-us_image_0000001898024545.png delete mode 100644 docs/cce/umn/en-us_image_0000001898025573.png delete mode 100644 docs/cce/umn/en-us_image_0000001911555737.png rename docs/cce/umn/{en-us_image_0000001851743660.png => en-us_image_0000001950314864.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743900.png => en-us_image_0000001950314996.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743916.png => en-us_image_0000001950315008.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024113.png => en-us_image_0000001950315012.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585068.png => en-us_image_0000001950315060.png} (100%) rename docs/cce/umn/{en-us_image_0000001898023989.png => en-us_image_0000001950315068.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024121.png => en-us_image_0000001950315204.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745196.png => en-us_image_0000001950315240.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585272.png => en-us_image_0000001950315260.png} (100%) rename 
docs/cce/umn/{en-us_image_0000001898024285.png => en-us_image_0000001950315436.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585384.png => en-us_image_0000001950315440.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744112.png => en-us_image_0000001950315444.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744176.png => en-us_image_0000001950315452.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585416.png => en-us_image_0000001950315456.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024333.png => en-us_image_0000001950315464.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585432.png => en-us_image_0000001950315472.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585496.png => en-us_image_0000001950315604.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743732.jpg => en-us_image_0000001950315632.jpg} (100%) rename docs/cce/umn/{en-us_image_0000001851585032.png => en-us_image_0000001950315720.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904433.png => en-us_image_0000001950315760.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905125.png => en-us_image_0000001950315780.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743812.png => en-us_image_0000001950315800.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001950315828.png rename docs/cce/umn/{en-us_image_0000001851744516.png => en-us_image_0000001950315836.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905033.png => en-us_image_0000001950315840.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585668.png => en-us_image_0000001950315844.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585672.png => en-us_image_0000001950315848.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743836.png => en-us_image_0000001950315856.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904393.png => en-us_image_0000001950315912.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744412.png => en-us_image_0000001950315916.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905209.png => en-us_image_0000001950315932.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024613.png => en-us_image_0000001950315972.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743880.png => en-us_image_0000001950315976.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743860.png => en-us_image_0000001950316012.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743848.png => en-us_image_0000001950316024.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024033.png => en-us_image_0000001950316032.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743828.png => en-us_image_0000001950316040.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904517.png => en-us_image_0000001950316048.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024145.png => en-us_image_0000001950316056.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024249.png => en-us_image_0000001950316060.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744088.png => en-us_image_0000001950316068.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744004.png => en-us_image_0000001950316192.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585280.png => en-us_image_0000001950316248.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001950316540.png rename docs/cce/umn/{en-us_image_0000001851586420.png => en-us_image_0000001950316596.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745096.png => en-us_image_0000001950316608.png} (100%) 
rename docs/cce/umn/{en-us_image_0000001898024157.png => en-us_image_0000001950316676.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025401.png => en-us_image_0000001950316688.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906057.png => en-us_image_0000001950316840.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745400.png => en-us_image_0000001950316852.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906149.png => en-us_image_0000001950316872.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745492.png => en-us_image_0000001950316876.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586708.png => en-us_image_0000001950316896.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906125.png => en-us_image_0000001950317032.png} (100%) rename docs/cce/umn/{en-us_image_0000001863378970.png => en-us_image_0000001950317048.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906233.png => en-us_image_0000001950317060.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906237.png => en-us_image_0000001950317068.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745580.png => en-us_image_0000001950317072.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025681.png => en-us_image_0000001950317076.png} (100%) rename docs/cce/umn/{en-us_image_0000001867802022.png => en-us_image_0000001950317180.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586988.png => en-us_image_0000001950317192.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745504.png => en-us_image_0000001950317212.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001950317216.png rename docs/cce/umn/{en-us_image_0000001898026057.png => en-us_image_0000001950317236.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745808.png => en-us_image_0000001950317252.png} (100%) rename docs/cce/umn/{en-us_image_0000001898026001.png => en-us_image_0000001950317256.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745632.png => en-us_image_0000001950317280.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587144.png => en-us_image_0000001950317284.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587788.png => en-us_image_0000001950317344.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587784.png => en-us_image_0000001950317352.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906717.png => en-us_image_0000001950317380.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587340.png => en-us_image_0000001950317392.png} (100%) rename docs/cce/umn/{en-us_image_0000001898026021.png => en-us_image_0000001950317396.png} (100%) rename docs/cce/umn/{en-us_image_0000001851746056.png => en-us_image_0000001950317428.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906729.png => en-us_image_0000001950317436.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906417.png => en-us_image_0000001950317472.png} (100%) rename docs/cce/umn/{en-us_image_0000001898023913.gif => en-us_image_0000001981274401.gif} (100%) rename docs/cce/umn/{en-us_image_0000001851743740.gif => en-us_image_0000001981274409.gif} (100%) rename docs/cce/umn/{en-us_image_0000001898023873.png => en-us_image_0000001981274417.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743884.png => en-us_image_0000001981274513.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585188.png => en-us_image_0000001981274533.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743708.png => en-us_image_0000001981274553.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585080.png => 
en-us_image_0000001981274593.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743816.png => en-us_image_0000001981274597.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024061.png => en-us_image_0000001981274613.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743956.png => en-us_image_0000001981274745.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744104.png => en-us_image_0000001981274973.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904777.png => en-us_image_0000001981274977.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001981274989.png rename docs/cce/umn/{en-us_image_0000001851585464.png => en-us_image_0000001981275001.png} (100%) rename docs/cce/umn/{en-us_image_0000001898023841.png => en-us_image_0000001981275029.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585440.png => en-us_image_0000001981275053.png} (100%) rename docs/cce/umn/{en-us_image_0000001851584908.png => en-us_image_0000001981275113.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744224.png => en-us_image_0000001981275141.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585004.jpg => en-us_image_0000001981275157.jpg} (100%) rename docs/cce/umn/{en-us_image_0000001897904309.jpg => en-us_image_0000001981275177.jpg} (100%) rename docs/cce/umn/{en-us_image_0000001851743664.png => en-us_image_0000001981275205.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024017.png => en-us_image_0000001981275249.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585112.png => en-us_image_0000001981275269.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585148.png => en-us_image_0000001981275337.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905025.png => en-us_image_0000001981275361.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024565.png => en-us_image_0000001981275365.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585232.jpg => en-us_image_0000001981275417.jpg} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001981275425.png rename docs/cce/umn/{en-us_image_0000001851585808.png => en-us_image_0000001981275437.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024049.png => en-us_image_0000001981275445.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905069.png => en-us_image_0000001981275449.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585812.png => en-us_image_0000001981275457.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585700.png => en-us_image_0000001981275489.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905113.png => en-us_image_0000001981275513.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743868.png => en-us_image_0000001981275577.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024041.png => en-us_image_0000001981275593.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024289.png => en-us_image_0000001981275621.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024237.png => en-us_image_0000001981275677.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025121.png => en-us_image_0000001981276081.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586336.png => en-us_image_0000001981276109.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905733.png => en-us_image_0000001981276145.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905601.png => en-us_image_0000001981276157.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744964.png => en-us_image_0000001981276161.png} (100%) rename 
docs/cce/umn/{en-us_image_0000001851586476.png => en-us_image_0000001981276233.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745256.png => en-us_image_0000001981276257.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586520.png => en-us_image_0000001981276261.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745260.png => en-us_image_0000001981276265.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025441.png => en-us_image_0000001981276269.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586264.png => en-us_image_0000001981276281.png} (100%) rename docs/cce/umn/{en-us_image_0000001909237081.png => en-us_image_0000001981276293.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586680.png => en-us_image_0000001981276309.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906097.png => en-us_image_0000001981276393.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025577.png => en-us_image_0000001981276397.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025669.png => en-us_image_0000001981276405.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001981276409.png create mode 100644 docs/cce/umn/en-us_image_0000001981276429.png rename docs/cce/umn/{en-us_image_0000001851745436.png => en-us_image_0000001981276441.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906253.png => en-us_image_0000001981276581.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025749.png => en-us_image_0000001981276601.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745568.png => en-us_image_0000001981276605.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906037.png => en-us_image_0000001981276629.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745844.png => en-us_image_0000001981276633.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745864.png => en-us_image_0000001981276729.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586992.png => en-us_image_0000001981276741.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906185.png => en-us_image_0000001981276749.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587156.png => en-us_image_0000001981276785.png} (100%) rename docs/cce/umn/{en-us_image_0000001851746444.png => en-us_image_0000001981276797.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906365.png => en-us_image_0000001981276817.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906293.png => en-us_image_0000001981276821.png} (100%) rename docs/cce/umn/{en-us_image_0000001898026705.png => en-us_image_0000001981276877.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587796.png => en-us_image_0000001981276889.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587344.png => en-us_image_0000001981276941.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587336.png => en-us_image_0000001981276965.png} (100%) rename docs/cce/umn/{en-us_image_0000001851746060.png => en-us_image_0000001981276973.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587348.png => en-us_image_0000001981276977.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745744.png => en-us_image_0000001981277009.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587044.png => en-us_image_0000001981277029.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025961.png => en-us_image_0000001981277037.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585000.gif => en-us_image_0000001981434241.gif} (100%) rename docs/cce/umn/{en-us_image_0000001851585156.png => en-us_image_0000001981434361.png} (100%) 
rename docs/cce/umn/{en-us_image_0000001851585168.png => en-us_image_0000001981434369.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904561.png => en-us_image_0000001981434377.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585200.png => en-us_image_0000001981434389.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904609.png => en-us_image_0000001981434421.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585064.png => en-us_image_0000001981434437.png} (100%) rename docs/cce/umn/{en-us_image_0000001851743820.png => en-us_image_0000001981434457.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904533.png => en-us_image_0000001981434473.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585204.png => en-us_image_0000001981434585.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744000.png => en-us_image_0000001981434645.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904773.png => en-us_image_0000001981434829.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024381.png => en-us_image_0000001981434893.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585500.png => en-us_image_0000001981434997.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585104.png => en-us_image_0000001981435093.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585136.png => en-us_image_0000001981435165.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905169.png => en-us_image_0000001981435213.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744500.png => en-us_image_0000001981435221.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024709.png => en-us_image_0000001981435225.png} (100%) rename docs/cce/umn/{en-us_image_0000001851585684.png => en-us_image_0000001981435269.png} (100%) rename docs/cce/umn/{en-us_image_0000001898024625.png => en-us_image_0000001981435329.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904509.png => en-us_image_0000001981435425.png} (100%) rename docs/cce/umn/{en-us_image_0000001897904725.png => en-us_image_0000001981435441.png} (100%) rename docs/cce/umn/{en-us_image_0000001851744076.png => en-us_image_0000001981435457.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586224.png => en-us_image_0000001981435969.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586240.png => en-us_image_0000001981436009.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905605.png => en-us_image_0000001981436029.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745204.png => en-us_image_0000001981436085.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586460.png => en-us_image_0000001981436089.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586516.png => en-us_image_0000001981436113.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001981436137.png rename docs/cce/umn/{en-us_image_0000001898025173.png => en-us_image_0000001981436181.png} (100%) rename docs/cce/umn/{en-us_image_0000001852319184.png => en-us_image_0000001981436201.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745380.png => en-us_image_0000001981436237.png} (100%) create mode 100644 docs/cce/umn/en-us_image_0000001981436241.png rename docs/cce/umn/{en-us_image_0000001851586648.png => en-us_image_0000001981436293.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906049.png => en-us_image_0000001981436297.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586668.png => en-us_image_0000001981436301.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906165.jpg => en-us_image_0000001981436333.jpg} 
(100%) rename docs/cce/umn/{en-us_image_0000001897905793.png => en-us_image_0000001981436361.png} (100%) rename docs/cce/umn/{en-us_image_0000001897905921.png => en-us_image_0000001981436409.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025781.png => en-us_image_0000001981436441.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906213.png => en-us_image_0000001981436449.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906225.png => en-us_image_0000001981436461.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025705.png => en-us_image_0000001981436605.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745528.png => en-us_image_0000001981436609.png} (100%) rename docs/cce/umn/{en-us_image_0000001902829161.png => en-us_image_0000001981436641.png} (100%) rename docs/cce/umn/{en-us_image_0000001851586944.png => en-us_image_0000001981436649.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745852.png => en-us_image_0000001981436653.png} (100%) rename docs/cce/umn/{en-us_image_0000001898026645.png => en-us_image_0000001981436657.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025869.png => en-us_image_0000001981436665.png} (100%) rename docs/cce/umn/{en-us_image_0000001898026693.png => en-us_image_0000001981436701.png} (100%) rename docs/cce/umn/{en-us_image_0000001851587804.png => en-us_image_0000001981436769.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906289.png => en-us_image_0000001981436773.png} (100%) rename docs/cce/umn/{en-us_image_0000001898025885.png => en-us_image_0000001981436829.png} (100%) rename docs/cce/umn/{en-us_image_0000001898026669.png => en-us_image_0000001981436837.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745716.png => en-us_image_0000001981436845.png} (100%) rename docs/cce/umn/{en-us_image_0000001851746544.png => en-us_image_0000001981436873.png} (100%) rename docs/cce/umn/{en-us_image_0000001851745764.png => en-us_image_0000001981436885.png} (100%) rename docs/cce/umn/{en-us_image_0000001897906445.png => en-us_image_0000001981436901.png} (100%) diff --git a/docs/cce/umn/ALL_META.TXT.json b/docs/cce/umn/ALL_META.TXT.json index e00f7a4d..b9d91c57 100644 --- a/docs/cce/umn/ALL_META.TXT.json +++ b/docs/cce/umn/ALL_META.TXT.json @@ -27,7 +27,7 @@ "node_id":"cce_productdesc_0001.xml", "product_code":"cce", "code":"2", - "des":"Cloud Container Engine (CCE) is a scalable, enterprise-class hosted Kubernetes service. With CCE, you can easily deploy, manage, and scale containerized applications in t", + "des":"Cloud Container Engine (CCE) is a hosted Kubernetes cluster service for enterprises. It offers complete lifecycle management for containerized applications and delivers s", "doc_type":"usermanual2", "kw":"What Is CCE?,Service Overview,User Guide", "search_title":"", @@ -45,7 +45,7 @@ "node_id":"cce_productdesc_0003.xml", "product_code":"cce", "code":"3", - "des":"CCE is a container service built on Docker and Kubernetes. A wealth of features enables you to run container clusters at scale. CCE eases containerization thanks to its r", + "des":"CCE is a container service built on Docker and Kubernetes. A wealth of features enable you to run container clusters at scale. 
CCE eases containerization thanks to its re", "doc_type":"usermanual2", "kw":"Product Advantages,Service Overview,User Guide", "search_title":"", @@ -99,7 +99,7 @@ "node_id":"cce_productdesc_0021.xml", "product_code":"cce", "code":"6", - "des":"Shopping apps and websites, especially during promotionsLive streaming, where service loads often fluctuateGames, where many players may go online in certain time periods", + "des":"Shopping apps and websites, especially during promotions and flash salesLive streaming, where service loads often fluctuateGames, where many players may go online in cert", "doc_type":"usermanual2", "kw":"Auto Scaling in Seconds,Application Scenarios,User Guide", "search_title":"", @@ -155,7 +155,7 @@ "code":"9", "des":"This section describes the notes and constraints on using CCE.After a cluster is created, the following items cannot be changed:Number of master nodes: For example, a non", "doc_type":"usermanual2", - "kw":"Storage Volumes,Data sharing,Constraints,Service Overview,User Guide", + "kw":"Storage Volumes,Data sharing,Notes and Constraints,Service Overview,User Guide", "search_title":"", "metedata":[ { @@ -163,7 +163,7 @@ "documenttype":"usermanual" } ], - "title":"Constraints", + "title":"Notes and Constraints", "githuburl":"" }, { @@ -241,23 +241,22 @@ "githuburl":"" }, { - "uri":"cce_bulletin_0003.html", - "node_id":"cce_bulletin_0003.xml", + "uri":"cce_bulletin_0033.html", + "node_id":"cce_bulletin_0033.xml", "product_code":"cce", "code":"14", - "des":"This section explains versioning in CCE, and the policies for Kubernetes version support.Version number: The format is x.y.z, where x.y is the major version and z is the ", + "des":"CCE provides highly scalable, high-performance, enterprise-class Kubernetes clusters. This section describes the Kubernetes version policy of CCE clusters.The CCE console", "doc_type":"usermanual2", - "kw":"Kubernetes Version Support Mechanism,Product Bulletin,User Guide", + "kw":"Kubernetes Version Policy,Product Bulletin,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", "opensource":"true", - "documenttype":"usermanual2", - "IsMulti":"Yes" + "documenttype":"usermanual" } ], - "title":"Kubernetes Version Support Mechanism", + "title":"Kubernetes Version Policy", "githuburl":"" }, { @@ -265,7 +264,7 @@ "node_id":"cce_bulletin_0061.xml", "product_code":"cce", "code":"15", - "des":"Dear users,We are pleased to announce that a brand-new CCE console is available. The new console is modern, visually appealing, and concise, providing a more comfortable ", + "des":"Released: Sep 3, 2023Dear users,We are pleased to announce that a brand-new CCE console is available. 
The new console is modern, visually appealing, and concise, providin", "doc_type":"usermanual2", "kw":"CCE Console Upgrade,Product Bulletin,User Guide", "search_title":"", @@ -388,7 +387,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Introduction", @@ -406,7 +405,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Preparations", @@ -424,7 +423,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Kubernetes Cluster", @@ -442,7 +441,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Deployment (Nginx)", @@ -460,7 +459,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deploying WordPress and MySQL That Depend on Each Other", @@ -478,7 +477,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Overview", @@ -496,7 +495,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a MySQL Workload", @@ -514,7 +513,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a WordPress Workload", @@ -527,15 +526,15 @@ "code":"29", "des":"During service deployment or running, you may trigger high-risk operations at different levels, causing service faults or interruption. To help you better estimate and av", "doc_type":"usermanual2", - "kw":"High-Risk Operations and Solutions,User Guide", + "kw":"High-Risk Operations,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"High-Risk Operations and Solutions", + "title":"High-Risk Operations", "githuburl":"" }, { @@ -550,7 +549,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Clusters", @@ -568,7 +567,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Cluster Overview", @@ -586,7 +585,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Basic Cluster Information", @@ -604,17 +603,35 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes Version Release Notes", "githuburl":"" }, + { + "uri":"cce_bulletin_0089.html", + "node_id":"cce_bulletin_0089.xml", + "product_code":"cce", + "code":"34", + "des":"CCE allows you to create Kubernetes clusters 1.29. This section describes the changes made in Kubernetes 1.29.New and Enhanced FeaturesAPI Changes and RemovalsEnhanced Ku", + "doc_type":"usermanual2", + "kw":"Kubernetes 1.29 Release Notes,Kubernetes Version Release Notes,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Kubernetes 1.29 Release Notes", + "githuburl":"" + }, { "uri":"cce_bulletin_0068.html", "node_id":"cce_bulletin_0068.xml", "product_code":"cce", - "code":"34", + "code":"35", "des":"CCE allows you to create Kubernetes clusters 1.28. 
This section describes the changes made in Kubernetes 1.28.Important NotesNew and Enhanced FeaturesAPI Changes and Remo", "doc_type":"usermanual2", "kw":"Kubernetes 1.28 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -622,7 +639,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.28 Release Notes", @@ -632,7 +649,7 @@ "uri":"cce_bulletin_0059.html", "node_id":"cce_bulletin_0059.xml", "product_code":"cce", - "code":"35", + "code":"36", "des":"CCE allows you to create clusters of Kubernetes 1.27. This section describes the changes made in Kubernetes 1.27 compared with Kubernetes 1.25.New FeaturesDeprecations an", "doc_type":"usermanual2", "kw":"Kubernetes 1.27 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -640,7 +657,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.27 Release Notes", @@ -650,7 +667,7 @@ "uri":"cce_bulletin_0058.html", "node_id":"cce_bulletin_0058.xml", "product_code":"cce", - "code":"36", + "code":"37", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the changes made in Kubernetes 1.25 compared wi", "doc_type":"usermanual2", "kw":"Kubernetes 1.25 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -658,7 +675,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.25 Release Notes", @@ -668,7 +685,7 @@ "uri":"cce_bulletin_0027.html", "node_id":"cce_bulletin_0027.xml", "product_code":"cce", - "code":"37", + "code":"38", "des":"This section describes the updates in CCE Kubernetes 1.23.Kubernetes 1.23 Release NotesFlexVolume is deprecated. Use CSI.HorizontalPodAutoscaler v2 is promoted to GA, and", "doc_type":"usermanual2", "kw":"Kubernetes 1.23 Release Notes,Kubernetes Version Release Notes,User Guide", @@ -676,7 +693,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.23 Release Notes", @@ -686,25 +703,25 @@ "uri":"cce_bulletin_0026.html", "node_id":"cce_bulletin_0026.xml", "product_code":"cce", - "code":"38", + "code":"39", "des":"This section describes the updates in CCE Kubernetes 1.21.Kubernetes 1.21 Release NotesCronJob is now in the stable state, and the version number changes to batch/v1.The ", "doc_type":"usermanual2", - "kw":"Kubernetes 1.21 Release Notes,Kubernetes Version Release Notes,User Guide", + "kw":"Kubernetes 1.21 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Kubernetes 1.21 Release Notes", + "title":"Kubernetes 1.21 (EOM) Release Notes", "githuburl":"" }, { "uri":"cce_whsnew_0010.html", "node_id":"cce_whsnew_0010.xml", "product_code":"cce", - "code":"39", + "code":"40", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. 
This section describes the updates in CCE Kubernetes 1.19.Kubernetes v", "doc_type":"usermanual2", "kw":"Kubernetes 1.19 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", @@ -712,7 +729,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.19 (EOM) Release Notes", @@ -722,7 +739,7 @@ "uri":"cce_whsnew_0007.html", "node_id":"cce_whsnew_0007.xml", "product_code":"cce", - "code":"40", + "code":"41", "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", "doc_type":"usermanual2", "kw":"Kubernetes 1.17 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", @@ -730,7 +747,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Kubernetes 1.17 (EOM) Release Notes", @@ -740,15 +757,15 @@ "uri":"cce_10_0405.html", "node_id":"cce_10_0405.xml", "product_code":"cce", - "code":"41", - "des":"In CCE v1.27 and later versions, all nodes support only the containerd container engine.All nodes in the CCE clusters of version 1.25, except the ones running EulerOS 2.5", + "code":"42", + "des":"dockershim has been removed since Kubernetes v1.24, and Docker is not supported in v1.24 and later versions by default. Use containerd.All nodes in the CCE clusters of ve", "doc_type":"usermanual2", "kw":"Patch Version Release Notes,Cluster Overview,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Patch Version Release Notes", @@ -758,7 +775,7 @@ "uri":"cce_10_0298.html", "node_id":"cce_10_0298.xml", "product_code":"cce", - "code":"42", + "code":"43", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Creating a Cluster", @@ -766,7 +783,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Cluster", @@ -776,7 +793,7 @@ "uri":"cce_10_0342.html", "node_id":"cce_10_0342.xml", "product_code":"cce", - "code":"43", + "code":"44", "des":"CCE provides different types of clusters for you to select. The following table lists the differences between them.", "doc_type":"usermanual2", "kw":"Comparison Between Cluster Types,Creating a Cluster,User Guide", @@ -784,7 +801,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Comparison Between Cluster Types", @@ -794,7 +811,7 @@ "uri":"cce_10_0028.html", "node_id":"cce_10_0028.xml", "product_code":"cce", - "code":"44", + "code":"45", "des":"On the CCE console, you can easily create Kubernetes clusters. After a cluster is created, the master node is hosted by CCE. You only need to create worker nodes. 
In this", "doc_type":"usermanual2", "kw":"Creating a CCE Standard/Turbo Cluster,Creating a Cluster,User Guide", @@ -802,7 +819,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a CCE Standard/Turbo Cluster", @@ -812,7 +829,7 @@ "uri":"cce_10_0349.html", "node_id":"cce_10_0349.xml", "product_code":"cce", - "code":"45", + "code":"46", "des":"kube-proxy is a key component of a Kubernetes cluster. It is used for load balancing and forwarding data between a Service and its backend pods.CCE supports the iptables ", "doc_type":"usermanual2", "kw":"kube-proxy,iptables,IP Virtual Server (IPVS),forwarding modes,Comparing iptables and IPVS,Creating a", @@ -820,7 +837,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Comparing iptables and IPVS", @@ -830,7 +847,7 @@ "uri":"cce_10_0140.html", "node_id":"cce_10_0140.xml", "product_code":"cce", - "code":"46", + "code":"47", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Connecting to a Cluster", @@ -838,7 +855,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Connecting to a Cluster", @@ -848,7 +865,7 @@ "uri":"cce_10_0107.html", "node_id":"cce_10_0107.xml", "product_code":"cce", - "code":"47", + "code":"48", "des":"This section uses a CCE standard cluster as an example to describe how to access a CCE cluster using kubectl.When you access a cluster using kubectl, CCE uses kubeconfig ", "doc_type":"usermanual2", "kw":"kubectl,Intranet access,Two-Way Authentication for Domain Names,Error from server Forbidden,The conn", @@ -856,7 +873,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Connecting to a Cluster Using kubectl", @@ -866,25 +883,25 @@ "uri":"cce_10_0175.html", "node_id":"cce_10_0175.xml", "product_code":"cce", - "code":"48", - "des":"This section describes how to obtain the cluster certificate from the console and use it access Kubernetes clusters.The downloaded certificate contains three files: clien", + "code":"49", + "des":"This section describes how to obtain the cluster certificate from the console and use it to access Kubernetes clusters.The downloaded certificate contains three files: cl", "doc_type":"usermanual2", - "kw":"X.509 certificate,Connecting to a Cluster Using an X.509 Certificate,Connecting to a Cluster,User Gu", + "kw":"X.509 certificate,Accessing a Cluster Using an X.509 Certificate,Connecting to a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Connecting to a Cluster Using an X.509 Certificate", + "title":"Accessing a Cluster Using an X.509 Certificate", "githuburl":"" }, { "uri":"cce_10_0367.html", "node_id":"cce_10_0367.xml", "product_code":"cce", - "code":"49", + "code":"50", "des":"Subject Alternative Name (SAN) allows multiple values (including IP addresses, domain names, and so on) to be associated with certificates. 
A SAN is usually used by the c", "doc_type":"usermanual2", "kw":"SAN,X.509 certificate,Accessing a Cluster Using a Custom Domain Name,Connecting to a Cluster,User Gu", @@ -892,1403 +909,35 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Accessing a Cluster Using a Custom Domain Name", "githuburl":"" }, { - "uri":"cce_10_0215.html", - "node_id":"cce_10_0215.xml", - "product_code":"cce", - "code":"50", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Upgrading a Cluster", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Upgrading a Cluster", - "githuburl":"" - }, - { - "uri":"cce_10_0197.html", - "node_id":"cce_10_0197.xml", + "uri":"cce_10_0864.html", + "node_id":"cce_10_0864.xml", "product_code":"cce", "code":"51", - "des":"CCE strictly complies with community consistency authentication. It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months aft", + "des":"You can bind an EIP to an API server of a Kubernetes cluster so that the API server can access the Internet.Binding an EIP to an API server for Internet access can pose a", "doc_type":"usermanual2", - "kw":"cluster upgrade process,Node Priority,In-place upgrade,Upgrade Overview,Upgrading a Cluster,User Gui", + "kw":"Configuring a Cluster's API Server for Internet Access,Connecting to a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Upgrade Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0302.html", - "node_id":"cce_10_0302.xml", - "product_code":"cce", - "code":"52", - "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Upgrade Overview.Before upgra", - "doc_type":"usermanual2", - "kw":"Deprecated APIs,Before You Start,Upgrading a Cluster,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Before You Start", - "githuburl":"" - }, - { - "uri":"cce_10_0560.html", - "node_id":"cce_10_0560.xml", - "product_code":"cce", - "code":"53", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Performing Post-Upgrade Verification", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Performing Post-Upgrade Verification", - "githuburl":"" - }, - { - "uri":"cce_10_0568.html", - "node_id":"cce_10_0568.xml", - "product_code":"cce", - "code":"54", - "des":"After a cluster is upgraded, check whether the cluster is in the Running state.CCE automatically checks your cluster status. 
Go to the cluster list page and confirm the c", - "doc_type":"usermanual2", - "kw":"Cluster Status Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Cluster Status Check", - "githuburl":"" - }, - { - "uri":"cce_10_0569.html", - "node_id":"cce_10_0569.xml", - "product_code":"cce", - "code":"55", - "des":"After a cluster is upgraded, check whether nodes in the cluster are in the Running state.CCE automatically checks your node statuses. Go to the node list page and confirm", - "doc_type":"usermanual2", - "kw":"Node Status Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Status Check", - "githuburl":"" - }, - { - "uri":"cce_10_0567.html", - "node_id":"cce_10_0567.xml", - "product_code":"cce", - "code":"56", - "des":"After a cluster is upgraded, check whether there are any nodes that skip the upgrade in the cluster. These nodes may affect the proper running of the cluster.CCE automati", - "doc_type":"usermanual2", - "kw":"Node Skipping Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Skipping Check", - "githuburl":"" - }, - { - "uri":"cce_10_0561.html", - "node_id":"cce_10_0561.xml", - "product_code":"cce", - "code":"57", - "des":"After a cluster is upgraded, check whether its services are running properly.Different services have different verification mode. Select a suitable one and verify the ser", - "doc_type":"usermanual2", - "kw":"Service Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Service Check", - "githuburl":"" - }, - { - "uri":"cce_10_0565.html", - "node_id":"cce_10_0565.xml", - "product_code":"cce", - "code":"58", - "des":"Check whether nodes can be created in the cluster.If nodes cannot be created in your cluster after the cluster is upgraded, contact technical support.", - "doc_type":"usermanual2", - "kw":"New Node Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"New Node Check", - "githuburl":"" - }, - { - "uri":"cce_10_0566.html", - "node_id":"cce_10_0566.xml", - "product_code":"cce", - "code":"59", - "des":"Check whether pods can be created on the existing nodes after the cluster is upgraded.Check whether pods can be created on new nodes after the cluster is upgraded.After c", - "doc_type":"usermanual2", - "kw":"New Pod Check,Performing Post-Upgrade Verification,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"New Pod Check", - "githuburl":"" - }, - { - "uri":"cce_10_0210.html", - "node_id":"cce_10_0210.xml", - "product_code":"cce", - "code":"60", - "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", - "doc_type":"usermanual2", - "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Migrating Services Across Clusters of Different 
Versions", - "githuburl":"" - }, - { - "uri":"cce_10_0550.html", - "node_id":"cce_10_0550.xml", - "product_code":"cce", - "code":"61", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Troubleshooting for Pre-upgrade Check Exceptions", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Troubleshooting for Pre-upgrade Check Exceptions", - "githuburl":"" - }, - { - "uri":"cce_10_0549.html", - "node_id":"cce_10_0549.xml", - "product_code":"cce", - "code":"62", - "des":"The system automatically checks a cluster before its upgrade. If the cluster does not meet the pre-upgrade check conditions, the upgrade cannot continue. To avoid risks, ", - "doc_type":"usermanual2", - "kw":"Pre-upgrade Check,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Pre-upgrade Check", - "githuburl":"" - }, - { - "uri":"cce_10_0431.html", - "node_id":"cce_10_0431.xml", - "product_code":"cce", - "code":"63", - "des":"Check the following items:Check whether the node is available.Check whether the node OS supports the upgrade.Check whether the node is marked with unexpected node pool la", - "doc_type":"usermanual2", - "kw":"Node Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Restrictions", - "githuburl":"" - }, - { - "uri":"cce_10_0432.html", - "node_id":"cce_10_0432.xml", - "product_code":"cce", - "code":"64", - "des":"Check whether the target cluster is under upgrade management.CCE may temporarily restrict the cluster upgrade due to the following reasons:The cluster is identified as th", - "doc_type":"usermanual2", - "kw":"Upgrade Management,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Upgrade Management", - "githuburl":"" - }, - { - "uri":"cce_10_0433.html", - "node_id":"cce_10_0433.xml", - "product_code":"cce", - "code":"65", - "des":"Check the following items:Check whether the add-on status is normal.Check whether the add-on support the target version.Scenario 1: The add-on malfunctions.Log in to the ", - "doc_type":"usermanual2", - "kw":"Add-ons,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Add-ons", - "githuburl":"" - }, - { - "uri":"cce_10_0434.html", - "node_id":"cce_10_0434.xml", - "product_code":"cce", - "code":"66", - "des":"Check whether the current HelmRelease record contains discarded Kubernetes APIs that are not supported by the target cluster version. 
If yes, the Helm chart may be unavai", - "doc_type":"usermanual2", - "kw":"Helm Charts,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Helm Charts", - "githuburl":"" - }, - { - "uri":"cce_10_0435.html", - "node_id":"cce_10_0435.xml", - "product_code":"cce", - "code":"67", - "des":"Check whether CCE can connect to your master nodes.Contact technical support.", - "doc_type":"usermanual2", - "kw":"SSH Connectivity of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"SSH Connectivity of Master Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0436.html", - "node_id":"cce_10_0436.xml", - "product_code":"cce", - "code":"68", - "des":"Check the node pool status.Check whether the node pool OS or container runtime is supported after the upgrade.Scenario: The node pool malfunctions.Log in to the CCE conso", - "doc_type":"usermanual2", - "kw":"Node Pools,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Pools", - "githuburl":"" - }, - { - "uri":"cce_10_0437.html", - "node_id":"cce_10_0437.xml", - "product_code":"cce", - "code":"69", - "des":"Check whether the Protocol & Port of the worker node security groups are set to ICMP: All and whether the security group with the source IP address set to the master node", - "doc_type":"usermanual2", - "kw":"Security Groups,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Security Groups", - "githuburl":"" - }, - { - "uri":"cce_10_0439.html", - "node_id":"cce_10_0439.xml", - "product_code":"cce", - "code":"70", - "des":"Check whether the node needs to be migrated.For the 1.15 cluster that is upgraded from 1.13 in rolling mode, migrate (reset or create and replace) all nodes before perfor", - "doc_type":"usermanual2", - "kw":"To-Be-Migrated Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"To-Be-Migrated Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0440.html", - "node_id":"cce_10_0440.xml", - "product_code":"cce", - "code":"71", - "des":"Check whether there are discarded resources in the clusters.Scenario: The Service in the clusters of v1.25 or later has discarded annotation: tolerate-unready-endpoints.E", - "doc_type":"usermanual2", - "kw":"Discarded Kubernetes Resources,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Discarded Kubernetes Resources", - "githuburl":"" - }, - { - "uri":"cce_10_0441.html", - "node_id":"cce_10_0441.xml", - "product_code":"cce", - "code":"72", - "des":"Read the version compatibility differences and ensure that they are not affected. 
The patch upgrade does not involve version compatibility differences.", - "doc_type":"usermanual2", - "kw":"Compatibility Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Compatibility Risks", - "githuburl":"" - }, - { - "uri":"cce_10_0442.html", - "node_id":"cce_10_0442.xml", - "product_code":"cce", - "code":"73", - "des":"Check whether cce-agent on the current node is of the latest version.Scenario 1: The error message \"you cce-agent no update, please restart it\" is displayed.cce-agent doe", - "doc_type":"usermanual2", - "kw":"CCE Agent Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CCE Agent Versions", - "githuburl":"" - }, - { - "uri":"cce_10_0443.html", - "node_id":"cce_10_0443.xml", - "product_code":"cce", - "code":"74", - "des":"Check whether the CPU usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule pod", - "doc_type":"usermanual2", - "kw":"Node CPU Usage,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node CPU Usage", - "githuburl":"" - }, - { - "uri":"cce_10_0444.html", - "node_id":"cce_10_0444.xml", - "product_code":"cce", - "code":"75", - "des":"Check the following items:Check whether the key CRD packageversions.version.cce.io of the cluster is deleted.Check whether the cluster key CRD network-attachment-definiti", - "doc_type":"usermanual2", - "kw":"CRDs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CRDs", - "githuburl":"" - }, - { - "uri":"cce_10_0445.html", - "node_id":"cce_10_0445.xml", - "product_code":"cce", - "code":"76", - "des":"Check the following items:Check whether the key data disks on the node meet the upgrade requirements.Check whether the /tmp directory has 500 MB available space.During th", - "doc_type":"usermanual2", - "kw":"Node Disks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Disks", - "githuburl":"" - }, - { - "uri":"cce_10_0446.html", - "node_id":"cce_10_0446.xml", - "product_code":"cce", - "code":"77", - "des":"Check the following items:Check whether the DNS configuration of the current node can resolve the OBS address.Check whether the current node can access the OBS address of", - "doc_type":"usermanual2", - "kw":"Node DNS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node DNS", - "githuburl":"" - }, - { - "uri":"cce_10_0447.html", - "node_id":"cce_10_0447.xml", - "product_code":"cce", - "code":"78", - "des":"Check whether the owner and owner group of the files in the /var/paas directory used by the CCE are both paas.Scenario 1: The error message \"xx file permission has been c", - "doc_type":"usermanual2", - "kw":"Node Key Directory File Permissions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - 
"documenttype":"usermanual2" - } - ], - "title":"Node Key Directory File Permissions", - "githuburl":"" - }, - { - "uri":"cce_10_0448.html", - "node_id":"cce_10_0448.xml", - "product_code":"cce", - "code":"79", - "des":"Check whether the kubelet on the node is running properly.Scenario 1: The kubelet status is abnormal.If the kubelet malfunctions, the node is unavailable. Restore the nod", - "doc_type":"usermanual2", - "kw":"Kubelet,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Kubelet", - "githuburl":"" - }, - { - "uri":"cce_10_0449.html", - "node_id":"cce_10_0449.xml", - "product_code":"cce", - "code":"80", - "des":"Check whether the memory usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule ", - "doc_type":"usermanual2", - "kw":"Node Memory,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Memory", - "githuburl":"" - }, - { - "uri":"cce_10_0450.html", - "node_id":"cce_10_0450.xml", - "product_code":"cce", - "code":"81", - "des":"Check whether the clock synchronization server ntpd or chronyd of the node is running properly.Scenario 1: ntpd is running abnormally.Log in to the node and run the syste", - "doc_type":"usermanual2", - "kw":"Node Clock Synchronization Server,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Clock Synchronization Server", - "githuburl":"" - }, - { - "uri":"cce_10_0451.html", - "node_id":"cce_10_0451.xml", - "product_code":"cce", - "code":"82", - "des":"Check whether the OS kernel version of the node is supported by CCE.Case 1: The node image is not a standard CCE image.CCE nodes run depending on the initial standard ker", - "doc_type":"usermanual2", - "kw":"Node OS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node OS", - "githuburl":"" - }, - { - "uri":"cce_10_0452.html", - "node_id":"cce_10_0452.xml", - "product_code":"cce", - "code":"83", - "des":"Check whether the number of CPUs on the master node is greater than 2.If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 o", - "doc_type":"usermanual2", - "kw":"Node CPUs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node CPUs", - "githuburl":"" - }, - { - "uri":"cce_10_0453.html", - "node_id":"cce_10_0453.xml", - "product_code":"cce", - "code":"84", - "des":"Check whether the Python commands are available on a node.If the command output is not 0, the check fails.Install Python before the upgrade.", - "doc_type":"usermanual2", - "kw":"Node Python Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Python Commands", - "githuburl":"" - }, - { - "uri":"cce_10_0455.html", - "node_id":"cce_10_0455.xml", - "product_code":"cce", - "code":"85", - "des":"Check whether the nodes in the cluster are ready.Scenario 1: The nodes are 
in the unavailable status.Log in to the CCE console and click the cluster name to access the cl", - "doc_type":"usermanual2", - "kw":"Node Readiness,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Readiness", - "githuburl":"" - }, - { - "uri":"cce_10_0456.html", - "node_id":"cce_10_0456.xml", - "product_code":"cce", - "code":"86", - "des":"Check whether journald of a node is normal.Log in to the node and run the systemctl is-active systemd-journald command to obtain the running status of journald. If the co", - "doc_type":"usermanual2", - "kw":"Node journald,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node journald", - "githuburl":"" - }, - { - "uri":"cce_10_0457.html", - "node_id":"cce_10_0457.xml", - "product_code":"cce", - "code":"87", - "des":"Check whether the containerd.sock file exists on the node. This file affects the startup of container runtime in the Euler OS.Scenario: The Docker used by the node is the", - "doc_type":"usermanual2", - "kw":"containerd.sock,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"containerd.sock", - "githuburl":"" - }, - { - "uri":"cce_10_0458.html", - "node_id":"cce_10_0458.xml", - "product_code":"cce", - "code":"88", - "des":"Before the upgrade, check whether an internal error occurs.If this check fails, contact technical support.", - "doc_type":"usermanual2", - "kw":"Internal Errors,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Internal Errors", - "githuburl":"" - }, - { - "uri":"cce_10_0459.html", - "node_id":"cce_10_0459.xml", - "product_code":"cce", - "code":"89", - "des":"Check whether inaccessible mount points exist on the node.Scenario: There are inaccessible mount points on the node.If NFS (such as obsfs or SFS) is used by the node and ", - "doc_type":"usermanual2", - "kw":"Node Mount Points,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Mount Points", - "githuburl":"" - }, - { - "uri":"cce_10_0460.html", - "node_id":"cce_10_0460.xml", - "product_code":"cce", - "code":"90", - "des":"Check whether the taint needed for cluster upgrade exists on the node.Scenario 1: The node is skipped during the cluster upgrade.If the version of the node is different f", - "doc_type":"usermanual2", - "kw":"Kubernetes Node Taints,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Kubernetes Node Taints", - "githuburl":"" - }, - { - "uri":"cce_10_0478.html", - "node_id":"cce_10_0478.xml", - "product_code":"cce", - "code":"91", - "des":"Check whether there are any compatibility restrictions on the current Everest add-on.There are compatibility restrictions on the current Everest add-on and it cannot be u", - "doc_type":"usermanual2", - "kw":"Everest Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } 
- ], - "title":"Everest Restrictions", - "githuburl":"" - }, - { - "uri":"cce_10_0479.html", - "node_id":"cce_10_0479.xml", - "product_code":"cce", - "code":"92", - "des":"Check whether the current cce-controller-hpa add-on has compatibility restrictions.The current cce-controller-hpa add-on has compatibility restrictions. An add-on that ca", - "doc_type":"usermanual2", - "kw":"cce-hpa-controller Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"cce-hpa-controller Restrictions", - "githuburl":"" - }, - { - "uri":"cce_10_0480.html", - "node_id":"cce_10_0480.xml", - "product_code":"cce", - "code":"93", - "des":"Check whether the current cluster version and the target version support enhanced CPU policy.Scenario: Only the current cluster version supports the enhanced CPU policy f", - "doc_type":"usermanual2", - "kw":"Enhanced CPU Policies,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Enhanced CPU Policies", - "githuburl":"" - }, - { - "uri":"cce_10_0484.html", - "node_id":"cce_10_0484.xml", - "product_code":"cce", - "code":"94", - "des":"Check whether the container runtime and network components on the worker nodes are healthy.If a worker node component malfunctions, log in to the node to check the status", - "doc_type":"usermanual2", - "kw":"Health of Worker Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Health of Worker Node Components", - "githuburl":"" - }, - { - "uri":"cce_10_0485.html", - "node_id":"cce_10_0485.xml", - "product_code":"cce", - "code":"95", - "des":"Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.If a master node component malfunctions, contact technical support", - "doc_type":"usermanual2", - "kw":"Health of Master Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Health of Master Node Components", - "githuburl":"" - }, - { - "uri":"cce_10_0486.html", - "node_id":"cce_10_0486.xml", - "product_code":"cce", - "code":"96", - "des":"Check whether the resources of Kubernetes components, such as etcd and kube-controller-manager, exceed the upper limit.Solution 1: Reduce Kubernetes resources that are ne", - "doc_type":"usermanual2", - "kw":"Memory Resource Limit of Kubernetes Components,Troubleshooting for Pre-upgrade Check Exceptions,User", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Memory Resource Limit of Kubernetes Components", - "githuburl":"" - }, - { - "uri":"cce_10_0487.html", - "node_id":"cce_10_0487.xml", - "product_code":"cce", - "code":"97", - "des":"The system scans the audit logs of the past day to check whether the user calls the deprecated APIs of the target Kubernetes version.Due to the limited time range of audi", - "doc_type":"usermanual2", - "kw":"Discarded Kubernetes APIs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Discarded Kubernetes APIs", - "githuburl":"" - }, - { - 
"uri":"cce_10_0488.html", - "node_id":"cce_10_0488.xml", - "product_code":"cce", - "code":"98", - "des":"If IPv6 is enabled for a CCE Turbo cluster, check whether the target cluster version supports IPv6.CCE Turbo clusters support IPv6 since v1.23. This feature is available ", - "doc_type":"usermanual2", - "kw":"IPv6 Capabilities of a CCE Turbo Cluster,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"IPv6 Capabilities of a CCE Turbo Cluster", - "githuburl":"" - }, - { - "uri":"cce_10_0489.html", - "node_id":"cce_10_0489.xml", - "product_code":"cce", - "code":"99", - "des":"Check whether NetworkManager of a node is normal.Log in to the node and run the systemctl is-active NetworkManager command to obtain the running status of NetworkManager.", - "doc_type":"usermanual2", - "kw":"Node NetworkManager,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node NetworkManager", - "githuburl":"" - }, - { - "uri":"cce_10_0490.html", - "node_id":"cce_10_0490.xml", - "product_code":"cce", - "code":"100", - "des":"Check the ID file format.", - "doc_type":"usermanual2", - "kw":"Node ID File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node ID File", - "githuburl":"" - }, - { - "uri":"cce_10_0491.html", - "node_id":"cce_10_0491.xml", - "product_code":"cce", - "code":"101", - "des":"When you upgrade a cluster to v1.19 or later, the system checks whether the following configuration files have been modified on the backend:/opt/cloud/cce/kubernetes/kube", - "doc_type":"usermanual2", - "kw":"Node Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Configuration Consistency", - "githuburl":"" - }, - { - "uri":"cce_10_0492.html", - "node_id":"cce_10_0492.xml", - "product_code":"cce", - "code":"102", - "des":"Check whether the configuration files of key components exist on the node.The following table lists the files to be checked.Contact technical support to restore the confi", - "doc_type":"usermanual2", - "kw":"Node Configuration File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Configuration File", - "githuburl":"" - }, - { - "uri":"cce_10_0493.html", - "node_id":"cce_10_0493.xml", - "product_code":"cce", - "code":"103", - "des":"Check whether the current CoreDNS key configuration Corefile is different from the Helm release record. 
The difference may be overwritten during the add-on upgrade, affec", - "doc_type":"usermanual2", - "kw":"CoreDNS Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CoreDNS Configuration Consistency", - "githuburl":"" - }, - { - "uri":"cce_10_0494.html", - "node_id":"cce_10_0494.xml", - "product_code":"cce", - "code":"104", - "des":"Whether the sudo commands and sudo-related files of the node are workingScenario 1: The sudo command fails to be executed.During the in-place cluster upgrade, the sudo co", - "doc_type":"usermanual2", - "kw":"sudo Commands of a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"sudo Commands of a Node", - "githuburl":"" - }, - { - "uri":"cce_10_0495.html", - "node_id":"cce_10_0495.xml", - "product_code":"cce", - "code":"105", - "des":"Whether some key commands that the node upgrade depends on are workingScenario 1: Executing the package manager command failed.Executing the rpm or dpkg command failed. I", - "doc_type":"usermanual2", - "kw":"Key Commands of Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Key Commands of Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0496.html", - "node_id":"cce_10_0496.xml", - "product_code":"cce", - "code":"106", - "des":"Check whether the docker/containerd.sock file is directly mounted to the pods on a node. During an upgrade, Docker or containerd restarts and the sock file on the host ch", - "doc_type":"usermanual2", - "kw":"Mounting of a Sock File on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Mounting of a Sock File on a Node", - "githuburl":"" - }, - { - "uri":"cce_10_0497.html", - "node_id":"cce_10_0497.xml", - "product_code":"cce", - "code":"107", - "des":"Check whether the certificate used by an HTTPS load balancer has been modified on ELB.The certificate referenced by an HTTPS ingress created on CCE is modified on the ELB", - "doc_type":"usermanual2", - "kw":"HTTPS Load Balancer Certificate Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Gu", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"HTTPS Load Balancer Certificate Consistency", - "githuburl":"" - }, - { - "uri":"cce_10_0498.html", - "node_id":"cce_10_0498.xml", - "product_code":"cce", - "code":"108", - "des":"Check whether the default mount directory and soft link on the node have been manually mounted or modified.Non-shared diskBy default, /var/lib/docker, containerd, or /mnt", - "doc_type":"usermanual2", - "kw":"Node Mounting,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Mounting", - "githuburl":"" - }, - { - "uri":"cce_10_0499.html", - "node_id":"cce_10_0499.xml", - "product_code":"cce", - "code":"109", - "des":"Check whether user paas is allowed to log in to a node.Run the following command to check whether user paas is allowed to log in to a node:If the permissions assigned to ", - "doc_type":"usermanual2", - 
"kw":"Login Permissions of User paas on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Login Permissions of User paas on a Node", - "githuburl":"" - }, - { - "uri":"cce_10_0500.html", - "node_id":"cce_10_0500.xml", - "product_code":"cce", - "code":"110", - "des":"Check whether the load balancer associated with a Service is allocated with a private IPv4 address.Solution 1: Delete the Service that is associated with a load balancer ", - "doc_type":"usermanual2", - "kw":"Private IPv4 Addresses of Load Balancers,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Private IPv4 Addresses of Load Balancers", - "githuburl":"" - }, - { - "uri":"cce_10_0501.html", - "node_id":"cce_10_0501.xml", - "product_code":"cce", - "code":"111", - "des":"Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.If the source version of the cluster is earlier than v1.1", - "doc_type":"usermanual2", - "kw":"Historical Upgrade Records,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Historical Upgrade Records", - "githuburl":"" - }, - { - "uri":"cce_10_0502.html", - "node_id":"cce_10_0502.xml", - "product_code":"cce", - "code":"112", - "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.If the CIDR block of the cluster management plane is d", - "doc_type":"usermanual2", - "kw":"CIDR Block of the Cluster Management Plane,Troubleshooting for Pre-upgrade Check Exceptions,User Gui", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CIDR Block of the Cluster Management Plane", - "githuburl":"" - }, - { - "uri":"cce_10_0503.html", - "node_id":"cce_10_0503.xml", - "product_code":"cce", - "code":"113", - "des":"The GPU add-on is involved in the upgrade, which may affect the GPU driver installation during the creation of a GPU node.The GPU add-on driver needs to be configured by ", - "doc_type":"usermanual2", - "kw":"GPU Add-on,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"GPU Add-on", - "githuburl":"" - }, - { - "uri":"cce_10_0504.html", - "node_id":"cce_10_0504.xml", - "product_code":"cce", - "code":"114", - "des":"Check whether the default system parameter settings on your nodes are modified.If the MTU value of the bond0 network on your BMS node is not the default value 1500, this ", - "doc_type":"usermanual2", - "kw":"Nodes' System Parameter Settings,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Nodes' System Parameter Settings", - "githuburl":"" - }, - { - "uri":"cce_10_0505.html", - "node_id":"cce_10_0505.xml", - "product_code":"cce", - "code":"115", - "des":"Check whether there are residual package version data in the current cluster.A message is displayed indicating that there are residual 10.12.1.109 CRD resources in your c", - "doc_type":"usermanual2", - "kw":"Residual Package Versions,Troubleshooting for 
Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Residual Package Versions", - "githuburl":"" - }, - { - "uri":"cce_10_0506.html", - "node_id":"cce_10_0506.xml", - "product_code":"cce", - "code":"116", - "des":"Check whether the commands required for the upgrade are available on the node.The cluster upgrade failure is typically caused by the lack of key node commands that are re", - "doc_type":"usermanual2", - "kw":"Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Commands", - "githuburl":"" - }, - { - "uri":"cce_10_0507.html", - "node_id":"cce_10_0507.xml", - "product_code":"cce", - "code":"117", - "des":"Check whether swap has been enabled on cluster nodes.By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of ", - "doc_type":"usermanual2", - "kw":"Node Swap,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Swap", - "githuburl":"" - }, - { - "uri":"cce_10_0510.html", - "node_id":"cce_10_0510.xml", - "product_code":"cce", - "code":"118", - "des":"Check whether the service container running on the node may restart when the containerd component is upgraded on the node that uses containerd in the current cluster.Ensu", - "doc_type":"usermanual2", - "kw":"Check containerd pod restart risk,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Check containerd pod restart risk", - "githuburl":"" - }, - { - "uri":"cce_10_0511.html", - "node_id":"cce_10_0511.xml", - "product_code":"cce", - "code":"119", - "des":"Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. 
If so, upgrading the cluster may fail.", - "doc_type":"usermanual2", - "kw":"Key GPU Add-on Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Key GPU Add-on Parameters", - "githuburl":"" - }, - { - "uri":"cce_10_0512.html", - "node_id":"cce_10_0512.xml", - "product_code":"cce", - "code":"120", - "des":"Check whether GPU or NPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.Upgrade the cluster when the impact on services", - "doc_type":"usermanual2", - "kw":"GPU or NPU Pod Rebuild Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"GPU or NPU Pod Rebuild Risks", - "githuburl":"" - }, - { - "uri":"cce_10_0513.html", - "node_id":"cce_10_0513.xml", - "product_code":"cce", - "code":"121", - "des":"Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are corre", - "doc_type":"usermanual2", - "kw":"ELB Listener Access Control,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"ELB Listener Access Control", - "githuburl":"" - }, - { - "uri":"cce_10_0514.html", - "node_id":"cce_10_0514.xml", - "product_code":"cce", - "code":"122", - "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.Flavor inconsistency is typically due to a modification made o", - "doc_type":"usermanual2", - "kw":"Master Node Flavor,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Master Node Flavor", - "githuburl":"" - }, - { - "uri":"cce_10_0515.html", - "node_id":"cce_10_0515.xml", - "product_code":"cce", - "code":"123", - "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.If the number of IP addresses in the selected cluster subnet is insuffic", - "doc_type":"usermanual2", - "kw":"Subnet Quota of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Subnet Quota of Master Nodes", - "githuburl":"" - }, - { - "uri":"cce_10_0516.html", - "node_id":"cce_10_0516.xml", - "product_code":"cce", - "code":"124", - "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node is", - "doc_type":"usermanual2", - "kw":"Node Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Runtime", - "githuburl":"" - }, - { - "uri":"cce_10_0517.html", - "node_id":"cce_10_0517.xml", - "product_code":"cce", - "code":"125", - "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. 
Do not use Docker in clusters of versions later than 1.27.If the runtime on your node po", - "doc_type":"usermanual2", - "kw":"Node Pool Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Pool Runtime", - "githuburl":"" - }, - { - "uri":"cce_10_0518.html", - "node_id":"cce_10_0518.xml", - "product_code":"cce", - "code":"126", - "des":"Check the number of images on your node. If the number is greater than 1000, Docker startup may be slow.Contact O&M personnel to check whether this issue affects the upgr", - "doc_type":"usermanual2", - "kw":"Number of Node Images,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Number of Node Images", + "title":"Configuring a Cluster's API Server for Internet Access", "githuburl":"" }, { "uri":"cce_10_0031.html", "node_id":"cce_10_0031.xml", "product_code":"cce", - "code":"127", + "code":"52", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Cluster", @@ -2296,7 +945,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing a Cluster", @@ -2306,69 +955,87 @@ "uri":"cce_10_0213.html", "node_id":"cce_10_0213.xml", "product_code":"cce", - "code":"128", - "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your requirements.This function is supported only in clusters of v1.15 a", + "code":"53", + "des":"CCE allows you to manage cluster parameters, through which you can let core components work under your requirements.kube-apiserverkube-controller-managerkube-scheduler", "doc_type":"usermanual2", - "kw":"cluster parameters,kube-apiserver,kube-controller-manager,Cluster Configuration Management,Managing ", + "kw":"cluster parameters,kube-apiserver,kube-controller-manager,Modifying Cluster Configurations,Managing ", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Cluster Configuration Management", + "title":"Modifying Cluster Configurations", "githuburl":"" }, { "uri":"cce_10_0602.html", "node_id":"cce_10_0602.xml", "product_code":"cce", - "code":"129", - "des":"If enabled, concurrent requests are dynamically controlled based on the resource pressure of master nodes to keep them and the cluster available.The cluster version must ", + "code":"54", + "des":"After overload control is enabled, the number of simultaneous requests is dynamically regulated according to the resource pressure on the master nodes. 
This ensures that ", "doc_type":"usermanual2", - "kw":"Cluster Overload Control,Managing a Cluster,User Guide", + "kw":"overload control,Enabling Overload Control for a Cluster,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Cluster Overload Control", + "title":"Enabling Overload Control for a Cluster", "githuburl":"" }, { "uri":"cce_10_0403.html", "node_id":"cce_10_0403.xml", "product_code":"cce", - "code":"130", - "des":"CCE allows you to change the number of nodes managed in a cluster.This function is supported for clusters of v1.15 and later versions.Starting from v1.15.11, the number o", + "code":"55", + "des":"CCE allows you to change the number of nodes managed in a cluster.A cluster that has only one master node supports fewer than 1000 worker nodes.The number of master nodes", "doc_type":"usermanual2", "kw":"Changing Cluster Scale,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Changing Cluster Scale", "githuburl":"" }, + { + "uri":"cce_10_0426.html", + "node_id":"cce_10_0426.xml", + "product_code":"cce", + "code":"56", + "des":"When creating a cluster, you can customize a node security group to centrally manage network security policies. For a created cluster, you can change its default node sec", + "doc_type":"usermanual2", + "kw":"Changing the Default Security Group of a Node,Managing a Cluster,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Changing the Default Security Group of a Node", + "githuburl":"" + }, { "uri":"cce_10_0212.html", "node_id":"cce_10_0212.xml", "product_code":"cce", - "code":"131", - "des":"Deleting a cluster will delete the nodes in the cluster (excluding accepted nodes), data disks attached to the nodes, workloads, and Services. Related services cannot be ", + "code":"57", + "des":"Deleting a cluster will delete the workloads and Services in the cluster, and the deleted data cannot be recovered. 
Before performing this operation, ensure that related ", "doc_type":"usermanual2", "kw":"Deleting a Cluster,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deleting a Cluster", @@ -2378,25 +1045,1411 @@ "uri":"cce_10_0214.html", "node_id":"cce_10_0214.xml", "product_code":"cce", - "code":"132", - "des":"If you do not need to use a cluster temporarily, hibernate the cluster.After a cluster is hibernated, resources such as workloads cannot be created or managed in the clus", + "code":"58", + "des":"If a pay-per-use cluster is not needed temporarily, hibernate it to reduce costs.After a cluster is hibernated, resources such as workloads cannot be created or managed i", "doc_type":"usermanual2", - "kw":"Hibernating and Waking Up a Cluster,Managing a Cluster,User Guide", + "kw":"Hibernating or Waking Up a Cluster,Managing a Cluster,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Hibernating and Waking Up a Cluster", + "title":"Hibernating or Waking Up a Cluster", + "githuburl":"" + }, + { + "uri":"cce_10_0215.html", + "node_id":"cce_10_0215.xml", + "product_code":"cce", + "code":"59", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Upgrading a Cluster", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Upgrading a Cluster", + "githuburl":"" + }, + { + "uri":"cce_10_0197.html", + "node_id":"cce_10_0197.xml", + "product_code":"cce", + "code":"60", + "des":"CCE strictly complies with community consistency authentication. It releases three Kubernetes versions each year and offers a maintenance period of at least 24 months aft", + "doc_type":"usermanual2", + "kw":"cluster upgrade process,Node Priority,In-place upgrade,Process and Method of Upgrading a Cluster,Upg", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Process and Method of Upgrading a Cluster", + "githuburl":"" + }, + { + "uri":"cce_10_0302.html", + "node_id":"cce_10_0302.xml", + "product_code":"cce", + "code":"61", + "des":"Before the upgrade, you can check whether your cluster can be upgraded and which versions are available on the CCE console. For details, see Process and Method of Upgradi", + "doc_type":"usermanual2", + "kw":"Deprecated APIs,Before You Start,Upgrading a Cluster,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Before You Start", + "githuburl":"" + }, + { + "uri":"cce_10_0560.html", + "node_id":"cce_10_0560.xml", + "product_code":"cce", + "code":"62", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Performing Post-Upgrade Verification", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Performing Post-Upgrade Verification", + "githuburl":"" + }, + { + "uri":"cce_10_0568.html", + "node_id":"cce_10_0568.xml", + "product_code":"cce", + "code":"63", + "des":"After a cluster is upgraded, check whether the cluster is in the Running state.CCE automatically checks your cluster status. Go to the cluster list page and confirm the c", + "doc_type":"usermanual2", + "kw":"Cluster Status Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Cluster Status Check", + "githuburl":"" + }, + { + "uri":"cce_10_0569.html", + "node_id":"cce_10_0569.xml", + "product_code":"cce", + "code":"64", + "des":"After a cluster is upgraded, check whether nodes in the cluster are in the Running state.CCE automatically checks your node statuses. Go to the node list page and confirm", + "doc_type":"usermanual2", + "kw":"Node Status Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Status Check", + "githuburl":"" + }, + { + "uri":"cce_10_0567.html", + "node_id":"cce_10_0567.xml", + "product_code":"cce", + "code":"65", + "des":"After a cluster is upgraded, check whether there are any nodes that skip the upgrade in the cluster. These nodes may affect the proper running of the cluster.CCE automati", + "doc_type":"usermanual2", + "kw":"Node Skipping Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Skipping Check", + "githuburl":"" + }, + { + "uri":"cce_10_0561.html", + "node_id":"cce_10_0561.xml", + "product_code":"cce", + "code":"66", + "des":"After a cluster is upgraded, check whether its services are running properly.Different services have different verification modes. 
Select a suitable one and verify the ser", + "doc_type":"usermanual2", + "kw":"Service Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Service Check", + "githuburl":"" + }, + { + "uri":"cce_10_0565.html", + "node_id":"cce_10_0565.xml", + "product_code":"cce", + "code":"67", + "des":"Check whether nodes can be created in the cluster.If nodes cannot be created in your cluster after the cluster is upgraded, contact technical support.", + "doc_type":"usermanual2", + "kw":"New Node Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"New Node Check", + "githuburl":"" + }, + { + "uri":"cce_10_0566.html", + "node_id":"cce_10_0566.xml", + "product_code":"cce", + "code":"68", + "des":"Check whether pods can be created on the existing nodes after the cluster is upgraded.Check whether pods can be created on new nodes after the cluster is upgraded.After c", + "doc_type":"usermanual2", + "kw":"New Pod Check,Performing Post-Upgrade Verification,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"New Pod Check", + "githuburl":"" + }, + { + "uri":"cce_10_0210.html", + "node_id":"cce_10_0210.xml", + "product_code":"cce", + "code":"69", + "des":"This section describes how to migrate services from a cluster of an earlier version to a cluster of a later version in CCE.This operation is applicable when a cross-versi", + "doc_type":"usermanual2", + "kw":"Migrating Services Across Clusters of Different Versions,Upgrading a Cluster,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Migrating Services Across Clusters of Different Versions", + "githuburl":"" + }, + { + "uri":"cce_10_0550.html", + "node_id":"cce_10_0550.xml", + "product_code":"cce", + "code":"70", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"usermanual2", + "kw":"Troubleshooting for Pre-upgrade Check Exceptions", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Troubleshooting for Pre-upgrade Check Exceptions", + "githuburl":"" + }, + { + "uri":"cce_10_0549.html", + "node_id":"cce_10_0549.xml", + "product_code":"cce", + "code":"71", + "des":"The system automatically checks a cluster before its upgrade. If the cluster does not meet the pre-upgrade check conditions, the upgrade cannot continue. 
To avoid risks, ", + "doc_type":"usermanual2", + "kw":"Pre-upgrade Check,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Pre-upgrade Check", + "githuburl":"" + }, + { + "uri":"cce_10_0431.html", + "node_id":"cce_10_0431.xml", + "product_code":"cce", + "code":"72", + "des":"Check the following items:Check whether the node is available.Check whether the node OS supports the upgrade.Check whether the node is marked with unexpected node pool la", + "doc_type":"usermanual2", + "kw":"Node Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Restrictions", + "githuburl":"" + }, + { + "uri":"cce_10_0432.html", + "node_id":"cce_10_0432.xml", + "product_code":"cce", + "code":"73", + "des":"Check whether the target cluster is under upgrade management.CCE may temporarily restrict the cluster upgrade due to the following reasons:The cluster is identified as th", + "doc_type":"usermanual2", + "kw":"Upgrade Management,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Upgrade Management", + "githuburl":"" + }, + { + "uri":"cce_10_0433.html", + "node_id":"cce_10_0433.xml", + "product_code":"cce", + "code":"74", + "des":"Check the following items:Check whether the add-on status is normal.Check whether the add-on supports the target version.Scenario 1: The add-on malfunctions.Log in to the ", + "doc_type":"usermanual2", + "kw":"Add-ons,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Add-ons", + "githuburl":"" + }, + { + "uri":"cce_10_0434.html", + "node_id":"cce_10_0434.xml", + "product_code":"cce", + "code":"75", + "des":"Check whether the current HelmRelease record contains discarded Kubernetes APIs that are not supported by the target cluster version. 
If yes, the Helm chart may be unavai", + "doc_type":"usermanual2", + "kw":"Helm Charts,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Helm Charts", + "githuburl":"" + }, + { + "uri":"cce_10_0435.html", + "node_id":"cce_10_0435.xml", + "product_code":"cce", + "code":"76", + "des":"Check whether CCE can connect to your master nodes.Contact technical support.", + "doc_type":"usermanual2", + "kw":"SSH Connectivity of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"SSH Connectivity of Master Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0436.html", + "node_id":"cce_10_0436.xml", + "product_code":"cce", + "code":"77", + "des":"Check the node pool status.Check whether the node pool OS or container runtime is supported after the upgrade.Scenario: The node pool malfunctions.Log in to the CCE conso", + "doc_type":"usermanual2", + "kw":"Node Pools,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Pools", + "githuburl":"" + }, + { + "uri":"cce_10_0437.html", + "node_id":"cce_10_0437.xml", + "product_code":"cce", + "code":"78", + "des":"Check whether the Protocol & Port of the worker node security groups is set to ICMP: All and whether the security group with the source IP address set to the master node ", + "doc_type":"usermanual2", + "kw":"Security Groups,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Security Groups", + "githuburl":"" + }, + { + "uri":"cce_10_0439.html", + "node_id":"cce_10_0439.xml", + "product_code":"cce", + "code":"79", + "des":"Check whether nodes need to be migrated.For the 1.15 cluster that is upgraded from 1.13 in rolling mode, migrate (reset or create and replace) all nodes before performing", + "doc_type":"usermanual2", + "kw":"Residual Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Residual Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0440.html", + "node_id":"cce_10_0440.xml", + "product_code":"cce", + "code":"80", + "des":"Check whether there are discarded resources in the clusters.Scenario: The Service in the clusters of v1.25 or later has discarded annotation: tolerate-unready-endpoints.E", + "doc_type":"usermanual2", + "kw":"Discarded Kubernetes Resources,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Discarded Kubernetes Resources", + "githuburl":"" + }, + { + "uri":"cce_10_0441.html", + "node_id":"cce_10_0441.xml", + "product_code":"cce", + "code":"81", + "des":"Read the version compatibility differences and ensure that they are not affected. 
The patch upgrade does not involve version compatibility differences.", + "doc_type":"usermanual2", + "kw":"Compatibility Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Compatibility Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0442.html", + "node_id":"cce_10_0442.xml", + "product_code":"cce", + "code":"82", + "des":"Check whether cce-agent on the current node is of the latest version.Scenario 1: The error message \"you cce-agent no update, please restart it\" is displayed.cce-agent doe", + "doc_type":"usermanual2", + "kw":"CCE Agent Versions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CCE Agent Versions", + "githuburl":"" + }, + { + "uri":"cce_10_0443.html", + "node_id":"cce_10_0443.xml", + "product_code":"cce", + "code":"83", + "des":"Check whether the CPU usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule pod", + "doc_type":"usermanual2", + "kw":"Node CPU Usage,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node CPU Usage", + "githuburl":"" + }, + { + "uri":"cce_10_0444.html", + "node_id":"cce_10_0444.xml", + "product_code":"cce", + "code":"84", + "des":"Check the following items:Check whether the key CRD packageversions.version.cce.io of the cluster is deleted.Check whether the cluster key CRD network-attachment-definiti", + "doc_type":"usermanual2", + "kw":"CRDs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CRDs", + "githuburl":"" + }, + { + "uri":"cce_10_0445.html", + "node_id":"cce_10_0445.xml", + "product_code":"cce", + "code":"85", + "des":"Check the following items:Check whether the key data disks on the node meet the upgrade requirements.Check whether the /tmp directory has 500 MB available space.During th", + "doc_type":"usermanual2", + "kw":"Node Disks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Disks", + "githuburl":"" + }, + { + "uri":"cce_10_0446.html", + "node_id":"cce_10_0446.xml", + "product_code":"cce", + "code":"86", + "des":"Check the following items:Check whether the DNS configuration of the current node can resolve the OBS address.Check whether the current node can access the OBS address of", + "doc_type":"usermanual2", + "kw":"Node DNS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node DNS", + "githuburl":"" + }, + { + "uri":"cce_10_0447.html", + "node_id":"cce_10_0447.xml", + "product_code":"cce", + "code":"87", + "des":"Check whether the owner and owner group of the files in the /var/paas directory used by the CCE are both paas.Scenario 1: The error message \"xx file permission has been c", + "doc_type":"usermanual2", + "kw":"Node Key Directory File Permissions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + 
} + ], + "title":"Node Key Directory File Permissions", + "githuburl":"" + }, + { + "uri":"cce_10_0448.html", + "node_id":"cce_10_0448.xml", + "product_code":"cce", + "code":"88", + "des":"Check whether the kubelet on the node is running properly.Scenario 1: The kubelet status is abnormal.If the kubelet malfunctions, the node is unavailable. Restore the nod", + "doc_type":"usermanual2", + "kw":"kubelet,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"kubelet", + "githuburl":"" + }, + { + "uri":"cce_10_0449.html", + "node_id":"cce_10_0449.xml", + "product_code":"cce", + "code":"89", + "des":"Check whether the memory usage of the node exceeds 90%.Upgrade the cluster during off-peak hours.Check whether too many pods are deployed on the node. If yes, reschedule ", + "doc_type":"usermanual2", + "kw":"Node Memory,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Memory", + "githuburl":"" + }, + { + "uri":"cce_10_0450.html", + "node_id":"cce_10_0450.xml", + "product_code":"cce", + "code":"90", + "des":"Check whether the clock synchronization server ntpd or chronyd of the node is running properly.Scenario 1: ntpd is running abnormally.Log in to the node and run the syste", + "doc_type":"usermanual2", + "kw":"Node Clock Synchronization Server,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Clock Synchronization Server", + "githuburl":"" + }, + { + "uri":"cce_10_0451.html", + "node_id":"cce_10_0451.xml", + "product_code":"cce", + "code":"91", + "des":"Check whether the OS kernel version of the node is supported by CCE.Case 1: The node image is not a standard CCE image.CCE nodes run depending on the initial standard ker", + "doc_type":"usermanual2", + "kw":"Node OS,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node OS", + "githuburl":"" + }, + { + "uri":"cce_10_0452.html", + "node_id":"cce_10_0452.xml", + "product_code":"cce", + "code":"92", + "des":"Check whether the number of CPUs on the master node is greater than 2.If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 o", + "doc_type":"usermanual2", + "kw":"Node CPUs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node CPUs", + "githuburl":"" + }, + { + "uri":"cce_10_0453.html", + "node_id":"cce_10_0453.xml", + "product_code":"cce", + "code":"93", + "des":"Check whether the Python commands are available on a node.If the command output is not 0, the check fails.Install Python before the upgrade.", + "doc_type":"usermanual2", + "kw":"Node Python Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Python Commands", + "githuburl":"" + }, + { + "uri":"cce_10_0455.html", + "node_id":"cce_10_0455.xml", + "product_code":"cce", + "code":"94", + "des":"Check whether the nodes in the cluster are ready.Scenario 1: The nodes are in the unavailable status.Log in to 
the CCE console and click the cluster name to access the cl", + "doc_type":"usermanual2", + "kw":"Node Readiness,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Readiness", + "githuburl":"" + }, + { + "uri":"cce_10_0456.html", + "node_id":"cce_10_0456.xml", + "product_code":"cce", + "code":"95", + "des":"Check whether journald of a node is normal.Log in to the node and run the systemctl is-active systemd-journald command to obtain the running status of journald. If the co", + "doc_type":"usermanual2", + "kw":"Node journald,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node journald", + "githuburl":"" + }, + { + "uri":"cce_10_0457.html", + "node_id":"cce_10_0457.xml", + "product_code":"cce", + "code":"96", + "des":"Check whether the containerd.sock file exists on the node. This file affects the startup of container runtime in the Euler OS.Scenario: The Docker used by the node is the", + "doc_type":"usermanual2", + "kw":"containerd.sock,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"containerd.sock", + "githuburl":"" + }, + { + "uri":"cce_10_0458.html", + "node_id":"cce_10_0458.xml", + "product_code":"cce", + "code":"97", + "des":"Before the upgrade, check whether an internal error occurs.If this check fails, contact technical support.", + "doc_type":"usermanual2", + "kw":"Internal Errors,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Internal Errors", + "githuburl":"" + }, + { + "uri":"cce_10_0459.html", + "node_id":"cce_10_0459.xml", + "product_code":"cce", + "code":"98", + "des":"Check whether inaccessible mount points exist on the node.Scenario: There are inaccessible mount points on the node.If NFS (such as obsfs or SFS) is used by the node and ", + "doc_type":"usermanual2", + "kw":"Node Mount Points,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Mount Points", + "githuburl":"" + }, + { + "uri":"cce_10_0460.html", + "node_id":"cce_10_0460.xml", + "product_code":"cce", + "code":"99", + "des":"Check whether the taint needed for cluster upgrade exists on the node.Scenario 1: The node is skipped during the cluster upgrade.If the version of the node is different f", + "doc_type":"usermanual2", + "kw":"Kubernetes Node Taints,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Kubernetes Node Taints", + "githuburl":"" + }, + { + "uri":"cce_10_0478.html", + "node_id":"cce_10_0478.xml", + "product_code":"cce", + "code":"100", + "des":"Check whether there are any compatibility restrictions on the current Everest add-on.There are compatibility restrictions on the current Everest add-on and it cannot be u", + "doc_type":"usermanual2", + "kw":"Everest Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Everest Restrictions", + 
"githuburl":"" + }, + { + "uri":"cce_10_0479.html", + "node_id":"cce_10_0479.xml", + "product_code":"cce", + "code":"101", + "des":"Check whether the current cce-controller-hpa add-on has compatibility restrictions.The current cce-controller-hpa add-on has compatibility restrictions. An add-on that ca", + "doc_type":"usermanual2", + "kw":"cce-hpa-controller Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"cce-hpa-controller Restrictions", + "githuburl":"" + }, + { + "uri":"cce_10_0480.html", + "node_id":"cce_10_0480.xml", + "product_code":"cce", + "code":"102", + "des":"Check whether the current cluster version and the target version support enhanced CPU policy.Scenario: Only the current cluster version supports the enhanced CPU policy f", + "doc_type":"usermanual2", + "kw":"Enhanced CPU Policies,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Enhanced CPU Policies", + "githuburl":"" + }, + { + "uri":"cce_10_0484.html", + "node_id":"cce_10_0484.xml", + "product_code":"cce", + "code":"103", + "des":"Check whether the container runtime and network components on the worker nodes are healthy.If a worker node component malfunctions, log in to the node to check the status", + "doc_type":"usermanual2", + "kw":"Health of Worker Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Health of Worker Node Components", + "githuburl":"" + }, + { + "uri":"cce_10_0485.html", + "node_id":"cce_10_0485.xml", + "product_code":"cce", + "code":"104", + "des":"Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.If a master node component malfunctions, contact technical support", + "doc_type":"usermanual2", + "kw":"Health of Master Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Health of Master Node Components", + "githuburl":"" + }, + { + "uri":"cce_10_0486.html", + "node_id":"cce_10_0486.xml", + "product_code":"cce", + "code":"105", + "des":"Check whether the resources of Kubernetes components, such as etcd and kube-controller-manager, exceed the upper limit.Solution 1: Reduce Kubernetes resources that are ne", + "doc_type":"usermanual2", + "kw":"Memory Resource Limit of Kubernetes Components,Troubleshooting for Pre-upgrade Check Exceptions,User", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Memory Resource Limit of Kubernetes Components", + "githuburl":"" + }, + { + "uri":"cce_10_0487.html", + "node_id":"cce_10_0487.xml", + "product_code":"cce", + "code":"106", + "des":"The system scans the audit logs of the past day to check whether the user calls the deprecated APIs of the target Kubernetes version.Due to the limited time range of audi", + "doc_type":"usermanual2", + "kw":"Discarded Kubernetes APIs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Discarded Kubernetes APIs", + "githuburl":"" + }, + { + "uri":"cce_10_0488.html", + 
"node_id":"cce_10_0488.xml", + "product_code":"cce", + "code":"107", + "des":"If IPv6 is enabled for a CCE Turbo cluster, check whether the target cluster version supports IPv6.CCE Turbo clusters support IPv6 since v1.23. This feature is available ", + "doc_type":"usermanual2", + "kw":"IPv6 Support in CCE Turbo Clusters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"IPv6 Support in CCE Turbo Clusters", + "githuburl":"" + }, + { + "uri":"cce_10_0489.html", + "node_id":"cce_10_0489.xml", + "product_code":"cce", + "code":"108", + "des":"Check whether NetworkManager of a node is normal.Log in to the node and run the systemctl is-active NetworkManager command to obtain the running status of NetworkManager.", + "doc_type":"usermanual2", + "kw":"NetworkManager,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"NetworkManager", + "githuburl":"" + }, + { + "uri":"cce_10_0490.html", + "node_id":"cce_10_0490.xml", + "product_code":"cce", + "code":"109", + "des":"Check the ID file format.", + "doc_type":"usermanual2", + "kw":"Node ID File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node ID File", + "githuburl":"" + }, + { + "uri":"cce_10_0491.html", + "node_id":"cce_10_0491.xml", + "product_code":"cce", + "code":"110", + "des":"When you upgrade a cluster to v1.19 or later, the system checks whether the following configuration files have been modified on the backend:/opt/cloud/cce/kubernetes/kube", + "doc_type":"usermanual2", + "kw":"Node Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Configuration Consistency", + "githuburl":"" + }, + { + "uri":"cce_10_0492.html", + "node_id":"cce_10_0492.xml", + "product_code":"cce", + "code":"111", + "des":"Check whether the configuration files of key components exist on the node.The following table lists the files to be checked.Contact technical support to restore the confi", + "doc_type":"usermanual2", + "kw":"Node Configuration File,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Configuration File", + "githuburl":"" + }, + { + "uri":"cce_10_0493.html", + "node_id":"cce_10_0493.xml", + "product_code":"cce", + "code":"112", + "des":"Check whether the current CoreDNS key configuration Corefile is different from the Helm release record. 
The difference may be overwritten during the add-on upgrade, affec", + "doc_type":"usermanual2", + "kw":"CoreDNS Configuration Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CoreDNS Configuration Consistency", + "githuburl":"" + }, + { + "uri":"cce_10_0494.html", + "node_id":"cce_10_0494.xml", + "product_code":"cce", + "code":"113", + "des":"Check whether the sudo commands and sudo-related files of the node are working.Scenario 1: The sudo command fails to be executed.During the in-place cluster upgrade, the ", + "doc_type":"usermanual2", + "kw":"sudo,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"sudo", + "githuburl":"" + }, + { + "uri":"cce_10_0495.html", + "node_id":"cce_10_0495.xml", + "product_code":"cce", + "code":"114", + "des":"Check whether some key commands that the node upgrade depends on are working.Scenario 1: Executing the package manager command failed.Executing the rpm or dpkg command failed. I", + "doc_type":"usermanual2", + "kw":"Key Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Key Node Commands", + "githuburl":"" + }, + { + "uri":"cce_10_0496.html", + "node_id":"cce_10_0496.xml", + "product_code":"cce", + "code":"115", + "des":"Check whether the docker/containerd.sock file is directly mounted to the pods on a node. During an upgrade, Docker or containerd restarts and the sock file on the host ch", + "doc_type":"usermanual2", + "kw":"Mounting of a Sock File on a Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Mounting of a Sock File on a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0497.html", + "node_id":"cce_10_0497.xml", + "product_code":"cce", + "code":"116", + "des":"Check whether the certificate used by an HTTPS load balancer has been modified on ELB.The certificate referenced by an HTTPS ingress created on CCE is modified on the ELB", + "doc_type":"usermanual2", + "kw":"HTTPS Load Balancer Certificate Consistency,Troubleshooting for Pre-upgrade Check Exceptions,User Gu", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"HTTPS Load Balancer Certificate Consistency", + "githuburl":"" + }, + { + "uri":"cce_10_0498.html", + "node_id":"cce_10_0498.xml", + "product_code":"cce", + "code":"117", + "des":"Check whether the default mount directory and soft link on the node have been manually mounted or modified.Non-shared diskBy default, /var/lib/docker, containerd, or /mnt", + "doc_type":"usermanual2", + "kw":"Node Mounting,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Mounting", + "githuburl":"" + }, + { + "uri":"cce_10_0499.html", + "node_id":"cce_10_0499.xml", + "product_code":"cce", + "code":"118", + "des":"Check whether user paas is allowed to log in to a node.Run the following command to check whether user paas is allowed to log in to a node:If the permissions assigned to ", + "doc_type":"usermanual2", + "kw":"Login Permissions of User paas on a 
Node,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Login Permissions of User paas on a Node", + "githuburl":"" + }, + { + "uri":"cce_10_0500.html", + "node_id":"cce_10_0500.xml", + "product_code":"cce", + "code":"119", + "des":"Check whether the load balancer associated with a Service is allocated with a private IPv4 address.Solution 1: Delete the Service that is associated with a load balancer ", + "doc_type":"usermanual2", + "kw":"Private IPv4 Addresses of Load Balancers,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Private IPv4 Addresses of Load Balancers", + "githuburl":"" + }, + { + "uri":"cce_10_0501.html", + "node_id":"cce_10_0501.xml", + "product_code":"cce", + "code":"120", + "des":"Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.If the source version of the cluster is earlier than v1.1", + "doc_type":"usermanual2", + "kw":"Historical Upgrade Records,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Historical Upgrade Records", + "githuburl":"" + }, + { + "uri":"cce_10_0502.html", + "node_id":"cce_10_0502.xml", + "product_code":"cce", + "code":"121", + "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.If the CIDR block of the cluster management plane is d", + "doc_type":"usermanual2", + "kw":"CIDR Block of the Cluster Management Plane,Troubleshooting for Pre-upgrade Check Exceptions,User Gui", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"CIDR Block of the Cluster Management Plane", + "githuburl":"" + }, + { + "uri":"cce_10_0503.html", + "node_id":"cce_10_0503.xml", + "product_code":"cce", + "code":"122", + "des":"The GPU add-on is involved in the upgrade, which may affect the GPU driver installation during the creation of a GPU node.The GPU add-on driver needs to be configured by ", + "doc_type":"usermanual2", + "kw":"GPU Add-on,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"GPU Add-on", + "githuburl":"" + }, + { + "uri":"cce_10_0504.html", + "node_id":"cce_10_0504.xml", + "product_code":"cce", + "code":"123", + "des":"Check whether the default system parameter settings on your nodes are modified.If the MTU value of the bond0 network on your BMS node is not the default value 1500, this ", + "doc_type":"usermanual2", + "kw":"Nodes' System Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Nodes' System Parameters", + "githuburl":"" + }, + { + "uri":"cce_10_0505.html", + "node_id":"cce_10_0505.xml", + "product_code":"cce", + "code":"124", + "des":"Check whether there are residual package version data in the current cluster.A message is displayed indicating that there are residual 10.12.1.109 CRD resources in your c", + "doc_type":"usermanual2", + "kw":"Residual Package Version Data,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + 
"metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Residual Package Version Data", + "githuburl":"" + }, + { + "uri":"cce_10_0506.html", + "node_id":"cce_10_0506.xml", + "product_code":"cce", + "code":"125", + "des":"Check whether the commands required for the upgrade are available on the node.The cluster upgrade failure is typically caused by the lack of key node commands that are re", + "doc_type":"usermanual2", + "kw":"Node Commands,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Commands", + "githuburl":"" + }, + { + "uri":"cce_10_0507.html", + "node_id":"cce_10_0507.xml", + "product_code":"cce", + "code":"126", + "des":"Check whether swap has been enabled on cluster nodes.By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of ", + "doc_type":"usermanual2", + "kw":"Node Swap,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Swap", + "githuburl":"" + }, + { + "uri":"cce_10_0510.html", + "node_id":"cce_10_0510.xml", + "product_code":"cce", + "code":"127", + "des":"Check whether the service pods running on a containerd node are restarted when containerd is upgraded.Upgrade the cluster when the impact on services is controllable (for", + "doc_type":"usermanual2", + "kw":"containerd Pod Restart Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"containerd Pod Restart Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0511.html", + "node_id":"cce_10_0511.xml", + "product_code":"cce", + "code":"128", + "des":"Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. 
If so, upgrading the cluster may fail.", + "doc_type":"usermanual2", + "kw":"Key GPU Add-on Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Key GPU Add-on Parameters", + "githuburl":"" + }, + { + "uri":"cce_10_0512.html", + "node_id":"cce_10_0512.xml", + "product_code":"cce", + "code":"129", + "des":"Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.Upgrade the cluster when the impact on services is con", + "doc_type":"usermanual2", + "kw":"GPU Pod Rebuild Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"GPU Pod Rebuild Risks", + "githuburl":"" + }, + { + "uri":"cce_10_0513.html", + "node_id":"cce_10_0513.xml", + "product_code":"cce", + "code":"130", + "des":"Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are corre", + "doc_type":"usermanual2", + "kw":"ELB Listener Access Control,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"ELB Listener Access Control", + "githuburl":"" + }, + { + "uri":"cce_10_0514.html", + "node_id":"cce_10_0514.xml", + "product_code":"cce", + "code":"131", + "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.Flavor inconsistency is typically due to a modification made o", + "doc_type":"usermanual2", + "kw":"Master Node Flavor,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Master Node Flavor", + "githuburl":"" + }, + { + "uri":"cce_10_0515.html", + "node_id":"cce_10_0515.xml", + "product_code":"cce", + "code":"132", + "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.If the number of IP addresses in the selected cluster subnet is insuffic", + "doc_type":"usermanual2", + "kw":"Subnet Quota of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Subnet Quota of Master Nodes", + "githuburl":"" + }, + { + "uri":"cce_10_0516.html", + "node_id":"cce_10_0516.xml", + "product_code":"cce", + "code":"133", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node is", + "doc_type":"usermanual2", + "kw":"Node Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Runtime", + "githuburl":"" + }, + { + "uri":"cce_10_0517.html", + "node_id":"cce_10_0517.xml", + "product_code":"cce", + "code":"134", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. 
Do not use Docker in clusters of versions later than 1.27.If the runtime on your node po", + "doc_type":"usermanual2", + "kw":"Node Pool Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Node Pool Runtime", + "githuburl":"" + }, + { + "uri":"cce_10_0518.html", + "node_id":"cce_10_0518.xml", + "product_code":"cce", + "code":"135", + "des":"Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions ", + "doc_type":"usermanual2", + "kw":"Number of Node Images,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Number of Node Images", "githuburl":"" }, { "uri":"cce_10_0183.html", "node_id":"cce_10_0183.xml", "product_code":"cce", - "code":"133", + "code":"136", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Nodes", @@ -2404,7 +2457,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Nodes", @@ -2414,7 +2467,7 @@ "uri":"cce_10_0180.html", "node_id":"cce_10_0180.xml", "product_code":"cce", - "code":"134", + "code":"137", "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", "doc_type":"usermanual2", "kw":"paas,user group,Node Overview,Nodes,User Guide", @@ -2422,7 +2475,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Overview", @@ -2432,51 +2485,51 @@ "uri":"cce_10_0462.html", "node_id":"cce_10_0462.xml", "product_code":"cce", - "code":"135", + "code":"138", "des":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. 
The kubelet interacts with a container runtime throu", "doc_type":"usermanual2", - "kw":"Container Engine,Nodes,User Guide", + "kw":"Container Engines,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Container Engine", + "title":"Container Engines", "githuburl":"" }, { "uri":"cce_10_0476.html", "node_id":"cce_10_0476.xml", "product_code":"cce", - "code":"136", + "code":"139", "des":"This section describes the mappings between released cluster versions and OS versions.", "doc_type":"usermanual2", - "kw":"Node OS,Nodes,User Guide", + "kw":"Node OSs,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Node OS", + "title":"Node OSs", "githuburl":"" }, { "uri":"cce_10_0363.html", "node_id":"cce_10_0363.xml", "product_code":"cce", - "code":"137", - "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The node has at least 2 vCPUs and 4 GiB of memory.To ", + "code":"140", + "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The DNS configuration of a subnet where a node is loc", "doc_type":"usermanual2", "kw":"Creating a Node,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Node", @@ -2486,15 +2539,15 @@ "uri":"cce_10_0198.html", "node_id":"cce_10_0198.xml", "product_code":"cce", - "code":"138", - "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.While an ECS is being accepted into a cluster, the operating s", + "code":"141", + "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.When accepting an ECS, you can reset the ECS OS to a standard ", "doc_type":"usermanual2", "kw":"Accepting Nodes for Management,Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Accepting Nodes for Management", @@ -2504,7 +2557,7 @@ "uri":"cce_10_0185.html", "node_id":"cce_10_0185.xml", "product_code":"cce", - "code":"139", + "code":"142", "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", "doc_type":"usermanual2", "kw":"Logging In to a Node,Nodes,User Guide", @@ -2512,7 +2565,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Logging In to a Node", @@ -2522,7 +2575,7 @@ "uri":"cce_10_0672.html", "node_id":"cce_10_0672.xml", "product_code":"cce", - "code":"140", + "code":"143", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"node labels", @@ -2530,7 +2583,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Management Nodes", @@ -2540,7 +2593,7 @@ "uri":"cce_10_0004.html", "node_id":"cce_10_0004.xml", "product_code":"cce", - "code":"141", + "code":"144", "des":"You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.", "doc_type":"usermanual2", "kw":"node labels,Inherent Label of a Node,Managing Node Labels,Management Nodes,User Guide", @@ -2548,7 +2601,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing Node Labels", @@ -2558,7 +2611,7 @@ "uri":"cce_10_0352.html", "node_id":"cce_10_0352.xml", "product_code":"cce", - "code":"142", + "code":"145", "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.On the CCE console, you can also batch manage nodes' taints.Enter the k", "doc_type":"usermanual2", "kw":"NoSchedule,PreferNoSchedule,NoExecute,System Taints,Managing Node Taints,Management Nodes,User Guide", @@ -2566,7 +2619,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing Node Taints", @@ -2576,7 +2629,7 @@ "uri":"cce_10_0003.html", "node_id":"cce_10_0003.xml", "product_code":"cce", - "code":"143", + "code":"146", "des":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", "doc_type":"usermanual2", "kw":"reset a node,Resetting a Node,Management Nodes,User Guide", @@ -2584,7 +2637,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Resetting a Node", @@ -2594,7 +2647,7 @@ "uri":"cce_10_0338.html", "node_id":"cce_10_0338.xml", "product_code":"cce", - "code":"144", + "code":"147", "des":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. You ", "doc_type":"usermanual2", "kw":"Removing a Node,Management Nodes,User Guide", @@ -2602,7 +2655,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Removing a Node", @@ -2612,7 +2665,7 @@ "uri":"cce_10_0184.html", "node_id":"cce_10_0184.xml", "product_code":"cce", - "code":"145", + "code":"148", "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required. 
Modifyi", "doc_type":"usermanual2", "kw":"synchronize the ECS,Synchronizing the Data of Cloud Servers,Management Nodes,User Guide", @@ -2620,7 +2673,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Synchronizing the Data of Cloud Servers", @@ -2630,15 +2683,15 @@ "uri":"cce_10_0605.html", "node_id":"cce_10_0605.xml", "product_code":"cce", - "code":"146", - "des":"After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Nodal Drainage Rules on the no", + "code":"149", + "des":"After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Rules for Draining Nodes on th", "doc_type":"usermanual2", "kw":"nodal drainage,nodal drainage,Draining a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Draining a Node", @@ -2648,15 +2701,15 @@ "uri":"cce_10_0186.html", "node_id":"cce_10_0186.xml", "product_code":"cce", - "code":"147", - "des":"When a node in a CCE cluster is deleted, services running on the node will also be deleted. Exercise caution when performing this operation.VM nodes that are being used b", + "code":"150", + "des":"You can delete a pay-per-use node that is not needed from the node list.Deleting or unsubscribing from a node in a CCE cluster will release the node and services running ", "doc_type":"usermanual2", "kw":"Deleting a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deleting a Node", @@ -2666,15 +2719,15 @@ "uri":"cce_10_0036.html", "node_id":"cce_10_0036.xml", "product_code":"cce", - "code":"148", - "des":"After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not resu", + "code":"151", + "des":"When a node in the cluster is stopped, all services on that node will also be stopped, and the node will no longer be available for scheduling. Check if your services wil", "doc_type":"usermanual2", "kw":"Stopping a Node,Management Nodes,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Stopping a Node", @@ -2684,7 +2737,7 @@ "uri":"cce_10_0276.html", "node_id":"cce_10_0276.xml", "product_code":"cce", - "code":"149", + "code":"152", "des":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. Figure 1 shows the migration process.The o", "doc_type":"usermanual2", "kw":"Performing Rolling Upgrade for Nodes,Management Nodes,User Guide", @@ -2692,7 +2745,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Performing Rolling Upgrade for Nodes", @@ -2702,7 +2755,7 @@ "uri":"cce_10_0704.html", "node_id":"cce_10_0704.xml", "product_code":"cce", - "code":"150", + "code":"153", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node O&M", @@ -2710,7 +2763,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node O&M", @@ -2720,7 +2773,7 @@ "uri":"cce_10_0178.html", "node_id":"cce_10_0178.xml", "product_code":"cce", - "code":"151", + "code":"154", "des":"Some node resources are used to run mandatory Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total number of node res", "doc_type":"usermanual2", "kw":"total number of node resources,Node Resource Reservation Policy,Node O&M,User Guide", @@ -2728,7 +2781,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Resource Reservation Policy", @@ -2738,15 +2791,15 @@ "uri":"cce_10_0341.html", "node_id":"cce_10_0341.xml", "product_code":"cce", - "code":"152", + "code":"155", "des":"This section describes how to allocate data disk space to nodes so that you can configure the data disk space accordingly.When creating a node, configure data disks for t", "doc_type":"usermanual2", - "kw":"data disk space allocation,Container engine and container image space,basesize,basesize,Container St", + "kw":"data disk space allocation,Container engine and container image space,container engine and container", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Data Disk Space Allocation", @@ -2756,25 +2809,43 @@ "uri":"cce_10_0348.html", "node_id":"cce_10_0348.xml", "product_code":"cce", - "code":"153", - "des":"The maximum number of pods that can be created on a node is calculated based on the cluster type:For a cluster using the container tunnel network model, the value depends", + "code":"156", + "des":"The maximum number of pods that can be created on a node is calculated based on the cluster type:When creating a cluster using a VPC network, you need to configure the nu", "doc_type":"usermanual2", - "kw":"Maximum Number of Pods on a Node,alpha.cce/fixPoolMask,maximum number of pods,Maximum Number of Pods", + "kw":"Maximum Number of Pods on a Node,maximum number of pods,Maximum Number of Pods That Can Be Created o", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Maximum Number of Pods That Can Be Created on a Node", "githuburl":"" }, + { + "uri":"cce_10_0883.html", + "node_id":"cce_10_0883.xml", + "product_code":"cce", + "code":"157", + "des":"To maintain the stability of nodes, CCE stores Kubernetes and container runtime components on separate data disks. Kubernetes uses the /mnt/paas/kubernetes directory, and", + "doc_type":"usermanual2", + "kw":"Differences Between CCE Node mountPath Configurations and Community Native Configurations,Node O&M,U", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Differences Between CCE Node mountPath Configurations and Community Native Configurations", + "githuburl":"" + }, { "uri":"cce_10_0601.html", "node_id":"cce_10_0601.xml", "product_code":"cce", - "code":"154", + "code":"158", "des":"Kubernetes has removed dockershim from v1.24 and does not support Docker by default. CCE is going to stop the support for Docker. 
Change the node container engine from Do", "doc_type":"usermanual2", "kw":"Migrating Nodes from Docker to containerd,Node O&M,User Guide", @@ -2782,7 +2853,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Migrating Nodes from Docker to containerd", @@ -2792,25 +2863,25 @@ "uri":"cce_10_0659.html", "node_id":"cce_10_0659.xml", "product_code":"cce", - "code":"155", + "code":"159", "des":"The node fault detection function depends on the NPD add-on. The add-on instances run on nodes and monitor nodes. This section describes how to enable node fault detectio", "doc_type":"usermanual2", - "kw":"Node Fault Detection,Check Items,Node Fault Detection Policy,Node O&M,User Guide", + "kw":"Node Fault Detection,Check Items,Configuring Node Fault Detection Policies,Node O&M,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Node Fault Detection Policy", + "title":"Configuring Node Fault Detection Policies", "githuburl":"" }, { "uri":"cce_10_0035.html", "node_id":"cce_10_0035.xml", "product_code":"cce", - "code":"156", + "code":"160", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node Pools", @@ -2818,7 +2889,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Pools", @@ -2828,7 +2899,7 @@ "uri":"cce_10_0081.html", "node_id":"cce_10_0081.xml", "product_code":"cce", - "code":"157", + "code":"161", "des":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a clus", "doc_type":"usermanual2", "kw":"DefaultPool,DefaultPool,Deploying a Workload in a Specified Node Pool,Node Pool Overview,Node Pools,", @@ -2836,7 +2907,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Node Pool Overview", @@ -2846,25 +2917,43 @@ "uri":"cce_10_0012.html", "node_id":"cce_10_0012.xml", "product_code":"cce", - "code":"158", - "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.The Autoscaler a", + "code":"162", + "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.Basic SettingsCo", "doc_type":"usermanual2", "kw":"Creating a Node Pool,Node Pools,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Node Pool", "githuburl":"" }, + { + "uri":"cce_10_0658.html", + "node_id":"cce_10_0658.xml", + "product_code":"cce", + "code":"163", + "des":"You can specify a specification in a node pool for scaling.The default node pool does not support scaling. 
Use Creating a Node to add a node.Number of Scaling Targets: Th", + "doc_type":"usermanual2", + "kw":"Scaling a Node Pool,Node Pools,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Scaling a Node Pool", + "githuburl":"" + }, { "uri":"cce_10_0222.html", "node_id":"cce_10_0222.xml", "product_code":"cce", - "code":"159", + "code":"164", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Node Pool", @@ -2872,7 +2961,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing a Node Pool", @@ -2882,15 +2971,15 @@ "uri":"cce_10_0653.html", "node_id":"cce_10_0653.xml", "product_code":"cce", - "code":"160", - "des":"The modification of resource tags of a node pool takes effect only on new nodes. To synchronize the modification onto existing nodes, manually reset the existing nodes.Ch", + "code":"165", + "des":"Changes to the container engine, OS, or pre-/post-installation script in a node pool take effect only on new nodes. To synchronize the modification onto existing nodes, m", "doc_type":"usermanual2", "kw":"Updating a Node Pool,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Updating a Node Pool", @@ -2900,7 +2989,7 @@ "uri":"cce_10_0727.html", "node_id":"cce_10_0727.xml", "product_code":"cce", - "code":"161", + "code":"166", "des":"Auto Scaling (AS) enables elastic scaling of nodes in a node pool based on scaling policies. 
Without this function, you have to manually adjust the number of nodes in a n", "doc_type":"usermanual2", "kw":"Updating an AS Configuration,Managing a Node Pool,User Guide", @@ -2908,7 +2997,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Updating an AS Configuration", @@ -2918,25 +3007,43 @@ "uri":"cce_10_0652.html", "node_id":"cce_10_0652.xml", "product_code":"cce", - "code":"162", - "des":"The default node pool DefaultPool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components", + "code":"167", + "des":"The default node pool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components in a cluste", "doc_type":"usermanual2", - "kw":"Configuring a Node Pool,Managing a Node Pool,User Guide", + "kw":"Modifying Node Pool Configurations,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Configuring a Node Pool", + "title":"Modifying Node Pool Configurations", + "githuburl":"" + }, + { + "uri":"cce_10_0886.html", + "node_id":"cce_10_0886.xml", + "product_code":"cce", + "code":"168", + "des":"If you want to add a newly created ECS to a node pool in a cluster, or remove a node from a node pool and add it to the node pool again, accept the node.When an ECS is ac", + "doc_type":"usermanual2", + "kw":"Accepting Nodes in a Node Pool,Managing a Node Pool,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Accepting Nodes in a Node Pool", "githuburl":"" }, { "uri":"cce_10_0655.html", "node_id":"cce_10_0655.xml", "product_code":"cce", - "code":"163", + "code":"169", "des":"You can copy the configuration of an existing node pool on the CCE console to create new node pools.", "doc_type":"usermanual2", "kw":"Copying a Node Pool,Managing a Node Pool,User Guide", @@ -2944,7 +3051,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Copying a Node Pool", @@ -2954,7 +3061,7 @@ "uri":"cce_10_0654.html", "node_id":"cce_10_0654.xml", "product_code":"cce", - "code":"164", + "code":"170", "des":"After the configuration of a node pool is updated, some configurations cannot be automatically synchronized for existing nodes. You can manually synchronize configuration", "doc_type":"usermanual2", "kw":"Synchronizing Node Pools,Managing a Node Pool,User Guide", @@ -2962,7 +3069,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Synchronizing Node Pools", @@ -2972,15 +3079,15 @@ "uri":"cce_10_0660.html", "node_id":"cce_10_0660.xml", "product_code":"cce", - "code":"165", - "des":"When CCE releases a new OS image, existing nodes cannot be automatically upgraded. 
You can manually upgrade them in batches.This section describes how to upgrade an OS by", + "code":"171", + "des":"After CCE releases a new OS image, if existing nodes cannot be automatically upgraded, you can manually upgrade them in batches.This section describes how to upgrade an O", "doc_type":"usermanual2", "kw":"Upgrading an OS,Managing a Node Pool,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Upgrading an OS", @@ -2990,7 +3097,7 @@ "uri":"cce_10_0656.html", "node_id":"cce_10_0656.xml", "product_code":"cce", - "code":"166", + "code":"172", "des":"Nodes in a node pool can be migrated to the default node pool. Nodes in the default node pool or a custom node pool cannot be migrated to other custom node pools.The migr", "doc_type":"usermanual2", "kw":"Migrating a Node,Managing a Node Pool,User Guide", @@ -2998,7 +3105,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Migrating a Node", @@ -3008,7 +3115,7 @@ "uri":"cce_10_0657.html", "node_id":"cce_10_0657.xml", "product_code":"cce", - "code":"167", + "code":"173", "des":"Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.Deleting a node pool will de", "doc_type":"usermanual2", "kw":"Deleting a Node Pool,Managing a Node Pool,User Guide", @@ -3016,7 +3123,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Deleting a Node Pool", @@ -3026,7 +3133,7 @@ "uri":"cce_10_0046.html", "node_id":"cce_10_0046.xml", "product_code":"cce", - "code":"168", + "code":"174", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs", @@ -3034,7 +3141,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Workloads", @@ -3044,7 +3151,7 @@ "uri":"cce_10_0006.html", "node_id":"cce_10_0006.xml", "product_code":"cce", - "code":"169", + "code":"175", "des":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs,Overview,Workloads,User Guide", @@ -3052,7 +3159,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Overview", @@ -3062,7 +3169,7 @@ "uri":"cce_10_0673.html", "node_id":"cce_10_0673.xml", "product_code":"cce", - "code":"170", + "code":"176", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Creating a Workload", @@ -3070,7 +3177,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Workload", @@ -3080,7 +3187,7 @@ "uri":"cce_10_0047.html", "node_id":"cce_10_0047.xml", "product_code":"cce", - "code":"171", + "code":"177", "des":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a Deployment,Creating a Workload,User Guide", @@ -3088,7 +3195,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Deployment", @@ -3098,7 +3205,7 @@ "uri":"cce_10_0048.html", "node_id":"cce_10_0048.xml", "product_code":"cce", - "code":"172", + "code":"178", "des":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "doc_type":"usermanual2", "kw":"Using kubectl,Creating a StatefulSet,Creating a Workload,User Guide", @@ -3106,7 +3213,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a StatefulSet", @@ -3116,7 +3223,7 @@ "uri":"cce_10_0216.html", "node_id":"cce_10_0216.xml", "product_code":"cce", - "code":"173", + "code":"179", "des":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a DaemonSet,Creating a Workload,User Guide", @@ -3124,7 +3231,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a DaemonSet", @@ -3134,7 +3241,7 @@ "uri":"cce_10_0150.html", "node_id":"cce_10_0150.xml", "product_code":"cce", - "code":"174", + "code":"180", "des":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. It is completed after it exits normally (exit 0).A j", "doc_type":"usermanual2", "kw":"Creating a Job,Creating a Workload,User Guide", @@ -3142,7 +3249,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Job", @@ -3152,7 +3259,7 @@ "uri":"cce_10_0151.html", "node_id":"cce_10_0151.xml", "product_code":"cce", - "code":"175", + "code":"181", "des":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "doc_type":"usermanual2", "kw":"time synchronization,Creating a Cron Job,Creating a Workload,User Guide", @@ -3160,7 +3267,7 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Creating a Cron Job", @@ -3170,33 +3277,51 @@ "uri":"cce_10_0130.html", "node_id":"cce_10_0130.xml", "product_code":"cce", - "code":"176", + "code":"182", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", - "kw":"Configuring a Container", + "kw":"Configuring a Workload", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Configuring a Container", + "title":"Configuring a Workload", + "githuburl":"" + }, + { + "uri":"cce_10_0463.html", + "node_id":"cce_10_0463.xml", + "product_code":"cce", + "code":"183", + "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", + "doc_type":"usermanual2", + "kw":"Secure Runtime and Common Runtime,Configuring a Workload,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"Secure Runtime and Common Runtime", "githuburl":"" }, { "uri":"cce_10_0354.html", "node_id":"cce_10_0354.xml", "product_code":"cce", - "code":"177", + "code":"184", "des":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "doc_type":"usermanual2", - "kw":"Configuring Time Zone Synchronization,Configuring a Container,User Guide", + "kw":"Configuring Time Zone Synchronization,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Time Zone Synchronization", @@ -3206,15 +3331,15 @@ "uri":"cce_10_0353.html", "node_id":"cce_10_0353.xml", "product_code":"cce", - "code":"178", + "code":"185", "des":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "doc_type":"usermanual2", - "kw":"Configuring an Image Pull Policy,Configuring a Container,User Guide", + "kw":"Configuring an Image Pull Policy,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring an Image Pull Policy", @@ -3224,15 +3349,15 @@ "uri":"cce_10_0009.html", "node_id":"cce_10_0009.xml", "product_code":"cce", - "code":"179", + "code":"186", "des":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "doc_type":"usermanual2", - "kw":"Using Third-Party Images,Configuring a Container,User Guide", + "kw":"Using Third-Party Images,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Using Third-Party Images", @@ -3242,15 +3367,15 @@ "uri":"cce_10_0163.html", "node_id":"cce_10_0163.xml", "product_code":"cce", - "code":"180", + "code":"187", "des":"CCE allows you to set resource requirements and limits, such as CPU and RAM, for added containers during workload creation. 
Kubernetes also allows using YAML to set requi", "doc_type":"usermanual2", - "kw":"ephemeral storage,Configuring Container Specifications,Configuring a Container,User Guide", + "kw":"ephemeral storage,Configuring Container Specifications,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Container Specifications", @@ -3260,15 +3385,15 @@ "uri":"cce_10_0105.html", "node_id":"cce_10_0105.xml", "product_code":"cce", - "code":"181", + "code":"188", "des":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", "doc_type":"usermanual2", - "kw":"Startup Command,Post-Start,Pre-Stop,Configuring Container Lifecycle Parameters,Configuring a Contain", + "kw":"Startup Command,Post-Start,Pre-Stop,Configuring Container Lifecycle Parameters,Configuring a Workloa", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Container Lifecycle Parameters", @@ -3278,15 +3403,15 @@ "uri":"cce_10_0112.html", "node_id":"cce_10_0112.xml", "product_code":"cce", - "code":"182", + "code":"189", "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "doc_type":"usermanual2", - "kw":"Health check,HTTP request,TCP port,CLI,Configuring Container Health Check,Configuring a Container,Us", + "kw":"Health check,HTTP request,TCP port,CLI,Configuring Container Health Check,Configuring a Workload,Use", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Container Health Check", @@ -3296,15 +3421,15 @@ "uri":"cce_10_0113.html", "node_id":"cce_10_0113.xml", "product_code":"cce", - "code":"183", + "code":"190", "des":"An environment variable is a variable whose value can affect the way a running container will behave. You can modify environment variables even after workloads are deploy", "doc_type":"usermanual2", - "kw":"Configuring Environment Variables,Configuring a Container,User Guide", + "kw":"Configuring Environment Variables,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Configuring Environment Variables", @@ -3314,33 +3439,33 @@ "uri":"cce_10_0397.html", "node_id":"cce_10_0397.xml", "product_code":"cce", - "code":"184", + "code":"191", "des":"In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "doc_type":"usermanual2", - "kw":"Workload Upgrade Policies,Configuring a Container,User Guide", + "kw":"Configuring Workload Upgrade Policies,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Workload Upgrade Policies", + "title":"Configuring Workload Upgrade Policies", "githuburl":"" }, { "uri":"cce_10_0232.html", "node_id":"cce_10_0232.xml", "product_code":"cce", - "code":"185", + "code":"192", "des":"Kubernetes supports node affinity and pod affinity/anti-affinity. You can configure custom rules to achieve affinity and anti-affinity scheduling. 
For example, you can de", "doc_type":"usermanual2", - "kw":"Scheduling Policies (Affinity/Anti-affinity),Configuring a Container,User Guide", + "kw":"Scheduling Policies (Affinity/Anti-affinity),Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Scheduling Policies (Affinity/Anti-affinity)", @@ -3350,79 +3475,79 @@ "uri":"cce_10_0728.html", "node_id":"cce_10_0728.xml", "product_code":"cce", - "code":"186", + "code":"193", "des":"Tolerations allow the scheduler to schedule pods to nodes with target taints. Tolerances work with node taints. Each node allows one or more taints. If no tolerance is co", "doc_type":"usermanual2", - "kw":"Taints and Tolerations,Configuring a Container,User Guide", + "kw":"Configuring Tolerance Policies,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Taints and Tolerations", + "title":"Configuring Tolerance Policies", "githuburl":"" }, { "uri":"cce_10_0386.html", "node_id":"cce_10_0386.xml", "product_code":"cce", - "code":"187", + "code":"194", "des":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", "doc_type":"usermanual2", - "kw":"Labels and Annotations,Configuring a Container,User Guide", + "kw":"Configuring Labels and Annotations,Configuring a Workload,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Labels and Annotations", + "title":"Configuring Labels and Annotations", "githuburl":"" }, { "uri":"cce_10_00356.html", "node_id":"cce_10_00356.xml", "product_code":"cce", - "code":"188", + "code":"195", "des":"If you encounter unexpected problems when using a container, you can log in to the container to debug it.The example output is as follows:NAME ", "doc_type":"usermanual2", - "kw":"Accessing a Container,Workloads,User Guide", + "kw":"Logging In to a Container,Workloads,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Accessing a Container", + "title":"Logging In to a Container", "githuburl":"" }, { "uri":"cce_10_0007.html", "node_id":"cce_10_0007.xml", "product_code":"cce", - "code":"189", + "code":"196", "des":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "doc_type":"usermanual2", - "kw":"Managing Workloads and Jobs,Workloads,User Guide", + "kw":"Managing Workloads,Workloads,User Guide", "search_title":"", "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], - "title":"Managing Workloads and Jobs", + "title":"Managing Workloads", "githuburl":"" }, { "uri":"cce_10_0833.html", "node_id":"cce_10_0833.xml", "product_code":"cce", - "code":"190", + "code":"197", "des":"Custom Resource Definition (CRD) is an extension of Kubernetes APIs. 
When default Kubernetes resources cannot meet service requirements, you can use CRDs to define new re", "doc_type":"usermanual2", "kw":"Managing Custom Resources,Workloads,User Guide", @@ -3430,3023 +3555,17 @@ "metedata":[ { "prodname":"cce", - "documenttype":"usermanual2" + "documenttype":"usermanual" } ], "title":"Managing Custom Resources", "githuburl":"" }, - { - "uri":"cce_10_0463.html", - "node_id":"cce_10_0463.xml", - "product_code":"cce", - "code":"191", - "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", - "doc_type":"usermanual2", - "kw":"Kata Runtime and Common Runtime,Workloads,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Kata Runtime and Common Runtime", - "githuburl":"" - }, - { - "uri":"cce_10_0674.html", - "node_id":"cce_10_0674.xml", - "product_code":"cce", - "code":"192", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0702.html", - "node_id":"cce_10_0702.xml", - "product_code":"cce", - "code":"193", - "des":"CCE supports different types of resource scheduling and task scheduling, improving application performance and overall cluster resource utilization. This section describe", - "doc_type":"usermanual2", - "kw":"Overview,Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0551.html", - "node_id":"cce_10_0551.xml", - "product_code":"cce", - "code":"194", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"CPU Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CPU Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0351.html", - "node_id":"cce_10_0351.xml", - "product_code":"cce", - "code":"195", - "des":"By default, kubelet uses CFS quotas to enforce pod CPU limits. 
When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether t", - "doc_type":"usermanual2", - "kw":"CPU Policy,CPU Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"CPU Policy", - "githuburl":"" - }, - { - "uri":"cce_10_0552.html", - "node_id":"cce_10_0552.xml", - "product_code":"cce", - "code":"196", - "des":"Kubernetes provides two CPU policies: none and static.none: The CPU policy is disabled by default, indicating the existing scheduling behavior.static: The static CPU core", - "doc_type":"usermanual2", - "kw":"Enhanced CPU Policy,CPU Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Enhanced CPU Policy", - "githuburl":"" - }, - { - "uri":"cce_10_0720.html", - "node_id":"cce_10_0720.xml", - "product_code":"cce", - "code":"197", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"GPU Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"GPU Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0345.html", - "node_id":"cce_10_0345.xml", - "product_code":"cce", - "code":"198", - "des":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The gpu-device-plugin (previously gpu-beta add-on) has been installed. Du", - "doc_type":"usermanual2", - "kw":"Default GPU Scheduling in Kubernetes,GPU Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Default GPU Scheduling in Kubernetes", - "githuburl":"" - }, - { - "uri":"cce_10_0423.html", - "node_id":"cce_10_0423.xml", - "product_code":"cce", - "code":"199", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Volcano Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Volcano Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0721.html", - "node_id":"cce_10_0721.xml", - "product_code":"cce", - "code":"200", - "des":"Volcano is a Kubernetes-based batch processing platform that supports machine learning, deep learning, bioinformatics, genomics, and other big data applications. 
It provi", - "doc_type":"usermanual2", - "kw":"Overview,Volcano Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0722.html", - "node_id":"cce_10_0722.xml", - "product_code":"cce", - "code":"201", - "des":"Volcano is a Kubernetes-based batch processing platform with high-performance general computing capabilities like task scheduling engine, heterogeneous chip management, a", - "doc_type":"usermanual2", - "kw":"Scheduling Workloads,Volcano Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Scheduling Workloads", - "githuburl":"" - }, - { - "uri":"cce_10_0768.html", - "node_id":"cce_10_0768.xml", - "product_code":"cce", - "code":"202", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Resource Usage-based Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Resource Usage-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0773.html", - "node_id":"cce_10_0773.xml", - "product_code":"cce", - "code":"203", - "des":"Bin packing is an optimization algorithm that aims to properly allocate resources to each job and get the jobs done using the minimum amount of resources. After bin packi", - "doc_type":"usermanual2", - "kw":"Bin Packing,Resource Usage-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Bin Packing", - "githuburl":"" - }, - { - "uri":"cce_10_0766.html", - "node_id":"cce_10_0766.xml", - "product_code":"cce", - "code":"204", - "des":"Scheduling in a cluster is the process of binding pending pods to nodes, and is performed by a component called kube-scheduler or Volcano Scheduler. The scheduler uses a ", - "doc_type":"usermanual2", - "kw":"Descheduling,Resource Usage-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Descheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0767.html", - "node_id":"cce_10_0767.xml", - "product_code":"cce", - "code":"205", - "des":"In scenarios such as node pool replacement and rolling node upgrade, an old resource pool needs to be replaced with a new one. To prevent the node pool replacement from a", - "doc_type":"usermanual2", - "kw":"Node Pool Affinity,Resource Usage-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Node Pool Affinity", - "githuburl":"" - }, - { - "uri":"cce_10_0774.html", - "node_id":"cce_10_0774.xml", - "product_code":"cce", - "code":"206", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Priority-based Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Priority-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0775.html", - "node_id":"cce_10_0775.xml", - "product_code":"cce", - "code":"207", - "des":"A pod priority indicates the importance of a pod relative to other pods. Volcano supports pod PriorityClasses in Kubernetes. After PriorityClasses are configured, the sch", - "doc_type":"usermanual2", - "kw":"Priority-based Scheduling,Priority-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Priority-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0776.html", - "node_id":"cce_10_0776.xml", - "product_code":"cce", - "code":"208", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"AI Performance-based Scheduling", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"AI Performance-based Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0777.html", - "node_id":"cce_10_0777.xml", - "product_code":"cce", - "code":"209", - "des":"Dominant Resource Fairness (DRF) is a scheduling algorithm based on the dominant resource of a container group. DRF scheduling can be used to enhance the service throughp", - "doc_type":"usermanual2", - "kw":"DRF,AI Performance-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"DRF", - "githuburl":"" - }, - { - "uri":"cce_10_0778.html", - "node_id":"cce_10_0778.xml", - "product_code":"cce", - "code":"210", - "des":"Gang scheduling is a scheduling algorithm that schedules correlated processes or threads to run simultaneously on different processors. It meets the scheduling requiremen", - "doc_type":"usermanual2", - "kw":"Gang,AI Performance-based Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Gang", - "githuburl":"" - }, - { - "uri":"cce_10_0425.html", - "node_id":"cce_10_0425.xml", - "product_code":"cce", - "code":"211", - "des":"When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduli", - "doc_type":"usermanual2", - "kw":"NUMA Affinity Scheduling,Volcano Scheduling,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"NUMA Affinity Scheduling", - "githuburl":"" - }, - { - "uri":"cce_10_0709.html", - "node_id":"cce_10_0709.xml", - "product_code":"cce", - "code":"212", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Cloud Native Hybrid Deployment", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Cloud Native Hybrid Deployment", - "githuburl":"" - }, - { - "uri":"cce_10_0384.html", - "node_id":"cce_10_0384.xml", - "product_code":"cce", - "code":"213", - "des":"Many services see surges in traffic. To ensure performance and stability, resources are often requested at the maximum needed. However, the surges may ebb very shortly an", - "doc_type":"usermanual2", - "kw":"Dynamic Resource Oversubscription,Cloud Native Hybrid Deployment,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Dynamic Resource Oversubscription", - "githuburl":"" - }, - { - "uri":"cce_10_0020.html", - "node_id":"cce_10_0020.xml", - "product_code":"cce", - "code":"214", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Network", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Network", - "githuburl":"" - }, - { - "uri":"cce_10_0010.html", - "node_id":"cce_10_0010.xml", - "product_code":"cce", - "code":"215", - "des":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", - "doc_type":"usermanual2", - "kw":"Overview,Network,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0280.html", - "node_id":"cce_10_0280.xml", - "product_code":"cce", - "code":"216", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Container Network Models", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Container Network Models", - "githuburl":"" - }, - { - "uri":"cce_10_0281.html", - "node_id":"cce_10_0281.xml", - "product_code":"cce", - "code":"217", - "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Tun", - "doc_type":"usermanual2", - "kw":"Overview,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0282.html", - "node_id":"cce_10_0282.xml", - "product_code":"cce", - "code":"218", - "des":"The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. 
This network model uses VXLAN to encapsulate Ethernet pac", - "doc_type":"usermanual2", - "kw":"Container Tunnel Network,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Container Tunnel Network", - "githuburl":"" - }, - { - "uri":"cce_10_0283.html", - "node_id":"cce_10_0283.xml", - "product_code":"cce", - "code":"219", - "des":"The VPC network uses VPC routing to integrate with the underlying network. This network model is suitable for performance-intensive scenarios. The maximum number of nodes", - "doc_type":"usermanual2", - "kw":"VPC Network,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"VPC Network", - "githuburl":"" - }, - { - "uri":"cce_10_0284.html", - "node_id":"cce_10_0284.xml", - "product_code":"cce", - "code":"220", - "des":"Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and sub-ENIs of Virtual Private Cloud (VPC). Container IP addresses are all", - "doc_type":"usermanual2", - "kw":"Cloud Native 2.0 Network,Container Network Models,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Cloud Native 2.0 Network", - "githuburl":"" - }, - { - "uri":"cce_10_0247.html", - "node_id":"cce_10_0247.xml", - "product_code":"cce", - "code":"221", - "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", - "doc_type":"usermanual2", - "kw":"Service", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Service", - "githuburl":"" - }, - { - "uri":"cce_10_0249.html", - "node_id":"cce_10_0249.xml", - "product_code":"cce", - "code":"222", - "des":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", - "doc_type":"usermanual2", - "kw":"Overview,Service,User Guide", - "search_title":"", - "metedata":[ - { - "prodname":"cce", - "documenttype":"usermanual2" - } - ], - "title":"Overview", - "githuburl":"" - }, - { - "uri":"cce_10_0011.html", - "node_id":"cce_10_0011.xml", - "product_code":"cce", - "code":"223", - "des":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is -

[Revision history rows added for 2024-08-30 (Update), 2024-08-15 (Add), 2024-08-07 (Add, Update), and 2024-06-26; the entry text for these rows was lost in extraction.]

2024-05-30

  • Deleted section "OS Patch Notes for Cluster Nodes".
  • Added Node OS.
  • Describes how to obtain the value of the available_zone, l4_flavor_name, and l7_flavor_name.
diff --git a/docs/cce/umn/cce_10_0003.html b/docs/cce/umn/cce_10_0003.html index a39d3d97..cf00e839 100644 --- a/docs/cce/umn/cce_10_0003.html +++ b/docs/cce/umn/cce_10_0003.html @@ -4,40 +4,38 @@

Scenario

You can reset a node to modify the node configuration, such as the node OS and login mode.

Resetting a node will reinstall the node OS and the Kubernetes software on the node. If a node becomes unavailable because you modified its configuration, you can reset the node to rectify the fault.

Notes and Constraints

  • Node resetting is available only for CCE standard and CCE Turbo clusters of v1.13 or later.
Precautions

  • Only worker nodes can be reset. If the node is still unavailable after the resetting, delete the node and create a new one.
  • After a node is reset, the node OS will be reinstalled. Before resetting a node, drain it to gracefully evict the pods running on it to other available nodes (a kubectl sketch follows this list). Perform this operation during off-peak hours.
  • After a node is reset, its system disk and data disks will be cleared. Back up important data before resetting a node.
  • If an extra data disk was attached to a worker node on the ECS console, the attachment will be cleared when the node is reset. In this case, attach the disk again; its data will be retained.
  • The IP addresses of the workload pods on the node will change, but container network access is not affected.
  • Ensure that there is sufficient remaining EVS disk quota.
  • While the node is being reset, the backend will set it to the unschedulable state.
  • Resetting a node will clear the Kubernetes labels and taints you added (those added by editing a node pool will not be lost). As a result, node-specific resources (such as local storage and workloads scheduled to this node) may be unavailable.
  • Resetting a node will cause PVC/PV data loss for the local PVs associated with the node. These PVCs and PVs cannot be restored or used again. In this scenario, the pod that uses the local PV is evicted from the reset node. A new pod is created but stays in the pending state, because the PVC used by the pod carries a node label that prevents it from being scheduled. After the node is reset, the pod may be scheduled to the reset node; it then remains in the creating state because the underlying logical volume corresponding to the PVC no longer exists.
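The console evicts pods for you during a reset, but you can also drain the node manually beforehand, as mentioned in the precautions above. A minimal kubectl sketch follows; the node name 192.168.0.10 is a placeholder, and the flags assume DaemonSet pods and emptyDir volumes may be present on the node.

  # Mark the node unschedulable so no new pods land on it.
  kubectl cordon 192.168.0.10

  # Gracefully evict the pods to other available nodes.
  # --ignore-daemonsets: DaemonSet pods would be recreated on the node anyway.
  # --delete-emptydir-data: allow evicting pods that use emptyDir volumes (their data is discarded).
  kubectl drain 192.168.0.10 --ignore-daemonsets --delete-emptydir-data --timeout=300s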
Resetting Nodes in the Default Pool

  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
  3. In the node list of the default pool, select one or more nodes to be reset and choose More > Reset Node in the Operation column.
  4. In the displayed dialog box, click Next.
  5. Specify node parameters.

    Compute Settings

    Table 1 Configuration parameters

    Specifications

    Specifications cannot be modified when you reset a node.

    Container Engine

    The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

    OS

    Select an OS type. Different types of nodes support different OSs.
    • Public image: Select a public image for the node.
    • Private image: Select a private image for the node.
    NOTE:
    Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

    Login Mode

    • Key Pair

      Select the key pair used to log in to the node. You can select a shared key.

      A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

    Storage Settings

    Configure storage resources on a node for the containers running on it.

    Table 2 Storage configuration parameters

    System Disk

    Directly use the system disk of the cloud server.

    Data Disk

    At least one data disk is required for the container runtime and kubelet. This data disk cannot be deleted or detached. Otherwise, the node will be unavailable.

    Click Expand to configure Data Disk Space Allocation, which is used to allocate space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.

    For other data disks, a raw disk is created without any processing by default. You can also click Expand and select Mount Disk to mount the data disk to a specified directory.

    Advanced Settings

    Table 3 Advanced configuration parameters

    Resource Tag

    You can add resource tags to classify resources. A maximum of eight resource tags can be added.

    You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

    CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

    Kubernetes Label

    Click Add Label to set the key-value pairs attached to Kubernetes objects (such as pods). A maximum of 20 labels can be added.

    Labels can be used to distinguish nodes. With workload affinity settings, pods can be scheduled to a specified node. For more information, see Labels and Selectors.

    Taint

    This field is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
    • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
    • Value: A value must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
    • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
    NOTICE:
    • If taints are used, you must configure tolerations in the YAML files of pods (a minimal toleration sketch is provided after this table). Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
    • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.

    Max. Pods

    Maximum number of pods that can run on the node, including the default system pods. This limit prevents the node from being overloaded with pods.

    Pre-installation Command

    Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

    The script will be executed before the Kubernetes software is installed. Note that if the script is incorrect, the Kubernetes software may fail to be installed.

    Post-installation Command

    Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

    The script will be executed after the Kubernetes software is installed, which does not affect the installation.
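    The NOTICE above says that pods need matching tolerations before they can run on tainted nodes. The following Deployment fragment is a minimal sketch of such a toleration, assuming a taint key1=value1 with effect NoSchedule was added to the node; the names app1, key1, value1, and the nginx image are illustrative placeholders, not values defined in this guide.

      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: app1
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: app1
        template:
          metadata:
            labels:
              app: app1
          spec:
            tolerations:
            - key: "key1"           # must match the taint key on the node
              operator: "Equal"
              value: "value1"       # must match the taint value
              effect: "NoSchedule"  # must match the taint effect
            containers:
            - name: container-1
              image: nginx:latest

    With operator set to Exists instead of Equal, the value field can be omitted and the toleration matches any value of key1.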

  6. Click Next: Confirm.
  7. Click Submit.

Resetting Nodes in a Node Pool

Parameter configurations are not supported when resetting a node created in a node pool. The image configured for the node pool is used to reset the node.

  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
  3. In the node list of the target node pool, select a node to be reset and choose More > Reset Node in the Operation column.
  4. In the displayed dialog box, click Yes.

Resetting Nodes in a Batch

Resetting nodes in a batch varies depending on the application scenario, as described in the following table.
      Table 3 Advanced configuration parameters

      Parameter

      Description

      +

      Description

      Resource Tag

      +

      Resource Tag

      You can add resource tags to classify resources.

      -

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      -

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=node id" tag.

      +

      You can add resource tags to classify resources. A maximum of eight resource tags can be added.

      +

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      +

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

      Kubernetes Label

      +

      Kubernetes Label

      Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

      -

      Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

      +

      Click Add Label to set the key-value pair attached to the Kubernetes objects (such as pods). A maximum of 20 labels can be added.

      +

      Labels can be used to distinguish nodes. With workload affinity settings, pods can be scheduled to a specified node. For more information, see Labels and Selectors.

      Taint

      +

      Taint

      This parameter is left blank by default. You can add taints to configure node anti-affinity. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      +
      This field is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      NOTICE:
      • If taints are used, you must configure tolerations in the YAML files of pods. Otherwise, scale-up may fail or pods cannot be scheduled onto the added nodes.
      • After a node pool is created, you can click Edit to modify its configuration. The modification will be synchronized to all nodes in the node pool.

      Max. Pods

      +

      Max. Pods

      Maximum number of pods that can run on the node, including the default system pods.

      +

      Maximum number of pods that can run on the node, including the default system pods.

      This limit prevents the node from being overloaded with pods.

Pre-installation Command

Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of both the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.
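As an illustration only (not from this document), such a script is ordinary shell, for example:

      #!/bin/bash
      # Illustrative only: record a timestamp before Kubernetes is installed
      echo "pre-install at $(date)" >> /var/log/cce-preinstall.log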

Post-installation Command

Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-transcoded. The characters of both the pre-installation and post-installation scripts are counted together, and the total number of characters after transcoding cannot exceed 10240.

The script will be executed after Kubernetes software is installed, which does not affect the installation.

Scenario | Supported or Not | Description

Resetting nodes in the default pool in a batch | Supported in some scenarios | This operation can be performed only if the flavors, AZs, and disk configurations of all nodes are the same.

Resetting nodes in a node pool in a batch | Supported in some scenarios | This operation can be performed only if the disk configurations of all nodes are the same.

Resetting nodes in different node pools in a batch | Not supported | Only the nodes in the same node pool can be reset in a batch.

diff --git a/docs/cce/umn/cce_10_0004.html b/docs/cce/umn/cce_10_0004.html
index 4a2ed4e0..5ddb441b 100644
--- a/docs/cce/umn/cce_10_0004.html
+++ b/docs/cce/umn/cce_10_0004.html
@@ -33,6 +33,12 @@

      false indicates that the node is not a bare metal node.

node.kubernetes.io/container-engine

Container engine

Example: docker or containerd

      node.kubernetes.io/instance-type

      Node specifications

      @@ -74,11 +80,6 @@

      Node OS kernel version

accelerator

GPU node labels.

@@ -93,7 +94,7 @@

Adding or Deleting a Node Label

1. Log in to the CCE console and click the cluster name to access the cluster console.
2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab, select the target node and click Labels and Taints in the upper left corner.
3. In the displayed dialog box, click Add operation under Batch Operation, and then choose Add/Update or Delete.

   Enter the key and value of the label to be added or deleted, and click OK.

   For example, the key is deploy_qa and the value is true, indicating that the node is used to deploy the QA (test) environment.

        4. After the label is added, check the added label in node data.
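The same label can also be managed with kubectl (a sketch; the node name is illustrative):

   kubectl label node 192.168.0.100 deploy_qa=true    # add or update the label
   kubectl label node 192.168.0.100 deploy_qa-        # delete the label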
diff --git a/docs/cce/umn/cce_10_0006.html b/docs/cce/umn/cce_10_0006.html
index b3d3578e..d55f867c 100644
--- a/docs/cce/umn/cce_10_0006.html
+++ b/docs/cce/umn/cce_10_0006.html
@@ -25,8 +25,8 @@

        DaemonSets are closely related to nodes. If a node becomes faulty, the DaemonSet will not create the same pods on other nodes.

        Figure 3 DaemonSet
Overview of Job and CronJob

Jobs and CronJobs allow you to run short-lived, one-off tasks in batch. They ensure the task pods run to completion.

• A job is a resource object used by Kubernetes to control batch tasks. Jobs are different from long-running tasks (such as Deployments and StatefulSets): the former is started and terminated at specific times, while the latter runs unceasingly unless terminated. The pods managed by a job are automatically removed after they successfully complete their tasks, based on user configurations.
• A CronJob runs a job periodically on a specified schedule. A CronJob object is similar to a line of a crontab file in Linux.

      This run-to-completion feature of jobs is especially suitable for one-off tasks, such as continuous integration (CI).
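For orientation, a minimal CronJob manifest might look as follows (the image and schedule are examples, not from this document):

   apiVersion: batch/v1
   kind: CronJob
   metadata:
     name: hello
   spec:
     schedule: "*/5 * * * *"          # run the job every five minutes
     jobTemplate:
       spec:
         template:
           spec:
             containers:
             - name: hello
               image: busybox
               command: ["echo", "Hello from CronJob"]
             restartPolicy: OnFailure    # job pods must use OnFailure or Never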

      Workload Lifecycle

diff --git a/docs/cce/umn/cce_10_0007.html b/docs/cce/umn/cce_10_0007.html
index 22595b6d..15e58c92 100644
--- a/docs/cce/umn/cce_10_0007.html
+++ b/docs/cce/umn/cce_10_0007.html
@@ -1,6 +1,6 @@

Managing Workloads

      Scenario

      After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.
@@ -25,8 +25,8 @@
@@ -76,7 +76,7 @@

      Viewing Logs

      You can view logs of Deployments, StatefulSets, DaemonSets, and jobs. This section uses a Deployment as an example to describe how to view logs.

      Before viewing logs, ensure that the time of the browser is the same as that on the backend server.

1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
2. Click the Deployments tab and click View Log of the target workload.

   In the displayed View Log window, you can view logs.

          The displayed logs are standard output logs of containers and do not have persistence and advanced O&M capabilities. To use more comprehensive log capabilities, see Logs. If the function of collecting standard output is enabled for the workload (enabled by default), you can go to AOM to view more workload logs. For details, see Collecting Container Logs Using ICAgent.

        @@ -88,11 +88,11 @@

9. Upgrade the workload based on service requirements. The method for configuring parameters is the same as that for creating a workload.
10. After the update is complete, click Upgrade Workload, manually confirm the YAML file, and submit the upgrade.

Editing a YAML file

You can modify and download YAML files of Deployments, StatefulSets, DaemonSets, CronJobs, and pods on the CCE console. YAML files of jobs can only be viewed, copied, and downloaded. This section uses a Deployment as an example to describe how to edit the YAML file.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Edit YAML in the Operation column of the target workload. In the dialog box that is displayed, modify the YAML file.
      3. Click OK.
      4. (Optional) In the Edit YAML window, click Download to download the YAML file.

      Rolling Back a Workload (Available Only for Deployments)

      CCE records the release history of all Deployments. You can roll back a Deployment to a specified version.

1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
2. Click the Deployments tab and choose More > Roll Back in the Operation column of the target workload.
3. Switch to the Change History tab page, click Roll Back to This Version of the target version, manually confirm the YAML file, and click OK.

      Redeploying a Workload

      After you redeploy a workload, all pods in the workload will be restarted. This section uses Deployments as an example to illustrate how to redeploy a workload.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
      2. Click the Deployments tab and choose More > Redeploy in the Operation column of the target workload.
      3. In the dialog box that is displayed, click Yes to redeploy the workload.
      @@ -109,13 +109,13 @@

Deleting a Workload/Job

You can delete a workload or job that is no longer needed. Deleted workloads or jobs cannot be recovered. This section uses a Deployment as an example to describe how to delete a workload.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
2. In the same row as the workload you will delete, choose More > Delete in the Operation column.

        Read the system prompts carefully. A workload cannot be recovered after it is deleted. Exercise caution when performing this operation.

      3. Click Yes.

• If the node where the pod is located is unavailable or shut down and the workload cannot be deleted, you can forcibly delete the pod from the pod list on the workload details page (a kubectl equivalent is sketched after this list).
        • Ensure that the storage volumes to be deleted are not used by other workloads. If these volumes are imported or have snapshots, you can only unbind them.
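If you prefer the CLI, forcibly deleting a pod on an unavailable node can look like this (a sketch; the pod name and namespace are illustrative):

   kubectl delete pod <pod-name> -n default --grace-period=0 --force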

Events

This section uses a Deployment as an example to describe how to view events of a workload. To view the event of a job or CronJob, click View Event in the Operation column of the target workload.

      1. Log in to the CCE console, go to an existing cluster, and choose Workloads in the navigation pane.
2. On the Deployments tab page, click the target workload. On the Pods tab page, click View Events to view the event name, event type, number of occurrences, Kubernetes event, first occurrence time, and last occurrence time.

        Event data will be retained for one hour and then automatically deleted.

diff --git a/docs/cce/umn/cce_10_0009.html b/docs/cce/umn/cce_10_0009.html
index 99b00079..6caddac7 100644
--- a/docs/cce/umn/cce_10_0009.html
+++ b/docs/cce/umn/cce_10_0009.html
@@ -10,7 +10,7 @@

      Enter the username and password used to access the third-party image repository.

    12. When creating a workload, enter a private image path in the format of domainname/namespace/imagename:tag in Image Name and select the key created in 1.
    13. Set other parameters and click Create Workload.
Using kubectl

1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
2. Use kubectl to create a secret of the kubernetes.io/dockerconfigjson type.

          kubectl create secret docker-registry myregistrykey  -n default --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL

          In the preceding command, myregistrykey indicates the key name, default indicates the namespace where the key is located, and other parameters are as follows:

          • DOCKER_REGISTRY_SERVER: address of a third-party image repository, for example, www.3rdregistry.com or 10.10.10.10:443
          • DOCKER_USER: account used for logging in to a third-party image repository
          • DOCKER_PASSWORD: password used for logging in to a third-party image repository
          • DOCKER_EMAIL: email of a third-party image repository

        3. Use a third-party image to create a workload.

          A kubernetes.io/dockerconfigjson secret is used for authentication when you obtain a private image. The following is an example of using the myregistrykey for authentication.
          apiVersion: v1
          @@ -30,7 +30,7 @@ spec:
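The manifest above is elided by the diff; for orientation only, a pod that pulls a private image with that secret generally looks like the following (a hedged sketch, not the document's exact example; the image path is illustrative):

   apiVersion: v1
   kind: Pod
   metadata:
     name: foo
   spec:
     containers:
     - name: foo
       image: www.3rdregistry.com/namespace/imagename:tag   # private image path format from the step above
     imagePullSecrets:
     - name: myregistrykey                                  # the secret created in step 2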
           
diff --git a/docs/cce/umn/cce_10_0010.html b/docs/cce/umn/cce_10_0010.html
index dbbe0ae1..8fe2b3c3 100644
--- a/docs/cce/umn/cce_10_0010.html
+++ b/docs/cce/umn/cce_10_0010.html
@@ -4,11 +4,11 @@

          You can learn about a cluster network from the following two aspects:

          • What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are running on the nodes. Nodes and containers need to communicate with each other. For details about the cluster network types and their functions, see Cluster Network Structure.
          • How is pod access implemented in a cluster? Accessing a pod or container is a process of accessing services of a user. Kubernetes provides Service and Ingress to address pod access issues. This section summarizes common network access scenarios. You can select the proper scenario based on site requirements. For details about the network access scenarios, see Access Scenarios.

          Cluster Network Structure

          All nodes in the cluster are located in a VPC and use the VPC network. The container network is managed by dedicated network add-ons.


          • Node Network

            A node network assigns IP addresses to hosts (nodes in the figure above) in a cluster. Select a VPC subnet as the node network of the CCE cluster. The number of available IP addresses in a subnet determines the maximum number of nodes (including master nodes and worker nodes) that can be created in a cluster. This quantity is also affected by the container network. For details, see the container network model.

• Container Network

  A container network assigns IP addresses to pods in a cluster. CCE inherits the IP-Per-Pod-Per-Network network model of Kubernetes. That is, each pod has an independent IP address on a network plane and all containers in a pod share the same network namespace. All pods in a cluster exist in a directly connected flat network. They can access each other through their IP addresses without using NAT. Kubernetes only provides a network mechanism for pods, but does not directly configure pod networks. The configuration of pod networks is implemented by specific container network add-ons. The container network add-ons are responsible for configuring networks for pods and managing container IP addresses.

            Currently, CCE supports the following container network models:

• Container tunnel network: The container tunnel network is constructed on but independent of the node network through tunnel encapsulation. This network model uses VXLAN to encapsulate Ethernet packets into UDP packets and transmits them in tunnels. Open vSwitch serves as the backend virtual switch.
• VPC network: The VPC network model seamlessly combines VPC routing with the underlying network, making it ideal for high-performance scenarios. However, the maximum number of nodes allowed in a cluster is determined by the VPC route quota. Each node is assigned a CIDR block of a fixed size. The VPC network model outperforms the container tunnel network model in terms of performance because it does not have tunnel encapsulation overhead. In addition, as VPC routing includes routes to node IP addresses and the container CIDR block, container pods in a cluster can be directly accessed from outside the cluster.
• Developed by CCE, Cloud Native 2.0 network deeply integrates Elastic Network Interfaces (ENIs) and Sub Network Interfaces (sub-ENIs) of VPC. Container IP addresses are allocated from the VPC CIDR block. ELB passthrough networking is supported to direct access requests to containers. Security groups and EIPs are bound to deliver high performance.

            The performance, networking scale, and application scenarios of a container network vary according to the container network model. For details about the functions and features of different container network models, see Overview.

          • Service Network

            Service is also a Kubernetes object. Each Service has a static IP address. When creating a cluster on CCE, you can specify the Service CIDR block. The Service CIDR block cannot overlap with the node or container CIDR block. The Service CIDR block can be used only within a cluster.

          @@ -27,7 +27,7 @@
          • Intra-cluster access: A ClusterIP Service is used for workloads in the same cluster to access each other.
          • Access from outside a cluster: A Service (NodePort or LoadBalancer type) or an ingress is recommended for a workload outside a cluster to access workloads in the cluster.
            • Access through the public network: An EIP should be bound to the node or load balancer.
            • Access through the private network: The workload can be accessed through the internal IP address of the node or load balancer. If workloads are located in different VPCs, a peering connection is required to enable communication between different VPCs.
          • The workload can access the external network as follows:
            • Accessing an intranet: The workload accesses the intranet address, but the implementation method varies depending on container network models. Ensure that the peer security group allows the access requests from the container CIDR block.
            • Accessing a public network: Assign an EIP to the node where the workload runs (when the VPC network or tunnel network model is used), bind an EIP to the pod IP address (when the Cloud Native Network 2.0 model is used), or configure SNAT rules through the NAT gateway. For details, see Accessing the Internet from a Container.
Figure 3 Network access diagram
diff --git a/docs/cce/umn/cce_10_0011.html b/docs/cce/umn/cce_10_0011.html
index 54cfdbfd..89d12a2e 100644
--- a/docs/cce/umn/cce_10_0011.html
+++ b/docs/cce/umn/cce_10_0011.html
@@ -4,14 +4,14 @@

          Scenario

          ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.

          The cluster-internal domain name format is <Service name>.<Namespace of the workload>.svc.cluster.local:<Port>, for example, nginx.default.svc.cluster.local:80.

          Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

Figure 1 Intra-cluster access (ClusterIP)

              Creating a ClusterIP Service

              1. Log in to the CCE console and click the cluster name to access the cluster console.
              2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
              3. Configure intra-cluster access parameters.

                • Service Name: Specify a Service name, which can be the same as the workload name.
                • Service Type: Select ClusterIP.
                • Namespace: namespace that the workload belongs to.
                • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
                • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
                • Ports
                  • Protocol: protocol used by the Service.
                  • Service Port: port used by the Service. The port number ranges from 1 to 65535.
                  • Container Port: listener port of the workload. For example, Nginx uses port 80 by default.

              4. Click OK.
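The console steps above correspond to a Service manifest like the following (a sketch; the selector label is an assumption, while the name and port follow the kubectl example later in this section):

   apiVersion: v1
   kind: Service
   metadata:
     name: nginx-clusterip
   spec:
     type: ClusterIP
     selector:
       app: nginx          # assumed pod label; must match your workload
     ports:
     - protocol: TCP
       port: 8080          # Service port
       targetPort: 80      # container port the workload listens on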

                Setting the Access Type Using kubectl

                You can configure Service access using kubectl. This section uses an Nginx workload as an example to describe how to implement intra-cluster access using kubectl.

                1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
                2. Create and edit the nginx-deployment.yaml and nginx-clusterip-svc.yaml files.

                  The file names are user-defined. nginx-deployment.yaml and nginx-clusterip-svc.yaml are merely example file names.

                  vi nginx-deployment.yaml
                  apiVersion: apps/v1
                   kind: Deployment
                   metadata:
                  @@ -64,7 +64,7 @@ spec:
                   NAME              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
                   kubernetes        ClusterIP   10.247.0.1     <none>        443/TCP    4d6h
                   nginx-clusterip   ClusterIP   10.247.74.52   <none>        8080/TCP   14m

                4. Access the Service.

                  A Service can be accessed from containers or nodes in a cluster.

Create a pod, access the pod, and run the curl command to access IP address:Port or the domain name of the Service, as shown in the following example.

                  The domain name suffix can be omitted. In the same namespace, you can directly use nginx-clusterip:8080 for access. In other namespaces, you can use nginx-clusterip.default:8080 for access.

                  # kubectl run -i --tty --image nginx:alpine test --rm /bin/sh
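Then, inside the test pod, access could look like this (names and the IP address are taken from the output and prose above):

   curl nginx-clusterip:8080            # short name within the same namespace
   curl nginx-clusterip.default:8080    # from another namespace
   curl 10.247.74.52:8080               # the ClusterIP shown in the kubectl get svc output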
                  diff --git a/docs/cce/umn/cce_10_0012.html b/docs/cce/umn/cce_10_0012.html
                  index 59f8bd58..164f3039 100644
                  --- a/docs/cce/umn/cce_10_0012.html
                  +++ b/docs/cce/umn/cce_10_0012.html
                  @@ -3,24 +3,22 @@
                   

                  Creating a Node Pool

                  Scenario

                  This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.

Constraints

• The Autoscaler add-on needs to be installed for node auto scaling. For details about the add-on installation and parameter configuration, see CCE Cluster Autoscaler.

Procedure

1. Log in to the CCE console.
2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane. In the right pane, click the Node Pools tab.
3. In the upper right corner of the page, click Create Node Pool.

   Basic Settings
Table 1 Workload/Job management

Operation

Edit YAML

You can modify and download YAML files of Deployments, StatefulSets, DaemonSets, CronJobs, and pods on the CCE console. YAML files of jobs can only be viewed, copied, and downloaded.

NOTE:

If an existing CronJob is modified, the new configuration takes effect for the new pods, and the existing pods continue to run without any change.

@@ -28,49 +26,44 @@

Table 1 Basic settings

Parameter | Description

Node Pool Name

Name of a node pool. By default, the name is in the format of Cluster name-nodepool-Random number. If you do not want to use the default name format, you can customize the name.

Expected Initial Nodes

Number of nodes to be created in this node pool. A maximum of 50 nodes can be created at a time.

Configurations

You can configure the flavor and OS of a cloud server, on which your containerized applications run.

@@ -81,35 +74,35 @@

Table 2 Node configuration parameters

Parameter | Description

AZ

AZ where the node is located. Nodes in a cluster can be created in different AZs for higher reliability. The value cannot be changed after the node is created.

Select Random to deploy your node in a random AZ based on the selected node flavor.

An AZ is a physical region where resources use independent power supply and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs.

Node Type

Select a node type based on service requirements. Then, you can select a proper flavor from the node flavor list.

CCE standard clusters support the following node types:
• ECS (VM): A virtualized ECS is used as a cluster node.
CCE Turbo clusters support the following node types:
• ECS (VM): A virtualized ECS is used as a cluster node. A CCE Turbo cluster supports only the cloud servers that allow multiple ENIs. Select a server type displayed on the CCE console.

Specifications

Select a node flavor based on service requirements. The available node flavors vary depending on regions or AZs. For details, see the CCE console.

NOTE:
• If a node pool is configured with multiple node flavors, only the flavors (which can be located in different AZs) of the same node type are supported. For example, a node pool consisting of general computing-plus nodes supports only general computing-plus node flavors, but not the flavors of general computing nodes.
• A maximum of 10 node flavors can be added to a node pool (the flavors in different AZs are counted separately). When adding a node flavor, you can choose multiple AZs, but you need to specify them.
• Nodes in a newly created node pool are created using the default flavor. If the resources for the default flavor are insufficient, node creation will fail.
• After a node pool is created, the flavors of existing nodes cannot be deleted.

Container Engine

The container engines supported by CCE include Docker and containerd, which may vary depending on cluster types, cluster versions, and OSs. Select a container engine based on the information displayed on the CCE console. For details, see Mapping between Node OSs and Container Engines.

OS

Select an OS type. Different types of nodes support different OSs.
• Public image: Select a public image for the node.
• Private image: Select a private image for the node.

NOTE:

Service container runtimes share the kernel and underlying calls of nodes. To ensure compatibility, select a Linux distribution version that is the same as or close to that of the final service container image for the node OS.

Login Mode

• Key Pair

  Select the key pair used to log in to the node. You can select a shared key.

  A key pair is used for identity authentication when you remotely log in to a node. If no key pair is available, click Create Key Pair.

Storage Settings

Configure storage resources on a node for the containers running on it. Select a disk type and configure its size based on service requirements.

@@ -119,31 +112,31 @@

Table 3 Storage configuration parameters

Parameter | Description

System Disk

System disk used by the node OS. The value ranges from 40 GiB to 1024 GiB. The default value is 50 GiB.

Encryption: System disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. Only the nodes of the Elastic Cloud Server (VM) type in certain regions support system disk encryption. For details, see the console.
• Not encrypted is selected by default.
• If you select Enabled (key) for System Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
• If you select Enabled (KMS key ID) for System Disk Encryption, enter a KMS key (which can be shared by others) in the current region.

Data Disk

At least one data disk is required for the container runtime and kubelet. The data disk cannot be deleted or uninstalled. Otherwise, the node will be unavailable.

• First data disk: used for container runtime and kubelet components. The value ranges from 20 GiB to 32768 GiB. The default value is 100 GiB.
• Other data disks: You can set the data disk size to a value ranging from 10 GiB to 32768 GiB. The default value is 100 GiB.

NOTE:
• If the node flavor is disk-intensive or ultra-high I/O, one data disk can be a local disk.
• Local disks may break down and do not ensure data reliability. Store your service data in EVS disks, which are more reliable than local disks.

Advanced Settings

Expand the area and configure the following parameters:
• Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
• Data Disk Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. BMS nodes do not support data disk encryption, which is available only in certain regions. For details, see the console.
  • Not encrypted is selected by default.
  • If you select Enabled (key) for Data Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
  • If you select Enabled (KMS key ID) for Data Disk Encryption, enter a KMS key (which can be shared by others) in the current region.

Adding data disks

A maximum of 16 data disks can be attached to an ECS and 10 to a BMS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:
• Default: By default, a raw disk is created without any processing.
• Mount Disk: The data disk is attached to a specified directory.
• Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
• Use as ephemeral volume: applicable when there is a high performance requirement on emptyDir.

NOTE:
• Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
• Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.

Local PVs and local EVs can be written in the following modes:
• Linear: A linear logical volume integrates one or more physical volumes. Data is written to the next physical volume when the previous one is used up.
• Striped: A striped logical volume stripes data into blocks of the same size and stores them in multiple physical volumes in sequence. This allows data to be concurrently read and written. A storage pool consisting of striped volumes cannot be scaled-out. This option can be selected only when multiple volumes exist.

Network Settings

Configure networking resources to allow node and containerized application access.
Table 4 Configuration parameters

Parameter | Description

Virtual Private Cloud

The VPC to which the cluster belongs by default, which cannot be changed.

Node Subnet

The node subnet selected during cluster creation is used by default. You can choose another subnet instead.
• Multiple subnets: You can select multiple subnets in the same VPC for nodes. Newly added nodes will preferentially use the IP addresses from the top-ranking subnet.
• Single subnet: Only one subnet is configured for your node pool. If the IP addresses of a single subnet are insufficient, configure multiple subnets. Otherwise, a node pool scale-out may fail.

Node IP Address

Random allocation is supported.

Associate Security Group

Security group used by the nodes created in the node pool. A maximum of five security groups can be selected.

When a cluster is created, a node security group named {Cluster name}-cce-node-{Random ID} is created and used by default.

Traffic needs to pass through certain ports in the node security group to ensure node communications. Ensure that you have enabled these ports if you select another security group.

NOTE:

After a node pool is created, its associated security group cannot be modified.
      @@ -156,68 +149,89 @@

Advanced Settings

Configure advanced node capabilities such as labels, taints, and startup command.
      @@ -188,7 +188,7 @@ spec:
      Hello

Using kubectl

1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
2. Create a file named nginx-configmap.yaml and edit it.

   vi nginx-configmap.yaml

          As shown in the following example, after the ConfigMap volume is mounted, a configuration file with the key as the file name and value as the file content is generated in the /etc/config directory of the container.

          apiVersion: apps/v1
           kind: Deployment
          @@ -216,7 +216,7 @@ spec:
                 configMap:
                   name: cce-configmap                 # Name of the referenced ConfigMap.

3. Create a workload.

   kubectl apply -f nginx-configmap.yaml

4. After the workload runs properly, the SPECIAL_LEVEL and SPECIAL_TYPE files will be generated in the /etc/config directory. The contents of the files are Hello and CCE, respectively.

   1. Run the following command to view the created pod:
      kubectl get pod | grep nginx-configmap
              Expected output:
              nginx-configmap-***   1/1     Running   0              2m18s
            2. Run the following command to view the SPECIAL_LEVEL or SPECIAL_TYPE file in the pod:
              kubectl exec nginx-configmap-*** -- cat /etc/config/SPECIAL_LEVEL
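      Expected output (matching the file contents described in 4):
      Hello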
diff --git a/docs/cce/umn/cce_10_0016.html b/docs/cce/umn/cce_10_0016.html
index bb7466e2..ef23fe95 100644
--- a/docs/cce/umn/cce_10_0016.html
+++ b/docs/cce/umn/cce_10_0016.html
@@ -22,12 +22,12 @@ data:
              • Added from secret: Select a secret and import all keys in the secret as environment variables.
              • Added from secret key: Import the value of a key in a secret as the value of an environment variable.
                • Variable Name: name of an environment variable in the workload. The name can be customized and is set to the key name selected in the secret by default.
                • Variable Value/Reference: Select a secret and the key to be imported. The corresponding value is imported as a workload environment variable.

                For example, after you import the value of username in secret mysecret as the value of workload environment variable username, an environment variable named username exists in the container.
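In a workload manifest, this import corresponds to a sketch like the following (names follow the mysecret/username example above):

   env:
   - name: username               # environment variable name in the container
     valueFrom:
       secretKeyRef:
         name: mysecret           # the secret to read from
         key: username            # the key whose value is imported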

4. Configure other workload parameters and click Create Workload.

   After the workload runs properly, log in to the container and run the following statement to check whether the secret has been set as an environment variable of the workload:

              printenv username

              If the output is the same as the content in the secret, the secret has been set as an environment variable of the workload.

            Using kubectl

              1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
              2. Create a file named nginx-secret.yaml and edit it.

                vi nginx-secret.yaml

                Content of the YAML file:

                • Added from secret: To add all data in a secret to environment variables, use the envFrom parameter. The keys in the secret will become names of environment variables in a workload.
                  apiVersion: apps/v1
                   kind: Deployment
                  @@ -93,7 +93,7 @@ spec:
                   
                   

                  Configuring the Data Volume of a Workload

                  You can mount a secret as a volume to the specified container path. Contents in a secret are user-defined. Before that, create a secret. For details, see Creating a Secret.

                  Using the CCE console

                    1. Log in to the CCE console and click the cluster name to access the cluster console.
                    2. Choose Workloads in the navigation pane. In the right pane, click the Deployments tab. Click Create Workload in the upper right corner.

                      When creating a workload, click Data Storage in the Container Settings area. Click Add Volume and select Secret from the drop-down list.

                    3. Select parameters for mounting a secret volume, as shown in Table 1.

      Table 5 Advanced configuration parameters

      Parameter

      +
      - - - - - - - - - + + + + + + - - - - - - - - + + +
      Table 5 Advanced configuration parameters

      Parameter

      Description

      +

      Description

      Resource Tag

      +

      Resource Tag

      You can add resource tags to classify resources.

      -

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      +

      You can add resource tags to classify resources.

      +

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      CCE will automatically create the "CCE-Dynamic-Provisioning-Node=Node ID" tag.

      Kubernetes Label

      +

      Kubernetes Label

      A Kubernetes label is a key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add. A maximum of 20 labels can be added.

      -

      Labels can be used to distinguish nodes. With workload affinity settings, container pods can be scheduled to a specified node. For more information, see Labels and Selectors.

      +

      A key-value pair added to a Kubernetes object (such as a pod). After specifying a label, click Add Label for more. A maximum of 20 labels can be added.

      +

      Labels can be used to distinguish nodes. With workload affinity settings, pods can be scheduled to a specified node. For more information, see Labels and Selectors.

      Taint

      +

      Taint

      This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must start with a letter or digit and can contain a maximum of 63 characters, including letters, digits, hyphens (-), underscores (_), and periods (.).
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.
      +
      This parameter is left blank by default. You can add taints to configure anti-affinity for the node. A maximum of 20 taints are allowed for each node. Each taint contains the following parameters:
      • Key: A key must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed. A DNS subdomain name can be used as the prefix of a key.
      • Value: A value must contain 1 to 63 characters, starting with a letter or digit. Only letters, digits, hyphens (-), underscores (_), and periods (.) are allowed.
      • Effect: Available options are NoSchedule, PreferNoSchedule, and NoExecute.

      For details, see Managing Node Taints.

      NOTE:

      For a cluster of v1.19 or earlier, the workload may have been scheduled to a node before the taint is added. To avoid such a situation, select a cluster of v1.19 or later.

      Max. Pods

      +

      Synchronization for Existing Nodes

      Maximum number of pods that can run on the node, including the default system pods.

      +

      After the options are selected, changes to resource tags and Kubernetes labels/taints in a node pool will be synchronized to existing nodes in the node pool.

      +

      New Node Scheduling

      +

      Default scheduling policy for the nodes newly added to a node pool. If you select Unschedulable, newly created nodes in the node pool will be labeled as unschedulable. In this way, you can perform some operations on the nodes before pods are scheduled to these nodes.

      +

      Scheduled Scheduling: After scheduled scheduling is enabled, new nodes will be automatically scheduled after the custom time expires.

      +
      • Disabled: By default, scheduled scheduling is not enabled for new nodes. To manually enable this function, go to the node list. For details, see Configuring a Node Scheduling Policy in One-Click Mode.
      • Custom: the default timeout for unschedulable nodes. The value ranges from 0 to 99 in the unit of minutes.
      +
      NOTE:
      • If auto scaling of node pools is also required, ensure the scheduled scheduling is less than 15 minutes. If a node added through Autoscaler cannot be scheduled for more than 15 minutes, Autoscaler determines that the scale-out failed and triggers another scale-out. Additionally, if the node cannot be scheduled for more than 20 minutes, the node will be scaled in by Autoscaler.
      • After this function is enabled, nodes will be tainted with node.cloudprovider.kubernetes.io/uninitialized during a node pool creation or update.
      +
      +
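      To check whether a newly added node still carries this taint, you can, for example, run:

      kubectl describe node <node-name> | grep Taints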

      Max. Pods

      +

      Maximum number of pods that can run on the node, including the default system pods.

      This limit prevents the node from being overloaded with pods.

      This number is also decided by other factors. For details, see Maximum Number of Pods That Can Be Created on a Node.

      ECS Group

      +

      ECS Group

      An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

      +

      An ECS group logically groups ECSs. The ECSs in the same ECS group comply with the same policy associated with the ECS group.

      Anti-affinity: ECSs in an ECS group are deployed on different physical hosts to improve service reliability.

      Select an existing ECS group, or click Add ECS Group to create one. After the ECS group is created, click the refresh icon.

      Pre-installation Command

      +

      Pre-installation Command

      Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-encoded.

      +

      Pre-installation script command, in which Chinese characters are not allowed. The script command will be Base64-encoded. The characters of the pre-installation and post-installation scripts are counted together, and the total number of characters after encoding cannot exceed 10,240.

      The script will be executed before Kubernetes software is installed. Note that if the script is incorrect, Kubernetes software may fail to be installed.

      Post-installation Command

      +

      Post-installation Command

      Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-encoded.

      +

      Post-installation script command, in which Chinese characters are not allowed. The script command will be Base64-encoded. The characters of the pre-installation and post-installation scripts are counted together, and the total number of characters after encoding cannot exceed 10,240.

      The script will be executed after Kubernetes software is installed, which does not affect the installation.

      NOTE:

      Do not use the reboot command in the post-installation script to restart the system immediately. To restart the system, run the shutdown -r 1 command, which restarts it with a one-minute delay.

      Agency

      +

      Agency

      An agency is created by the account administrator on the IAM console. By creating an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

      +

      An agency is created by the account administrator on the IAM console. Using an agency, you can share your cloud server resources with another account, or entrust a more professional person or team to manage your resources.

      If no agency is available, click Create Agency on the right to create one.

      User-defined node name prefix and suffix

      +

      Custom name prefix and suffix of a node in a node pool. After the configuration, the nodes in the node pool will be named with the configured prefix and suffix. For example, if the prefix is prefix- and the suffix is -suffix, the nodes in the node pool will be named in the format of "prefix-Node pool name with five-digit random characters-suffix".

      +
      NOTICE:
      • A prefix and suffix can be customized only when a node pool is created, and they cannot be modified after the node pool is created.
      • A prefix can end with a special character, and a suffix can start with a special character.
      • A node name consists of a maximum of 56 characters in the format of "Prefix-Node pool name with five-digit random characters-Suffix".
      • A node name does not support the combination of a period (.) and special characters (such as .., .-, or -.).
      • This function is available only in clusters of v1.28.1, v1.27.3, v1.25.6, v1.23.11, v1.21.12, or later.
      +
      +
      diff --git a/docs/cce/umn/cce_10_0014.html b/docs/cce/umn/cce_10_0014.html index 4604a94b..4e004f0e 100644 --- a/docs/cce/umn/cce_10_0014.html +++ b/docs/cce/umn/cce_10_0014.html @@ -8,16 +8,22 @@
      diff --git a/docs/cce/umn/cce_10_0015.html b/docs/cce/umn/cce_10_0015.html index b2ade808..61939768 100644 --- a/docs/cce/umn/cce_10_0015.html +++ b/docs/cce/umn/cce_10_0015.html @@ -24,7 +24,7 @@ data:
      Hello

      Using kubectl

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a file named nginx-configmap.yaml and edit it.

        vi nginx-configmap.yaml

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-configmap.yaml and edit it.

          vi nginx-configmap.yaml

          Content of the YAML file:

          • Added from ConfigMap: To add all data in a ConfigMap to environment variables, use the envFrom parameter. The keys in the ConfigMap will become names of environment variables in the workload.
            apiVersion: apps/v1
             kind: Deployment
            @@ -99,13 +99,13 @@ CCE
            -c echo $SPECIAL_LEVEL $SPECIAL_TYPE > /usr/share/nginx/html/index.html
          -
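      Because the diff elides most of this manifest, here is a minimal hedged sketch of a Deployment that imports an entire ConfigMap through envFrom; the cce-configmap name follows the surrounding example, while the image is illustrative:

        apiVersion: apps/v1
        kind: Deployment
        metadata:
          name: nginx
        spec:
          replicas: 1
          selector:
            matchLabels:
              app: nginx
          template:
            metadata:
              labels:
                app: nginx
            spec:
              containers:
              - name: nginx
                image: nginx:latest
                envFrom:                   # Every key in the ConfigMap becomes an environment variable.
                - configMapRef:
                    name: cce-configmap
              imagePullSecrets:
              - name: default-secret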

        3. Set other workload parameters and click Create Workload.

          After the workload runs properly, log in to the container and run the following statement to check whether the ConfigMap has been set as an environment variable of the workload:

          +

        4. Configure other workload parameters and click Create Workload.

          After the workload runs properly, log in to the container and run the following statement to check whether the ConfigMap has been set as an environment variable of the workload:

          cat /usr/share/nginx/html/index.html

          The example output is as follows:

          Hello CCE

        Using kubectl

        -
        1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-configmap.yaml and edit it.

          vi nginx-configmap.yaml

          +
          1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
          2. Create a file named nginx-configmap.yaml and edit it.

            vi nginx-configmap.yaml

            As shown in the following example, the cce-configmap ConfigMap is imported to the workload. SPECIAL_LEVEL and SPECIAL_TYPE are the environment variable names in the workload, that is, the key names in the cce-configmap ConfigMap.
            apiVersion: apps/v1
             kind: Deployment
             metadata:
            @@ -162,7 +162,7 @@ spec:
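            The elided portion of this manifest injects individual ConfigMap keys as named variables through valueFrom and configMapKeyRef. A hedged sketch as a standalone pod; the image is illustrative:

            apiVersion: v1
            kind: Pod
            metadata:
              name: nginx
            spec:
              containers:
              - name: nginx
                image: nginx:latest
                env:
                - name: SPECIAL_LEVEL          # Environment variable name in the container.
                  valueFrom:
                    configMapKeyRef:
                      name: cce-configmap      # ConfigMap to read from.
                      key: SPECIAL_LEVEL       # Key whose value is injected.
                - name: SPECIAL_TYPE
                  valueFrom:
                    configMapKeyRef:
                      name: cce-configmap
                      key: SPECIAL_TYPE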
             

      Mount Path

      Enter a mount point. After the ConfigMap volume is mounted, a configuration file with the key as the file name and value as the file content is generated in the mount path of the container.

      -
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may lead to container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      +
      This parameter specifies a container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may lead to container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      @@ -110,7 +110,7 @@ spec:
      @@ -135,7 +135,7 @@ spec:

      The expected output is the same as the content in the secret.

      Using kubectl

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a file named nginx-secret.yaml and edit it.

        vi nginx-secret.yaml

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create a file named nginx-secret.yaml and edit it.

          vi nginx-secret.yaml

          In the following example, the username and password in the mysecret secret are saved in the /etc/foo directory as files.
          apiVersion: apps/v1
           kind: Deployment
           metadata:
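          Most of the manifest is elided by the diff. A hedged sketch of the volume part as a standalone pod, mounting mysecret at /etc/foo as described; the image is illustrative:

          apiVersion: v1
          kind: Pod
          metadata:
            name: nginx
          spec:
            containers:
            - name: nginx
              image: nginx:latest
              volumeMounts:
              - name: foo
                mountPath: /etc/foo        # Each key in the secret becomes a file in this directory.
                readOnly: true
            volumes:
            - name: foo
              secret:
                secretName: mysecret       # Secret providing the username and password files.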
          diff --git a/docs/cce/umn/cce_10_0018.html b/docs/cce/umn/cce_10_0018.html
          index 40d068a3..1c7b087d 100644
          --- a/docs/cce/umn/cce_10_0018.html
          +++ b/docs/cce/umn/cce_10_0018.html
          @@ -4,9 +4,9 @@
           

          CCE works with AOM to collect workload logs. When a node is created, ICAgent (a DaemonSet named icagent in the kube-system namespace of a cluster) of AOM is installed by default. ICAgent collects workload logs and reports them to AOM. You can view workload logs on the CCE or AOM console.

          Constraints

          ICAgent only collects text logs in .log, .trace, and .out formats.

          -

          Using ICAgent to Collect Logs

          1. When creating a workload, set logging for the container.
          2. Click to add a log policy.

            The following uses Nginx as an example. Log policies vary depending on workloads.
            Figure 1 Adding a log policy
            +

            Using ICAgent to Collect Logs

            1. When creating a workload, set logging for the container.
            2. Click to add a log policy.

              The following uses Nginx as an example. Log policies vary depending on workloads.
              Figure 1 Adding a log policy
              -

            3. Set Volume Type to hostPath or EmptyDir.

              +

            4. Set Volume Type to hostPath or emptyDir.

      Table 1 Mounting a secret volume

      Parameter

      Mount Path

      Enter a mount point. After the secret volume is mounted, a secret file with the key as the file name and value as the file content is generated in the mount path of the container.

      -
      This parameter indicates the container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may cause container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      +
      This parameter specifies a container path to which a data volume will be mounted. Do not mount the volume to a system directory such as / or /var/run. This may cause container errors. Mount the volume to an empty directory. If the directory is not empty, ensure that there are no files that affect container startup. Otherwise, the files will be replaced, which leads to a container startup failure or workload creation failure.
      NOTICE:

      If the container is mounted to a high-risk directory, use an account with minimum permissions to start the container. Otherwise, high-risk files on the host may be damaged.

      @@ -174,7 +174,7 @@ spec:
      @@ -258,7 +270,12 @@
      diff --git a/docs/cce/umn/cce_10_0031.html b/docs/cce/umn/cce_10_0031.html index 29fe7e0c..4eb6b617 100644 --- a/docs/cce/umn/cce_10_0031.html +++ b/docs/cce/umn/cce_10_0031.html @@ -4,15 +4,17 @@
      diff --git a/docs/cce/umn/cce_10_0035.html b/docs/cce/umn/cce_10_0035.html index 1742be9b..6bbd136d 100644 --- a/docs/cce/umn/cce_10_0035.html +++ b/docs/cce/umn/cce_10_0035.html @@ -8,6 +8,8 @@
      diff --git a/docs/cce/umn/cce_10_00356.html b/docs/cce/umn/cce_10_00356.html index b647dab1..77ff2edc 100644 --- a/docs/cce/umn/cce_10_00356.html +++ b/docs/cce/umn/cce_10_00356.html @@ -1,9 +1,9 @@

      Accessing a Container

      +

      Logging In to a Container

      Scenario

      If you encounter unexpected problems when using a container, you can log in to the container to debug it.

      -

      Logging In to a Container Using kubectl

      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Run the following command to view the created pod:

        kubectl get pod
        +

        Using kubectl

        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Run the following command to view the created pod:

          kubectl get pod
          The example output is as follows:
          NAME                               READY   STATUS    RESTARTS       AGE
           nginx-59d89cb66f-mhljr             1/1     Running   0              11m
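          The login command itself is elided by the diff. Logging in is typically done with kubectl exec; for example, using the pod name from the output above (assuming the container provides /bin/bash):

          kubectl exec -it nginx-59d89cb66f-mhljr -- /bin/bash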
          diff --git a/docs/cce/umn/cce_10_0036.html b/docs/cce/umn/cce_10_0036.html index 0c1e60e5..19475938 100644 --- a/docs/cce/umn/cce_10_0036.html +++ b/docs/cce/umn/cce_10_0036.html @@ -1,11 +1,11 @@

          Stopping a Node

          -

          Scenario

          After a node in the cluster is stopped, services on the node are also stopped. Before stopping a node, ensure that discontinuity of the services on the node will not result in adverse impacts.

          +

          Scenario

          When a node in the cluster is stopped, all services on that node will also be stopped, and the node will no longer be available for scheduling. Check if your services will be affected before stopping a node.

          -

          Constraints

          • Deleting a node will lead to pod migration, which may affect services. Therefore, delete nodes during off-peak hours.
          • Unexpected risks may occur during the operation. Back up related data in advance.
          • While the node is being deleted, the backend will set the node to the unschedulable state.
          • Only worker nodes can be stopped.
          +

          Precautions

          • Deleting a node will lead to pod migration, which may affect services. Perform this operation during off-peak hours.
          • Unexpected risks may occur during the operation. Back up data beforehand.
          -

          Procedure

          1. Log in to the CCE console and click the cluster name to access the cluster console.
          2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
          3. Locate the target node and click its name.
          4. In the upper right corner of the ECS details page, click Stop. In the displayed dialog box, click Yes.

            Figure 1 ECS details page
            +

            Procedure

            1. Log in to the CCE console and click the cluster name to access the cluster console.
            2. In the navigation pane, choose Nodes. On the displayed page, click the Nodes tab.
            3. Locate the target node and click its name.
            4. In the upper right corner of the ECS details page, click Stop. In the displayed dialog box, click Yes.

              Figure 1 ECS details page

          diff --git a/docs/cce/umn/cce_10_0044.html b/docs/cce/umn/cce_10_0044.html index 33cf02bd..b6c77e47 100644 --- a/docs/cce/umn/cce_10_0044.html +++ b/docs/cce/umn/cce_10_0044.html @@ -12,6 +12,10 @@
        diff --git a/docs/cce/umn/cce_10_0046.html b/docs/cce/umn/cce_10_0046.html index 13f96000..a2010651 100644 --- a/docs/cce/umn/cce_10_0046.html +++ b/docs/cce/umn/cce_10_0046.html @@ -8,15 +8,15 @@
        diff --git a/docs/cce/umn/cce_10_0047.html b/docs/cce/umn/cce_10_0047.html index 15ab1241..ceea984a 100644 --- a/docs/cce/umn/cce_10_0047.html +++ b/docs/cce/umn/cce_10_0047.html @@ -7,7 +7,7 @@
      -

      Using the CCE Console

      1. Log in to the CCE console.
      2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
      3. Set basic information about the workload.

        Basic Info
        • Workload Type: Select Deployment. For details about workload types, see Overview.
        • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
        • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
        • Pods: Enter the number of pods of the workload.
        • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Kata Runtime and Common Runtime.
        • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
        +

        Using the CCE Console

        1. Log in to the CCE console.
        2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
        3. Set basic information about the workload.

          Basic Info
          • Workload Type: Select Deployment. For details about workload types, see Overview.
          • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
          • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
          • Pods: Enter the number of pods of the workload.
          • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
          • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
          Container Settings
          • Container Information
            Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
            • Basic Info: Configure basic information about the container.
      Table 1 Configuring log policies

      Parameter

      Description

      @@ -155,8 +155,8 @@ spec:

      Extended host path

      Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

      -

      A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

      -
      • None: No extended path is configured.
      • PodUID: ID of a pod.
      • PodName: name of a pod.
      • PodUID/ContainerName: pod ID and container name.
      • PodName/ContainerName: pod name and container name.
      +

      A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

      +
      • None: No extended path is configured.
      • PodUID: ID of a pod.
      • PodName: name of a pod.
      • PodUID/ContainerName: pod ID and container name.
      • PodName/ContainerName: pod name and container name.

      policy.logs.rotate

      @@ -164,7 +164,7 @@ spec:

      Log dump

      Log dump refers to rotating log files on a local host.

      -
      • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
      • Disabled: AOM does not dump log files.
      +
      • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file is located. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
      • Disabled: AOM does not dump log files.
      NOTE:
      • AOM rotates log files using copytruncate. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
      • Currently, mainstream log components such as Log4j and Logback support log file rotation. If you have already set rotation for log files, skip the configuration. Otherwise, conflicts may occur.
      • You are advised to configure log file rotation for your own services to flexibly control the size and number of rolled files.

      Collection path

      A collection path narrows down the scope of collection to specified logs.

      -
      • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
      • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
      • * in log file names indicates a fuzzy match.
      +
      • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
      • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
      • * in log file names indicates a fuzzy match.

      Example: The collection path /tmp/**/test*.log indicates that all .log files prefixed with test will be collected from /tmp and subdirectories at 5 levels deep.

      CAUTION:

      Ensure that ICAgent is of v5.12.22 or later.

      diff --git a/docs/cce/umn/cce_10_0020.html b/docs/cce/umn/cce_10_0020.html index cfb96256..8fecd17e 100644 --- a/docs/cce/umn/cce_10_0020.html +++ b/docs/cce/umn/cce_10_0020.html @@ -6,7 +6,7 @@

      Master Nodes

      Select the number of master nodes. The master nodes are automatically hosted by CCE and deployed with Kubernetes cluster management components such as kube-apiserver, kube-controller-manager, and kube-scheduler.

      -
      • Multiple: Three master nodes will be created for high cluster availability.
      • Single: Only one master node will be created in your cluster.
      +
      • 3 Masters: Three master nodes will be created for high cluster availability.
      • Single: Only one master node will be created in your cluster.
      You can also select AZs for the master nodes. By default, AZs are allocated automatically for the master nodes.
      • Automatic: Master nodes are randomly distributed in different AZs for cluster DR. If the number of available AZs is less than the number of nodes to be created, CCE will create the nodes in the AZs with sufficient resources to preferentially ensure cluster creation. In this case, AZ-level DR may not be ensured.
      • Custom: Master nodes are deployed in specific AZs.
        If there is one master node in your cluster, you can select one AZ for the master node. If there are multiple master nodes in your cluster, you can select multiple AZs for the master nodes.
        • AZ: Master nodes are deployed in different AZs for cluster DR.
        • Host: Master nodes are deployed on different hosts in the same AZ for cluster DR.
        • Custom: Master nodes are deployed in the AZs you specified.
      @@ -70,6 +70,13 @@

      Select the subnet to which the master nodes belong. If no subnet is available, click Create Subnet to create one. The value cannot be changed after the cluster is created.

      Default Security Group

      +
      Select the security group automatically generated by CCE or use an existing one as the default security group of the node.
      NOTICE:

      The default security group must allow traffic from certain ports to ensure normal communication. Otherwise, the node cannot be created.

      +
      +
      +

      IPv6

      If enabled, cluster resources, including nodes and workloads, can be accessed through IPv6 CIDR blocks.

      @@ -155,7 +162,7 @@

      Overload Control

      After this function is enabled, concurrent requests will be dynamically controlled based on the resource demands received by master nodes to ensure the stable running of the master nodes and the cluster. For details, see Cluster Overload Control.

      +

      After this function is enabled, concurrent requests will be dynamically controlled based on the resource demands received by master nodes to ensure the stable running of the master nodes and the cluster. For details, see Enabling Overload Control for a Cluster.

      Disk Encryption for Master Nodes

      @@ -167,8 +174,8 @@

      Resource Tag

      You can add resource tags to classify resources.

      -

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      +

      You can add resource tags to classify resources. A maximum of 20 resource tags can be added.

      +

      You can create predefined tags on the TMS console. The predefined tags are available to all resources that support tags. You can use predefined tags to improve the tag creation and resource migration efficiency.

      Description

      @@ -214,7 +221,12 @@

      CCE Node Problem Detector

      +

      Cloud Native Cluster Monitoring

      +

      (Optional) If selected, this add-on (Cloud Native Cluster Monitoring) will be automatically installed. Cloud Native Cluster Monitoring collects monitoring metrics for your cluster and reports the metrics to AOM. The agent mode does not support HPA based on custom Prometheus statements. If related functions are required, install this add-on manually after the cluster is created.

      +

      CCE Node Problem Detector

      (Optional) If selected, this add-on (CCE Node Problem Detector) will be automatically installed to detect faults and isolate nodes for prompt cluster troubleshooting.

      CCE Node Problem Detector

      +

      Cloud Native Cluster Monitoring

      +

      Select an AOM instance for Cloud Native Cluster Monitoring to report metrics. If no AOM instance is available, click Creating Instance to create one.

      +

      CCE Node Problem Detector

      This add-on is unconfigurable. After the cluster is created, choose Add-ons in the navigation pane of the cluster console and modify the configuration.

      Parameter

      @@ -81,16 +81,16 @@

      (Optional) Service Settings

      A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

      You can also create a Service after creating a workload. For details about Services of different types, see Overview.

      -
      (Optional) Advanced Settings
      • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
      • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
        • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
          • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
          • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
          • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
          +
          (Optional) Advanced Settings
          • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Configuring Workload Upgrade Policies.
          • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
            • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
              • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
              • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
              • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
              • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
              • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
              • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
            -
          • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
          • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
          • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
          • Network Configuration
          +
          • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Configuring Tolerance Policies.
          • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
          • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
          • Network Configuration

        • Click Create Workload in the lower right corner.

      Using kubectl

      The following procedure uses Nginx as an example to describe how to create a workload using kubectl.

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name, and you can rename it as required.

        vi nginx-deployment.yaml

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create and edit the nginx-deployment.yaml file. nginx-deployment.yaml is an example file name, and you can rename it as required.

          vi nginx-deployment.yaml

          The following is an example YAML file. For more information about Deployments, see Kubernetes documentation.

          apiVersion: apps/v1
           kind: Deployment
          @@ -114,7 +114,7 @@ spec:
                   name: nginx
                 imagePullSecrets:
                 - name: default-secret
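          The hunk shows only the tail of the manifest; a hedged reconstruction of a minimal Nginx Deployment consistent with the visible fields:

          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: nginx
          spec:
            replicas: 1
            selector:
              matchLabels:
                app: nginx
            template:
              metadata:
                labels:
                  app: nginx
              spec:
                containers:
                - image: nginx:latest        # Illustrative image; replace as required.
                  name: nginx
                imagePullSecrets:            # Secret for pulling the image, as in the visible fragment.
                - name: default-secret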
          -

          For details about these parameters, see Table 1.

          +

          For details about the parameters, see Table 1.

          diff --git a/docs/cce/umn/cce_10_0048.html b/docs/cce/umn/cce_10_0048.html index 3d94fea4..b8bc2e92 100644 --- a/docs/cce/umn/cce_10_0048.html +++ b/docs/cce/umn/cce_10_0048.html @@ -4,13 +4,13 @@

          Scenario

          StatefulSets are workloads that store data or status while running. For example, MySQL is a StatefulSet because it needs to store new data.

          A container can be migrated between different hosts, but data is not stored on the hosts. To store StatefulSet data persistently, attach HA storage volumes provided by CCE to the container.

          -

          Constraints

          • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
          • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
          • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Services.
          • When a node is unavailable, pods become Unready. In this case, manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.
          +

          Notes and Constraints

          • When you delete or scale a StatefulSet, the system does not delete the storage volumes associated with the StatefulSet to ensure data security.
          • When you delete a StatefulSet, reduce the number of replicas to 0 before deleting the StatefulSet so that pods in the StatefulSet can be stopped in order.
          • When you create a StatefulSet, a headless Service is required for pod access. For details, see Headless Services.
          • When a node is unavailable, pods become Unready. In this case, manually delete the pods of the StatefulSet so that the pods can be migrated to a normal node.

          Prerequisites

          • Before creating a workload, you must have an available cluster. For details on how to create a cluster, see Creating a CCE Standard/Turbo Cluster.
          • To enable public access to a workload, ensure that an EIP or load balancer has been bound to at least one node in the cluster.

            If a pod has multiple containers, ensure that the ports used by the containers do not conflict with each other. Otherwise, creating the StatefulSet will fail.

          -

          Using the CCE Console

          1. Log in to the CCE console.
          2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
          3. Set basic information about the workload.

            Basic Info
            • Workload Type: Select StatefulSet. For details about workload types, see Overview.
            • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
            • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
            • Pods: Enter the number of pods of the workload.
            • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Kata Runtime and Common Runtime.
            • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
            +

            Using the CCE Console

            1. Log in to the CCE console.
            2. Click the cluster name to go to the cluster console, choose Workloads in the navigation pane, and click Create Workload in the upper right corner.
            3. Set basic information about the workload.

              Basic Info
              • Workload Type: Select StatefulSet. For details about workload types, see Overview.
              • Workload Name: Enter the name of the workload. Enter 1 to 63 characters starting with a lowercase letter and ending with a lowercase letter or digit. Only lowercase letters, digits, and hyphens (-) are allowed.
              • Namespace: Select the namespace of the workload. The default value is default. You can also click Create Namespace to create one. For details, see Creating a Namespace.
              • Pods: Enter the number of pods of the workload.
              • Container Runtime: A CCE standard cluster uses runC by default, whereas a CCE Turbo cluster supports both runC and Kata. For details about the differences, see Secure Runtime and Common Runtime.
              • Time Zone Synchronization: Specify whether to enable time zone synchronization. After time zone synchronization is enabled, the container and node use the same time zone. The time zone synchronization function depends on the local disk mounted to the container. Do not modify or delete the time zone. For details, see Configuring Time Zone Synchronization.
              Container Settings
              • Container Information
                Multiple containers can be configured in a pod. You can click Add Container on the right to configure multiple containers for the pod.
                • Basic Info: Configure basic information about the container.
          Table 1 Deployment YAML parameters

          Parameter

          Description

          @@ -74,7 +74,7 @@
          -
        3. (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
        4. (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
        5. (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
        6. (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
          • StatefulSets support dynamic attachment of EVS disks. For details, see Dynamically Mounting an EVS Disk to a StatefulSet and Dynamically Mounting a Local PV to a StatefulSet.

            Dynamic mounting is achieved by using the volumeClaimTemplates field and depends on the dynamic creation capability of StorageClass. A StatefulSet associates each pod with a PVC using the volumeClaimTemplates field, and the PVC is bound to the corresponding PV. Therefore, after the pod is rescheduled, the original data can still be mounted based on the PVC name.

            +
          • (Optional) Lifecycle: Configure operations to be performed in a specific phase of the container lifecycle, such as Startup Command, Post-Start, and Pre-Stop. For details, see Configuring Container Lifecycle Parameters.
          • (Optional) Health Check: Set the liveness probe, ready probe, and startup probe as required. For details, see Configuring Container Health Check.
          • (Optional) Environment Variables: Configure variables for the container running environment using key-value pairs. These variables transfer external information to containers running in pods and can be flexibly modified after application deployment. For details, see Configuring Environment Variables.
          • (Optional) Data Storage: Mount local storage or cloud storage to the container. The application scenarios and mounting modes vary with the storage type. For details, see Storage.
            • StatefulSets support dynamic attachment of EVS disks. For details, see Dynamically Mounting an EVS Disk to a StatefulSet or Dynamically Mounting a Local PV to a StatefulSet.

              Dynamic mounting is achieved by using the volumeClaimTemplates field and depends on the dynamic creation capability of StorageClass. A StatefulSet associates each pod with a PVC using the volumeClaimTemplates field, and the PVC is bound to the corresponding PV. Therefore, after the pod is rescheduled, the original data can still be mounted based on the PVC name.

            • After a workload is created, the storage that is dynamically mounted cannot be updated.
          • (Optional) Security Context: Assign container permissions to protect the system and other containers from being affected. Enter the user ID to assign container permissions and prevent systems and other containers from being affected.
          • (Optional) Logging: Report standard container output logs to AOM by default, without requiring manual settings. You can manually configure the log collection path. For details, see Collecting Container Logs Using ICAgent.

            To disable the standard output of the current workload, add the annotation kubernetes.AOM.log.stdout: [] in Labels and Annotations. For details about how to use this annotation, see Table 1.

            @@ -87,18 +87,18 @@

            (Optional) Service Settings

            A Service provides external access for pods. With a static IP address, a Service forwards access traffic to pods and automatically balances load for these pods.

            You can also create a Service after creating a workload. For details about Services of different types, see Overview.

            -
            (Optional) Advanced Settings
            • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Workload Upgrade Policies.
            • Pod Management Policies

              For some distributed systems, StatefulSet ordering guarantees are unnecessary or undesirable. These systems require only uniqueness and identity.

              +
              (Optional) Advanced Settings
              • Upgrade: Specify the upgrade mode and parameters of the workload. Rolling upgrade and Replace upgrade are available. For details, see Configuring Workload Upgrade Policies.
              • Pod Management Policies

                For some distributed systems, StatefulSet ordering guarantees are unnecessary or undesirable. These systems require only uniqueness and identity.

                • OrderedReady: The StatefulSet will deploy, delete, or scale pods in order and one by one. (The StatefulSet continues only after the previous pod is ready or deleted.) This is the default policy.
                • Parallel: The StatefulSet will create pods in parallel to match the desired scale without waiting, and will delete all pods at once (see the snippet after these settings).
              • Scheduling: Configure affinity and anti-affinity policies for flexible workload scheduling. Load affinity and node affinity are provided.
                • Load Affinity: Common load affinity policies are offered for quick load affinity deployment.
                  • Multi-AZ deployment is preferred: Workload pods are preferentially scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ but onto different nodes for high availability. If there are fewer nodes than pods, the extra pods will fail to run.
                  • Forcible multi-AZ deployment: Workload pods are forcibly scheduled to nodes in different AZs through pod anti-affinity (podAntiAffinity). If there are fewer AZs than pods, the extra pods will fail to run.
                  • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
                • Node Affinity: Common load affinity policies are offered for quick load affinity deployment.
                  • Node Affinity: Workload pods can be deployed on specified nodes through node affinity (nodeAffinity). If no node is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
                  • Specified node pool scheduling: Workload pods can be deployed in a specified node pool through node affinity (nodeAffinity). If no node pool is specified, the pods will be randomly scheduled based on the default scheduling policy of the cluster.
                  • Custom policies: Affinity and anti-affinity policies can be customized as needed. For details, see Scheduling Policies (Affinity/Anti-affinity).
                -
              • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Taints and Tolerations.
              • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Labels and Annotations.
              • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
              • Network Configuration
                • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
                • Whether to enable the static IP address: available only for clusters that support this function. After this function is enabled, you can set the interval for reclaiming expired pod IP addresses. For details, see Configuring a Static IP Address for a Pod.
                • IPv6 shared bandwidth: available only for clusters that support this function. After this function is enabled, you can configure a shared bandwidth for a pod with IPv6 dual-stack ENIs. For details, see Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs.
                +
              • Toleration: Using both taints and tolerations allows (not forcibly) the pod to be scheduled to a node with the matching taints, and controls the pod eviction policies after the node where the pod is located is tainted. For details, see Configuring Tolerance Policies.
              • Labels and Annotations: Add labels or annotations for pods using key-value pairs. After entering the key and value, click Confirm. For details about how to use and configure labels and annotations, see Configuring Labels and Annotations.
              • DNS: Configure a separate DNS policy for the workload. For details, see DNS Configuration.
              • Network Configuration
                • Pod ingress/egress bandwidth limitation: You can set ingress/egress bandwidth limitation for pods. For details, see Configuring QoS for a Pod.
                • Whether to enable the static IP address: available only for clusters that support this function. After this function is enabled, you can set the interval for reclaiming expired pod IP addresses. For details, see Configuring a Static IP Address for a Pod.
                • IPv6 shared bandwidth: available only for clusters that support this function. After this function is enabled, you can configure a shared bandwidth for a pod with IPv6 dual-stack ENIs. For details, see Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs.

            • Click Create Workload in the lower right corner.
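      As referenced in the pod management policy settings above, the policy is a single field on the StatefulSet spec. A minimal hedged sketch; names and image are illustrative:

      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: nginx
      spec:
        podManagementPolicy: Parallel    # Create or delete pods in parallel (default: OrderedReady).
        serviceName: nginx               # Headless Service required by the StatefulSet.
        replicas: 3
        selector:
          matchLabels:
            app: nginx
        template:
          metadata:
            labels:
              app: nginx
          spec:
            containers:
            - name: nginx
              image: nginx:latest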

      Using kubectl

      In this example, a Nginx workload is used and the EVS volume is dynamically mounted to it using the volumeClaimTemplates field.

      -
      1. Use kubectl to connect to the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create and edit the nginx-statefulset.yaml file.

        nginx-statefulset.yaml is an example file name, and you can change it as required.

        +
        1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
        2. Create and edit the nginx-statefulset.yaml file.

          nginx-statefulset.yaml is an example file name, and you can change it as required.

          vi nginx-statefulset.yaml

          The following provides an example of the file contents. For more information on StatefulSet, see the Kubernetes documentation.

          apiVersion: apps/v1
          @@ -153,7 +153,7 @@ spec:
                   resources:
                     requests:
                       storage: 10Gi
          -        storageClassName: csi-disk # Storage class name. The value is csi-disk for the EVS volume.
          +        storageClassName: csi-disk # StorageClass name. The value is csi-disk for the EVS volume.
             updateStrategy:
               type: RollingUpdate

          vi nginx-headless.yaml
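          The content of nginx-headless.yaml is elided by the diff. A hedged sketch consistent with the StatefulSet example (clusterIP: None is what makes the Service headless):

          apiVersion: v1
          kind: Service
          metadata:
            name: nginx
            namespace: default
          spec:
            clusterIP: None            # Headless: DNS resolves directly to the pod addresses.
            selector:
              app: nginx               # Must match the pod labels of the StatefulSet.
            ports:
            - name: nginx
              protocol: TCP
              port: 80
              targetPort: 80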

          diff --git a/docs/cce/umn/cce_10_0054.html b/docs/cce/umn/cce_10_0054.html index 97fe3a2b..34c770c5 100644 --- a/docs/cce/umn/cce_10_0054.html +++ b/docs/cce/umn/cce_10_0054.html @@ -1,6 +1,6 @@ -

          High-Risk Operations and Solutions

          +

          High-Risk Operations

          During service deployment or running, you may trigger high-risk operations at different levels, causing service faults or interruption. To help you better estimate and avoid operation risks, this section introduces the consequences and solutions of high-risk operations from multiple dimensions, such as clusters, nodes, networking, load balancing, logs, and EVS disks.

          Clusters and Nodes

          Table 1 High-risk operations and solutions

          Category

          @@ -63,7 +63,7 @@

          The master node may be unavailable.

          Restore the parameter settings to the recommended values. For details, see Cluster Configuration Management.

          +

          Restore the parameter settings to the recommended values. For details, see Modifying Cluster Configurations.

          Replacing the master or etcd certificate

          @@ -107,7 +107,7 @@

          Reset the node. For details, see Resetting a Node.

          Upgrading the kernel or components on which the container platform depends (such as Open vSwitch, IPvlan, Docker, and containerd)

          +

          Upgrading the kernel or components on which the container platform depends (such as Open vSwitch, IPVLAN, Docker, and containerd)

          The node may be unavailable or the network may be abnormal.

          NOTE:

          Node running depends on the system kernel version. Do not use the yum update command to update or reinstall the operating system kernel of a node unless necessary. (Reinstalling the operating system kernel using the original image or other images is a risky operation.)

          @@ -127,7 +127,7 @@

          The node may become unavailable, and components may be insecure if security-related configurations are modified.

          Restore the parameter settings to the recommended values. For details, see Configuring a Node Pool.

          +

          Restore the parameter settings to the recommended values. For details, see Modifying Node Pool Configurations.

          Modifying OS configuration

          diff --git a/docs/cce/umn/cce_10_0059.html b/docs/cce/umn/cce_10_0059.html index 02f70447..8c519eb5 100644 --- a/docs/cce/umn/cce_10_0059.html +++ b/docs/cce/umn/cce_10_0059.html @@ -1,11 +1,11 @@ -

          Network Policies

          +

          Configuring Network Policies to Restrict Pod Access

          Network policies are designed by Kubernetes to restrict pod access. They are equivalent to an application-layer firewall and enhance network security. The capabilities supported by network policies depend on those of the cluster's network add-ons.

          By default, if a namespace does not have any policy, pods in the namespace accept traffic from any source and send traffic to any destination.

          Network policies are classified into the following types:

          • namespaceSelector: selects particular namespaces for which all pods should be allowed as ingress sources or egress destinations.
          • podSelector: selects particular pods in the same namespace as the network policy which should be allowed as ingress sources or egress destinations.
          • ipBlock: selects particular IP blocks to allow as ingress sources or egress destinations. (Only egress rules support IP blocks.)
          -
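          As a hedged illustration of the podSelector type, the following policy allows ingress to pods labeled role=db only from pods labeled role=frontend in the same namespace; all labels and the port are illustrative:

          apiVersion: networking.k8s.io/v1
          kind: NetworkPolicy
          metadata:
            name: allow-frontend
            namespace: default
          spec:
            podSelector:               # Pods the policy applies to.
              matchLabels:
                role: db
            policyTypes:
            - Ingress
            ingress:
            - from:
              - podSelector:           # Allow traffic only from pods with this label.
                  matchLabels:
                    role: frontend
              ports:
              - protocol: TCP
                port: 3306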

          Constraints