diff --git a/.gitignore b/.gitignore
index ad375bb..904457f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,5 @@ releasenotes/build
 .ropeproject
 test.py
 .vscode
+umn/build
+api-ref/build
diff --git a/umn/source/_static/images/ExpandCollapse.js b/umn/source/_static/images/ExpandCollapse.js
new file mode 100644
index 0000000..3f1b7db
--- /dev/null
+++ b/umn/source/_static/images/ExpandCollapse.js
@@ -0,0 +1 @@
+var expandClassName="dropdownexpand";var collapseClassName="dropdowncollapse";var collapseTableClassName="dropdowncollapsetable";var href="";function ExpandorCollapseNode(a){a=a.parentNode;if(a.className==expandClassName){a.className=collapseClassName}else{a.className=expandClassName}}function ExpandorCollapseTableNode(a){a=a.parentNode;if(a.className==expandClassName){a.className=collapseTableClassName}else{a.className=expandClassName}}function ExpandorCollapseAllNodes(g,h,c){var a=g.getAttribute("title");var b=g.parentNode;if(a=="collapse"){g.setAttribute("title","expand");g.className="dropdownAllButtonexpand";g.innerHTML=h}else{g.setAttribute("title","collapse");g.className="dropdownAllButtoncollapse";g.innerHTML=c}var f=b.getElementsByTagName("*");for(var d=0;d-1){ExpandForHref(href.substring(href.lastIndexOf("#")+1))}}catch(a){}};
\ No newline at end of file
diff --git a/umn/source/_static/images/caution_3.0-en-us.png b/umn/source/_static/images/caution_3.0-en-us.png
new file mode 100644
index 0000000..60f6076
Binary files /dev/null and b/umn/source/_static/images/caution_3.0-en-us.png differ
diff --git a/umn/source/_static/images/commonltr.css b/umn/source/_static/images/commonltr.css
new file mode 100644
index 0000000..c5480b0
--- /dev/null
+++ b/umn/source/_static/images/commonltr.css
@@ -0,0 +1 @@
+body{font-size:10pt;font-family:Arial;margin:1.5em;border-top:2pt;padding-top:1em;padding-bottom:2em}.msgph{font-family:Courier New}.rowlinecopyright{color:red;margin-top:10pt}.unresolved{background-color:skyblue}.noTemplate{background-color:yellow}.base{background-color:#fff}.nested0{margin-top:1em}.p{margin-top:.6em;margin-bottom:.6em}p{margin-top:.5em;margin-bottom:.5em}.note p{margin-top:.5em;margin-bottom:.5em}.tip p{margin-top:.5em;margin-bottom:.5em}.danger p{margin-top:.5em;margin-bottom:.5em}.notice p{margin-top:.5em;margin-bottom:.5em}.warning p{margin-top:.5em;margin-bottom:.5em}.caution p{margin-top:.5em;margin-bottom:.5em}.attention p{margin-top:.5em;margin-bottom:.5em}table p{margin-top:.2em;margin-bottom:.2em}table .p{margin-top:.4em;margin-bottom:.2em}.figcap{font-size:10pt}img{margin-top:.3em}.figdesc{font-style:normal}.figborder{border-style:solid;padding-left:3px;border-width:2px;padding-right:3px;margin-top:1em;border-color:Silver}.figsides{border-left:2px solid;padding-left:3px;border-right:2px solid;padding-right:3px;margin-top:1em;border-color:Silver}.figtop{border-top:2px solid;margin-top:1em;border-color:Silver}.figbottom{border-bottom:2px solid;border-color:Silver}.figtopbot{border-top:2px solid;border-bottom:2px
solid;margin-top:1em;border-color:Silver}.fignone{font-size:10pt;margin-top:8pt;margin-bottom:8pt}.familylinks{margin-top:1.5em;margin-bottom:1em}.ullinks{list-style-type:none}.linklist{margin-bottom:1em}.linklistwithchild{margin-left:1.5em;margin-bottom:1em}.sublinklist{margin-left:1.5em;margin-bottom:1em}.relconcepts{margin-top:.6em;margin-bottom:.6em}.reltasks{margin-top:.6em;margin-bottom:.6em}.relref{margin-top:.6em;margin-bottom:.6em}.relinfo{margin-top:.6em;margin-bottom:.6em}.breadcrumb{font-size:smaller;margin-bottom:.6em}.prereq{margin-left:20px}.parentlink{margin-top:.6em;margin-bottom:.6em}.nextlink{margin-top:.6em;margin-bottom:.6em}.previouslink{margin-top:.6em;margin-bottom:.6em}.topictitle1{margin-top:0;margin-bottom:1em;font-size:14pt;color:#007af4}.topictitle2{margin-top:1pc;margin-bottom:.45em;font-size:1.17em;color:#007af4}.topictitle3{margin-top:1pc;margin-bottom:.17em;font-size:1.17em;font-weight:bold;color:#007af4}.topictitle4{margin-top:.83em;font-size:1.17em;font-weight:bold}.topictitle5{font-size:1.17em;font-weight:bold}.topictitle6{font-size:1.17em;font-style:italic}.sectiontitle{margin-top:1em;margin-bottom:1em;color:black;font-size:10.5pt;font-weight:bold;color:#007af4;overflow:auto}.section{margin-top:1em;margin-bottom:1em}.example{margin-top:1em;margin-bottom:1em}.sectiontitle2contents:link{color:#007af4}.sectiontitle2contents:visited{color:#800080}.note{margin-top:1em;margin-bottom:1em;background-color:#ffc}.notetitle{font-weight:bold}.notelisttitle{font-weight:bold}.tip{margin-top:1em;margin-bottom:1em;background-color:#ffc}.tiptitle{font-weight:bold}.fastpath{margin-top:1em;margin-bottom:1em;background-color:#ffc}.fastpathtitle{font-weight:bold}.important{margin-top:1em;margin-bottom:1em;background-color:#ffc}.importanttitle{font-weight:bold}.remember{margin-top:1em;margin-bottom:1em;background-color:#ffc}.remembertitle{font-weight:bold}.restriction{margin-top:1em;margin-bottom:1em;background-color:#ffc}.restrictiontitle{font-weight:bold}.attention{margin-top:1em;margin-bottom:1em;background-color:#ffc}.attentiontitle{font-weight:bold}.dangertitle{font-weight:bold}.danger{margin-top:1em;margin-bottom:1em;background-color:#ffc}.noticetitle{font-weight:bold}.notice{margin-top:1em;margin-bottom:1em;background-color:#ffc}.warningtitle{font-weight:bold}.warning{margin-top:1em;margin-bottom:1em;background-color:#ffc}.cautiontitle{font-weight:bold}.caution{margin-top:1em;margin-bottom:1em;background-color:#ffc}ul.simple{list-style-type:none}li ul{margin-top:.6em}li{margin-top:.6em;margin-bottom:.6em}.note li{margin-top:.2em;margin-bottom:.2em}.tip li{margin-top:.2em;margin-bottom:.2em}.danger li{margin-top:.2em;margin-bottom:.2em}.warning li{margin-top:.2em;margin-bottom:.2em}.notice li{margin-top:.2em;margin-bottom:.2em}.caution li{margin-top:.2em;margin-bottom:.2em}.attention li{margin-top:.2em;margin-bottom:.2em}table li{margin-top:.2em;margin-bottom:.2em}ol{margin-top:1em;margin-bottom:1em;margin-left:2.4em;padding-left:0}ul{margin-top:1em;margin-bottom:1em;margin-left:2.0em;padding-left:0}ol ul{list-style:disc}ul ul{list-style:square}ol ul ul{list-style:square}ol ul{list-style-type:disc}table ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}table ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul ul{margin-top:.4em;margin-bottom:.4em;list-style:square}table ol ol{margin-top:.4em;margin-bottom:.4em;list-style:lower-alpha}table ol ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul 
ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}.substepthirdol{list-style-type:lower-roman}.firstcol{font-weight:bold}th{background-color:#cfcfcf}table{margin-top:8pt;margin-bottom:12pt;width:100%}table caption{margin-top:8pt;text-align:left}.bold{font-weight:bold}.boldItalic{font-weight:bold;font-style:italic}.italic{font-style:italic}.underlined{text-decoration:underline}.var{font-style:italic}.shortcut{text-decoration:underline}.dlterm{font-weight:bold}dd{margin-top:.5em;margin-bottom:.5em}.dltermexpand{font-weight:bold;margin-top:1em}*[compact="yes"]>li{margin-top:0}*[compact="no"]>li{margin-top:.53em}.liexpand{margin-top:1em;margin-bottom:1em}.sliexpand{margin-top:1em;margin-bottom:1em}.dlexpand{margin-top:1em;margin-bottom:1em}.ddexpand{margin-top:1em;margin-bottom:1em}.stepexpand{margin-top:.3em;margin-bottom:.3em}.substepexpand{margin-top:.3em;margin-bottom:.3em}div.imageleft{text-align:left}div.imagecenter{text-align:center}div.imageright{text-align:right}div.imagejustify{text-align:justify}div.noblankline{text-align:center}div.noblankline img{margin-top:0}pre.screen{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;background-color:#ddd;white-space:pre}pre.codeblock{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;background-color:#ddd;white-space:pre}.hrcopyright{color:#3f4e5d;margin-top:18pt}.hwcopyright{text-align:center}.comment{margin:2px 2px 2px 2px;font-family:Arial;font-size:10pt;background-color:#bfb;color:#000}.dropdownAllButtonexpand{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}.dropdownAllButtoncollapse{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;text-decoration:underline;color:#007af4}.dropdowntitle{background-repeat:no-repeat;background-position:0 4px;padding-left:15px;cursor:pointer;text-decoration:underline;color:#007af4}.dropdownexpand .dropdowntitle{background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.dropdowncollapse .dropdowncontext{display:none}.dropdowncollapse .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}.dropdowncollapsetable{border:0}.dropdowncollapsetable .dropdowncontext{display:none}.dropdowncollapsetable .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}pre{font-size:10pt;font-weight:normal;margin-left:9;margin-top:2;margin-bottom:2}.termcolor{color:blue;cursor:pointer}#dhtmlgoodies_tooltip{background-color:#f0f0d2;border:1px solid #000;position:absolute;display:none;z-index:20000;padding:2px;font-size:.9em;-moz-border-radius:6px;font-family:"Trebuchet MS","Lucida Sans Unicode",Arial,sans-serif}#dhtmlgoodies_tooltipShadow{position:absolute;background-color:#555;display:none;z-index:10000;opacity:.7;filter:alpha(opacity=70);-khtml-opacity:.7;-moz-opacity:.7;-moz-border-radius:6px}.freeze{position:fixed;_position:absolute;_top:expression(eval(document.documentElement.scrollTop));left:10;top:0} \ No newline at end of file diff --git a/umn/source/_static/images/commonltr_print.css b/umn/source/_static/images/commonltr_print.css new file mode 100644 index 0000000..a598231 --- /dev/null +++ b/umn/source/_static/images/commonltr_print.css @@ -0,0 +1 @@ +body{font-size:12.0pt;margin:1.5em;margin-left:1.6cm}.msgph{font-family:Courier 
New}.rowlinecopyright{color:red;margin-top:10pt}.unresolved{background-color:skyblue}.noTemplate{background-color:yellow}.base{background-color:#fff}.nested0{margin-top:1em}.p{margin-top:1em}p{margin-top:.5em;margin-bottom:.5em}.note p{margin-top:.5em;margin-bottom:.5em}.tip p{margin-top:.5em;margin-bottom:.5em}.danger p{margin-top:.5em;margin-bottom:.5em}.warning p{margin-top:.5em;margin-bottom:.5em}.notice p{margin-top:.5em;margin-bottom:.5em}.caution p{margin-top:.5em;margin-bottom:.5em}.attention p{margin-top:.5em;margin-bottom:.5em}table p{margin-top:.2em;margin-bottom:.2em}table .p{margin-top:.4em;margin-bottom:.2em}.covertable{border:0;width:100% cellpadding:8pt;cellspacing:8pt}.cover_productname{font-size:15.0pt;font-family:"Arial"}.cover_manualtitle{font-size:24.0pt;font-weight:bold;font-family:"Arial"}.cover_manualsubtitle{font-size:18.0pt;font-weight:bold;font-family:"Arial"}.cover_heading{font-size:12.0pt;font-weight:bold;font-family:"Arial"}.cover_text{font-size:9.0pt;font-family:"Arial"}.tocheading,.heading1,.topictitle1{margin-top:40.0pt;margin-right:0;margin-bottom:20.0pt;margin-left:-1cm;text-align:left;border:0;border-bottom:solid windowtext .5pt;font-size:22.0pt;font-family:"Arial";font-weight:bold}.topictitlenumber1{font-size:72.0pt;font-family:"Book Antiqua";font-weight:bold}.topictitle2{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:-1cm;text-indent:0;font-size:18.0pt;font-family:"Arial";font-weight:bold}.topictitle3{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:0;text-indent:0;font-size:16.0pt;font-family:"Book Antiqua";font-weight:bold}.topictitle4{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:0;text-indent:0;font-size:14.0pt;font-family:"Book Antiqua";font-weight:bold}.topictitle5{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:0;text-indent:0;font-size:13.0pt;font-family:"Book Antiqua";font-weight:bold}.blocklabel,.topictitle6{margin-top:15.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;text-indent:0;font-size:13.0pt;font-family:"Book Antiqua";font-weight:bold}.sectiontitle{margin-top:15.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:-1cm;text-indent:0;font-size:13.0pt;font-family:"Arial";font-weight:bold}.tocentry1{margin-top:8.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:12.0pt;font-family:"Book Antiqua";font-weight:bold}.tocentry2{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tocentry3{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tocentry4{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tocentry5{margin-top:4.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman"}.tofentry1{margin-top:8.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman";font-weight:normal}.totentry1{margin-top:8.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;line-height:12.0pt;font-size:11.0pt;font-family:"Times New Roman";font-weight:normal}.indexheading{margin-top:15.0pt;margin-right:0;margin-bottom:4.0pt;margin-left:0;text-indent:0;font-size:13.0pt;font-family:"Book 
Antiqua";font-weight:bold}.indexentry1{margin-top:4pt;margin-right:0;margin-bottom:0;margin-left:0;line-height:12.0pt;font-size:12.0pt;font-family:"Times New Roman"}.indexentry2{margin-top:0;margin-right:0;margin-bottom:0;margin-left:24.0pt;line-height:12.0pt;font-size:12.0pt}.indexentry3{margin-top:0;margin-right:0;margin-bottom:0;margin-left:48pt;line-height:12.0pt;font-size:12.0pt}.figurenumber{font-weight:bold}.tablenumber{font-weight:bold}.familylinks{margin-top:1.5em;margin-bottom:1em}.figcap{font-size:11.0pt}.tablecap{font-size:11.0pt}.figdesc{font-style:normal}.fignone{margin-top:8.0pt}.figborder{border-style:solid;padding-left:3px;border-width:2px;padding-right:3px;margin-top:1em;border-color:Silver}.figsides{border-left:2px solid;padding-left:3px;border-right:2px solid;padding-right:3px;margin-top:1em;border-color:Silver}.figtop{border-top:2px solid;margin-top:1em;border-color:Silver}.figbottom{border-bottom:2px solid;border-color:Silver}.figtopbot{border-top:2px solid;border-bottom:2px solid;margin-top:1em;border-color:Silver}.ullinks{margin-left:0;list-style-type:none}.ulchildlink{margin-top:1em;margin-bottom:1em}.olchildlink{margin-top:1em;margin-bottom:1em;margin-left:1em}.linklist{margin-bottom:1em}.linklistwithchild{margin-left:1.5em;margin-bottom:1em}.sublinklist{margin-left:1.5em;margin-bottom:1em}.relconcepts{margin-left:1cm;margin-top:1em;margin-bottom:1em}.reltasks{margin-left:1cm;margin-top:1em;margin-bottom:1em}.relref{margin-left:1cm;margin-top:1em;margin-bottom:1em}.relinfo{margin-top:1em;margin-bottom:1em}.breadcrumb{font-size:smaller;margin-bottom:1em}.prereq{margin-left:0}.parentlink{margin-top:.6em;margin-bottom:.6em}.nextlink{margin-top:.6em;margin-bottom:.6em}.previouslink{margin-top:.6em;margin-bottom:.6em}.section{margin-top:1em;margin-bottom:1em}.example{margin-top:1em;margin-bottom:1em}table .note{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.note{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.notetitle{font-weight:bold;font-size:11.0pt}.notelisttitle{font-weight:bold}table .tip{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.tip{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.tiptitle{font-weight:bold;font-size:11.0pt}table .fastpath{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.fastpath{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.fastpathtitle{font-weight:bold;font-size:11.0pt}table .important{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman";font-style:italic}.important{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.importanttitle{font-weight:bold;font-size:11.0pt}table .remember{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman";font-style:italic}.remember{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.remembertitle{font-weight:bold;font-size:11.0pt}table .restriction{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman";font-style:italic}.restriction{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;border-top:solid .5pt;border-bottom:solid .5pt}.restrictiontitle{font-weight:bold;font-size:11.0pt}table 
.attention{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.attention{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}.attentiontitle{font-weight:bold}table .danger{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.dangertitle{font-weight:bold}.danger{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}table .notice{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.noticetitle{font-weight:bold}.notice{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}table .warning{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}.warningtitle{font-weight:bold}.warning{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}table .caution{margin-top:1em;margin-bottom:1em;border:0;font-size:10.0pt;font-family:"Times New Roman"}table caption{margin-top:8pt;text-align:left;font-weight:bold}.tablenoborder{margin-top:8pt}.cautiontitle{font-weight:bold}.caution{margin-top:1em;margin-bottom:1em;border:0;border-top:solid .5pt;border-bottom:solid .5pt}ul.simple{list-style-type:none}li ul{margin-top:.6em}li{margin-top:.6em;margin-bottom:.6em}.note li{margin-top:.2em;margin-bottom:.2em}.tip li{margin-top:.2em;margin-bottom:.2em}.danger li{margin-top:.2em;margin-bottom:.2em}.warning li{margin-top:.2em;margin-bottom:.2em}.notice li{margin-top:.2em;margin-bottom:.2em}.caution li{margin-top:.2em;margin-bottom:.2em}.attention li{margin-top:.2em;margin-bottom:.2em}table li{margin-top:.2em;margin-bottom:.2em}.firstcol{font-weight:bold}th{background-color:#cfcfcf}.bold{font-weight:bold}.boldItalic{font-weight:bold;font-style:italic}.italic{font-style:italic}.underlined{text-decoration:underline}.var{font-style:italic}.shortcut{text-decoration:underline}.dlterm{font-weight:bold}dd{margin-top:.5em;margin-bottom:.5em}.dltermexpand{font-weight:bold;margin-top:1em}*[compact="yes"]>li{margin-top:0}*[compact="no"]>li{margin-top:.53em}.liexpand{margin-top:1em;margin-bottom:1em}.sliexpand{margin-top:1em;margin-bottom:1em}.dlexpand{margin-top:1em;margin-bottom:1em}.ddexpand{margin-top:1em;margin-bottom:1em}.stepexpand{margin-top:1em;margin-bottom:1em}.substepexpand{margin-top:1em;margin-bottom:1em}table{margin-top:8pt;margin-bottom:10.0pt;width:100%}thead{font-size:10.0pt;font-family:"Book Antiqua";font-weight:bold}tbody{font-size:11.0pt}ol{margin-top:1em;margin-bottom:1em;margin-left:1.7em;-webkit-padding-start:0}ul{margin-top:1em;margin-bottom:1em;margin-left:1.2em;-webkit-padding-start:0}ol ul{list-style:disc}ul ul{list-style:square}ol ol{list-style-type:lower-alpha}table ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}table ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul ul{margin-top:.4em;margin-bottom:.4em;list-style:square}table ol ol{margin-top:.4em;margin-bottom:.4em;list-style:lower-alpha}table ol ul{margin-top:.4em;margin-bottom:.4em;list-style:disc}table ul ol{margin-top:.4em;margin-bottom:.4em;list-style:decimal}.substepthirdol{list-style-type:lower-roman}div.imageleft{text-align:left}div.imagecenter{text-align:center}div.imageright{text-align:right}div.imagejustify{text-align:justify}div.noblankline{text-align:center}div.noblankline 
img{margin-top:0}pre{font-size:10.0pt;border-width:2px;padding:2px;margin-top:5px;margin-bottom:5px;white-space:pre-wrap;white-space:-moz-pre-wrap;white-space:-pre-wrap;white-space:-o-pre-wrap;word-wrap:break-word}pre.screen{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;white-space:pre}pre.codeblock{margin-top:2px;margin-bottom:2px;padding:1.5px 1.5px 0 1.5px;border:0;white-space:pre}.dropdownAllButtonexpand{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4}.dropdownAllButtoncollapse{cursor:pointer;background-repeat:no-repeat;background-position:0 4px;padding-left:15px;background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;text-decoration:underline;color:#007af4}.dropdowntitle{background-repeat:no-repeat;background-position:0 4px;padding-left:15px;cursor:pointer;text-decoration:underline;color:#007af4}.dropdownexpand .dropdowntitle{background-image:url(icon-arrowdn.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.dropdowncollapse .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.dropdowncollapsetable .dropdowntitle{background-image:url(icon-arrowrt.gif);text-decoration:underline;color:#007af4;margin:0 0 8px 0}.prefacesectiontitle1{margin-top:10.0pt;margin-right:0;margin-bottom:8.0pt;margin-left:-1cm;text-indent:0;font-size:18.0pt;font-family:"Book Antiqua";font-weight:bold;overflow:auto}.termcolor{color:blue;cursor:pointer}#dhtmlgoodies_tooltip{background-color:#f0f0d2;border:1px solid #000;position:absolute;display:none;z-index:20000;padding:2px;font-size:.9em;-moz-border-radius:6px;font-family:"Trebuchet MS","Lucida Sans Unicode",Arial,sans-serif}#dhtmlgoodies_tooltipShadow{position:absolute;background-color:#555;display:none;z-index:10000;opacity:.7;filter:alpha(opacity=70);-khtml-opacity:.7;-moz-opacity:.7;-moz-border-radius:6px}.freeze{position:fixed;_position:absolute;_top:expression(eval(document.documentElement.scrollTop));left:10;top:0}.hrcopyright{color:#3f4e5d;margin-top:18pt;margin-left:-1cm}.hwcopyright{text-align:center;font-family:Arial;margin-left:-1cm} \ No newline at end of file diff --git a/umn/source/_static/images/commonrtl.css b/umn/source/_static/images/commonrtl.css new file mode 100644 index 0000000..947a9a0 --- /dev/null +++ b/umn/source/_static/images/commonrtl.css @@ -0,0 +1,2 @@ +.msgph{font-family:Courier New}.unresolved{background-color:#87ceeb}.noTemplate{background-color:#ff0}.base{background-color:#fff}/*! Add space for top level topics */.nested0,.p{margin-top:1em}/*! div with class=p is used for paragraphs that contain blocks, to keep the XHTML valid *//*! Default of italics to set apart figure captions */.figcap,.italic,.var{font-style:italic}.figdesc{font-style:normal}/*! Use @frame to create frames on figures */.figborder{padding-left:3px;padding-right:3px;margin-top:1em;border:2px solid Silver}.figsides{margin-top:1em;padding-left:3px;padding-right:3px;border-left:2px solid Silver;border-right:2px solid Silver}.figtop{border-top:2px solid Silver;margin-top:1em}.figbottom{border-bottom:2px solid Silver}.figtopbot{border-top:2px solid Silver;border-bottom:2px solid Silver;margin-top:1em}/*! Most link groups are created with
. Ensure they have space before and after. */.ullinks,ul.simple{list-style-type:none}.attention,.danger,.ddexpand,.dlexpand,.example,.fastpath,.important,.liexpand,.linklist,.note,.notice,.olchildlink,.relconcepts,.relinfo,.relref,.reltasks,.remember,.restriction,.section,.sliexpand,.stepexpand,.substepexpand,.tip,.ulchildlink,.warning{margin-top:1em;margin-bottom:1em}.linklistwithchild,.sublinklist{margin-top:1em;margin-right:1.5em;margin-bottom:1em}.breadcrumb{font-size:smaller;margin-bottom:1em}.prereq{margin-right:20px}/*! Set heading sizes, getting smaller for deeper nesting */.topictitle1{font-size:1.34em;margin-top:0;margin-bottom:.1em}.topictitle2,.topictitle3,.topictitle4,.topictitle5,.topictitle6,.sectiontitle{font-size:1.17em}.topictitle2{margin-top:1pc;margin-bottom:.45em}.topictitle3{margin-top:1pc;margin-bottom:.17em;font-weight:700}.topictitle4{margin-top:.83em;font-weight:700}.topictitle5{font-weight:700}.topictitle6{font-style:italic}.sectiontitle{margin-top:1em;margin-bottom:0;color:#000;font-weight:700}/*! All note formats have the same default presentation */.attentiontitle,.bold,.cautiontitle,.dangertitle,.dlterm,.fastpathtitle,.firstcol,.importanttitle,.notelisttitle,.notetitle,.noticetitle,.parmname,.remembertitle,.restrictiontitle,.tiptitle,.uicontrol,.warningtitle{font-weight:700}.caution{font-weight:700;margin-bottom:1em}/*! Simple lists do not get a bullet *//*! Used on the first column of a table, when rowheader="firstcol" is used *//*! Various basic phrase styles */.boldItalic{font-weight:700;font-style:italic}.shortcut,.underlined{text-decoration:underline}/*! 2008-10-27 keyword采用跟随上下文的样式 +*//*! Default of bold for definition list terms *//*! Use CSS to expand lists with @compact="no" */.dltermexpand{font-weight:700;margin-top:1em}[compact="yes"]>li{margin-top:0}[compact="no"]>li{margin-top:.53em}/*! 
Align images based on @align on topic/image */div.imageleft,.text-align-left{text-align:left}div.imagecenter,.text-align-center{text-align:center}div.imageright,.text-align-right{text-align:right}div.imagejustify,.text-align-justify{text-align:justify}.cellrowborder{border-right:0;border-top:0;border-left:1px solid;border-bottom:1px solid}.row-nocellborder{border-left:hidden;border-right:0;border-top:0;border-bottom:1px solid}.cell-norowborder{border-top:0;border-bottom:hidden;border-right:0;border-left:1px solid}.nocellnorowborder{border:0;border-left:hidden;border-bottom:hidden}pre.codeblock,pre.screen{padding:5px;border:outset;background-color:#ccc;margin-top:2px;margin-bottom:2px;white-space:pre} \ No newline at end of file diff --git a/umn/source/_static/images/danger_3.0-en-us.png b/umn/source/_static/images/danger_3.0-en-us.png new file mode 100644 index 0000000..47a9c72 Binary files /dev/null and b/umn/source/_static/images/danger_3.0-en-us.png differ diff --git a/umn/source/_static/images/delta.gif b/umn/source/_static/images/delta.gif new file mode 100644 index 0000000..0d1b1f6 Binary files /dev/null and b/umn/source/_static/images/delta.gif differ diff --git a/umn/source/_static/images/deltaend.gif b/umn/source/_static/images/deltaend.gif new file mode 100644 index 0000000..cc7da0f Binary files /dev/null and b/umn/source/_static/images/deltaend.gif differ diff --git a/umn/source/_static/images/en-us_image_0000001110760906.png b/umn/source/_static/images/en-us_image_0000001110760906.png new file mode 100644 index 0000000..a64ad56 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760906.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760908.png b/umn/source/_static/images/en-us_image_0000001110760908.png new file mode 100644 index 0000000..4e6b9c7 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760908.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760910.png b/umn/source/_static/images/en-us_image_0000001110760910.png new file mode 100644 index 0000000..8215367 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760910.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760912.png b/umn/source/_static/images/en-us_image_0000001110760912.png new file mode 100644 index 0000000..4eb5325 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760912.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760934.png b/umn/source/_static/images/en-us_image_0000001110760934.png new file mode 100644 index 0000000..30a7499 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760934.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760936.png b/umn/source/_static/images/en-us_image_0000001110760936.png new file mode 100644 index 0000000..75490ee Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760936.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760956.png b/umn/source/_static/images/en-us_image_0000001110760956.png new file mode 100644 index 0000000..f3e351f Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760956.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760968.png b/umn/source/_static/images/en-us_image_0000001110760968.png new file mode 100644 index 0000000..78e4f09 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001110760968.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110760970.png b/umn/source/_static/images/en-us_image_0000001110760970.png new file mode 100644 index 0000000..504bb63 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110760970.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761010.png b/umn/source/_static/images/en-us_image_0000001110761010.png new file mode 100644 index 0000000..14ca594 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761010.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761012.png b/umn/source/_static/images/en-us_image_0000001110761012.png new file mode 100644 index 0000000..9e1c9f7 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761012.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761016.png b/umn/source/_static/images/en-us_image_0000001110761016.png new file mode 100644 index 0000000..ca13257 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761016.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761018.png b/umn/source/_static/images/en-us_image_0000001110761018.png new file mode 100644 index 0000000..f05f690 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761018.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761020.png b/umn/source/_static/images/en-us_image_0000001110761020.png new file mode 100644 index 0000000..0b7e0bf Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761020.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761034.png b/umn/source/_static/images/en-us_image_0000001110761034.png new file mode 100644 index 0000000..301f16d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761034.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761038.png b/umn/source/_static/images/en-us_image_0000001110761038.png new file mode 100644 index 0000000..42b3a9e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761038.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761044.png b/umn/source/_static/images/en-us_image_0000001110761044.png new file mode 100644 index 0000000..ab1271d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761044.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761054.png b/umn/source/_static/images/en-us_image_0000001110761054.png new file mode 100644 index 0000000..3b3371e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761054.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761058.png b/umn/source/_static/images/en-us_image_0000001110761058.png new file mode 100644 index 0000000..76d22b0 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761058.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761062.png b/umn/source/_static/images/en-us_image_0000001110761062.png new file mode 100644 index 0000000..ed68c91 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761062.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761072.png b/umn/source/_static/images/en-us_image_0000001110761072.png new file mode 100644 index 0000000..642d449 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001110761072.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761074.png b/umn/source/_static/images/en-us_image_0000001110761074.png new file mode 100644 index 0000000..56c075f Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761074.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761076.png b/umn/source/_static/images/en-us_image_0000001110761076.png new file mode 100644 index 0000000..e12b79e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761076.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761078.png b/umn/source/_static/images/en-us_image_0000001110761078.png new file mode 100644 index 0000000..38ddf54 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761078.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761080.png b/umn/source/_static/images/en-us_image_0000001110761080.png new file mode 100644 index 0000000..56c075f Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761080.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761082.png b/umn/source/_static/images/en-us_image_0000001110761082.png new file mode 100644 index 0000000..ac7d756 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761082.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761086.gif b/umn/source/_static/images/en-us_image_0000001110761086.gif new file mode 100644 index 0000000..333527e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761086.gif differ diff --git a/umn/source/_static/images/en-us_image_0000001110761088.png b/umn/source/_static/images/en-us_image_0000001110761088.png new file mode 100644 index 0000000..ad140a5 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761088.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761092.png b/umn/source/_static/images/en-us_image_0000001110761092.png new file mode 100644 index 0000000..21bbdd5 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761092.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761098.png b/umn/source/_static/images/en-us_image_0000001110761098.png new file mode 100644 index 0000000..b516f88 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761098.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761112.png b/umn/source/_static/images/en-us_image_0000001110761112.png new file mode 100644 index 0000000..9394397 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761112.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761148.png b/umn/source/_static/images/en-us_image_0000001110761148.png new file mode 100644 index 0000000..4e6b9c7 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761148.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761158.png b/umn/source/_static/images/en-us_image_0000001110761158.png new file mode 100644 index 0000000..748f1cc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110761158.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110761582.png b/umn/source/_static/images/en-us_image_0000001110761582.png new file mode 100644 index 0000000..0886ce4 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001110761582.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920802.png b/umn/source/_static/images/en-us_image_0000001110920802.png new file mode 100644 index 0000000..97fd85e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920802.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920824.png b/umn/source/_static/images/en-us_image_0000001110920824.png new file mode 100644 index 0000000..cf66af9 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920824.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920858.png b/umn/source/_static/images/en-us_image_0000001110920858.png new file mode 100644 index 0000000..e5e1698 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920858.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920874.png b/umn/source/_static/images/en-us_image_0000001110920874.png new file mode 100644 index 0000000..ea59a9b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920874.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920880.png b/umn/source/_static/images/en-us_image_0000001110920880.png new file mode 100644 index 0000000..7ae049c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920880.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920910.png b/umn/source/_static/images/en-us_image_0000001110920910.png new file mode 100644 index 0000000..b2c6a8b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920910.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920912.png b/umn/source/_static/images/en-us_image_0000001110920912.png new file mode 100644 index 0000000..e1372b0 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920912.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920914.png b/umn/source/_static/images/en-us_image_0000001110920914.png new file mode 100644 index 0000000..ab6b8c0 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920914.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920916.png b/umn/source/_static/images/en-us_image_0000001110920916.png new file mode 100644 index 0000000..fdb0e68 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920916.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920918.png b/umn/source/_static/images/en-us_image_0000001110920918.png new file mode 100644 index 0000000..ec22a72 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920918.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920920.png b/umn/source/_static/images/en-us_image_0000001110920920.png new file mode 100644 index 0000000..ffacffc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920920.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920924.png b/umn/source/_static/images/en-us_image_0000001110920924.png new file mode 100644 index 0000000..f34ea83 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920924.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920930.png b/umn/source/_static/images/en-us_image_0000001110920930.png new file mode 100644 index 0000000..80e80a6 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001110920930.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920934.png b/umn/source/_static/images/en-us_image_0000001110920934.png new file mode 100644 index 0000000..99dc5f0 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920934.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920936.png b/umn/source/_static/images/en-us_image_0000001110920936.png new file mode 100644 index 0000000..5e78893 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920936.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920938.png b/umn/source/_static/images/en-us_image_0000001110920938.png new file mode 100644 index 0000000..3bc3395 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920938.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920940.png b/umn/source/_static/images/en-us_image_0000001110920940.png new file mode 100644 index 0000000..6ff6b1a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920940.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920960.png b/umn/source/_static/images/en-us_image_0000001110920960.png new file mode 100644 index 0000000..d2a14cf Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920960.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920962.png b/umn/source/_static/images/en-us_image_0000001110920962.png new file mode 100644 index 0000000..08cfb80 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920962.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920964.png b/umn/source/_static/images/en-us_image_0000001110920964.png new file mode 100644 index 0000000..8c46853 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920964.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920974.png b/umn/source/_static/images/en-us_image_0000001110920974.png new file mode 100644 index 0000000..ac7d756 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920974.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920976.png b/umn/source/_static/images/en-us_image_0000001110920976.png new file mode 100644 index 0000000..c1360a8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920976.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920978.png b/umn/source/_static/images/en-us_image_0000001110920978.png new file mode 100644 index 0000000..c74c1dc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920978.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920980.png b/umn/source/_static/images/en-us_image_0000001110920980.png new file mode 100644 index 0000000..0698281 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920980.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920982.png b/umn/source/_static/images/en-us_image_0000001110920982.png new file mode 100644 index 0000000..c1360a8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920982.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920984.png b/umn/source/_static/images/en-us_image_0000001110920984.png new file mode 100644 index 0000000..ad140a5 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001110920984.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920986.png b/umn/source/_static/images/en-us_image_0000001110920986.png new file mode 100644 index 0000000..6d31edb Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920986.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920992.png b/umn/source/_static/images/en-us_image_0000001110920992.png new file mode 100644 index 0000000..cb38ef3 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920992.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920994.png b/umn/source/_static/images/en-us_image_0000001110920994.png new file mode 100644 index 0000000..3699a9b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920994.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920996.png b/umn/source/_static/images/en-us_image_0000001110920996.png new file mode 100644 index 0000000..38ddf54 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920996.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110920998.png b/umn/source/_static/images/en-us_image_0000001110920998.png new file mode 100644 index 0000000..b155bec Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110920998.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110921000.png b/umn/source/_static/images/en-us_image_0000001110921000.png new file mode 100644 index 0000000..4f6d3dc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110921000.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110921004.png b/umn/source/_static/images/en-us_image_0000001110921004.png new file mode 100644 index 0000000..a3a91d4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110921004.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110921012.png b/umn/source/_static/images/en-us_image_0000001110921012.png new file mode 100644 index 0000000..953bf37 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110921012.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110921016.png b/umn/source/_static/images/en-us_image_0000001110921016.png new file mode 100644 index 0000000..4f04a9c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110921016.png differ diff --git a/umn/source/_static/images/en-us_image_0000001110921482.png b/umn/source/_static/images/en-us_image_0000001110921482.png new file mode 100644 index 0000000..14dc8ec Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001110921482.png differ diff --git a/umn/source/_static/images/en-us_image_0000001115974588.png b/umn/source/_static/images/en-us_image_0000001115974588.png new file mode 100644 index 0000000..648379c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001115974588.png differ diff --git a/umn/source/_static/images/en-us_image_0000001115974770.png b/umn/source/_static/images/en-us_image_0000001115974770.png new file mode 100644 index 0000000..f3742dd Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001115974770.png differ diff --git a/umn/source/_static/images/en-us_image_0000001115981688.png b/umn/source/_static/images/en-us_image_0000001115981688.png new file mode 100644 index 0000000..5e6bf28 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001115981688.png differ diff --git a/umn/source/_static/images/en-us_image_0000001120894978.png b/umn/source/_static/images/en-us_image_0000001120894978.png new file mode 100644 index 0000000..81c1433 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001120894978.png differ diff --git a/umn/source/_static/images/en-us_image_0000001125463277.png b/umn/source/_static/images/en-us_image_0000001125463277.png new file mode 100644 index 0000000..2cbe301 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001125463277.png differ diff --git a/umn/source/_static/images/en-us_image_0000001135576038.png b/umn/source/_static/images/en-us_image_0000001135576038.png new file mode 100644 index 0000000..92379c3 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001135576038.png differ diff --git a/umn/source/_static/images/en-us_image_0000001135576398.png b/umn/source/_static/images/en-us_image_0000001135576398.png new file mode 100644 index 0000000..0d394c2 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001135576398.png differ diff --git a/umn/source/_static/images/en-us_image_0000001150131972.png b/umn/source/_static/images/en-us_image_0000001150131972.png new file mode 100644 index 0000000..5fac963 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001150131972.png differ diff --git a/umn/source/_static/images/en-us_image_0000001150291788.png b/umn/source/_static/images/en-us_image_0000001150291788.png new file mode 100644 index 0000000..608279a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001150291788.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920767.png b/umn/source/_static/images/en-us_image_0000001156920767.png new file mode 100644 index 0000000..eaba603 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920767.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920769.png b/umn/source/_static/images/en-us_image_0000001156920769.png new file mode 100644 index 0000000..af3da77 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920769.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920823.png b/umn/source/_static/images/en-us_image_0000001156920823.png new file mode 100644 index 0000000..b9e5021 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920823.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920845.png b/umn/source/_static/images/en-us_image_0000001156920845.png new file mode 100644 index 0000000..6791a5b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920845.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920871.png b/umn/source/_static/images/en-us_image_0000001156920871.png new file mode 100644 index 0000000..c6a10e6 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920871.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920885.png b/umn/source/_static/images/en-us_image_0000001156920885.png new file mode 100644 index 0000000..69b153f Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920885.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920887.png b/umn/source/_static/images/en-us_image_0000001156920887.png new file mode 100644 index 0000000..92c235b Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001156920887.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920893.png b/umn/source/_static/images/en-us_image_0000001156920893.png new file mode 100644 index 0000000..e2fa235 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920893.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920897.png b/umn/source/_static/images/en-us_image_0000001156920897.png new file mode 100644 index 0000000..afa7e64 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920897.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920899.png b/umn/source/_static/images/en-us_image_0000001156920899.png new file mode 100644 index 0000000..a3eca2a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920899.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920901.png b/umn/source/_static/images/en-us_image_0000001156920901.png new file mode 100644 index 0000000..fe03e68 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920901.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920903.png b/umn/source/_static/images/en-us_image_0000001156920903.png new file mode 100644 index 0000000..44e8779 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920903.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920919.png b/umn/source/_static/images/en-us_image_0000001156920919.png new file mode 100644 index 0000000..5053f3c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920919.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920929.png b/umn/source/_static/images/en-us_image_0000001156920929.png new file mode 100644 index 0000000..28a377c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920929.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920931.png b/umn/source/_static/images/en-us_image_0000001156920931.png new file mode 100644 index 0000000..8eea1bd Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920931.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920933.png b/umn/source/_static/images/en-us_image_0000001156920933.png new file mode 100644 index 0000000..34ec4c8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920933.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920935.png b/umn/source/_static/images/en-us_image_0000001156920935.png new file mode 100644 index 0000000..091cf30 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920935.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920939.png b/umn/source/_static/images/en-us_image_0000001156920939.png new file mode 100644 index 0000000..e153cf5 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920939.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920943.png b/umn/source/_static/images/en-us_image_0000001156920943.png new file mode 100644 index 0000000..f15d82b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920943.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920951.png b/umn/source/_static/images/en-us_image_0000001156920951.png new file mode 100644 index 0000000..c74c1dc Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001156920951.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920955.gif b/umn/source/_static/images/en-us_image_0000001156920955.gif new file mode 100644 index 0000000..bd90904 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920955.gif differ diff --git a/umn/source/_static/images/en-us_image_0000001156920959.png b/umn/source/_static/images/en-us_image_0000001156920959.png new file mode 100644 index 0000000..75490ee Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920959.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920961.png b/umn/source/_static/images/en-us_image_0000001156920961.png new file mode 100644 index 0000000..4f6d3dc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920961.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920965.png b/umn/source/_static/images/en-us_image_0000001156920965.png new file mode 100644 index 0000000..61b1073 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920965.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920967.png b/umn/source/_static/images/en-us_image_0000001156920967.png new file mode 100644 index 0000000..a3a91d4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920967.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920969.png b/umn/source/_static/images/en-us_image_0000001156920969.png new file mode 100644 index 0000000..b3c287f Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920969.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920971.png b/umn/source/_static/images/en-us_image_0000001156920971.png new file mode 100644 index 0000000..474d16c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920971.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920973.png b/umn/source/_static/images/en-us_image_0000001156920973.png new file mode 100644 index 0000000..21bbdd5 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920973.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920981.png b/umn/source/_static/images/en-us_image_0000001156920981.png new file mode 100644 index 0000000..a2c95fc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920981.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920983.png b/umn/source/_static/images/en-us_image_0000001156920983.png new file mode 100644 index 0000000..d6b1e46 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920983.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920985.png b/umn/source/_static/images/en-us_image_0000001156920985.png new file mode 100644 index 0000000..e9711b1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920985.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156920989.png b/umn/source/_static/images/en-us_image_0000001156920989.png new file mode 100644 index 0000000..953bf37 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156920989.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156921011.png b/umn/source/_static/images/en-us_image_0000001156921011.png new file mode 100644 index 0000000..cc4155a Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001156921011.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156921015.png b/umn/source/_static/images/en-us_image_0000001156921015.png new file mode 100644 index 0000000..206e484 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156921015.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156921017.png b/umn/source/_static/images/en-us_image_0000001156921017.png new file mode 100644 index 0000000..45e1008 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156921017.png differ diff --git a/umn/source/_static/images/en-us_image_0000001156921451.png b/umn/source/_static/images/en-us_image_0000001156921451.png new file mode 100644 index 0000000..d98fad4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001156921451.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080759.png b/umn/source/_static/images/en-us_image_0000001157080759.png new file mode 100644 index 0000000..a8c64fb Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080759.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080805.png b/umn/source/_static/images/en-us_image_0000001157080805.png new file mode 100644 index 0000000..64c1cc0 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080805.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080847.png b/umn/source/_static/images/en-us_image_0000001157080847.png new file mode 100644 index 0000000..c1625cf Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080847.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080853.png b/umn/source/_static/images/en-us_image_0000001157080853.png new file mode 100644 index 0000000..c4910df Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080853.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080859.png b/umn/source/_static/images/en-us_image_0000001157080859.png new file mode 100644 index 0000000..819ac91 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080859.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080869.png b/umn/source/_static/images/en-us_image_0000001157080869.png new file mode 100644 index 0000000..3e045af Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080869.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080871.png b/umn/source/_static/images/en-us_image_0000001157080871.png new file mode 100644 index 0000000..0264b30 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080871.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080873.png b/umn/source/_static/images/en-us_image_0000001157080873.png new file mode 100644 index 0000000..572be9b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080873.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080875.png b/umn/source/_static/images/en-us_image_0000001157080875.png new file mode 100644 index 0000000..9e2caee Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080875.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080879.png b/umn/source/_static/images/en-us_image_0000001157080879.png new file mode 100644 index 0000000..80cb452 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001157080879.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080885.png b/umn/source/_static/images/en-us_image_0000001157080885.png new file mode 100644 index 0000000..8ad3090 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080885.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080893.png b/umn/source/_static/images/en-us_image_0000001157080893.png new file mode 100644 index 0000000..a87a76b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080893.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080895.png b/umn/source/_static/images/en-us_image_0000001157080895.png new file mode 100644 index 0000000..9243e1a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080895.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080897.png b/umn/source/_static/images/en-us_image_0000001157080897.png new file mode 100644 index 0000000..720ee11 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080897.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080899.png b/umn/source/_static/images/en-us_image_0000001157080899.png new file mode 100644 index 0000000..f5dec5d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080899.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080903.png b/umn/source/_static/images/en-us_image_0000001157080903.png new file mode 100644 index 0000000..d2a14cf Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080903.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080905.png b/umn/source/_static/images/en-us_image_0000001157080905.png new file mode 100644 index 0000000..ec0041d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080905.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080907.png b/umn/source/_static/images/en-us_image_0000001157080907.png new file mode 100644 index 0000000..4449333 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080907.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080911.png b/umn/source/_static/images/en-us_image_0000001157080911.png new file mode 100644 index 0000000..8619777 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080911.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080919.png b/umn/source/_static/images/en-us_image_0000001157080919.png new file mode 100644 index 0000000..24a6676 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080919.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080923.png b/umn/source/_static/images/en-us_image_0000001157080923.png new file mode 100644 index 0000000..a6b85c6 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080923.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080931.png b/umn/source/_static/images/en-us_image_0000001157080931.png new file mode 100644 index 0000000..75490ee Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080931.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080933.png b/umn/source/_static/images/en-us_image_0000001157080933.png new file mode 100644 index 0000000..2f901fd Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001157080933.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080935.png b/umn/source/_static/images/en-us_image_0000001157080935.png new file mode 100644 index 0000000..b1cc374 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080935.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080965.png b/umn/source/_static/images/en-us_image_0000001157080965.png new file mode 100644 index 0000000..4ea6a5e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080965.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157080983.png b/umn/source/_static/images/en-us_image_0000001157080983.png new file mode 100644 index 0000000..23d28a1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157080983.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157081001.png b/umn/source/_static/images/en-us_image_0000001157081001.png new file mode 100644 index 0000000..21bbdd5 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157081001.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157081003.png b/umn/source/_static/images/en-us_image_0000001157081003.png new file mode 100644 index 0000000..c235696 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157081003.png differ diff --git a/umn/source/_static/images/en-us_image_0000001157081267.png b/umn/source/_static/images/en-us_image_0000001157081267.png new file mode 100644 index 0000000..df875d8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001157081267.png differ diff --git a/umn/source/_static/images/en-us_image_0000001160373426.png b/umn/source/_static/images/en-us_image_0000001160373426.png new file mode 100644 index 0000000..f30ecf0 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001160373426.png differ diff --git a/umn/source/_static/images/en-us_image_0000001160533378.png b/umn/source/_static/images/en-us_image_0000001160533378.png new file mode 100644 index 0000000..7373f02 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001160533378.png differ diff --git a/umn/source/_static/images/en-us_image_0000001161784976.png b/umn/source/_static/images/en-us_image_0000001161784976.png new file mode 100644 index 0000000..e5578b4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001161784976.png differ diff --git a/umn/source/_static/images/en-us_image_0000001163728097.png b/umn/source/_static/images/en-us_image_0000001163728097.png new file mode 100644 index 0000000..1ba69de Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001163728097.png differ diff --git a/umn/source/_static/images/en-us_image_0000001167495475.png b/umn/source/_static/images/en-us_image_0000001167495475.png new file mode 100644 index 0000000..81c1433 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001167495475.png differ diff --git a/umn/source/_static/images/en-us_image_0000001181376003.png b/umn/source/_static/images/en-us_image_0000001181376003.png new file mode 100644 index 0000000..44a99a8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001181376003.png differ diff --git a/umn/source/_static/images/en-us_image_0000001181535567.png b/umn/source/_static/images/en-us_image_0000001181535567.png new file mode 100644 index 0000000..9d37c19 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0000001181535567.png differ diff --git a/umn/source/_static/images/en-us_image_0000001195393079.png b/umn/source/_static/images/en-us_image_0000001195393079.png new file mode 100644 index 0000000..fa8f3e4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001195393079.png differ diff --git a/umn/source/_static/images/en-us_image_0000001196171669.png b/umn/source/_static/images/en-us_image_0000001196171669.png new file mode 100644 index 0000000..ad729f4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001196171669.png differ diff --git a/umn/source/_static/images/en-us_image_0000001205813423.png b/umn/source/_static/images/en-us_image_0000001205813423.png new file mode 100644 index 0000000..2af028b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001205813423.png differ diff --git a/umn/source/_static/images/en-us_image_0000001205955477.png b/umn/source/_static/images/en-us_image_0000001205955477.png new file mode 100644 index 0000000..2af028b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001205955477.png differ diff --git a/umn/source/_static/images/en-us_image_0000001205974859.png b/umn/source/_static/images/en-us_image_0000001205974859.png new file mode 100644 index 0000000..72f6b62 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001205974859.png differ diff --git a/umn/source/_static/images/en-us_image_0000001206035439.png b/umn/source/_static/images/en-us_image_0000001206035439.png new file mode 100644 index 0000000..1ba69de Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001206035439.png differ diff --git a/umn/source/_static/images/en-us_image_0000001206511791.png b/umn/source/_static/images/en-us_image_0000001206511791.png new file mode 100644 index 0000000..81c1433 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001206511791.png differ diff --git a/umn/source/_static/images/en-us_image_0000001214778791.png b/umn/source/_static/images/en-us_image_0000001214778791.png new file mode 100644 index 0000000..f0b9571 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0000001214778791.png differ diff --git a/umn/source/_static/images/en-us_image_0114721717.jpg b/umn/source/_static/images/en-us_image_0114721717.jpg new file mode 100644 index 0000000..2738de3 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0114721717.jpg differ diff --git a/umn/source/_static/images/en-us_image_0114944782.jpg b/umn/source/_static/images/en-us_image_0114944782.jpg new file mode 100644 index 0000000..f673198 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0114944782.jpg differ diff --git a/umn/source/_static/images/en-us_image_0129028346.png b/umn/source/_static/images/en-us_image_0129028346.png new file mode 100644 index 0000000..fd0403b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0129028346.png differ diff --git a/umn/source/_static/images/en-us_image_0152727234.png b/umn/source/_static/images/en-us_image_0152727234.png new file mode 100644 index 0000000..35e2831 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0152727234.png differ diff --git a/umn/source/_static/images/en-us_image_0152872722.png b/umn/source/_static/images/en-us_image_0152872722.png new file mode 100644 index 0000000..7edd447 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0152872722.png differ diff --git 
a/umn/source/_static/images/en-us_image_0153115564.png b/umn/source/_static/images/en-us_image_0153115564.png new file mode 100644 index 0000000..ae6bdb3 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0153115564.png differ diff --git a/umn/source/_static/images/en-us_image_0153115565.png b/umn/source/_static/images/en-us_image_0153115565.png new file mode 100644 index 0000000..6716357 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0153115565.png differ diff --git a/umn/source/_static/images/en-us_image_0166358924.png b/umn/source/_static/images/en-us_image_0166358924.png new file mode 100644 index 0000000..f30233e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0166358924.png differ diff --git a/umn/source/_static/images/en-us_image_0166358926.png b/umn/source/_static/images/en-us_image_0166358926.png new file mode 100644 index 0000000..01221d1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0166358926.png differ diff --git a/umn/source/_static/images/en-us_image_0166358967.png b/umn/source/_static/images/en-us_image_0166358967.png new file mode 100644 index 0000000..ecbbee2 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0166358967.png differ diff --git a/umn/source/_static/images/en-us_image_0166358972.png b/umn/source/_static/images/en-us_image_0166358972.png new file mode 100644 index 0000000..1e84539 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0166358972.png differ diff --git a/umn/source/_static/images/en-us_image_0166358975.png b/umn/source/_static/images/en-us_image_0166358975.png new file mode 100644 index 0000000..0069e29 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0166358975.png differ diff --git a/umn/source/_static/images/en-us_image_0167649598.png b/umn/source/_static/images/en-us_image_0167649598.png new file mode 100644 index 0000000..2af028b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0167649598.png differ diff --git a/umn/source/_static/images/en-us_image_0167652140.png b/umn/source/_static/images/en-us_image_0167652140.png new file mode 100644 index 0000000..ecbbee2 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0167652140.png differ diff --git a/umn/source/_static/images/en-us_image_0167652142.png b/umn/source/_static/images/en-us_image_0167652142.png new file mode 100644 index 0000000..1e84539 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0167652142.png differ diff --git a/umn/source/_static/images/en-us_image_0167652143.png b/umn/source/_static/images/en-us_image_0167652143.png new file mode 100644 index 0000000..0069e29 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0167652143.png differ diff --git a/umn/source/_static/images/en-us_image_0167655332.png b/umn/source/_static/images/en-us_image_0167655332.png new file mode 100644 index 0000000..f30233e Binary files /dev/null and b/umn/source/_static/images/en-us_image_0167655332.png differ diff --git a/umn/source/_static/images/en-us_image_0167655334.png b/umn/source/_static/images/en-us_image_0167655334.png new file mode 100644 index 0000000..01221d1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0167655334.png differ diff --git a/umn/source/_static/images/en-us_image_0168438373.jpg b/umn/source/_static/images/en-us_image_0168438373.jpg new file mode 100644 index 0000000..b08e262 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0168438373.jpg differ diff 
--git a/umn/source/_static/images/en-us_image_0168438378.jpg b/umn/source/_static/images/en-us_image_0168438378.jpg new file mode 100644 index 0000000..7e420ae Binary files /dev/null and b/umn/source/_static/images/en-us_image_0168438378.jpg differ diff --git a/umn/source/_static/images/en-us_image_0168612036.jpg b/umn/source/_static/images/en-us_image_0168612036.jpg new file mode 100644 index 0000000..7c4ec3b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0168612036.jpg differ diff --git a/umn/source/_static/images/en-us_image_0169513446.png b/umn/source/_static/images/en-us_image_0169513446.png new file mode 100644 index 0000000..1ba69de Binary files /dev/null and b/umn/source/_static/images/en-us_image_0169513446.png differ diff --git a/umn/source/_static/images/en-us_image_0171245428.jpg b/umn/source/_static/images/en-us_image_0171245428.jpg new file mode 100644 index 0000000..3233b97 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171245428.jpg differ diff --git a/umn/source/_static/images/en-us_image_0171245430.jpg b/umn/source/_static/images/en-us_image_0171245430.jpg new file mode 100644 index 0000000..efabdf3 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171245430.jpg differ diff --git a/umn/source/_static/images/en-us_image_0171245432.jpg b/umn/source/_static/images/en-us_image_0171245432.jpg new file mode 100644 index 0000000..1bafc03 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171245432.jpg differ diff --git a/umn/source/_static/images/en-us_image_0171245434.jpg b/umn/source/_static/images/en-us_image_0171245434.jpg new file mode 100644 index 0000000..27662ad Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171245434.jpg differ diff --git a/umn/source/_static/images/en-us_image_0171245435.jpg b/umn/source/_static/images/en-us_image_0171245435.jpg new file mode 100644 index 0000000..b7ece11 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171245435.jpg differ diff --git a/umn/source/_static/images/en-us_image_0171437555.png b/umn/source/_static/images/en-us_image_0171437555.png new file mode 100644 index 0000000..92f4549 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171437555.png differ diff --git a/umn/source/_static/images/en-us_image_0171437556.png b/umn/source/_static/images/en-us_image_0171437556.png new file mode 100644 index 0000000..9f8d0cc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0171437556.png differ diff --git a/umn/source/_static/images/en-us_image_0174899056.jpg b/umn/source/_static/images/en-us_image_0174899056.jpg new file mode 100644 index 0000000..5f12e41 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0174899056.jpg differ diff --git a/umn/source/_static/images/en-us_image_0174914269.jpg b/umn/source/_static/images/en-us_image_0174914269.jpg new file mode 100644 index 0000000..97bb8c9 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0174914269.jpg differ diff --git a/umn/source/_static/images/en-us_image_0184026531.png b/umn/source/_static/images/en-us_image_0184026531.png new file mode 100644 index 0000000..1303a51 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0184026531.png differ diff --git a/umn/source/_static/images/en-us_image_0198606126.png b/umn/source/_static/images/en-us_image_0198606126.png new file mode 100644 index 0000000..9d4444b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198606126.png differ 
diff --git a/umn/source/_static/images/en-us_image_0198606821.png b/umn/source/_static/images/en-us_image_0198606821.png new file mode 100644 index 0000000..585f244 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198606821.png differ diff --git a/umn/source/_static/images/en-us_image_0198606826.png b/umn/source/_static/images/en-us_image_0198606826.png new file mode 100644 index 0000000..a4438f1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198606826.png differ diff --git a/umn/source/_static/images/en-us_image_0198606850.png b/umn/source/_static/images/en-us_image_0198606850.png new file mode 100644 index 0000000..37bd295 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198606850.png differ diff --git a/umn/source/_static/images/en-us_image_0198607819.png b/umn/source/_static/images/en-us_image_0198607819.png new file mode 100644 index 0000000..1315c0c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198607819.png differ diff --git a/umn/source/_static/images/en-us_image_0198607824.png b/umn/source/_static/images/en-us_image_0198607824.png new file mode 100644 index 0000000..1315c0c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198607824.png differ diff --git a/umn/source/_static/images/en-us_image_0198607873.png b/umn/source/_static/images/en-us_image_0198607873.png new file mode 100644 index 0000000..3ae76f8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198607873.png differ diff --git a/umn/source/_static/images/en-us_image_0198607881.png b/umn/source/_static/images/en-us_image_0198607881.png new file mode 100644 index 0000000..3ae76f8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0198607881.png differ diff --git a/umn/source/_static/images/en-us_image_0202102748.png b/umn/source/_static/images/en-us_image_0202102748.png new file mode 100644 index 0000000..52797ef Binary files /dev/null and b/umn/source/_static/images/en-us_image_0202102748.png differ diff --git a/umn/source/_static/images/en-us_image_0202311381.png b/umn/source/_static/images/en-us_image_0202311381.png new file mode 100644 index 0000000..4ba757a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0202311381.png differ diff --git a/umn/source/_static/images/en-us_image_0207374995.jpg b/umn/source/_static/images/en-us_image_0207374995.jpg new file mode 100644 index 0000000..d3d43b4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0207374995.jpg differ diff --git a/umn/source/_static/images/en-us_image_0207375188.jpg b/umn/source/_static/images/en-us_image_0207375188.jpg new file mode 100644 index 0000000..d3d43b4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0207375188.jpg differ diff --git a/umn/source/_static/images/en-us_image_0238256358.jpg b/umn/source/_static/images/en-us_image_0238256358.jpg new file mode 100644 index 0000000..b9febcc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238256358.jpg differ diff --git a/umn/source/_static/images/en-us_image_0238395032.png b/umn/source/_static/images/en-us_image_0238395032.png new file mode 100644 index 0000000..688d92d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238395032.png differ diff --git a/umn/source/_static/images/en-us_image_0238395033.png b/umn/source/_static/images/en-us_image_0238395033.png new file mode 100644 index 0000000..46eaddb Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238395033.png 
differ diff --git a/umn/source/_static/images/en-us_image_0238408792.png b/umn/source/_static/images/en-us_image_0238408792.png new file mode 100644 index 0000000..f1403c1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238408792.png differ diff --git a/umn/source/_static/images/en-us_image_0238408794.png b/umn/source/_static/images/en-us_image_0238408794.png new file mode 100644 index 0000000..2af028b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238408794.png differ diff --git a/umn/source/_static/images/en-us_image_0238446387.png b/umn/source/_static/images/en-us_image_0238446387.png new file mode 100644 index 0000000..2af028b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238446387.png differ diff --git a/umn/source/_static/images/en-us_image_0238446941.png b/umn/source/_static/images/en-us_image_0238446941.png new file mode 100644 index 0000000..2af028b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238446941.png differ diff --git a/umn/source/_static/images/en-us_image_0238447292.png b/umn/source/_static/images/en-us_image_0238447292.png new file mode 100644 index 0000000..ff1296a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0238447292.png differ diff --git a/umn/source/_static/images/en-us_image_0241225827.png b/umn/source/_static/images/en-us_image_0241225827.png new file mode 100644 index 0000000..f1403c1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0241225827.png differ diff --git a/umn/source/_static/images/en-us_image_0241356603.png b/umn/source/_static/images/en-us_image_0241356603.png new file mode 100644 index 0000000..f1403c1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0241356603.png differ diff --git a/umn/source/_static/images/en-us_image_0267429969.png b/umn/source/_static/images/en-us_image_0267429969.png new file mode 100644 index 0000000..47b2135 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0267429969.png differ diff --git a/umn/source/_static/images/en-us_image_0267431325.png b/umn/source/_static/images/en-us_image_0267431325.png new file mode 100644 index 0000000..784ee7a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0267431325.png differ diff --git a/umn/source/_static/images/en-us_image_0267432483.png b/umn/source/_static/images/en-us_image_0267432483.png new file mode 100644 index 0000000..04b31be Binary files /dev/null and b/umn/source/_static/images/en-us_image_0267432483.png differ diff --git a/umn/source/_static/images/en-us_image_0267434399.png b/umn/source/_static/images/en-us_image_0267434399.png new file mode 100644 index 0000000..3b45ecc Binary files /dev/null and b/umn/source/_static/images/en-us_image_0267434399.png differ diff --git a/umn/source/_static/images/en-us_image_0267440227.png b/umn/source/_static/images/en-us_image_0267440227.png new file mode 100644 index 0000000..73cec8c Binary files /dev/null and b/umn/source/_static/images/en-us_image_0267440227.png differ diff --git a/umn/source/_static/images/en-us_image_0267442311.png b/umn/source/_static/images/en-us_image_0267442311.png new file mode 100644 index 0000000..30edeb1 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0267442311.png differ diff --git a/umn/source/_static/images/en-us_image_0267446611.png b/umn/source/_static/images/en-us_image_0267446611.png new file mode 100644 index 0000000..cfaeed9 Binary files /dev/null and 
b/umn/source/_static/images/en-us_image_0267446611.png differ diff --git a/umn/source/_static/images/en-us_image_0275513364.png b/umn/source/_static/images/en-us_image_0275513364.png new file mode 100644 index 0000000..1909444 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275513364.png differ diff --git a/umn/source/_static/images/en-us_image_0275816100.jpg b/umn/source/_static/images/en-us_image_0275816100.jpg new file mode 100644 index 0000000..983795a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275816100.jpg differ diff --git a/umn/source/_static/images/en-us_image_0275818423.jpg b/umn/source/_static/images/en-us_image_0275818423.jpg new file mode 100644 index 0000000..2d18290 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275818423.jpg differ diff --git a/umn/source/_static/images/en-us_image_0275818458.jpg b/umn/source/_static/images/en-us_image_0275818458.jpg new file mode 100644 index 0000000..925f4d2 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275818458.jpg differ diff --git a/umn/source/_static/images/en-us_image_0275818808.jpg b/umn/source/_static/images/en-us_image_0275818808.jpg new file mode 100644 index 0000000..518ec98 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275818808.jpg differ diff --git a/umn/source/_static/images/en-us_image_0275826373.jpg b/umn/source/_static/images/en-us_image_0275826373.jpg new file mode 100644 index 0000000..8fefa02 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275826373.jpg differ diff --git a/umn/source/_static/images/en-us_image_0275826374.jpg b/umn/source/_static/images/en-us_image_0275826374.jpg new file mode 100644 index 0000000..4d59421 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0275826374.jpg differ diff --git a/umn/source/_static/images/en-us_image_0276143526.png b/umn/source/_static/images/en-us_image_0276143526.png new file mode 100644 index 0000000..65d58fa Binary files /dev/null and b/umn/source/_static/images/en-us_image_0276143526.png differ diff --git a/umn/source/_static/images/en-us_image_0276220702.png b/umn/source/_static/images/en-us_image_0276220702.png new file mode 100644 index 0000000..ae2f170 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0276220702.png differ diff --git a/umn/source/_static/images/en-us_image_0276223899.png b/umn/source/_static/images/en-us_image_0276223899.png new file mode 100644 index 0000000..4ee7c5d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0276223899.png differ diff --git a/umn/source/_static/images/en-us_image_0276225173.png b/umn/source/_static/images/en-us_image_0276225173.png new file mode 100644 index 0000000..dcde013 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0276225173.png differ diff --git a/umn/source/_static/images/en-us_image_0277560434.png b/umn/source/_static/images/en-us_image_0277560434.png new file mode 100644 index 0000000..d9ed988 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0277560434.png differ diff --git a/umn/source/_static/images/en-us_image_0277560435.png b/umn/source/_static/images/en-us_image_0277560435.png new file mode 100644 index 0000000..1d4ddc4 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0277560435.png differ diff --git a/umn/source/_static/images/en-us_image_0277560436.png b/umn/source/_static/images/en-us_image_0277560436.png new file mode 100644 index 0000000..1d2ac6f Binary files /dev/null 
and b/umn/source/_static/images/en-us_image_0277560436.png differ diff --git a/umn/source/_static/images/en-us_image_0277560437.png b/umn/source/_static/images/en-us_image_0277560437.png new file mode 100644 index 0000000..989fc4b Binary files /dev/null and b/umn/source/_static/images/en-us_image_0277560437.png differ diff --git a/umn/source/_static/images/en-us_image_0280169359.png b/umn/source/_static/images/en-us_image_0280169359.png new file mode 100644 index 0000000..72628c7 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0280169359.png differ diff --git a/umn/source/_static/images/en-us_image_0280246566.png b/umn/source/_static/images/en-us_image_0280246566.png new file mode 100644 index 0000000..37e9ec8 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0280246566.png differ diff --git a/umn/source/_static/images/en-us_image_0280246602.png b/umn/source/_static/images/en-us_image_0280246602.png new file mode 100644 index 0000000..a8d38ba Binary files /dev/null and b/umn/source/_static/images/en-us_image_0280246602.png differ diff --git a/umn/source/_static/images/en-us_image_0291936910.png b/umn/source/_static/images/en-us_image_0291936910.png new file mode 100644 index 0000000..5cec7d6 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0291936910.png differ diff --git a/umn/source/_static/images/en-us_image_0291937212.png b/umn/source/_static/images/en-us_image_0291937212.png new file mode 100644 index 0000000..af4569a Binary files /dev/null and b/umn/source/_static/images/en-us_image_0291937212.png differ diff --git a/umn/source/_static/images/en-us_image_0291937584.png b/umn/source/_static/images/en-us_image_0291937584.png new file mode 100644 index 0000000..ff93e0d Binary files /dev/null and b/umn/source/_static/images/en-us_image_0291937584.png differ diff --git a/umn/source/_static/images/en-us_image_0291965517.png b/umn/source/_static/images/en-us_image_0291965517.png new file mode 100644 index 0000000..e94a637 Binary files /dev/null and b/umn/source/_static/images/en-us_image_0291965517.png differ diff --git a/umn/source/_static/images/icon-arrowdn.gif b/umn/source/_static/images/icon-arrowdn.gif new file mode 100644 index 0000000..84eec9b Binary files /dev/null and b/umn/source/_static/images/icon-arrowdn.gif differ diff --git a/umn/source/_static/images/icon-arrowrt.gif b/umn/source/_static/images/icon-arrowrt.gif new file mode 100644 index 0000000..39583d1 Binary files /dev/null and b/umn/source/_static/images/icon-arrowrt.gif differ diff --git a/umn/source/_static/images/icon-caution.gif b/umn/source/_static/images/icon-caution.gif new file mode 100644 index 0000000..079c79b Binary files /dev/null and b/umn/source/_static/images/icon-caution.gif differ diff --git a/umn/source/_static/images/icon-danger.gif b/umn/source/_static/images/icon-danger.gif new file mode 100644 index 0000000..079c79b Binary files /dev/null and b/umn/source/_static/images/icon-danger.gif differ diff --git a/umn/source/_static/images/icon-huawei.gif b/umn/source/_static/images/icon-huawei.gif new file mode 100644 index 0000000..a31d60f Binary files /dev/null and b/umn/source/_static/images/icon-huawei.gif differ diff --git a/umn/source/_static/images/icon-note.gif b/umn/source/_static/images/icon-note.gif new file mode 100644 index 0000000..31be2b0 Binary files /dev/null and b/umn/source/_static/images/icon-note.gif differ diff --git a/umn/source/_static/images/icon-notice.gif b/umn/source/_static/images/icon-notice.gif new file mode 100644 index 
0000000..4090706 Binary files /dev/null and b/umn/source/_static/images/icon-notice.gif differ diff --git a/umn/source/_static/images/icon-tip.gif b/umn/source/_static/images/icon-tip.gif new file mode 100644 index 0000000..c47bae0 Binary files /dev/null and b/umn/source/_static/images/icon-tip.gif differ diff --git a/umn/source/_static/images/icon-warning.gif b/umn/source/_static/images/icon-warning.gif new file mode 100644 index 0000000..079c79b Binary files /dev/null and b/umn/source/_static/images/icon-warning.gif differ diff --git a/umn/source/_static/images/note_3.0-en-us.png b/umn/source/_static/images/note_3.0-en-us.png new file mode 100644 index 0000000..57a0e1f Binary files /dev/null and b/umn/source/_static/images/note_3.0-en-us.png differ diff --git a/umn/source/_static/images/notice_3.0-en-us.png b/umn/source/_static/images/notice_3.0-en-us.png new file mode 100644 index 0000000..fa4b649 Binary files /dev/null and b/umn/source/_static/images/notice_3.0-en-us.png differ diff --git a/umn/source/_static/images/popup.js b/umn/source/_static/images/popup.js new file mode 100644 index 0000000..5305962 --- /dev/null +++ b/umn/source/_static/images/popup.js @@ -0,0 +1 @@ +var i=0;var dhtmlgoodies_tooltipFlag=false;var dhtmlgoodies_tooltip="";var dhtmlgoodies_tooltipShadow="";var dhtmlgoodies_shadowSize=3;var dhtmlgoodies_tooltipMaxWidth=500;var dhtmlgoodies_tooltipMinWidth=100;var dhtmlgoodies_iframe=false;var timeId;var clickFlag=false;var tooltip_is_msie=(navigator.userAgent.indexOf("MSIE")>=0&&navigator.userAgent.indexOf("opera")==-1&&document.all)?true:false;var xPos;var yPos;window.document.onmousemove=function(a){a=a||window.event;if(a.pageX){xPos=a.pageX;yPos=a.pageY}else{if(document.body!=null){xPos=a.clientX+document.body.scrollLeft-document.body.clientLeft;yPos=a.clientY+document.body.scrollTop-document.body.clientTop}}};function showTooltip(tooltipTxt){if(document.body==null){return}if(i==0){return}clickFlag=true;var text=eval("jsonData."+tooltipTxt);var bodyWidth=Math.max(document.body.clientWidth,document.documentElement.clientWidth)-20;if(!dhtmlgoodies_tooltipFlag){dhtmlgoodies_tooltip=document.createElement("DIV");dhtmlgoodies_tooltip.id="dhtmlgoodies_tooltip";dhtmlgoodies_tooltipShadow=document.createElement("DIV");dhtmlgoodies_tooltipShadow.id="dhtmlgoodies_tooltipShadow";document.body.appendChild(dhtmlgoodies_tooltip);document.body.appendChild(dhtmlgoodies_tooltipShadow);if(tooltip_is_msie){dhtmlgoodies_iframe=document.createElement("IFRAME");dhtmlgoodies_iframe.frameborder="5";dhtmlgoodies_iframe.style.backgroundColor="#FFFFFF";dhtmlgoodies_iframe.src="#";dhtmlgoodies_iframe.style.zIndex=100;dhtmlgoodies_iframe.style.position="absolute";document.body.appendChild(dhtmlgoodies_iframe)}}dhtmlgoodies_tooltip.style.display="block";dhtmlgoodies_tooltipShadow.style.display="block";if(tooltip_is_msie){dhtmlgoodies_iframe.style.display="block"}var st=Math.max(document.body.scrollTop,document.documentElement.scrollTop);if(navigator.userAgent.toLowerCase().indexOf("safari")>=0){st=0}var 
leftPos=xPos+10;dhtmlgoodies_tooltip.style.width=null;dhtmlgoodies_tooltip.innerHTML=text;dhtmlgoodies_tooltip.style.left=leftPos+"px";if(tooltip_is_msie){dhtmlgoodies_tooltip.style.top=yPos+20+st+"px"}else{dhtmlgoodies_tooltip.style.top=yPos+20+"px"}dhtmlgoodies_tooltipShadow.style.left=leftPos+dhtmlgoodies_shadowSize+"px";if(tooltip_is_msie){dhtmlgoodies_tooltipShadow.style.top=yPos+20+st+dhtmlgoodies_shadowSize+"px"}else{dhtmlgoodies_tooltipShadow.style.top=yPos+20+dhtmlgoodies_shadowSize+"px"}if(dhtmlgoodies_tooltip.offsetWidth>dhtmlgoodies_tooltipMaxWidth){dhtmlgoodies_tooltip.style.width=dhtmlgoodies_tooltipMaxWidth+"px"}var tooltipWidth=dhtmlgoodies_tooltip.offsetWidth;if(tooltipWidthbodyWidth){dhtmlgoodies_tooltip.style.left=(dhtmlgoodies_tooltipShadow.style.left.replace("px","")-((leftPos+tooltipWidth)-bodyWidth))+"px";dhtmlgoodies_tooltipShadow.style.left=(dhtmlgoodies_tooltipShadow.style.left.replace("px","")-((leftPos+tooltipWidth)-bodyWidth)+dhtmlgoodies_shadowSize)+"px"}if(tooltip_is_msie){dhtmlgoodies_iframe.style.left=dhtmlgoodies_tooltip.style.left;dhtmlgoodies_iframe.style.top=dhtmlgoodies_tooltip.style.top;dhtmlgoodies_iframe.style.width=dhtmlgoodies_tooltip.offsetWidth+"px";dhtmlgoodies_iframe.style.height=dhtmlgoodies_tooltip.offsetHeight+"px"}}function hideTooltip(){i=0;clickFlag=false;if(dhtmlgoodies_tooltip!=null&&dhtmlgoodies_tooltip.style!=null){dhtmlgoodies_tooltip.style.display="none";dhtmlgoodies_tooltipShadow.style.display="none";if(tooltip_is_msie){dhtmlgoodies_iframe.style.display="none"}}if(timeId!=null&&timeId!=""){clearTimeout(timeId)}}function showText(a){i=1;timeId=setTimeout(function(){showTooltip(a)},500)}function showText2(a){if(!clickFlag){i=1;showTooltip(a);i=0;if(timeId!=null&&timeId!=""){clearTimeout(timeId)}}}function anchorScroll(b){var d=document.getElementsByName(b);if(d!=null&&d.length>0){var c=d[0];var a=c.getBoundingClientRect().left+(document.body.scrollLeft||(document.documentElement&&document.documentElement.scrollLeft));var e=c.getBoundingClientRect().top+(document.body.scrollTop||(document.documentElement&&document.documentElement.scrollTop));window.scrollTo(a,e-30)}}; \ No newline at end of file diff --git a/umn/source/_static/images/warning_3.0-en-us.png b/umn/source/_static/images/warning_3.0-en-us.png new file mode 100644 index 0000000..def5c35 Binary files /dev/null and b/umn/source/_static/images/warning_3.0-en-us.png differ diff --git a/umn/source/change_history.rst b/umn/source/change_history.rst new file mode 100644 index 0000000..5c7a8f1 --- /dev/null +++ b/umn/source/change_history.rst @@ -0,0 +1,12 @@ +Change History +============== + + + +.. _modelarts040099enustopic0135264638enustopic0135264638table4331195115321: + +=========== =================================== +Released On Description +=========== =================================== +2021-04-30 This is the first official release. +=========== =================================== diff --git a/umn/source/conf.py b/umn/source/conf.py new file mode 100644 index 0000000..61cb331 --- /dev/null +++ b/umn/source/conf.py @@ -0,0 +1,151 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +import os +import sys + +extensions = [ + 'otcdocstheme' +] + + +html_theme = 'otcdocs' +html_theme_options = { +} +otcdocs_auto_name = False +otcdocs_auto_version = False +project = 'ModelArts' +otcdocs_repo_name = 'opentelekomcloud-docs/modelarts' + + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('../../')) +sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath('./')) + +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# +# source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +copyright = u'2022-present, Open Telekom Cloud' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# The reST default role (used for this markup: `text`) to use +# for all documents. +# default_role = None + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# -- Options for man page output ---------------------------------------------- + +# Grouping the document tree for man pages. +# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' + + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +html_title = "Model Arts UMN" + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+# html_static_path = ['_static'] + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'madoc' diff --git a/umn/source/custom_images/creating_and_uploading_a_custom_image.rst b/umn/source/custom_images/creating_and_uploading_a_custom_image.rst new file mode 100644 index 0000000..4ad409b --- /dev/null +++ b/umn/source/custom_images/creating_and_uploading_a_custom_image.rst @@ -0,0 +1,25 @@ +Creating and Uploading a Custom Image +===================================== + +ModelArts allows you to use custom images to create training jobs and import models. Before creating and uploading a custom image, understand the following information: + +- Software Repository for Container (SWR) + + SWR provides easy, secure, and reliable management over Docker container images throughout their lifecycle, facilitating the deployment of containerized applications. You can push, pull, and manage container images through the SWR console, SWR APIs, or the community Command Line Interface (CLI). + + Obtain the custom images used by ModelArts for model training and import from the SWR service management list. Upload the custom images you create to SWR. + +- Specifications for custom images: For details about how to use a custom image for a training job, see `Specifications for Custom Images Used for Training Jobs <../custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.html>`__. For details about how to use a custom image for model import, see `Specifications for Custom Images Used for Importing Models <../custom_images/for_importing_models/specifications_for_custom_images_used_for_importing_models.html>`__. + +.. _creating-and-uploading-a-custom-image-1: + +Creating and Uploading a Custom Image +------------------------------------- + +#. Purchase a cloud server or use a local host to set up the Docker environment. +#. Obtain the basic image from the local environment. +#. Compile a Dockerfile based on your requirements to build a custom image. For details about how to efficiently compile a Dockerfile, see *Software Repository for Container Best Practices*. + +#. After customizing an image, upload the image to SWR by referring to "Uploading an Image Through a Docker Client" in *Software Repository for Container User Guide*. 
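The guide defers the actual build and upload commands to the SWR documentation. Purely as an illustration of the build, tag, and push cycle behind steps 3 and 4, the following sketch uses the Docker SDK for Python (docker-py), which this guide does not mandate; the registry address, organization name, repository, and login credentials are placeholders you would replace with the values shown on your SWR console.

.. code-block:: python

   import docker

   # Placeholders only; take the real values from the SWR console.
   REGISTRY = "swr.<region>.<swr-endpoint>"
   REPOSITORY = REGISTRY + "/<organization>/my-custom-image"
   TAG = "1.0.0"

   client = docker.from_env()

   # Build the custom image from the Dockerfile in the current directory.
   image, build_logs = client.images.build(path=".", tag=REPOSITORY + ":" + TAG)

   # Log in to SWR with the login key/secret generated on the SWR console.
   client.login(username="<login_key>", password="<login_secret>", registry=REGISTRY)

   # Push the image so that it appears in the SWR service management list.
   for line in client.images.push(REPOSITORY, tag=TAG, stream=True, decode=True):
       print(line)

Using the temporary login command and **docker push** from a Docker client, as described in the SWR user guide, achieves the same result; the SDK is only a convenience when the cycle is scripted.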
+ + diff --git a/umn/source/custom_images/for_importing_models/importing_a_model_using_a_custom_image.rst b/umn/source/custom_images/for_importing_models/importing_a_model_using_a_custom_image.rst new file mode 100644 index 0000000..e33385c --- /dev/null +++ b/umn/source/custom_images/for_importing_models/importing_a_model_using_a_custom_image.rst @@ -0,0 +1,35 @@ +Importing a Model Using a Custom Image +====================================== + +After creating and uploading a custom image to SWR, you can use the image to import a model and deploy the model as a service on the ModelArts management console. + +Prerequisites +------------- + +- You have created a custom image package based on ModelArts specifications. For details about the specifications you need to comply with when using a custom image to import a model, see `Specifications for Custom Images Used for Importing Models <../../custom_images/for_importing_models/specifications_for_custom_images_used_for_importing_models.html>`__. +- You have uploaded the custom image to SWR. For details, see `Creating and Uploading a Custom Image <../../custom_images/creating_and_uploading_a_custom_image.html#creating-and-uploading-a-custom-image>`__. + +Importing a Model +----------------- + +Set basic parameters for importing a model according to `Importing a Meta Model from a Container Image <../../model_management/importing_a_model/importing_a_meta_model_from_a_container_image.html>`__. When importing a model using a custom image, pay attention to the settings of **Meta Model Source** and **Configuration File**. + +- **Meta Model Source** + + Select **Container Image**. Click |image1| in the edit box of **Container Image Path** to select an image. The system automatically lists all images uploaded to SWR. Select an image based on the site requirements. + +- **Configuration File** + + The model configuration file needs to be compiled independently. For details about how to compile the model configuration file, see `Specifications for Compiling the Model Configuration File <../../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html>`__. For details about the configuration file examples of a custom image, see `Example of the Custom Image Model Configuration File <../../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html#example-of-the-custom-image-model-configuration-file>`__. After editing the model configuration file based on the ModelArts specifications, upload it to OBS or use **Edit online** on the **Import Model** page. + +Deploying a Service +------------------- + +After a model is successfully imported using a custom image, that is, the model status is normal, you can deploy the model as a service. On the **Models** page, click **Deploy** in the **Operation** column and select a service type, for example, **Real-time Service**. + +You can deploy models as real-time or batch services based on the business logic of your custom image. The procedure for deploying a model imported using other methods is the same as that for deploying a model imported using a custom image. For details, see `Introduction to Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__. + + + +.. 
|image1| image:: /_static/images/en-us_image_0000001156920767.png + diff --git a/umn/source/custom_images/for_importing_models/index.rst b/umn/source/custom_images/for_importing_models/index.rst new file mode 100644 index 0000000..608bc3f --- /dev/null +++ b/umn/source/custom_images/for_importing_models/index.rst @@ -0,0 +1,9 @@ +==================== +For Importing Models +==================== + +.. toctree:: + :maxdepth: 1 + + specifications_for_custom_images_used_for_importing_models + importing_a_model_using_a_custom_image diff --git a/umn/source/custom_images/for_importing_models/specifications_for_custom_images_used_for_importing_models.rst b/umn/source/custom_images/for_importing_models/specifications_for_custom_images_used_for_importing_models.rst new file mode 100644 index 0000000..fa0a24e --- /dev/null +++ b/umn/source/custom_images/for_importing_models/specifications_for_custom_images_used_for_importing_models.rst @@ -0,0 +1,69 @@ +Specifications for Custom Images Used for Importing Models +========================================================== + +When creating an image using locally developed models, ensure that they meet the specifications defined by ModelArts. + +Specifications for Custom Images Used for Model Management +---------------------------------------------------------- + +- Custom images cannot contain malicious code. + +- The size of a custom image cannot exceed 30 GB. + +- **External port of images** + + The external service port of the image must be **8080**. The inference interface must be consistent with the URL defined by **apis** in the **config.json** file. The inference interface can be directly accessed when the image is started. The following is an example of accessing the **mnist** image. The image contains the model trained with the **mnist** dataset. The model can identify handwritten digits in images. In this example, *listen_ip* indicates the IP address of the container. + + - Sample request: **curl -X POST \\ http://{listen_ip}:8080/ \\ -F images=@seven.jpg** + + - Sample response + + .. code-block:: + + {"mnist_result": 7} + +- **Health check port** + + A custom image must provide a health check interface for ModelArts to call. The health check interface is configured in the **config.json** file. For details, see the model configuration file compilation description. A sample health check interface is as follows: + + - URI + + .. code-block:: + + GET /health + + - Sample request: **curl -X GET \\ http://{listen_ip}:8080/health** + + - Sample response + + .. code-block:: + + {"health": "true"} + + - Status code + +.. _modelarts230219enustopic0212179953table19701134515351: + + .. table:: **Table 1** Status code + + =========== ======= ================== + Status Code Message Description + =========== ======= ================== + 200 OK Successful request + =========== ======= ================== + +- **Log file output** + + To ensure that the log content can be displayed normally, the logs must be standard output. + +- **Image boot file** + + To deploy a batch service, set the boot file of an image to **/home/run.sh** and use CMD to set the default boot path. The following is a sample **Dockerfile**. + + **CMD /bin/sh /home/run.sh** + +- **Image dependencies** + + To deploy a batch service, install component packages such as Python, JRE/JDK, and ZIP in the image. 
+ + diff --git a/umn/source/custom_images/for_training_models/creating_a_training_job_using_a_custom_image_(gpu).rst b/umn/source/custom_images/for_training_models/creating_a_training_job_using_a_custom_image_(gpu).rst new file mode 100644 index 0000000..0bbcfe9 --- /dev/null +++ b/umn/source/custom_images/for_training_models/creating_a_training_job_using_a_custom_image_(gpu).rst @@ -0,0 +1,98 @@ +Creating a Training Job Using a Custom Image (GPU) +================================================== + +After creating and uploading a custom image to SWR, you can use the image to create a training job on the ModelArts management console to complete model training. + +Prerequisites +------------- + +- You have created a custom image package based on ModelArts specifications. For details about the specifications you need to comply with when using a custom image to create training jobs, see `Specifications for Custom Images Used for Training Jobs <../../custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.html>`__. +- You have uploaded the custom image to SWR. For details, see `Creating and Uploading a Custom Image <../../custom_images/creating_and_uploading_a_custom_image.html>`__. + +Creating a Training Job +----------------------- + +Log in to the ModelArts management console and create a training job according to `Creating a Training Job <../../training_management/index.html>`__. When using a custom image to create a job, pay attention to the settings of **Algorithm Source**, **Environment Variable**, and **Resource Pool**. + +- **Algorithm Source** + + Select **Custom**. + + - **Image Path**: SWR URL after the image is uploaded to SWR + + .. figure:: /_static/images/en-us_image_0000001156920769.png + :alt: **Figure 1** SWR image address + + + **Figure 1** SWR image address + + - **Code Directory**: OBS path for storing the training code file. + + - **Boot Command**: boot command after the image is started. The basic format is as follows: + + **bash /home/work/run_train.sh {UserCommand}** + + **bash /home/work/run_train.sh [python/bash/..] {file_location} {file_parameter}** + + **run_train.sh** is the training boot script. After this script is executed, ModelArts recursively downloads all content in the code directory to the local path of the container. The local path is in the format of **/home/work/user-job-dir/${**\ *Name of the last level in the code directory*}\ **/**. + + For example, if the OBS path of the training code file is **obs://obs-bucket/new/train.py** and the code directory is **obs://obs-bucket/new/**, the local path of the container is **/home/work/user-job-dir/new/**. The local training code path of the container is **/home/work/user-job-dir/new/train.py**. Then, you can set the boot command to the following: **bash /home/work/run_train.sh python /home/work/user-job-dir/new/train.py {python_file_parameter}** + + .. note:: + + If you create a training job using a custom image, ModelArts allows you to customize the boot command. The following are two basic formats for the boot command: + + **bash /home/work/run_train.sh {UserCommand}** + + **bash /home/work/run_train.sh [python/bash/..] {file_location} {file_parameter}** + + **run_train.sh** is the training boot script. When creating a custom image, you can implement the training boot script or place the training code in the custom image environment in advance to customize the boot command (in the basic formats or any other formats). 
+ +- **Environment Variable** + + After the container is started, besides the environment variables added by configuring **Environment Variable** during training job creation, `Table 1 <#modelarts230087enustopic0171858299table341782301619>`__ lists other environment variables to be loaded. You can determine whether to use these environment variables in your own Python training script, or run the **{python_file_parameter}** command to pass the required parameters. + + + +.. _modelarts230087enustopic0171858299table341782301619: + + .. table:: **Table 1** Optional environment variables + + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Environment Variable | Description | + +======================================+==============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | DLS_TASK_INDEX | Container index, starting from 0. | + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | DLS_TASK_NUMBER | Number of containers, corresponding to **Compute Nodes** | + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | DLS_APP_URL | Code directory, corresponding to **Code Dir** with the protocol name added. For example, you can use **$DLS_APP_URL/*.py** to read files in OBS. 
| + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | DLS_DATA_URL | Dataset path, corresponding to **Data Source** with the protocol name added | + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | DLS_TRAIN_URL | Training output path, corresponding to **Training Output Path** with the protocol name added | + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | BATCH_{jobName}.0_HOSTS (standalone) | For standalone training, that is, when the number of compute nodes is 1, the environment variable is **BATCH_{jobName}.0_HOSTS**. | + | | | + | | The format of the **HOSTS** environment variable is **hostname:port**. A container can view the **HOSTS** of all containers in the same job, such as **BATCH_CUSTOM0_HOSTS** and **BATCH_CUSTOM1_HOSTS**, varying according to the indexes. If the resource pool is a dedicated resource pool with the **8GPU** specifications, the network type of the container is a host network, and the host IB network can be used to accelerate communications. If other resource pools are used, the network is a container network. | + | | | + | | .. note:: | + | | | + | | When the host IB network is used for communication acceleration, the **ip_mapper.py** tool is required to obtain the IP address of the **ib0** NIC for using the IPoIB feature. | + +--------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +- **Resource Pool** + + If you select a resource pool of the GPU type, ModelArts mounts NVME SSDs to the **/cache** directory. You can use this directory to store temporary files. 
+ +Running a Training Job Created Using a Custom Image +--------------------------------------------------- + +After a custom image is uploaded to SWR, ModelArts is authorized to obtain and run the image by default when you create a training job using the custom image. When a custom image is run for the first time, the image is checked first. For details about the check, see `Specifications for Custom Images Used for Training Jobs <../../custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.html>`__. The check failure cause is outputted in the log, and you can modify the image based on the log. + +After the image is checked, the backend starts the custom image container to run the training job. You can view the training status based on the log. + +.. note:: + + After an image is reviewed, the image does not need to be reviewed again when being used to create training jobs again. + + diff --git a/umn/source/custom_images/for_training_models/index.rst b/umn/source/custom_images/for_training_models/index.rst new file mode 100644 index 0000000..616e1b9 --- /dev/null +++ b/umn/source/custom_images/for_training_models/index.rst @@ -0,0 +1,9 @@ +=================== +For Training Models +=================== + +.. toctree:: + :maxdepth: 1 + + specifications_for_custom_images_used_for_training_jobs + creating_a_training_job_using_a_custom_image_(gpu) diff --git a/umn/source/custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.rst b/umn/source/custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.rst new file mode 100644 index 0000000..86d7925 --- /dev/null +++ b/umn/source/custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.rst @@ -0,0 +1,134 @@ +Specifications for Custom Images Used for Training Jobs +======================================================= + +When creating an image using locally developed models and training scripts, ensure that they meet the specifications defined by ModelArts. + +Specifications +-------------- + +- Custom images cannot contain malicious code. +- Part of content in the basic images cannot be changed, including all the files in **/bin**, **/sbin**, **/usr**, and **/lib(64)**, some important configuration files in **/etc**, and the ModelArts tools in **$HOME**. +- A file cannot be added whose owner is **root** and has permission **setuid** or **setgid**. +- The size of a custom image cannot exceed 9.5 GB. + +- To ensure that the log content can be displayed normally, the logs must be standard output. +- The default user of a custom image must be the user whose UID is **1101**. +- Custom images can be developed based on basic ModelArts images. For details about the supported basic images, see `Overview of a Basic Image Package <#overview-of-a-basic-image-package>`__. +- Currently, the ModelArts backend does not support the download of open source installation packages. You are advised to install the dependency packages required for training in the custom image. + +Overview of a Basic Image Package +--------------------------------- + +To facilitate code download, training log output, and log file upload to OBS, ModelArts provides basic image packages for creating custom images. The basic images provided by ModelArts have the following features: + +- Some necessary tools are available in the basic image. You need to create a custom image based on the basic images provided by ModelArts. 
+- ModelArts continuously updates the basic image versions. For compatible updates, after the basic images are updated, you can still use the old images. For incompatible updates, the custom images created based on the old version cannot run on ModelArts, but the approved custom images can still be used. +- If a custom image fails to be approved and the audit log contains an error message indicating that the basic image does not match, you need to use a new basic image to create an image. + +Run the following command to obtain a ModelArts image: + +.. code-block:: + + docker pull
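A basic image obtained this way is typically extended with your own dependencies and, if needed, your training code. The following **Dockerfile** sketch is illustrative only: the **FROM** placeholder stands for the address of the basic image you pulled, the file names are examples, and it assumes **pip** is available in the basic image. Keep the restrictions above in mind, for example do not modify **/bin**, **/sbin**, **/usr**, or **/lib(64)**, and do not change the default user.

.. code-block::

   # Replace the placeholder with the address of the basic image you pulled.
   FROM <basic-image-address>

   # Install the dependency packages required for training; the ModelArts
   # backend does not download open source packages for you at runtime.
   COPY requirements.txt /tmp/requirements.txt
   RUN pip install --no-cache-dir -r /tmp/requirements.txt

   # Optionally place the training code in the image in advance.
   COPY train.py /home/work/train.py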
+ +After customizing an image, upload it to SWR. Make sure that you have created an organization and obtained the password for logging in to SWR. For details, see "Image Management" > "Uploading an Image Through SWR Console" in *Software Repository for Container User Guide*. + +.. code-block:: + + docker push swr..xxx.com// + +Obtain basic images based on chip requirements: + +- `CPU-based Basic Images <#cpu-based-basic-images>`__ +- `GPU-based Basic Images <#gpu-based-basic-images>`__ + +CPU-based Basic Images +---------------------- + +Address for obtaining a basic image + +.. code-block:: + + swr..xxx.com/modelarts-job-dev-image/custom-cpu-base:1.3 + +`Table 1 <#modelarts230217enustopic0212179951table42317014714>`__ and `Table 2 <#modelarts230217enustopic0212179951table624501372>`__ list the components and tools used by basic images. + + + +.. _modelarts230217enustopic0212179951table42317014714: + +.. table:: **Table 1** Components + + +--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Component | Description | + +==============+===================================================================================================================================================================================+ + | run_train.sh | Training boot script. You can download the code directory, run training commands, redirect training log output, and upload log files to OBS after training commands are executed. | + +--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230217enustopic0212179951table624501372: + +.. table:: **Table 2** Tool list + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Tool | Description | + +===================================+==========================================================================================================================================================+ + | utils.sh | Tool script. The **run_train.sh** script depends on this script. | + | | | + | | It provides methods such as SK decryption, code directory download, and log file upload. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | ip_mapper.py | Script for obtaining NIC addresses. | + | | | + | | By default, the IP address of the **ib0** NIC is obtained. Training code can use the IP address of the **ib0** NIC to accelerate network communications. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | dls-downloader.py | OBS download script. The **utils.sh** script depends on this script. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + +GPU-based Basic Images +---------------------- + +Addresses for obtaining a basic image + +.. 
code-block:: + + swr..xxx.com/modelarts-job-dev-image/custom-gpu-cuda9-base:1.3 + swr..xxx.com/modelarts-job-dev-image/custom-gpu-cuda92-base:1.3 + swr..xxx.com/modelarts-job-dev-image/custom-gpu-cuda10-cudnn74-base:1.1-4 + swr..xxx.com/modelarts-job-dev-image/custom-base-cuda10.0-cp36-ubuntu18.04-x86:1.1 + swr..xxx.com/modelarts-job-dev-image/custom-base-cuda10.1-cp36-ubuntu18.04-x86:1.1 + swr..xxx.com/modelarts-job-dev-image/custom-base-cuda10.2-cp36-ubuntu18.04-x86:1.1 + + + +.. _modelarts230217enustopic0212179951table137851182312: + +.. table:: **Table 3** Components + + +--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Component | Description | + +==============+===================================================================================================================================================================================+ + | run_train.sh | Training boot script. You can download the code directory, run training commands, redirect training log output, and upload log files to OBS after training commands are executed. | + +--------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230217enustopic0212179951table135271650237: + +.. table:: **Table 4** Tool list + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Tool | Description | + +===================================+==========================================================================================================================================================+ + | utils.sh | Tool script. The **run_train.sh** script depends on this script. | + | | | + | | It provides methods such as SK decryption, code directory download, and log file upload. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | ip_mapper.py | Script for obtaining NIC addresses. | + | | | + | | By default, the IP address of the **ib0** NIC is obtained. Training code can use the IP address of the **ib0** NIC to accelerate network communications. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + | dls-downloader.py | OBS download script. The **utils.sh** script depends on this script. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/custom_images/index.rst b/umn/source/custom_images/index.rst new file mode 100644 index 0000000..59673e9 --- /dev/null +++ b/umn/source/custom_images/index.rst @@ -0,0 +1,11 @@ +============= +Custom Images +============= + +.. 
toctree:: + :maxdepth: 1 + + introduction_to_custom_images + creating_and_uploading_a_custom_image + for_training_models/index + for_importing_models/index diff --git a/umn/source/custom_images/introduction_to_custom_images.rst b/umn/source/custom_images/introduction_to_custom_images.rst new file mode 100644 index 0000000..c25eb4c --- /dev/null +++ b/umn/source/custom_images/introduction_to_custom_images.rst @@ -0,0 +1,21 @@ +Introduction to Custom Images +============================= + +ModelArts provides multiple frequently-used built-in engines. However, when users have special requirements for the deep learning engine and development library, the built-in AI engines cannot meet user requirements. ModelArts provides the custom image function to allow users to customize engines. + +The bottom layer of ModelArts uses the container technology. Custom images refer to that users create container images and run them on ModelArts. The custom image function supports command line parameters and environment variables in free-text format. The custom images are highly flexible and support the job boot requirements of any computing engine. + +The following services are also required for creating a custom image: Software Repository for Container (SWR), OBS, and Elastic Cloud Server (ECS) + +Application Scenarios of Custom Images +-------------------------------------- + +- **For Training Models** + + If you have developed a model or training script locally and the AI engine you use is not supported by ModelArts, you can create a custom image based on the basic image packages provided by ModelArts and upload the custom image to SWR. Then, you can use the custom image to create a training job on ModelArts and use the resources provided by ModelArts to train models. + +- **For Importing Models** + + If you use an AI engine that is not supported by ModelArts to develop a model, you can create a custom image, import the image to ModelArts for unified management, and deploy the model as a service. + + diff --git a/umn/source/data_management/creating_a_dataset.rst b/umn/source/data_management/creating_a_dataset.rst new file mode 100644 index 0000000..cd2e928 --- /dev/null +++ b/umn/source/data_management/creating_a_dataset.rst @@ -0,0 +1,335 @@ +Creating a Dataset +================== + +To manage data using ModelArts, create a dataset. Then you can perform operations on the dataset, such as labeling data, importing data, and publishing the dataset. + +Prerequisites +------------- + +- Before using the data management function, you need permissions to access OBS. This function cannot be used if you are not authorized to access OBS. Before using the data management function, go to the **Settings** page and complete access authorization using an agency. +- You have created OBS buckets and folders for storing data. In addition, the OBS buckets and ModelArts are in the same region. +- You have uploaded data to be used to OBS. + +Procedure +--------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. +#. Click **Create Dataset**. On the **Create Dataset** page, create datasets of different types based on the data type and data labeling requirements. + + a. Set the basic information, the name and description of the dataset. + + .. figure:: /_static/images/en-us_image_0000001157080905.png + :alt: **Figure 1** Basic information about a dataset + + + **Figure 1** Basic information about a dataset + + b. 
Select a labeling scene and type as required. For details about the types supported by ModelArts, see `Dataset Types <../data_management/introduction_to_data_management.html#dataset-types>`__. + + .. figure:: /_static/images/en-us_image_0000001110761058.png + :alt: **Figure 2** Selecting a labeling scene and type + + + **Figure 2** Selecting a labeling scene and type + + c. Set the parameters based on the dataset type. For details, see the parameters of the following dataset types: + + - `Images (Image Classification, Object Detection, and Image Segmentation) <#images-(image-classification,-object-detection,-and-image-segmentation)>`__ + - `Audio (Sound Classification, Speech Labeling, and Speech Paragraph Labeling) <#audio-(sound-classification,-speech-labeling,-and-speech-paragraph-labeling)>`__ + - `Text (Text Classification, Named Entity Recognition, and Text Triplet) <#text-(text-classification,-named-entity-recognition,-and-text-triplet)>`__ + - `Table <#table>`__ + - `Video <#video>`__ + - `Other (Free Format) <#other-(free-format)>`__ + + d. Click **Create** in the lower right corner of the page. + + After the dataset is created, the dataset management page is displayed. You can perform the following operations on the dataset: label data, publish dataset versions, manage dataset versions, modify the dataset, import data, and delete the dataset. For details about the operations supported by different types of datasets, see . + +Images (Image Classification, Object Detection, and Image Segmentation) +----------------------------------------------------------------------- + +.. figure:: /_static/images/en-us_image_0000001157080911.png + :alt: **Figure 3** Parameters of datasets for image classification and object detection + + + **Figure 3** Parameters of datasets for image classification and object detection + + + +.. _modelarts230004enustopic0170886809table169611557277: + +.. table:: **Table 1** Dataset parameters + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=====================================================================================================================================================================================================================================================================================================================================================================================+ + | Input Dataset Path | Select the OBS path to the input dataset. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Dataset Path | Select the OBS path to the output dataset. | + | | | + | | .. note:: | + | | | + | | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. 
Select an empty directory as the **Output Dataset Path**. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set | - **Label Name**: Enter a label name. The label name can contain only letters, digits, underscores (_), and hyphens (-). The name contains 1 to 32 characters. | + | | | + | | - **Add Label**: Click **Add Label** to add more labels. | + | | | + | | - Setting a label color: This function is available only for datasets of the object detection type. Select a color from the color palette on the right of a label, or enter the hexadecimal color code to set the color. | + | | | + | | - Setting label attributes: For an object detection dataset, you can click the plus sign (+) on the right to add label attributes after setting a label color. Label attributes are used to distinguish different attributes of the objects with the same label. For example, yellow kittens and black kittens have the same label **cat** and their label attribute is **color**. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Team Labeling | Enable or disable team labeling. Image segmentation does not support team labeling. Therefore, this parameter is unavailable when you use image segmentation. | + | | | + | | After enabling team labeling, enter the name and type of the team labeling task, and select the labeling team and team members. For details about the parameter settings, see `Creating Team Labeling Tasks <../data_management/team_labeling/managing_team_labeling_tasks.html#creating-team-labeling-tasks>`__. | + | | | + | | Before enabling team labeling, ensure that you have added a team and members on the **Labeling Teams** page. If no labeling team is available, click the link on the page to go to the **Labeling Teams** page, and add your team and members. For details, see `Introduction to Team Labeling <../data_management/team_labeling/introduction_to_team_labeling.html>`__. | + | | | + | | After a dataset is created with team labeling enabled, you can view the **Team Labeling** mark in **Labeling Type**. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Audio (Sound Classification, Speech Labeling, and Speech Paragraph Labeling) +---------------------------------------------------------------------------- + +.. 
figure:: /_static/images/en-us_image_0000001157080903.png + :alt: **Figure 4** Parameters of datasets for sound classification, speech labeling, and speech paragraph labeling + + + **Figure 4** Parameters of datasets for sound classification, speech labeling, and speech paragraph labeling + + + +.. _modelarts230004enustopic0170886809table46851641358: + ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Parameter | Description | ++==============================================+======================================================================================================================================================================================================================================================================================================================================================================================================================================================+ +| Input Dataset Path | Select the OBS path to the input dataset. | ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Output Dataset Path | Select the OBS path to the output dataset. | +| | | +| | .. note:: | +| | | +| | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. Select an empty directory as the **Output Dataset Path**. | ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Label Set (Sound Classification) | Set labels only for datasets of the sound classification type. | +| | | +| | - **Label Name**: Enter a label name. The label name can contain only letters, digits, underscores (_), and hyphens (-). The name contains 1 to 32 characters. | +| | - **Add Label**: Click **Add Label** to add more labels. 
| ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Label Management (Speech Paragraph Labeling) | Only datasets for speech paragraph labeling support multiple labels. | +| | | +| | - **Single Label** | +| | | +| | A single label is used to label a piece of audio that has only one class. | +| | | +| | - **Label Name**: Enter a label name. The label name can contain contains 1 to 32 characters. Only letters, digits, underscores (_), and hyphens (-) are allowed. | +| | - **Label Color**: Set the label color in the **Label Color** column. You can select a color from the color palette or enter a hexadecimal color code to set the color. | +| | | +| | - **Multiple Labels** | +| | | +| | Multiple labels are suitable for multi-dimensional labeling. For example, you can label a piece of audio as both noise and speech. For speech, you can label the audio with different speakers. You can click **Add Label Class** to add multiple label classes. A label class can contain multiple labels. The label class and name can contain contains 1 to 32 characters. Only letters, digits, underscores (_), and hyphens (-) are allowed. | +| | | +| | - **Label Class**: Set a label class. | +| | - **Label Name**: Enter a label name. | +| | - **Add Label**: Click **Add Label** to add more labels. | ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Speech Labeling (Speech Paragraph Labeling) | Only datasets for speech paragraph labeling support speech labeling. By default, speech labeling is disabled. If this function is enabled, you can label speech content. | ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Team Labeling | Only datasets of speech paragraph labeling support team labeling. | +| | | +| | After enabling team labeling, set the name and type of the team labeling task, and select the team and team members. For details about the parameter settings, see `Creating Team Labeling Tasks <../data_management/team_labeling/managing_team_labeling_tasks.html#creating-team-labeling-tasks>`__. | +| | | +| | Before enabling team labeling, ensure that you have added a team and members on the **Labeling Teams** page. 
If no labeling team is available, click the link on the page to go to the **Labeling Teams** page, and add your team and members. For details, see `Introduction to Team Labeling <../data_management/team_labeling/introduction_to_team_labeling.html>`__. | +| | | +| | After a dataset is created with team labeling enabled, you can view the **Team Labeling** mark in **Labeling Type**. | ++----------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Text (Text Classification, Named Entity Recognition, and Text Triplet) +---------------------------------------------------------------------- + +.. figure:: /_static/images/en-us_image_0000001110920960.png + :alt: **Figure 5** Parameters of datasets for text classification, named entity recognition, and text triplet + + + **Figure 5** Parameters of datasets for text classification, named entity recognition, and text triplet + + + +.. _modelarts230004enustopic0170886809table8639141818387: + +.. table:: **Table 2** Dataset parameters + + +------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +==================================================================+=================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | Input Dataset Path | Select the OBS path to the input dataset. | + | | | + | | .. note:: | + | | | + | | Labeled text classification data can be identified only when you import data. When creating a dataset, set an empty OBS directory. After the dataset is created, import the labeled data into it. For details about the format of the data to be imported, see `Specifications for Importing Data from an OBS Directory <../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html>`__. | + +------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Dataset Path | Select the OBS path to the output dataset. | + | | | + | | .. 
note:: | + | | | + | | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. Select an empty directory as the **Output Dataset Path**. | + +------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set (for text classification and named entity recognition) | - **Label Name**: Enter a label name. The label name can contain only letters, digits, underscores (_), and hyphens (-). The name contains 1 to 32 characters. | + | | | + | | - **Add Label**: Click **Add Label** to add more labels. | + | | | + | | - Setting a label color: Select a color from the color palette or enter the hexadecimal color code to set the color. | + +------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set (for text triplet) | For datasets of the text triplet type, set entity labels and relationship labels. | + | | | + | | - **Entity Label**: Set the label name and label color. You can click the plus sign (+) on the right of the color area to add multiple labels. | + | | - **Relationship Label**: a relationship between two entities. Set the source entity and target entity. Therefore, add at least two entity labels before adding a relationship label. | + | | | + | | |image1| | + +------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Team Labeling | Enable or disable team labeling. | + | | | + | | After enabling team labeling, enter the name and type of the team labeling task, and select the labeling team and team members. For details about the parameter settings, see `Creating Team Labeling Tasks <../data_management/team_labeling/managing_team_labeling_tasks.html#creating-team-labeling-tasks>`__. | + | | | + | | Before enabling team labeling, ensure that you have added a team and members on the **Labeling Teams** page. If no labeling team is available, click the link on the page to go to the **Labeling Teams** page, and add your team and members. For details, see `Introduction to Team Labeling <../data_management/team_labeling/introduction_to_team_labeling.html>`__. | + | | | + | | After a dataset is created with team labeling enabled, you can view the **Team Labeling** mark in **Labeling Type**. 
| + +------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Table +----- + +.. note:: + + When using a CSV file, pay attention to the following: + + - When the data type is set to **String**, the data in the double quotation marks is regarded as one record by default. Ensure that the double quotation marks in the same row are closed. Otherwise, the data will be too large to display. + - If the number of columns in a row of the CSV file is different from that defined in the schema, the row will be ignored. + + + +.. _modelarts230004enustopic0170886809table23707015477: + +.. table:: **Table 3** Dataset parameters + + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+====================================================================================================================================================================================================================================================================================+ + | Storage Path | Select the OBS path for storing table data. The data imported from the data source is stored in this path. The path cannot be the same as or a subdirectory of the file path in the OBS data source. | + | | | + | | After a table dataset is created, the following four directories are automatically generated in the storage path: | + | | | + | | - **annotation**: version publishing directory. Each time a version is published, a subdirectory with the same name as the version is generated in this directory. | + | | - **data**: data storage directory. Imported data is stored in this directory. | + | | - **logs**: directory for storing logs | + | | - **temp**: temporary working directory | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Import | If you have stored table data on other cloud services, you can enable this function to import data stored on OBS, DLI, or MRS. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Data Source (OBS) | - **File Path**: Browse all OBS buckets of the account and select the directory where the data file to be imported is located. | + | | - **Contain Table Header**: If this parameter is enabled, the imported file contains table headers. In this case, the first row of the imported file is used as the column name. 
Otherwise, the default column name is added and automatically filled in the schema information. | + | | | + | | For details about OBS functions, see *Object Storage Service Console Operation Guide*. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Schema | Names and types of table columns, which must be the same as those of the imported data. Set the column name based on the imported data and select the column type. For details about the supported types, see `Table 4 <#modelarts230004enustopic0170886809table1916832104917>`__. | + | | | + | | Click **Add Schema** to add a new record. When creating a dataset, you must specify a schema. Once created, the schema cannot be modified. | + | | | + | | When data is imported from OBS, the schema of the CSV file in the file path is automatically obtained. If the schemas of multiple CSV files are inconsistent, an error is reported. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230004enustopic0170886809table1916832104917: + +.. table:: **Table 4** Migration data types + + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Type | Description | Storage Space | Range | + +===========+========================================================================+===============+=============================================+ + | String | String | - | - | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Short | Signed integer | 2 bytes | -32768 to 32767 | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Int | Signed integer | 4 bytes | –2147483648 to 2147483647 | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Long | Signed integer | 8 bytes | –9223372036854775808 to 9223372036854775807 | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Double | Double-precision floating point | 8 bytes | - | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Float | Single-precision floating point | 4 bytes | - | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Byte | Signed integer | 1 byte | -128 to 127 | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Date | Date type in the format of *yyyy-MM-dd*, for example, 2014-05-29 | - | - | + 
+-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Timestamp | Timestamp that represents date and time. Format: *yyyy-MM-dd HH:mm:ss* | - | - | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + | Boolean | Boolean | 1 byte | TRUE or FALSE | + +-----------+------------------------------------------------------------------------+---------------+---------------------------------------------+ + +Video +----- + +.. figure:: /_static/images/en-us_image_0000001157080907.png + :alt: **Figure 6** Parameters of datasets of the video type + + + **Figure 6** Parameters of datasets of the video type + + + +.. _modelarts230004enustopic0170886809table623753175616: + +.. table:: **Table 5** Dataset parameters + + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+============================================================================================================================================================================================+ + | Input Dataset Path | Select the OBS path to the input dataset. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Dataset Path | Select the OBS path to the output dataset. | + | | | + | | .. note:: | + | | | + | | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. Select an empty directory as the **Output Dataset Path**. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set | - **Label Name**: Enter a label name. The label name can contain only letters, digits, underscores (_), and hyphens (-). The name contains 1 to 32 characters. | + | | | + | | - **Add Label**: Click **Add Label** to add more labels. | + | | | + | | - Setting a label color: Select a color from the color palette or enter the hexadecimal color code to set the color. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Other (Free Format) +------------------- + +.. figure:: /_static/images/en-us_image_0000001156920933.png + :alt: **Figure 7** Parameters of datasets of the free format type + + + **Figure 7** Parameters of datasets of the free format type + + + +.. _modelarts230004enustopic0170886809table115315465714: + +.. 
table:: **Table 6** Dataset parameters + + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+============================================================================================================================================================================================+ + | Input Dataset Path | Select the OBS path to the input dataset. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Dataset Path | Select the OBS path to the output dataset. | + | | | + | | .. note:: | + | | | + | | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. Select an empty directory as the **Output Dataset Path**. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. |image1| image:: /_static/images/en-us_image_0000001156920935.png + diff --git a/umn/source/data_management/deleting_a_dataset.rst b/umn/source/data_management/deleting_a_dataset.rst new file mode 100644 index 0000000..62772ba --- /dev/null +++ b/umn/source/data_management/deleting_a_dataset.rst @@ -0,0 +1,20 @@ +Deleting a Dataset +================== + +If a dataset is no longer in use, you can delete it to release resources. + +.. note:: + + After a dataset is deleted, if you need to delete the data in the dataset input and output paths in OBS to release resources, delete the data and the OBS folders on the OBS Console. + +Procedure +--------- + +#. In the left navigation pane, choose **Data Management > Datasets**. On the **Datasets** page, choose **More > Delete** in the **Operation** column of the dataset. +#. In the displayed dialog box, click **OK**. + + .. note:: + + After a dataset is deleted, some functions such as dataset version management become unavailable. Exercise caution when performing this operation. However, the original data and labeling data of the dataset are still stored in OBS. + + diff --git a/umn/source/data_management/exporting_data.rst b/umn/source/data_management/exporting_data.rst new file mode 100644 index 0000000..5754d5f --- /dev/null +++ b/umn/source/data_management/exporting_data.rst @@ -0,0 +1,81 @@ +Exporting Data +============== + +A dataset includes labeled and unlabeled data. You can select images or filter data based on the filter criteria and export to a new dataset or the specified OBS directory. In addition, you can view the task history to learn about the export records. + +.. note:: + + Only datasets of image classification, object detection, image segmentation, and free format types can be exported. + + - For image classification datasets, only the label files in TXT format can be exported. + - For object detection datasets, only XML label files in Pascal VOC format can be exported. + - For image segmentation datasets, only XML label files in Pascal VOC format and mask images can be exported. + - For free format datasets, all files of the datasets can be exported. 
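For reference, an exported Pascal VOC label file for an object detection dataset has roughly the following shape. This fragment is purely illustrative; the file name, label, and coordinates are made-up values, and the exact set of fields written by ModelArts may differ.

.. code-block::

   <?xml version="1.0" encoding="UTF-8"?>
   <annotation>
       <filename>example.jpg</filename>
       <size>
           <width>640</width>
           <height>480</height>
           <depth>3</depth>
       </size>
       <object>
           <name>cat</name>
           <bndbox>
               <xmin>100</xmin>
               <ymin>80</ymin>
               <xmax>320</xmax>
               <ymax>260</ymax>
           </bndbox>
       </object>
   </annotation>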
+ +Exporting Data to a New Dataset +------------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset of the object detection or image classification type and click the dataset name to go to the **Dashboard** tab page of the dataset. + + .. note:: + + For a dataset of the free format type, you can click the dataset name to directly access the dataset details page and go to `4 <#modelarts230214enustopic0209632492li114071010139>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. + +#. On the dataset details page, select or filter data to be exported. Click **Export To** and choose **New Dataset** from the drop-down list. + +#. In the displayed **Export to New Dataset** dialog box, enter the related information and click **OK**. + + **Name**: name of the new dataset + + **Storage Path**: input path of the new dataset, that is, the OBS path where the data to be exported is stored + + **Output Path**: output path of the new dataset, that is, the output path after labeling is complete. The output path cannot be the same as the storage path, and the output path cannot be a subdirectory of the storage path. + + **Export Content**: The options are **Export the selected samples** and **Export all samples meeting filtering criteria**. + +#. After the data is exported, you can view the new dataset in the dataset list. + +Exporting Data to OBS +--------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset of the object detection or image classification type and click the dataset name to go to the **Dashboard** tab page of the dataset. + + .. note:: + + For a dataset of the free format type, you can click the dataset name to directly access the dataset details page and go to `4 <#modelarts230214enustopic0209632492li2056103713438>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. + +#. On the dataset details page, select or filter data to be exported. Click **Export To** and choose **OBS** from the drop-down list. + +#. In the displayed **Export to OBS** dialog box, enter the related information and click **OK**. + + **Storage Path**: path where the data to be exported is stored. You are advised not to save data to the input or output path of the current dataset. + + **Export Content**: The options are **Export the selected samples** and **Export all samples meeting filtering criteria**. + +#. After the data is exported, you can view it in the specified path. + +Viewing the Task History +------------------------ + +When you export data to a new dataset or OBS, you can view the export task details in the **View Task History** dialog box. + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. +#. In the dataset list, select the dataset of the object detection or image classification type and click the dataset name to go to the **Dashboard** tab page of the dataset. + + .. 
note:: + + For a dataset of the free format type, you can click the dataset name to directly access the dataset details page and go to `4 <#modelarts230214enustopic0209632492li19995141771413>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. +#. On the dataset details page, select or filter data to be exported. Click **Export To** and choose **View Task History** from the drop-down list. +#. In the **View Task History** dialog box, view the export task history of the current dataset. Information about **Task ID**, **Created**, **Type**, **Path**, **Total**, and **Status** is included. + + diff --git a/umn/source/data_management/importing_data/import_operation.rst b/umn/source/data_management/importing_data/import_operation.rst new file mode 100644 index 0000000..ff5739e --- /dev/null +++ b/umn/source/data_management/importing_data/import_operation.rst @@ -0,0 +1,116 @@ +Import Operation +================ + +After a dataset is created, you can directly synchronize data from the dataset. Alternatively, you can import more data by importing the dataset. Data can be imported from an OBS directory or the manifest file. + +Prerequisites +------------- + +- You have created a dataset. +- You have stored the data to be imported in OBS. You have stored the manifest file in OBS. +- The OBS buckets and ModelArts are in the same region. + +Import Modes +------------ + +There are two import modes: **OBS path** and **Manifest file**. + +- **OBS path**: indicates that the dataset to be imported has been stored in an OBS directory in advance. In this case, you need to select an OBS path that you can access. In addition, the directory structure in the OBS path must comply with the specifications. For details, see `Specifications for Importing Data from an OBS Directory <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html>`__. Only the following types of dataset support the **OBS path** import mode: **Image classification**, **Object detection**, **Text classification**, **Table**, and **Sound classification**. +- **Manifest file**: indicates that the dataset file is in the manifest format and data is imported from the manifest file. The manifest file defines the mapping between labeling objects and content. In addition, the manifest file has been uploaded to OBS. For details about the specifications of the manifest file, see `Specifications for Importing the Manifest File <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html>`__. + +.. note:: + + Before importing an object detection dataset, ensure that the labeling range of the labeling file does not exceed the size of the original image. Otherwise, the import may fail. + + + +.. _modelarts230006enustopic0171025430table11677122420123: + +.. 
table:: **Table 1** Import modes supported by datasets + + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Dataset Type | Importing Data from an OBS Path | Importing Data from a Manifest File | + +===========================+==================================================================================================================================================================================================+===================================================================================================================================================================================================+ + | Image classification | Supported | Supported | + | | | | + | | Follow the format specifications described in `Image Classification <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html#image-classification>`__. | Follow the format specifications described in `Image Classification <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#image-classification>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Object detection | Supported | Supported | + | | | | + | | Follow the format specifications described in `Object Detection <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html#object-detection>`__. | Follow the format specifications described in `Object Detection <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#object-detection>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Image segmentation | Supported | Supported | + | | | | + | | Follow the format specifications described in `Image Segmentation <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html#image-segmentation>`__. | Follow the format specifications described in `Image Segmentation <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#image-segmentation>`__. 
| + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Sound classification | Supported | Supported | + | | | | + | | Follow the format specifications described in `Sound Classification <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html#sound-classification>`__. | Follow the format specifications described in `Sound Classification <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#sound-classification>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Speech labeling | N/A | Supported | + | | | | + | | | Follow the format specifications described in `Speech Paragraph Labeling <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#speech-paragraph-labeling>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Speech paragraph labeling | N/A | Supported | + | | | | + | | | Follow the format specifications described in `Speech Labeling <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#speech-labeling>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Text classification | Supported | Supported | + | | | | + | | Follow the format specifications described in `Text Classification <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html#text-classification>`__. | Follow the format specifications described in `Text Classification <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#text-classification>`__. 
| + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Named entity recognition | N/A | Supported | + | | | | + | | | Follow the format specifications described in `Named Entity Recognition <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#named-entity-recognition>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Text triplet | N/A | Supported | + | | | | + | | | Follow the format specifications described in `Text Triplet <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#text-triplet>`__. | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Table | Supported | N/A | + | | | | + | | Follow the format specifications described in `Table <../../data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.html#table>`__. | | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Video | N/A | Supported | + | | | | + | | | Follow the format specifications described in `Video Labeling <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#video-labeling>`__. 
| + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Free format | N/A | N/A | + +---------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Importing Data from an OBS Path +------------------------------- + +The parameters on the GUI for data import vary according to the dataset type. The following uses a dataset of the image classification type as an example. + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. Locate the row that contains the desired dataset and choose **More > Import** in the **Operation** column. + + Alternatively, you can click the dataset name to go to the **Dashboard** tab page of the dataset, and click **Import** in the upper right corner. + +#. In the **Import** dialog box, set **Import Mode** to **OBS path** and set **OBS path** to the path for storing data. Then click **OK**. + + After the data import is successful, the data is automatically synchronized to the dataset. On the **Datasets** page, you can click the dataset name to view its details and label the data. + +Importing Data from a Manifest File +----------------------------------- + +The parameters on the GUI for data import vary according to the dataset type. The following uses a dataset of the object detection type as an example. Datasets of the table type cannot be imported from the manifest file. + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. Locate the row that contains the desired dataset and choose **More > Import** in the **Operation** column. + + Alternatively, you can click the dataset name to go to the **Dashboard** tab page of the dataset, and click **Import** in the upper right corner. + +#. In the **Import** dialog box, set the parameters as follows and click **OK**. + + - **Import Mode**: Select **Manifest file**. + - **Manifest file**: Select the OBS path for storing the manifest file. + - **Import by Label**: The system automatically obtains the labels of the dataset. You can click **Add Label** to add a label or click the deletion icon on the right to delete a label. This field is optional. After importing a dataset, you can add or delete labels during data labeling. + - **Import labels**: If this parameter is selected, the labels defined in the manifest file are imported to the ModelArts dataset. + + After the data import is successful, the data is automatically synchronized to the dataset. On the **Datasets** page, you can click the dataset name to go to the **Dashboard** tab page of the dataset, and click **Label** in the upper right corner. On the displayed dataset details page, view detailed data and label data. 
+ + diff --git a/umn/source/data_management/importing_data/index.rst b/umn/source/data_management/importing_data/index.rst new file mode 100644 index 0000000..f2e283c --- /dev/null +++ b/umn/source/data_management/importing_data/index.rst @@ -0,0 +1,10 @@ +============== +Importing Data +============== + +.. toctree:: + :maxdepth: 1 + + import_operation + specifications_for_importing_data_from_an_obs_directory + specifications_for_importing_the_manifest_file diff --git a/umn/source/data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.rst b/umn/source/data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.rst new file mode 100644 index 0000000..6c3abab --- /dev/null +++ b/umn/source/data_management/importing_data/specifications_for_importing_data_from_an_obs_directory.rst @@ -0,0 +1,295 @@ +Specifications for Importing Data from an OBS Directory +======================================================= + +When a dataset is imported, the data storage directory and file name must comply with the ModelArts specifications if the data to be used is stored in OBS. + +Only the following types of dataset support the **OBS path** import mode: **Image classification**, **Object detection**, **Text classification**, **Table**, and **Sound classification**. + +.. note:: + + To import data from an OBS directory, you must have the read permission on the OBS directory. + +Image Classification +-------------------- + +- Image classification data can be in two modes. The first mode (directory mode) supports only single labels. The second mode (**.txt** label files) supports multiple labels. + + - Images with the same label must be stored in the same directory, and the label name is the directory name. If there are multiple levels of directories, the last level is used as the label name. + + In the following example, **Cat** and **Dog** are label names. + + .. code-block:: + + dataset-import-example + ├─Cat + │ 10.jpg + │ 11.jpg + │ 12.jpg + │ + └─Dog + 1.jpg + 2.jpg + 3.jpg + + - If **.txt** files exist in the directory, the content in the **.txt** files is used as the image label. This mode is better than the previous one. + + In the following example, **import-dir-1** and **import-dir-2** are the imported subdirectories: + + .. code-block:: + + dataset-import-example + ├─import-dir-1 + │ 10.jpg + │ 10.txt + │ 11.jpg + │ 11.txt + │ 12.jpg + │ 12.txt + └─import-dir-2 + 1.jpg + 1.txt + 2.jpg + 2.txt + + The following shows a label file for a single label, for example, the **1.txt** file: + + .. code-block:: + + Cat + + The following shows a label file for multiple labels, for example, the **1.txt** file: + + .. code-block:: + + Cat + Dog + +- Only images in JPG, JPEG, PNG, and BMP formats are supported. The size of a single image cannot exceed 5 MB, and the total size of all images uploaded at a time cannot exceed 8 MB. + +Object Detection +---------------- + +- The simple mode of object detection requires users store labeled objects and their label files (in one-to-one relationship with the labeled objects) in the same directory. For example, if the name of the labeled object file is **IMG_20180919_114745.jpg**, the name of the label file must be **IMG_20180919_114745.xml**. + + The label files for object detection must be in PASCAL VOC format. For details about the format, see `Table 8 <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#modelarts230009enustopic0170886817table77167388472>`__. 
+
+  Example:
+
+  .. code-block::
+
+     ├─dataset-import-example
+     │ IMG_20180919_114732.jpg
+     │ IMG_20180919_114732.xml
+     │ IMG_20180919_114745.jpg
+     │ IMG_20180919_114745.xml
+     │ IMG_20180919_114945.jpg
+     │ IMG_20180919_114945.xml
+
+  A label file example is as follows:
+
+  .. code-block::
+
+     <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+     <annotation>
+        <folder>NA</folder>
+        <filename>bike_1_1593531469339.png</filename>
+        <source>
+           <database>Unknown</database>
+        </source>
+        <size>
+           <width>554</width>
+           <height>606</height>
+           <depth>3</depth>
+        </size>
+        <segmented>0</segmented>
+        <object>
+           <name>Dog</name>
+           <pose>Unspecified</pose>
+           <truncated>0</truncated>
+           <occluded>0</occluded>
+           <difficult>0</difficult>
+           <bndbox>
+              <xmin>279</xmin>
+              <ymin>52</ymin>
+              <xmax>474</xmax>
+              <ymax>278</ymax>
+           </bndbox>
+        </object>
+        <object>
+           <name>Cat</name>
+           <pose>Unspecified</pose>
+           <truncated>0</truncated>
+           <occluded>0</occluded>
+           <difficult>0</difficult>
+           <bndbox>
+              <xmin>279</xmin>
+              <ymin>198</ymin>
+              <xmax>456</xmax>
+              <ymax>421</ymax>
+           </bndbox>
+        </object>
+     </annotation>
+
+- Only images in JPG, JPEG, PNG, and BMP formats are supported. The size of a single image cannot exceed 5 MB, and the total size of all images uploaded at a time cannot exceed 8 MB.
+
+Image Segmentation
+------------------
+
+- The simple mode of image segmentation requires users to store labeled objects and their label files (in one-to-one relationship with the labeled objects) in the same directory. For example, if the name of the labeled object file is **IMG_20180919_114746.jpg**, the name of the label file must be **IMG_20180919_114746.xml**.
+
+  Fields **mask_source** and **mask_color** are added to the label file in PASCAL VOC format. For details about the format, see `Table 4 <../../data_management/importing_data/specifications_for_importing_the_manifest_file.html#modelarts230009enustopic0170886817table1516151991311>`__.
+
+  Example:
+
+  .. code-block::
+
+     ├─dataset-import-example
+     │ IMG_20180919_114732.jpg
+     │ IMG_20180919_114732.xml
+     │ IMG_20180919_114745.jpg
+     │ IMG_20180919_114745.xml
+     │ IMG_20180919_114945.jpg
+     │ IMG_20180919_114945.xml
+
+  A label file example is as follows:
+
+  .. code-block::
+
+     <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+     <annotation>
+        <folder>NA</folder>
+        <filename>image_0006.jpg</filename>
+        <source>
+           <database>Unknown</database>
+        </source>
+        <size>
+           <width>230</width>
+           <height>300</height>
+           <depth>3</depth>
+        </size>
+        <segmented>1</segmented>
+        <mask_source>obs://xianao/out/dataset-8153-Jmf5ylLjRmSacj9KevS/annotation/V001/segmentationClassRaw/image_0006.png</mask_source>
+        <object>
+           <name>bike</name>
+           <pose>Unspecified</pose>
+           <truncated>0</truncated>
+           <occluded>0</occluded>
+           <mask_color>193,243,53</mask_color>
+           <difficult>0</difficult>
+           <polygon>
+              <x1>71</x1>
+              <y1>48</y1>
+              <x2>75</x2>
+              <y2>73</y2>
+              <x3>49</x3>
+              <y3>69</y3>
+              <x4>68</x4>
+              <y4>92</y4>
+              <x5>90</x5>
+              <y5>101</y5>
+              <x6>45</x6>
+              <y6>110</y6>
+              <x7>71</x7>
+              <y7>48</y7>
+           </polygon>
+        </object>
+     </annotation>
+
+Text Classification
+-------------------
+
+Text classification supports two import modes.
+
+- The labeled objects and labels for text classification are in the same text file. You can specify a separator to separate the labeled objects and labels, as well as multiple labeled objects.
+
+  For example, the following shows an example text file.
The **Tab** key is used to separate the labeled object from the label. + + .. code-block:: + + It touches good and responds quickly. I don't know how it performs in the future. positive + Three months ago, I bought a very good phone and replaced my old one with it. It can operate longer between charges. positive + Why does my phone heat up if I charge it for a while? The volume button stuck after being pressed down. negative + It's a gift for Father's Day. The logistics is fast and I received it in 24 hours. I like the earphones because the bass sounds feel good and they would not fall off. positive + +- The labeled objects and label files for text classification are text files, and correspond to each other based on the rows. For example, the first row in a label file indicates the label of the first row in the file of the labeled object. + + For example, the content of labeled object **COMMENTS_20180919_114745.txt** is as follows: + + .. code-block:: + + It touches good and responds quickly. I don't know how it performs in the future. + Three months ago, I bought a very good phone and replaced my old one with it. It can operate longer between charges. + Why does my phone heat up if I charge it for a while? The volume button stuck after being pressed down. + It's a gift for Father's Day. The logistics is fast and I received it in 24 hours. I like the earphones because the bass sounds feel good and they would not fall off. + + The content of label file **COMMENTS_20180919_114745_result.txt** is as follows: + + .. code-block:: + + positive + negative + negative + positive + + The data format requires users to store labeled objects and their label files (in one-to-one relationship with the labeled objects) in the same directory. For example, if the name of the labeled object file is **COMMENTS_20180919_114745.txt**, the name of the label file must be **COMMENTS \_20180919_114745_result.txt**. + + Example of data file storage: + + .. code-block:: + + ├─dataset-import-example + │ COMMENTS_20180919_114732.txt + │ COMMENTS _20180919_114732_result.txt + │ COMMENTS _20180919_114745.txt + │ COMMENTS _20180919_114745_result.txt + │ COMMENTS _20180919_114945.txt + │ COMMENTS _20180919_114945_result.txt + +Sound Classification +-------------------- + +For sound classification, sound files with the same label must be stored in the same directory, and the label name is the directory name. + +Example: + +.. code-block:: + + dataset-import-example + ├─Cat + │ 10.wav + │ 11.wav + │ 12.wav + │ + └─Dog + 1.wav + 2.wav + 3.wav + +Table +----- + +You can import data from OBS. + +Import description: + +#. The prerequisite for successful import is that the schema of the data source must be the same as that specified during dataset creation. The schema indicates column names and types of a table. Once specified during dataset creation, the values cannot be changed. +#. If the data format is invalid, the data is set to null values. For details, see `Table 4 <../../data_management/creating_a_dataset.html#modelarts230004enustopic0170886809table1916832104917>`__. +#. When a CSV file is imported from OBS, the data type is not verified, but the number of columns must be the same as that in the schema of the dataset. + +- From OBS + + CSV files can be imported from OBS. You need to select the directory where the files are stored. The number of columns in the CSV file must be the same as that in the dataset schema. The schema of the CSV file can be automatically obtained. + + .. 
code-block::
+
+     ├─dataset-import-example
+     │ table_import_1.csv
+     │ table_import_2.csv
+     │ table_import_3.csv
+     │ table_import_4.csv
+
+
diff --git a/umn/source/data_management/importing_data/specifications_for_importing_the_manifest_file.rst b/umn/source/data_management/importing_data/specifications_for_importing_the_manifest_file.rst
new file mode 100644
index 0000000..ac155ec
--- /dev/null
+++ b/umn/source/data_management/importing_data/specifications_for_importing_the_manifest_file.rst
@@ -0,0 +1,959 @@
+Specifications for Importing the Manifest File
+==============================================
+
+The manifest file defines the mapping between labeling objects and content. The **Manifest file** import mode means that the dataset is imported using a manifest file, which can be imported from OBS. When importing a manifest file from OBS, ensure that the current user has the permission to access the directory where the manifest file is stored.
+
+.. note::
+
+   There are many requirements on manifest file compilation, so you are advised to import new data from an OBS directory instead. Generally, manifest file import is used for data migration of ModelArts across regions or accounts. If you have labeled data in a region using ModelArts, you can obtain the manifest file of the published dataset from its output path and then use that manifest file to import the dataset to ModelArts in another region or under another account. The imported data carries the labeling information and does not need to be labeled again, improving development efficiency.
+
+The manifest file that contains information about the original file and labeling can be used in labeling, training, and inference scenarios. The manifest file that contains only information about the original file can be used in inference scenarios or to generate an unlabeled dataset. The manifest file must meet the following requirements:
+
+- The manifest file uses the UTF-8 encoding format. The **source** value of text classification can contain Chinese characters. However, Chinese characters are not recommended for other parameters.
+
+- The manifest file uses the JSON Lines format (**jsonlines.org**). Each line contains one JSON object.
+
+  .. code-block::
+
+     {"source": "/path/to/image1.jpg", "annotation": ... }
+     {"source": "/path/to/image2.jpg", "annotation": ... }
+     {"source": "/path/to/image3.jpg", "annotation": ... }
+
+  In the preceding example, the manifest file contains multiple lines, each of which is one JSON object.
+
+- The manifest file can be generated by users, third-party tools, or ModelArts Data Labeling. The file name can be any valid file name. To facilitate the internal use of the ModelArts system, the file name generated by the ModelArts Data Labeling function consists of the following character strings: *DatasetName*-*VersionName*\ **.manifest**. For example, **animal-v201901231130304123.manifest**.
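+
+A manifest file that meets these requirements can be produced with any tool that writes one JSON object per line in UTF-8. The following minimal Python sketch writes a two-sample manifest; the entries are illustrative only, and the exact **annotation** structure for each labeling type is described in the following sections.
+
+.. code-block::
+
+   # Minimal sketch: write a manifest file in JSON Lines format
+   # (one JSON object per line, UTF-8 encoded). Sample entries are illustrative.
+   import json
+
+   samples = [
+       {"source": "s3://path/to/image1.jpg",
+        "usage": "TRAIN",
+        "annotation": [{"type": "modelarts/image_classification",
+                        "name": "Cat",
+                        "annotated-by": "human"}]},
+       {"source": "s3://path/to/image2.jpg"},  # unlabeled sample, no "annotation" field
+   ]
+
+   with open("animal-v201901231130304123.manifest", "w", encoding="utf-8") as f:
+       for sample in samples:
+           f.write(json.dumps(sample, ensure_ascii=False) + "\n")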
+ +Image Classification +-------------------- + ++-----------------------------------+--------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | { | +| 2 | "source":"s3://path/to/image1.jpg", | +| 3 | "usage":"TRAIN", | +| 4 | "id":"0162005993f8065ef47eefb59d1e4970", | +| 5 | "annotation": [ | +| 6 | { | +| 7 | "type": "modelarts/image_classification", | +| 8 | "name": "cat", | +| 9 | "property": { | +| 10 | "color":"white", | +| 11 | "kind":"Persian cat" | +| 12 | }, | +| 13 | "annotated-by":"human", | +| 14 | "creation-time":"2019-01-23 11:30:30" | +| 15 | }, | +| 16 | { | +| 17 | "type": "modelarts/image_classification", | +| 18 | "name":"animal", | +| 19 | "annotated-by":"modelarts/active-learning", | +| 20 | "confidence": 0.8, | +| 21 | "creation-time":"2019-01-23 11:30:30" | +| 22 | }], | +| 23 | "inference-loc":"/path/to/inference-output" | +| 24 | } | ++-----------------------------------+--------------------------------------------------------------+ + + + +.. _modelarts230009enustopic0170886817table598984218223: + +.. table:: **Table 1** Parameters + + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Description | + +=======================+=======================+======================================================================================================================================================================================================================================+ + | source | Yes | URI of an object to be labeled. For details about data source types and examples, see `Table 2 <#modelarts230009enustopic0170886817table9303122642318>`__. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | usage | No | By default, the parameter value is left blank. Possible values are as follows: | + | | | | + | | | - **TRAIN**: The object is used for training. | + | | | - **EVAL**: The object is used for evaluation. | + | | | - **TEST**: The object is used for testing. | + | | | - **INFERENCE**: The object is used for inference. | + | | | | + | | | If the parameter value is left blank, the user decides how to use the object. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | id | No | Sample ID exported from the system. You do not need to set this parameter when importing the sample. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | annotation | No | If the parameter value is left blank, the object is not labeled. The value of **annotation** consists of an object list. For details about the parameters, see `Table 3 <#modelarts230009enustopic0170886817table48141825192716>`__. 
| + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | inference-loc | No | This parameter is available when the file is generated by the inference service, indicating the location of the inference result file. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230009enustopic0170886817table9303122642318: + +.. table:: **Table 2** Data source types + + ======= ============================================ + Type Example + ======= ============================================ + OBS "source":"s3://path-to-jpg" + Content "source":"content://I love machine learning" + ======= ============================================ + + + +.. _modelarts230009enustopic0170886817table48141825192716: + +.. table:: **Table 3** **annotation** objects + + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Description | + +=======================+=======================+================================================================================================================================================================================================================================================================================+ + | type | Yes | Label type. Possible values are as follows: | + | | | | + | | | - **image_classification**: image classification | + | | | - **text_classification**: text classification | + | | | - **text_entity**: named entity recognition | + | | | - **object_detection**: object detection | + | | | - **audio_classification**: sound classification | + | | | - **audio_content**: speech labeling | + | | | - **audio_segmentation**: speech paragraph labeling | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | name | Yes/No | This parameter is mandatory for the classification type but optional for other types. This example uses the image classification type. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | id | Yes/No | Label ID. This parameter is mandatory for triplets but optional for other types. The entity label ID of a triplet is in **E+number** format, for example, **E1** and **E2**. The relationship label ID of a triplet is in **R+number** format, for example, **R1** and **R2**. 
| + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | property | No | Labeling property. In this example, the cat has two properties: color and kind. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | annotated-by | No | The default value is **human**, indicating manual labeling. | + | | | | + | | | - human | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | creation-time | No | Time when the labeling job was created. It is the time when labeling information was written, not the time when the manifest file was generated. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | confidence | No | Confidence score of machine labeling. The value ranges from 0 to 1. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Image Segmentation +------------------ + +.. code-block:: + + { + "annotation": [{ + "annotation-format": "PASCAL VOC", + "type": "modelarts/image_segmentation", + "annotation-loc": "s3://path/to/annotation/image1.xml", + "creation-time": "2020-12-16 21:36:27", + "annotated-by": "human" + }], + "usage": "train", + "source": "s3://path/to/image1.jpg", + "id": "16d196c19bf61994d7deccafa435398c", + "sample-type": 0 + } + +- The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. +- **annotation-loc** indicates the path for saving the label file. This parameter is mandatory for image segmentation and object detection but optional for other labeling types. +- **annotation-format** indicates the format of the label file. This parameter is optional. The default value is **PASCAL VOC**. Only **PASCAL VOC** is supported. +- **sample-type** indicates a sample format. Value **0** indicates image, **1** text, **2** audio, **4** table, and **6** video. + + + +.. _modelarts230009enustopic0170886817table1516151991311: + +.. 
table:: **Table 4** PASCAL VOC format parameters + + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Description | + +=======================+=======================+===============================================================================================================================================================================+ + | folder | Yes | Directory where the data source is located | + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | filename | Yes | Name of the file to be labeled | + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | size | Yes | Image pixel | + | | | | + | | | - **width**: image width. This parameter is mandatory. | + | | | - **height**: image height. This parameter is mandatory. | + | | | - **depth**: number of image channels. This parameter is mandatory. | + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | segmented | Yes | Segmented or not | + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | mask_source | No | Segmentation mask path | + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | object | Yes | Object detection information. Multiple **object{}** functions are generated for multiple objects. | + | | | | + | | | - **name**: class of the labeled content. This parameter is mandatory. | + | | | - **pose**: shooting angle of the labeled content. This parameter is mandatory. | + | | | - **truncated**: whether the labeled content is truncated (**0** indicates that the content is not truncated). This parameter is mandatory. | + | | | - **occluded**: whether the labeled content is occluded (**0** indicates that the content is not occluded). This parameter is mandatory. | + | | | - **difficult**: whether the labeled object is difficult to identify (**0** indicates that the object is easy to identify). This parameter is mandatory. | + | | | - **confidence**: confidence score of the labeled object. The value ranges from 0 to 1. This parameter is optional. | + | | | - **bndbox**: bounding box type. This parameter is mandatory. For details about the possible values, see `Table 5 <#modelarts230009enustopic0170886817table181711917139>`__. | + | | | - **mask_color**: label color, which is represented by the RGB value. This parameter is mandatory. 
| + +-----------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230009enustopic0170886817table181711917139: + +.. table:: **Table 5** Bounding box types + + +-----------------------+-----------------------+-----------------------+ + | Type | Shape | Labeling Information | + +=======================+=======================+=======================+ + | polygon | Polygon | Coordinates of points | + | | | | + | | | 100 | + | | | | + | | | 100 | + | | | | + | | | 200 | + | | | | + | | | 100 | + | | | | + | | | 250 | + | | | | + | | | 150 | + | | | | + | | | 200 | + | | | | + | | | 200 | + | | | | + | | | 100 | + | | | | + | | | 200 | + | | | | + | | | 50 | + | | | | + | | | 150 | + | | | | + | | | 100 | + | | | | + | | | 100 | + +-----------------------+-----------------------+-----------------------+ + +Example: + +.. code-block:: + + + + NA + image_0006.jpg + + Unknown + + + 230 + 300 + 3 + + 1 + obs://xianao/out/dataset-8153-Jmf5ylLjRmSacj9KevS/annotation/V001/segmentationClassRaw/image_0006.png + + bike + Unspecified + 0 + 0 + 193,243,53 + 0 + + 71 + 48 + 75 + 73 + 49 + 69 + 68 + 92 + 90 + 101 + 45 + 110 + 71 + 48 + + + + +Text Classification +------------------- + +.. code-block:: + + { + "source": "content://I like this product ", + "id":"XGDVGS", + "annotation": [ + { + "type": "modelarts/text_classification", + "name": " positive", + "annotated-by": "human", + "creation-time": "2019-01-23 11:30:30" + } ] + } + +The **content** parameter indicates the text to be labeled (in UTF-8 encoding format, which can be Chinese). The other parameters are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. + +Named Entity Recognition +------------------------ + +.. code-block:: + + { + "source":"content://Michael Jordan is the most famous basketball player in the world.", + "usage":"TRAIN", + "annotation":[ + { + "type":"modelarts/text_entity", + "name":"Person", + "property":{ + "@modelarts:start_index":0, + "@modelarts:end_index":14 + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + }, + { + "type":"modelarts/text_entity", + "name":"Category", + "property":{ + "@modelarts:start_index":34, + "@modelarts:end_index":44 + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + } + ] + } + +The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. + +`Table 6 <#modelarts230009enustopic0170886817table8486339124912>`__ describes the property parameters. For example, if you want to extract **Michael** from **"source":"content://Michael Jordan"**, the value of **start_index** is **0** and that of **end_index** is **7**. + + + +.. _modelarts230009enustopic0170886817table8486339124912: + +.. 
table:: **Table 6** Description of **property** parameters + + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------+ + | Parameter | Data Type | Description | + +========================+===========+=============================================================================================================+ + | @modelarts:start_index | Integer | Start position of the text. The value starts from 0, including the characters specified by **start_index**. | + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------+ + | @modelarts:end_index | Integer | End position of the text, excluding the characters specified by **end_index**. | + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------+ + +Text Triplet +------------ + +.. code-block:: + + { + "source":"content://"Three Body" is a series of long science fiction novels created by Liu Cix.", + "usage":"TRAIN", + "annotation":[ + { + "type":"modelarts/text_entity", + "name":"Person", + "id":"E1", + "property":{ + "@modelarts:start_index":67, + "@modelarts:end_index":74 + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + }, + { + "type":"modelarts/text_entity", + "name":"Book", + "id":"E2", + "property":{ + "@modelarts:start_index":0, + "@modelarts:end_index":12 + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + }, + { + "type":"modelarts/text_triplet", + "name":"Author", + "id":"R1", + "property":{ + "@modelarts:from":"E1", + "@modelarts:to":"E2" + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + }, + { + "type":"modelarts/text_triplet", + "name":"Works", + "id":"R2", + "property":{ + "@modelarts:from":"E2", + "@modelarts:to":"E1" + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + } + ] + } + +The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. + +`Table 5 property parameters <#modelarts230009enustopic0170886817table134893213914>`__ describes the **property** parameters. **@modelarts:start_index** and **@modelarts:end_index** are the same as those of named entity recognition. For example, when **source** is set to **content://"Three Body" is a series of long science fiction novels created by Liu Cix.**, **Liu Cix** is an entity person, **Three Body** is an entity book, the person is the author of the book, and the book is works of the person. + + + +.. _modelarts230009enustopic0170886817table134893213914: + +.. table:: **Table 7** Description of **property** parameters + + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Data Type | Description | + +========================+===========+=========================================================================================================================+ + | @modelarts:start_index | Integer | Start position of the triplet entities. The value starts from 0, including the characters specified by **start_index**. 
| + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------------------+ + | @modelarts:end_index | Integer | End position of the triplet entities, excluding the characters specified by **end_index**. | + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------------------+ + | @modelarts:from | String | Start entity ID of the triplet relationship. | + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------------------+ + | @modelarts:to | String | Entity ID pointed to in the triplet relationship. | + +------------------------+-----------+-------------------------------------------------------------------------------------------------------------------------+ + +Object Detection +---------------- + +.. code-block:: + + { + "source":"s3://path/to/image1.jpg", + "usage":"TRAIN", + "annotation": [ + { + "type":"modelarts/object_detection", + "annotation-loc": "s3://path/to/annotation1.xml", + "annotation-format":"PASCAL VOC", + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + }] + } + +- The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. +- **annotation-loc** indicates the path for saving the label file. This parameter is mandatory for object detection and image segmentation but optional for other labeling types. +- **annotation-format** indicates the format of the label file. This parameter is optional. The default value is **PASCAL VOC**. Only **PASCAL VOC** is supported. + + + +.. _modelarts230009enustopic0170886817table77167388472: + +.. table:: **Table 8** PASCAL VOC format parameters + + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Description | + +=======================+=======================+================================================================================================================================================================================+ + | folder | Yes | Directory where the data source is located | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | filename | Yes | Name of the file to be labeled | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | size | Yes | Image pixel | + | | | | + | | | - **width**: image width. This parameter is mandatory. | + | | | - **height**: image height. This parameter is mandatory. | + | | | - **depth**: number of image channels. This parameter is mandatory. 
| + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | segmented | Yes | Segmented or not | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | object | Yes | Object detection information. Multiple **object{}** functions are generated for multiple objects. | + | | | | + | | | - **name**: class of the labeled content. This parameter is mandatory. | + | | | - **pose**: shooting angle of the labeled content. This parameter is mandatory. | + | | | - **truncated**: whether the labeled content is truncated (**0** indicates that the content is not truncated). This parameter is mandatory. | + | | | - **occluded**: whether the labeled content is occluded (**0** indicates that the content is not occluded). This parameter is mandatory. | + | | | - **difficult**: whether the labeled object is difficult to identify (**0** indicates that the object is easy to identify). This parameter is mandatory. | + | | | - **confidence**: confidence score of the labeled object. The value ranges from 0 to 1. This parameter is optional. | + | | | - **bndbox**: bounding box type. This parameter is mandatory. For details about the possible values, see `Table 9 <#modelarts230009enustopic0170886817table1770752310500>`__. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230009enustopic0170886817table1770752310500: + +.. 
table:: **Table 9** Description of bounding box types
+
+   +-----------+-----------+---------------------------------------------------------+
+   | Type      | Shape     | Labeling Information                                    |
+   +===========+===========+=========================================================+
+   | point     | Point     | Coordinates of a point                                  |
+   |           |           |                                                         |
+   |           |           | <x>100</x>                                              |
+   |           |           |                                                         |
+   |           |           | <y>100</y>                                              |
+   +-----------+-----------+---------------------------------------------------------+
+   | line      | Line      | Coordinates of points                                   |
+   |           |           |                                                         |
+   |           |           | <x1>100</x1>                                            |
+   |           |           |                                                         |
+   |           |           | <y1>100</y1>                                            |
+   |           |           |                                                         |
+   |           |           | <x2>200</x2>                                            |
+   |           |           |                                                         |
+   |           |           | <y2>200</y2>                                            |
+   +-----------+-----------+---------------------------------------------------------+
+   | bndbox    | Rectangle | Coordinates of the upper left and lower right points    |
+   |           |           |                                                         |
+   |           |           | <xmin>100</xmin>                                        |
+   |           |           |                                                         |
+   |           |           | <ymin>100</ymin>                                        |
+   |           |           |                                                         |
+   |           |           | <xmax>200</xmax>                                        |
+   |           |           |                                                         |
+   |           |           | <ymax>200</ymax>                                        |
+   +-----------+-----------+---------------------------------------------------------+
+   | polygon   | Polygon   | Coordinates of points                                   |
+   |           |           |                                                         |
+   |           |           | <x1>100</x1>                                            |
+   |           |           |                                                         |
+   |           |           | <y1>100</y1>                                            |
+   |           |           |                                                         |
+   |           |           | <x2>200</x2>                                            |
+   |           |           |                                                         |
+   |           |           | <y2>100</y2>                                            |
+   |           |           |                                                         |
+   |           |           | <x3>250</x3>                                            |
+   |           |           |                                                         |
+   |           |           | <y3>150</y3>                                            |
+   |           |           |                                                         |
+   |           |           | <x4>200</x4>                                            |
+   |           |           |                                                         |
+   |           |           | <y4>200</y4>                                            |
+   |           |           |                                                         |
+   |           |           | <x5>100</x5>                                            |
+   |           |           |                                                         |
+   |           |           | <y5>200</y5>                                            |
+   |           |           |                                                         |
+   |           |           | <x6>50</x6>                                             |
+   |           |           |                                                         |
+   |           |           | <y6>150</y6>                                            |
+   +-----------+-----------+---------------------------------------------------------+
+   | circle    | Circle    | Center coordinates and radius                           |
+   |           |           |                                                         |
+   |           |           | <cx>100</cx>                                            |
+   |           |           |                                                         |
+   |           |           | <cy>100</cy>                                            |
+   |           |           |                                                         |
+   |           |           | <r>50</r>                                               |
+   +-----------+-----------+---------------------------------------------------------+
+
+Example:
+
+.. code-block::
+
+   <annotation>
+      <folder>test_data</folder>
+      <filename>260730932.jpg</filename>
+      <size>
+         <width>767</width>
+         <height>959</height>
+         <depth>3</depth>
+      </size>
+      <segmented>0</segmented>
+      <object>
+         <name>point</name>
+         <pose>Unspecified</pose>
+         <truncated>0</truncated>
+         <occluded>0</occluded>
+         <difficult>0</difficult>
+         <point>
+            <x>456</x>
+            <y>596</y>
+         </point>
+      </object>
+      <object>
+         <name>line</name>
+         <pose>Unspecified</pose>
+         <truncated>0</truncated>
+         <occluded>0</occluded>
+         <difficult>0</difficult>
+         <line>
+            <x1>133</x1>
+            <y1>651</y1>
+            <x2>229</x2>
+            <y2>561</y2>
+         </line>
+      </object>
+      <object>
+         <name>bag</name>
+         <pose>Unspecified</pose>
+         <truncated>0</truncated>
+         <occluded>0</occluded>
+         <difficult>0</difficult>
+         <bndbox>
+            <xmin>108</xmin>
+            <ymin>101</ymin>
+            <xmax>251</xmax>
+            <ymax>238</ymax>
+         </bndbox>
+      </object>
+      <object>
+         <name>boots</name>
+         <pose>Unspecified</pose>
+         <truncated>0</truncated>
+         <occluded>0</occluded>
+         <difficult>0</difficult>
+         <polygon>
+            <x1>373</x1>
+            <y1>264</y1>
+            <x2>500</x2>
+            <y2>198</y2>
+            <x3>437</x3>
+            <y3>76</y3>
+            <x4>310</x4>
+            <y4>142</y4>
+         </polygon>
+      </object>
+      <object>
+         <name>circle</name>
+         <pose>Unspecified</pose>
+         <truncated>0</truncated>
+         <occluded>0</occluded>
+         <difficult>0</difficult>
+         <circle>
+            <cx>405</cx>
+            <cy>170</cy>
+            <r>100</r>
+         </circle>
+      </object>
+   </annotation>
+
+Sound Classification
+--------------------
+
+.. code-block::
+
+   {
+       "source": "s3://path/to/pets.wav",
+       "annotation": [
+           {
+               "type": "modelarts/audio_classification",
+               "name":"cat",
+               "annotated-by":"human",
+               "creation-time":"2019-01-23 11:30:30"
+           }
+       ]
+   }
+
+The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__.
+
+Speech Labeling
+---------------
+
+.. code-block::
+
+   {
+       "source":"s3://path/to/audio1.wav",
+       "annotation":[
+           {
+               "type":"modelarts/audio_content",
+               "property":{
+                   "@modelarts:content":"Today is a good day."
+               },
+               "annotated-by":"human",
+               "creation-time":"2019-01-23 11:30:30"
+           }
+       ]
+   }
+
+- The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__.
+- The **@modelarts:content** parameter in **property** indicates speech labeling. The data type is **String**.
+
+Speech Paragraph Labeling
+-------------------------
+
+..
code-block:: + + { + "source":"s3://path/to/audio1.wav", + "usage":"TRAIN", + "annotation":[ + { + + "type":"modelarts/audio_segmentation", + "property":{ + "@modelarts:start_time":"00:01:10.123", + "@modelarts:end_time":"00:01:15.456", + + "@modelarts:source":"Tom", + + "@modelarts:content":"How are you?" + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + }, + { + "type":"modelarts/audio_segmentation", + "property":{ + "@modelarts:start_time":"00:01:22.754", + "@modelarts:end_time":"00:01:24.145", + "@modelarts:source":"Jerry", + "@modelarts:content":"I'm fine, thank you." + }, + "annotated-by":"human", + "creation-time":"2019-01-23 11:30:30" + } + ] + } + +- The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. + +- `Table 10 <#modelarts230009enustopic0170886817table1151144815513>`__ describes the **property** parameters. + +.. _modelarts230009enustopic0170886817table1151144815513: + + .. table:: **Table 10** Description of **property** parameters + + +-----------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Data Type | Description | + +=======================+=======================+=============================================================================================================================+ + | @modelarts:start_time | String | Start time of the sound. The format is **hh:mm:ss.SSS**. | + | | | | + | | | **hh** indicates the hour, **mm** indicates the minute, **ss** indicates the second, and **SSS** indicates the millisecond. | + +-----------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ + | @modelarts:end_time | String | End time of the sound. The format is **hh:mm:ss.SSS**. | + | | | | + | | | **hh** indicates the hour, **mm** indicates the minute, **ss** indicates the second, and **SSS** indicates the millisecond. | + +-----------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ + | @modelarts:source | String | Sound source | + +-----------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ + | @modelarts:content | String | Sound content | + +-----------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------+ + +Video Labeling +-------------- + +.. 
code-block:: + + { + "annotation": [{ + "annotation-format": "PASCAL VOC", + "type": "modelarts/object_detection", + "annotation-loc": "s3://path/to/annotation1_t1.473722.xml", + "creation-time": "2020-10-09 14:08:24", + "annotated-by": "human" + }], + "usage": "train", + "property": { + "@modelarts:parent_duration": 8, + "@modelarts:parent_source": "s3://path/to/annotation1.mp4", + "@modelarts:time_in_video": 1.473722 + }, + "source": "s3://input/path/to/annotation1_t1.473722.jpg", + "id": "43d88677c1e9a971eeb692a80534b5d5", + "sample-type": 0 + } + +- The parameters such as **source**, **usage**, and **annotation** are the same as those described in `Image Classification <#image-classification>`__. For details, see `Table 1 <#modelarts230009enustopic0170886817table598984218223>`__. +- **annotation-loc** indicates the path for saving the label file. This parameter is mandatory for object detection but optional for other labeling types. +- **annotation-format** indicates the format of the label file. This parameter is optional. The default value is **PASCAL VOC**. Only **PASCAL VOC** is supported. +- **sample-type** indicates a sample format. Value **0** indicates image, **1** text, **2** audio, **4** table, and **6** video. + + + +.. _modelarts230009enustopic0170886817table178351411132818: + +.. table:: **Table 11** **property** parameters + + +----------------------------+-----------+--------------------------------------------------+ + | Parameter | Data Type | Description | + +============================+===========+==================================================+ + | @modelarts:parent_duration | Double | Duration of the labeled video, in seconds | + +----------------------------+-----------+--------------------------------------------------+ + | @modelarts:time_in_video | Double | Timestamp of the labeled video frame, in seconds | + +----------------------------+-----------+--------------------------------------------------+ + | @modelarts:parent_source | String | OBS path of the labeled video | + +----------------------------+-----------+--------------------------------------------------+ + + + +.. _modelarts230009enustopic0170886817table259920384918: + +.. table:: **Table 12** PASCAL VOC format parameters + + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Description | + +=======================+=======================+================================================================================================================================================================================+ + | folder | Yes | Directory where the data source is located | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | filename | Yes | Name of the file to be labeled | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | size | Yes | Image pixel | + | | | | + | | | - **width**: image width. This parameter is mandatory. | + | | | - **height**: image height. This parameter is mandatory. | + | | | - **depth**: number of image channels. 
This parameter is mandatory. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | segmented | Yes | Segmented or not | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | object | Yes | Object detection information. Multiple **object{}** functions are generated for multiple objects. | + | | | | + | | | - **name**: class of the labeled content. This parameter is mandatory. | + | | | - **pose**: shooting angle of the labeled content. This parameter is mandatory. | + | | | - **truncated**: whether the labeled content is truncated (**0** indicates that the content is not truncated). This parameter is mandatory. | + | | | - **occluded**: whether the labeled content is occluded (**0** indicates that the content is not occluded). This parameter is mandatory. | + | | | - **difficult**: whether the labeled object is difficult to identify (**0** indicates that the object is easy to identify). This parameter is mandatory. | + | | | - **confidence**: confidence score of the labeled object. The value ranges from 0 to 1. This parameter is optional. | + | | | - **bndbox**: bounding box type. This parameter is mandatory. For details about the possible values, see `Table 13 <#modelarts230009enustopic0170886817table869624041814>`__. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230009enustopic0170886817table869624041814: + +.. 
table:: **Table 13** Bounding box types + + +-----------------------+-----------------------+------------------------------------------------------+ + | Type | Shape | Labeling Information | + +=======================+=======================+======================================================+ + | point | Point | Coordinates of a point | + | | | | + | | | 100 | + | | | | + | | | 100 | + +-----------------------+-----------------------+------------------------------------------------------+ + | line | Line | Coordinates of points | + | | | | + | | | 100 | + | | | | + | | | 100 | + | | | | + | | | 200 | + | | | | + | | | 200 | + +-----------------------+-----------------------+------------------------------------------------------+ + | bndbox | Rectangle | Coordinates of the upper left and lower right points | + | | | | + | | | 100 | + | | | | + | | | 100 | + | | | | + | | | 200 | + | | | | + | | | 200 | + +-----------------------+-----------------------+------------------------------------------------------+ + | polygon | Polygon | Coordinates of points | + | | | | + | | | 100 | + | | | | + | | | 100 | + | | | | + | | | 200 | + | | | | + | | | 100 | + | | | | + | | | 250 | + | | | | + | | | 150 | + | | | | + | | | 200 | + | | | | + | | | 200 | + | | | | + | | | 100 | + | | | | + | | | 200 | + | | | | + | | | 50 | + | | | | + | | | 150 | + +-----------------------+-----------------------+------------------------------------------------------+ + | circle | Circle | Center coordinates and radius | + | | | | + | | | 100 | + | | | | + | | | 100 | + | | | | + | | | 50 | + +-----------------------+-----------------------+------------------------------------------------------+ + +Example: + +.. code-block:: + + + test_data + 260730932_t1.473722.jpg.jpg + + 767 + 959 + 3 + + 0 + + point + Unspecified + 0 + 0 + 0 + + 456 + 596 + + + + line + Unspecified + 0 + 0 + 0 + + 133 + 651 + 229 + 561 + + + + bag + Unspecified + 0 + 0 + 0 + + 108 + 101 + 251 + 238 + + + + boots + Unspecified + 0 + 0 + 0 + + 373 + 264 + 500 + 198 + 437 + 76 + 310 + 142 + + + + circle + Unspecified + 0 + 0 + 0 + + 405 + 170 + 100 + + + + + diff --git a/umn/source/data_management/index.rst b/umn/source/data_management/index.rst new file mode 100644 index 0000000..bc5fa0f --- /dev/null +++ b/umn/source/data_management/index.rst @@ -0,0 +1,17 @@ +=============== +Data Management +=============== + +.. toctree:: + :maxdepth: 1 + + introduction_to_data_management + creating_a_dataset + labeling_data/index + importing_data/index + exporting_data + modifying_a_dataset + publishing_a_dataset + deleting_a_dataset + managing_dataset_versions + team_labeling/index diff --git a/umn/source/data_management/introduction_to_data_management.rst b/umn/source/data_management/introduction_to_data_management.rst new file mode 100644 index 0000000..8be6e3b --- /dev/null +++ b/umn/source/data_management/introduction_to_data_management.rst @@ -0,0 +1,94 @@ +Introduction to Data Management +=============================== + +In ModelArts, you can import and label data on the **Data Management** page to prepare for model building. ModelArts uses datasets as the basis for model development or training. + +Dataset Types +------------- + +ModelArts supports datasets of images, audio, text, tables, videos, and other types for the following purposes: + +- Images + + - Image classification: identifies a class of objects in images. + - Object detection: identifies the position and class of each object in an image. 
+ - Image segmentation: identifies the outline of each object in an image. + +- Audio + + - Sound classification: classifies and identifies different sounds. + - Speech labeling: labels speech content. + - Speech paragraph labeling: segments and labels speech content. + +- Text + + - Text classification: assigns labels to text according to its content. + - Named entity recognition: assigns labels to named entities in text, such as time and locations. + - Text triplet: assigns labels to entity segments and entity relationships in the text. + +- Tables + + - Table: applies to structured data processing such as tables. The file format can be CSV. You can preview a maximum of 100 records in a table. + +- Videos + + - Video labeling: identifies the position and class of each object in a video. Only the MP4 format is supported. + +- Others + + - Free format: manages data in any format. Labeling is not available for data of the free format type. The free format type is applicable to scenarios where labeling is not required or developers customize labeling. If your dataset needs to contain data in multiple formats or your data format does not meet the requirements of other types of datasets, you can select a dataset in free format. + + .. figure:: /_static/images/en-us_image_0000001156920919.png + :alt: **Figure 1** Example of a dataset in free format + + + **Figure 1** Example of a dataset in free format + +Dataset Management Process and Functions +---------------------------------------- + + + +.. _modelarts230003enustopic0171496996table145501032184813: + +.. table:: **Table 1** Function description + + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Function | Description | + +=========================================================================================================+=============================================================================================================================================================================================+ + | `Creating a Dataset <../data_management/creating_a_dataset.html>`__ | Create a dataset. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Image Classification <../data_management/labeling_data/image_classification.html>`__ | Label data based on the types of datasets. Data labeling is not supported for datasets in free format or table format. 
| + | | | + | `Object Detection <../data_management/labeling_data/object_detection.html>`__ | | + | | | + | `Text Classification <../data_management/labeling_data/text_classification.html>`__ | | + | | | + | `Named Entity Recognition <../data_management/labeling_data/named_entity_recognition.html>`__ | | + | | | + | `Text Triplet <../data_management/labeling_data/text_triplet.html>`__ | | + | | | + | `Sound Classification <../data_management/labeling_data/sound_classification.html>`__ | | + | | | + | `Speech Labeling <../data_management/labeling_data/speech_labeling.html>`__ | | + | | | + | `Speech Paragraph Labeling <../data_management/labeling_data/speech_paragraph_labeling.html>`__ | | + | | | + | `Video Labeling <../data_management/labeling_data/video_labeling.html>`__ | | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Import Operation <../data_management/importing_data/import_operation.html>`__ | Import the local manifest file or data stored in OBS to the dataset. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Exporting Data <../data_management/exporting_data.html>`__ | Export part of the data as a new dataset or to OBS. Historical tasks can be viewed and managed. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Modifying a Dataset <../data_management/modifying_a_dataset.html>`__ | Modify the basic information about a dataset, such as the dataset name, description, and labels. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Publishing a Dataset <../data_management/publishing_a_dataset.html>`__ | Publish the labeled dataset as a new version for model building. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Managing Dataset Versions <../data_management/managing_dataset_versions.html>`__ | View data version updates. 
| + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Introduction to Team Labeling <../data_management/team_labeling/introduction_to_team_labeling.html>`__ | Allow multiple users to label the same dataset and enable the dataset creator to manage labeling tasks in a unified manner. Add a team and its members to participate in labeling datasets. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Deleting a Dataset <../data_management/deleting_a_dataset.html>`__ | Delete a dataset to release resources. | + +---------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/data_management/labeling_data/image_classification.rst b/umn/source/data_management/labeling_data/image_classification.rst new file mode 100644 index 0000000..11d2e29 --- /dev/null +++ b/umn/source/data_management/labeling_data/image_classification.rst @@ -0,0 +1,129 @@ +Image Classification +==================== + +Model training uses a large number of labeled images. Therefore, before the model training, add labels to the images that are not labeled. You can add labels to images by manual labeling or auto labeling. In addition, you can modify the labels of images, or remove their labels and label the images again. + +Before labeling an image in image classification scenarios, you need to understand the following: + +- You can add multiple labels to an image. +- A label name can contain a maximum of 32 characters, including Chinese characters, letters, digits, hyphens (-), and underscores (_). + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing Data Sources +-------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. + +- For an image classification dataset, the .txt file with the same name in the same directory as the data source is used as the label of the target image. 
+- For an object detection dataset or image segmentation dataset, the .xml file with the same name in the same directory is used as the label of the target image. + +To quickly obtain the latest data in the OBS bucket, on the **All** or **Unlabeled** tab page of the dataset details page, click **Synchronize Data Source** to add data from OBS to the dataset. + +Filtering Data +-------------- + +On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed, showing all data in the dataset by default. On the **All**, **Unlabeled**, or **Labeled** tab page, you can add filter criteria in the filter criteria area to quickly filter the data you want to view. + +The following filter criteria are supported. You can set one or more filter criteria. + +- **Label**: Select **All** or one or more labels you specified. +- **Sample Creation Time**: Select **Within 1 month**, **Within 1 day**, or **Custom** to customize a time range. +- **File Name** or **Path**: Filter files by file name or file storage path. +- **Labeled By**: Select the name of the user who performs the labeling operation. + +Labeling Images (Manually) +-------------------------- + +The dataset details page displays images on the **All**, **Labeled**, and **Unlabeled** tabs. Images on the **All** tab page are displayed by default. Click an image to preview it. For the images that have been labeled, the label information is displayed at the bottom of the preview page. + +#. On the **Unlabeled** tab page, select the images to be labeled. + + - Manual selection: In the image list, click the selection box in the upper left corner of an image to enter the selection mode, indicating that the image is selected. You can select multiple images of the same type and add labels to them together. + - Batch selection: If all the images on the current page of the image list belong to the same type, you can click **Select Images on Current Page** in the upper right corner to select all the images on the current page. + +#. Add labels to the selected images. + + a. In the label adding area on the right, set the label in the **Label** text box. + + Click the **Label** text box and select an existing label from the drop-down list. If the existing labels cannot meet the requirements, you can go to the page for `modifying the dataset <../../data_management/modifying_a_dataset.html>`__ and add labels. + + b. Confirm the **Labels of Selected Image** information and click **OK**. The selected image is automatically moved to the **Labeled** tab page. On the **Unlabeled** and **All** tab pages, the labeling information is updated along with the labeling process, including the added label names and the number of images for each label. + +Viewing Labeled Images +---------------------- + +On the dataset details page, click the **Labeled** tab to view the list of the labeled images. By default, the corresponding labels are displayed under the image thumbnails. You can also select an image and view the label information of the image in the **File Labels** area on the right. + +Modifying Labeling Information +------------------------------ + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +- **Modifying based on images** + + On the dataset details page, click the **Labeled** tab, and select one or more images to be modified from the image list. Modify the image information in the label information area on the right. 
+ + Modifying a label: In the **File Labels** area, click the edit icon in the **Operation** column, enter the correct label name in the text box, and click the check mark to complete the modification. + + Deleting a label: In the **File Labels** area, click the delete icon in the **Operation** column to delete the label. This operation deletes only the labels added to the selected image. + + .. figure:: /_static/images/en-us_image_0000001156921011.png + :alt: **Figure 1** Modifying a label + + + **Figure 1** Modifying a label + +- **Modifying based on labels** + + On the dataset details page, click the **Labeled** tab. The information about all labels is displayed on the right. + + - Modifying a label: Click the editing icon in the **Operation** column. In the dialog box that is displayed, enter the new label name and click **OK**. After the modification, the images that have been added with the label use the new label name. + - Deleting a label: Click the deletion icon in the **Operation** column. In the displayed dialog box, select **Delete label**, **Delete label and images with only the label (Do not delete source files)**, or **Delete label and images with only the label (Delete source files)**, and click **OK**. + + .. figure:: /_static/images/en-us_image_0000001157080983.png + :alt: **Figure 2** Information about all labels + + + **Figure 2** Information about all labels + +Adding Images +------------- + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add images on ModelArts for data labeling. + +#. On the dataset details page, click the **All** or **Unlabeled** tab. Then click **Add**. + +#. On the **Add** page that is displayed, click **Add Image**. + + Select one or more images to be uploaded in the local environment. Images in JPG, JPEG, PNG, and BMP formats are supported. The size of a single image cannot exceed 5 MB, and the total size of all images uploaded at a time cannot exceed 8 MB. + + After the images are selected, their thumbnails and sizes are displayed on the **Add** page. + +#. On the **Add** page, click **OK**. + + The images you have added will be automatically displayed in the image list on the **Unlabeled** tab page. In addition, the images are automatically saved to the OBS directory specified by **Input Dataset Path**. + +Deleting Images +--------------- + +You can quickly delete the images you want to discard. + +On the **All**, **Unlabeled**, or **Labeled** tab page, select the images to be deleted or click **Select Images on Current Page** to select all images on the page, and click **Delete** in the upper left corner to delete the images. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the images. + +If a tick is displayed in the upper left corner of an image, the image is selected. If no image is selected on the page, the **Delete** button is unavailable. + +.. note:: + + If you select **Delete source files**, images stored in the corresponding OBS directory will be deleted when you delete the selected images. Deleting source files may affect other dataset versions or datasets using those files. As a result, the page display, training, or inference is abnormal. Deleted data cannot be recovered. Exercise caution when performing this operation. 
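+
+Example: Preparing Label Files for Synchronization
+-------------------------------------------------------
+
+The .txt label files described in the Synchronizing Data Sources section above can be prepared before the data is uploaded to the OBS directory specified by **Input Dataset Path**. The following is a minimal, hypothetical sketch. It assumes that each **.txt** file simply contains the label name; this page does not define the exact content of the label file, so verify the format against your own data before clicking **Synchronize Data Source**.
+
+.. code-block:: python
+
+   from pathlib import Path
+
+   # Hypothetical mapping from image file name to its label; adjust to your data.
+   labels = {
+       "cat_001.jpg": "cat",
+       "dog_001.jpg": "dog",
+   }
+
+   # Local mirror of the directory that will be uploaded to the OBS input path.
+   data_dir = Path("./dataset")
+
+   for image_name, label in labels.items():
+       image_path = data_dir / image_name
+       if not image_path.exists():
+           continue  # skip entries that have no matching image
+       # Write a .txt file with the same name, in the same directory as the
+       # image, so it can be picked up as the image label after synchronization.
+       image_path.with_suffix(".txt").write_text(label, encoding="utf-8")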
+ + diff --git a/umn/source/data_management/labeling_data/image_segmentation.rst b/umn/source/data_management/labeling_data/image_segmentation.rst new file mode 100644 index 0000000..62167fa --- /dev/null +++ b/umn/source/data_management/labeling_data/image_segmentation.rst @@ -0,0 +1,227 @@ +Image Segmentation +================== + +Training a model uses a large number of labeled images. Therefore, label images before the model training. You can label images on the ModelArts management console. Alternatively, modify labels, or delete them and label them again. + +Before labeling an image in image segmentation scenarios, you need to understand the following: + +- All objects whose contours need to be extracted from the image must be labeled. +- Polygons and points can be used for labeling. + + - In polygon labeling, draw a polygon based on the outline of the target object. + - In point labeling, label the top, bottom, leftmost, and rightmost points on the object contour. The system will infer the outline of the object based on the labeled points. For images with complex backgrounds, it is a good practice to use polygons for labeling. + +- When labeling an image, ensure that the polygons or points are within the image. Otherwise, an error will occur in subsequent operations. + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing Data Sources +-------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. + +- For an image classification dataset, the .txt file with the same name in the same directory as the data source is used as the label of the target image. +- For an object detection dataset or image segmentation dataset, the .xml file with the same name in the same directory is used as the label of the target image. + +To quickly obtain the latest data in the OBS bucket, on the **All** or **Unlabeled** tab page of the dataset details page, click **Synchronize Data Source** to add data from OBS to the dataset. + +Filtering Data +-------------- + +On the **Dashboard** tab page of the dataset, the summary of the dataset is displayed by default. In the upper right corner of the page, click **Label**. The dataset details page is displayed, showing all data in the dataset by default. On the **All**, **Unlabeled**, or **Labeled** tab page, you can add filter criteria in the filter criteria area to quickly filter the data you want to view. + +The following filter criteria are supported. You can set one or more filter criteria. + +- **Label**: Select **All** or one or more labels you specified. 
+- **Sample Creation Time**: Select **Within 1 month**, **Within 1 day**, or **Custom** to customize a time range. +- **File Name** or **Path**: Filter files by file name or file storage path. +- **Labeled By**: Select the name of the user who labeled the image. + +Manually Labeling Images +------------------------ + +The dataset details page provides the **Labeled** and **Unlabeled** tabs. The **All** tab page is displayed by default. + +#. On the **Unlabeled** tab page, click an image. The system automatically directs you to the page for labeling the image. For details about how to use common buttons on this page, see `Table 2 <#modelarts230345enustopic0000001126398947table194471512463>`__. + +#. Select a labeling method. + + On the labeling page, common `labeling methods <#modelarts230345enustopic0000001126398947table165201739119>`__ and `buttons <#modelarts230345enustopic0000001126398947table194471512463>`__ are provided in the toolbar. By default, polygon labeling is selected. Use polygon or point labeling as needed. + + .. note:: + + After you select a method to label the first image, the labeling method automatically applies to subsequent images. + + .. figure:: /_static/images/en-us_image_0000001110920986.png + :alt: **Figure 1** Toolbar + + + **Figure 1** Toolbar + + + +.. _modelarts230345enustopic0000001126398947table165201739119: + + .. table:: **Table 1** Labeling methods + + +----------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Icon | Description | + +==========+================================================================================================================================================================================================================================================================================================================+ + | |image3| | Polygon labeling. In the area where the object to be labeled is located, click to label a point, move the mouse and click multiple points along the edge of the object, and then click the first point again. All the points form a polygon. In this way, the object to be labeled is within the bounding box. | + +----------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image4| | Point labeling. Label the top, bottom, leftmost, and rightmost points on the object contour. The system will infer the outline of the object based on the labeled points. | + +----------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230345enustopic0000001126398947table194471512463: + + .. 
table:: **Table 2** Toolbar buttons + + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Icon | Description | + +===========+========================================================================================================================================================+ + | |image14| | Cancel the previous operation. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image15| | Redo the previous operation. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image16| | Zoom in an image. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image17| | Zoom out an image. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image18| | Delete all bounding boxes on the current image. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image19| | Display or hide a bounding box. This operation can be performed only on a labeled image. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image20| | Drag a bounding box to another position or drag the edge of the bounding box to resize it. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image21| | Reset a bounding box. After dragging a bounding box, you can click this button to quickly restore the bounding box to its original shape and position. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image22| | Display the labeled image in full screen. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. Label an object. + + This section uses point labeling as an example. Identify an object in an image. Click to label the top, bottom, leftmost, and rightmost points on the object contour. In the dialog box that is displayed, set the label name and click **Add**. Then, the system automatically infers the object contour. + + After labeling an image, click an image that has not been labeled in the image list below to label the new image. + + .. figure:: /_static/images/en-us_image_0000001110761086.gif + :alt: **Figure 2** Labeling an object outline + + + **Figure 2** Labeling an object outline + +#. Click **Back to Data Labeling Preview** in the upper left part of the page to view the labeling information. In the dialog box that is displayed, click **OK** to save the labeling settings. + + The selected image is automatically moved to the **Labeled** tab page. 
On the **Unlabeled** and **All** tab pages, the labeling information is updated along with the labeling process, including the added label names and the number of images for each label. + +Viewing Labeled Images +---------------------- + +On the dataset details page, click the **Labeled** tab to view the list of labeled images. Click an image to view its labeling information in the **File Labels** area on the right. + +Modifying a Label +----------------- + +After labeling an object, you can modify labeled data on the **Labeled** tab page. + +On the dataset details page, click the **Labeled** tab and then the image to be modified. On the labeling page that is displayed, modify the labeling information in the **File Labels** area on the right. + +- Modifying a label: In the **Labeling** area, click the edit icon, set the target label name or color in the displayed dialog box, and click the save icon to save the modification. Alternatively, click a label to be modified. In the image labeling area, adjust the position and size of the bounding box. After the adjustment is complete, click another label to save the modification. +- Modifying image labeling information: In the area for displaying images, click the target bounding box. Then, blue points on the bounding box are displayed. Drag a blue point and adjust the bounding box to the edge of the object. +- Deleting a label: In the **Labeling** area, click the deletion icon to delete a label from the image. After all labels of an image are deleted, the image is displayed on the **Unlabeled** tab page. + +After the labeling information is modified, click **Back to Data Labeling Preview** in the upper left part of the page to exit the labeling page. In the dialog box that is displayed, click **OK** to save the modification. + +.. figure:: /_static/images/en-us_image_0000001156920955.gif + :alt: **Figure 3** Editing image labeling information + + + **Figure 3** Editing image labeling information + +Adding Images +------------- + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add images on ModelArts for data labeling. + +#. On the dataset details page, click the **All** or **Unlabeled** tab. Then click **Add**. + +#. On the **Add** page that is displayed, click **Add Image**. + + Select one or more images to be uploaded in the local environment. Images in JPG, JPEG, PNG, and BMP formats are supported. The size of a single image cannot exceed 5 MB, and the total size of all images uploaded at a time cannot exceed 8 MB. + + After the images are selected, their thumbnails and sizes are displayed on the **Add** page. + +#. On the **Add** page, click **OK**. + + The images you have added will be automatically displayed in the image list on the **Unlabeled** tab page. In addition, the images are automatically saved to the OBS directory specified by **Input Dataset Path**. + +Deleting Images +--------------- + +You can quickly delete the images you want to discard. + +On the **All**, **Unlabeled**, or **Labeled** tab page, select the images to be deleted or click **Select Images on Current Page** to select all images on the page, and click **Delete** in the upper left corner to delete the images. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the images. + +If a tick is displayed in the upper left corner of an image, the image is selected. If no image is selected on the page, the **Delete** button is unavailable. + +.. 
note:: + + If you select **Delete source files**, images stored in the corresponding OBS directory will be deleted when you delete the selected images. Deleting source files may affect other dataset versions or datasets using those files. As a result, the page display, training, or inference is abnormal. Deleted data cannot be recovered. Exercise caution when performing this operation. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110920998.png + +.. |image2| image:: /_static/images/en-us_image_0000001156920965.png + +.. |image3| image:: /_static/images/en-us_image_0000001110920998.png + +.. |image4| image:: /_static/images/en-us_image_0000001156920965.png + +.. |image5| image:: /_static/images/en-us_image_0000001110761078.png + +.. |image6| image:: /_static/images/en-us_image_0000001110761088.png + +.. |image7| image:: /_static/images/en-us_image_0000001110920974.png + +.. |image8| image:: /_static/images/en-us_image_0000001110920976.png + +.. |image9| image:: /_static/images/en-us_image_0000001157080931.png + +.. |image10| image:: /_static/images/en-us_image_0000001156920961.png + +.. |image11| image:: /_static/images/en-us_image_0000001110761074.png + +.. |image12| image:: /_static/images/en-us_image_0000001156920967.png + +.. |image13| image:: /_static/images/en-us_image_0000001156920951.png + +.. |image14| image:: /_static/images/en-us_image_0000001110761078.png + +.. |image15| image:: /_static/images/en-us_image_0000001110761088.png + +.. |image16| image:: /_static/images/en-us_image_0000001110920974.png + +.. |image17| image:: /_static/images/en-us_image_0000001110920976.png + +.. |image18| image:: /_static/images/en-us_image_0000001157080931.png + +.. |image19| image:: /_static/images/en-us_image_0000001156920961.png + +.. |image20| image:: /_static/images/en-us_image_0000001110761074.png + +.. |image21| image:: /_static/images/en-us_image_0000001156920967.png + +.. |image22| image:: /_static/images/en-us_image_0000001156920951.png + diff --git a/umn/source/data_management/labeling_data/index.rst b/umn/source/data_management/labeling_data/index.rst new file mode 100644 index 0000000..62ba02b --- /dev/null +++ b/umn/source/data_management/labeling_data/index.rst @@ -0,0 +1,17 @@ +============= +Labeling Data +============= + +.. toctree:: + :maxdepth: 1 + + image_classification + object_detection + image_segmentation + text_classification + named_entity_recognition + text_triplet + sound_classification + speech_labeling + speech_paragraph_labeling + video_labeling diff --git a/umn/source/data_management/labeling_data/named_entity_recognition.rst b/umn/source/data_management/labeling_data/named_entity_recognition.rst new file mode 100644 index 0000000..59b4a6f --- /dev/null +++ b/umn/source/data_management/labeling_data/named_entity_recognition.rst @@ -0,0 +1,100 @@ +Named Entity Recognition +======================== + +Named entity recognition assigns labels to named entities in text, such as time and locations. Before labeling, you need to understand the following: + +- A label name can contain a maximum of 32 characters, including Chinese characters, letters, digits, hyphens (-), and underscores (_). + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. 
+ + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Labeling Content +---------------- + +The dataset details page displays the labeled and unlabeled text files in the dataset. The **Unlabeled** tab page is displayed by default. + +#. On the **Unlabeled** tab page, the objects to be labeled are listed in the left pane. In the list, click the text object to be labeled, select a part of text displayed under **Label Set** for labeling, and select a label in the **Label Set** area in the right pane. Multiple labels can be added to a labeling object. + + You can repeat this operation to select objects and add labels to the objects. + +#. Click **Save Current Page** in the lower part of the page to complete the labeling. + +Adding Labels +------------- + +- Adding labels on the **Unlabeled** tab page: Click the plus sign (+) next to **Label Set**. On the **Add Label** page that is displayed, add a label name, select a label color, and click **OK**. + + .. figure:: /_static/images/en-us_image_0000001156921015.png + :alt: **Figure 1** Adding a named entity label (1) + + + **Figure 1** Adding a named entity label (1) + +- Adding labels on the **Labeled** tab page: Click the plus sign (+) next to **All Labels**. On the **Add Label** page that is displayed, add a label name, select a label color, and click **OK**. + + .. figure:: /_static/images/en-us_image_0000001156921017.png + :alt: **Figure 2** Adding a named entity label (2) + + + **Figure 2** Adding a named entity label (2) + +Viewing the Labeled Text +------------------------ + +On the dataset details page, click the **Labeled** tab to view the list of the labeled text. You can also view all labels supported by the dataset in the **All Labels** area on the right. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +On the dataset details page, click the **Labeled** tab, and modify the text information in the label information area on the right. + +- **Modifying based on texts** + + On the dataset details page, click the **Labeled** tab, and select the text to be modified from the text list. + + Manual deletion: In the text list, click the text. When the text background turns blue, the text is selected. On the right of the page, click |image1| above a text label to delete the label. + +- **Modifying based on labels** + + On the dataset details page, click the **Labeled** tab. The information about all labels is displayed on the right. + + - Batch modification: In the **All Labels** area, click the editing icon in the **Operation** column, add a label name in the text box, select a label color, and click **OK**. + - Batch deletion: In the **All Labels** area, click the deletion icon in the **Operation** column to delete the label. In the dialog box that is displayed, select **Delete label** or **Delete label and objects with only the label**, and click **OK**. 
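+
+Example: Checking Label Names
+----------------------------------
+
+The following is a minimal, hypothetical sketch of a client-side check for the label name rule stated at the top of this page (a maximum of 32 characters, consisting of Chinese characters, letters, digits, hyphens, and underscores). The helper is not part of ModelArts, and the character range used for Chinese characters is an approximation.
+
+.. code-block:: python
+
+   import re
+
+   # Up to 32 characters drawn from Chinese characters (approximated by the
+   # CJK Unified Ideographs range), letters, digits, hyphens, and underscores.
+   LABEL_NAME_RE = re.compile(r"^[\u4e00-\u9fffA-Za-z0-9_-]{1,32}$")
+
+   def is_valid_label_name(name: str) -> bool:
+       """Return True if the label name satisfies the documented constraints."""
+       return bool(LABEL_NAME_RE.match(name))
+
+   print(is_valid_label_name("location"))        # True
+   print(is_valid_label_name("invalid label!"))  # False: space and "!" not allowed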
+ +Adding Files +------------ + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add text files on ModelArts for data labeling. + +#. On the dataset details page, click the **Unlabeled** tab. Then click **Add File**. + +#. In the displayed **Add File** dialog box, set the parameters as required and then select the file to be uploaded. + + Select one or more files to be uploaded in the local environment. Only **.txt** and **.csv** files are supported. The total size of files uploaded at a time cannot exceed 8 MB. + +#. In the **Add File** dialog box, click **Upload**. The files you add will be automatically displayed on the **Unlabeled** tab page. + +Deleting a File +--------------- + +You can quickly delete the files you want to discard. + +- On the **Unlabeled** tab page, select the text to be deleted, and click **Delete** in the upper left corner to delete the text. +- On the **Labeled** tab page, select the text to be deleted and click **Delete**. Alternatively, you can tick **Select Images on Current Page** to select all text objects on the current page and click **Delete** in the upper left corner. + +The background of the selected text is blue. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110761148.png + diff --git a/umn/source/data_management/labeling_data/object_detection.rst b/umn/source/data_management/labeling_data/object_detection.rst new file mode 100644 index 0000000..4bc31a9 --- /dev/null +++ b/umn/source/data_management/labeling_data/object_detection.rst @@ -0,0 +1,246 @@ +Object Detection +================ + +Model training uses a large number of labeled images. Therefore, before the model training, add labels to the images that are not labeled. You can add labels to images by manual labeling or auto labeling. In addition, you can modify the labels of images, or remove their labels and label the images again. + +Before labeling an image in object detection scenarios, pay attention to the following: + +- All target objects in the image must be labeled. +- Target objects are clear without any blocking and contained within bounding boxes. +- Only the entire object must be contained within a bounding box. The edge of the bounding box cannot intersect the edge outline of the target object. Additionally, there must not be a gap between the edge and the target object to prevent the background from interfering with the model training. + +Labeling the Dataset +-------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, click the dataset to be labeled based on the labeling type. The **Dashboard** tab page of the dataset is displayed. + + By default, the **Dashboard** tab page of the current dataset version is displayed. To label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing Data Sources +-------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. 
+ +- For an image classification dataset, the .txt file with the same name in the same directory as the data source is used as the label of the target image. +- For an object detection dataset or image segmentation dataset, the .xml file with the same name in the same directory is used as the label of the target image. + +To quickly obtain the latest data in the OBS bucket, on the **All** or **Unlabeled** tab page of the dataset details page, click **Synchronize Data Source**. + +Filtering Data +-------------- + +On the **Dashboard** tab page of the dataset, the summary of the dataset is displayed by default. In the upper left corner of the page, click **Label**. The dataset details page is displayed, showing all data in the dataset by default. On the **All**, **Unlabeled**, or **Labeled** tab page, you can add filter criteria in the filter criteria area to quickly filter the data you want to view. + +The following filter criteria are supported. You can set one or more filter criteria. + +- **Label**: Select **All** or one or more labels you specified. +- **Sample Creation Time**: Select **Within 1 month**, **Within 1 day**, or **Custom** to customize a time range. +- **File Name** or **Path**: Filter files by file name or file storage path. +- **Labeled By**: Select the name of the user who performs the labeling operation. + +Labeling Images (Manually) +-------------------------- + +The dataset details page provides the **Labeled** and **Unlabeled** tabs. The **All** tab page is displayed by default. + +#. On the **Unlabeled** tab page, click an image. The image labeling page is displayed. For details about how to use common buttons on the **Labeled** tab page, see `Table 2 <#modelarts230012enustopic0170889732table194471512463>`__. + +#. In the left tool bar, select a proper labeling shape. The default labeling shape is a rectangle. In this example, the rectangle is used for labeling. + + .. note:: + + On the left of the page, multiple tools are provided for you to label images. However, you can use only one tool at a time. + + + +.. _modelarts230012enustopic0170889732table165201739119: + + .. table:: **Table 1** Supported bounding box + + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Icon | Description | + +===========+=================================================================================================================================================================================================================================================================================================+ + | |image7| | Rectangle. Click the edge of the upper left corner of the object to be labeled. A rectangle will be displayed. Drag the rectangle to cover the object and click to label the object. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image8| | Polygon. In the area where the object to be labeled is located, click to label a point, move the mouse and click multiple points along the edge of the object, and then click the first point again. 
All the points form a polygon. Therefore, the object to be labeled is in the bounding box. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image9| | Circle. Click the center point of an object, and move the mouse to draw a circle to cover the object and click to label the object. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image10| | Straight line. Click to specify the start and end points of an object, and move the mouse to draw a straight line to cover the object and click to label the object. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image11| | Dotted line. Click to specify the start and end points of an object, and move the mouse to draw a dotted line to cover the object and click to label the object. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | |image12| | Point. Click the object in an image to label a point. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. In the **Add Label** text box, enter a new label name, select the label color, and click **Add**. Alternatively, select an existing label from the drop-down list. + + Label all objects in an image. Multiple labels can be added to an image. After labeling an image, you can click the image list below the image to quickly select other images that are not labeled and label them on the labeling page. + +#. Click **Back to Data Labeling Preview** in the upper left part of the page to view the labeling information. In the dialog box that is displayed, click **OK** to save the labeling settings. + + The selected image is automatically moved to the **Labeled** tab page. On the **Unlabeled** and **All** tab pages, the labeling information is updated along with the labeling process, including the added label names and the number of images for each label. + + + +.. _modelarts230012enustopic0170889732table194471512463: + +.. 
table:: **Table 2** Common icons on the labeling page + + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | Icon | Description | + +===========+=========================================================================================================================================+ + | |image22| | Cancel the previous operation. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image23| | Redo the previous operation. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image24| | Zoom in an image. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image25| | Zoom out an image. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image26| | Delete all bounding boxes on the current image. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image27| | Display or hide a bounding box. You can perform this operation only on a labeled image. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image28| | Drag a bounding box to another position or drag the edge of the bounding box to resize it. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image29| | Reset. After dragging a bounding box, you can click this button to quickly restore the bounding box to its original shape and position. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + | |image30| | Display the labeled image in full screen. | + +-----------+-----------------------------------------------------------------------------------------------------------------------------------------+ + +Viewing Labeled Images +---------------------- + +On the dataset details page, click the **Labeled** tab to view the list of the labeled images. You can click an image to view the label information about the image in the **All Labels** area on the right. + +Modifying Labeling Information +------------------------------ + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +- **Modifying based on images** + + On the dataset details page, click the **Labeled** tab, click the image to be modified. The labeling page is displayed. Modify the image information in the label information area on the right. + + - Modifying a label: In the **Labeling** area, click the edit icon, enter the correct label name in the text box, and click the check mark to complete the modification. Alternatively, click a label. In the image labeling area, adjust the position and size of the bounding box. After the adjustment is complete, click another label to save the modification. + + - Deleting a label: In the **Labeling** area, click the deletion icon to delete a label from the image. 
+ + After deleting the label, click **Back to Data Labeling Preview** in the upper left corner of the page to exit the labeling page. In the dialog box that is displayed, save the modification. After all labels of an image are deleted, the image is displayed on the **Unlabeled** tab page. + + .. figure:: /_static/images/en-us_image_0000001157080933.png + :alt: **Figure 1** Editing an object detection label + + + **Figure 1** Editing an object detection label + +- **Modifying based on labels** + + On the dataset details page, click the **Labeled** tab. The information about all labels is displayed on the right. + + - Modifying a label: Click the edit icon in the **Operation** column. In the dialog box that is displayed, enter the new label name, select the new label color, and click **OK**. After the modification, the images that have been added with the label use the new label name. + - Deleting a label: Click the deletion icon in the **Operation** column to delete a label. + + .. figure:: /_static/images/en-us_image_0000001157080935.png + :alt: **Figure 2** All labels for object detection + + + **Figure 2** All labels for object detection + +Adding Images +------------- + +In addition to the data automatically synchronized from **Input Dataset Path**, you can directly add images on ModelArts for labeling. + +#. On the dataset details page, click the **All** or **Unlabeled** tab. Then, click **Add**. + +#. On the **Add** page that is displayed, click **Add Image**. + + Select one or more images to be uploaded in the local environment. Images in JPG, JPEG, PNG, or BMP formats are supported. The size of a single image cannot exceed 5 MB, and the total size of all images uploaded at a time cannot exceed 8 MB. + + After the images are selected, their thumbnails and total size are displayed on the **Add** page. + +#. On the **Add** page, click **OK**. + + The images you have added will be automatically displayed in the image list on the **Unlabeled** tab page. In addition, the images are automatically saved to the OBS directory specified by **Input Dataset Path**. + +Deleting Images +--------------- + +You can quickly delete the images you want to discard. + +On the **All**, **Unlabeled**, or **Labeled** tab page, select the images to be deleted or click **Select Images on Current Page**, and click **Delete** in the upper left corner to delete them. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the images. + +If a tick is displayed in the upper left corner of an image, the image is selected. If no image is selected on the page, the **Delete** button is unavailable. + +.. note:: + + If you select **Delete source files**, images stored in the OBS directory will be deleted accordingly. This operation may affect other dataset versions or datasets using those files, for example, leading to an error in page display, training, or inference. Deleted data cannot be recovered. Exercise caution when performing this operation. + + + +.. |image1| image:: /_static/images/en-us_image_0000001156920971.png + +.. |image2| image:: /_static/images/en-us_image_0000001156920969.png + +.. |image3| image:: /_static/images/en-us_image_0000001157080923.png + +.. |image4| image:: /_static/images/en-us_image_0000001110761098.png + +.. |image5| image:: /_static/images/en-us_image_0000001110920992.png + +.. |image6| image:: /_static/images/en-us_image_0000001110920994.png + +.. 
|image7| image:: /_static/images/en-us_image_0000001156920971.png + +.. |image8| image:: /_static/images/en-us_image_0000001156920969.png + +.. |image9| image:: /_static/images/en-us_image_0000001157080923.png + +.. |image10| image:: /_static/images/en-us_image_0000001110761098.png + +.. |image11| image:: /_static/images/en-us_image_0000001110920992.png + +.. |image12| image:: /_static/images/en-us_image_0000001110920994.png + +.. |image13| image:: /_static/images/en-us_image_0000001110920996.png + +.. |image14| image:: /_static/images/en-us_image_0000001110920984.png + +.. |image15| image:: /_static/images/en-us_image_0000001110761082.png + +.. |image16| image:: /_static/images/en-us_image_0000001110920982.png + +.. |image17| image:: /_static/images/en-us_image_0000001156920959.png + +.. |image18| image:: /_static/images/en-us_image_0000001110921000.png + +.. |image19| image:: /_static/images/en-us_image_0000001110761080.png + +.. |image20| image:: /_static/images/en-us_image_0000001110921004.png + +.. |image21| image:: /_static/images/en-us_image_0000001110920978.png + +.. |image22| image:: /_static/images/en-us_image_0000001110920996.png + +.. |image23| image:: /_static/images/en-us_image_0000001110920984.png + +.. |image24| image:: /_static/images/en-us_image_0000001110761082.png + +.. |image25| image:: /_static/images/en-us_image_0000001110920982.png + +.. |image26| image:: /_static/images/en-us_image_0000001156920959.png + +.. |image27| image:: /_static/images/en-us_image_0000001110921000.png + +.. |image28| image:: /_static/images/en-us_image_0000001110761080.png + +.. |image29| image:: /_static/images/en-us_image_0000001110921004.png + +.. |image30| image:: /_static/images/en-us_image_0000001110920978.png + diff --git a/umn/source/data_management/labeling_data/sound_classification.rst b/umn/source/data_management/labeling_data/sound_classification.rst new file mode 100644 index 0000000..dcb828a --- /dev/null +++ b/umn/source/data_management/labeling_data/sound_classification.rst @@ -0,0 +1,111 @@ +Sound Classification +==================== + +Model training requires a large amount of labeled data. Therefore, before the model training, label the unlabeled audio files. ModelArts enables you to label audio files in batches by one click. In addition, you can modify the labels of audio files, or remove their labels and label the audio files again. + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing the Data Source +----------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. 
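+
+Files uploaded to the OBS bucket under **Input Dataset Path** become part of the dataset only after synchronization, so it can save time to confirm locally that a batch of audio files meets the limits listed in **Adding Audio Files** below (WAV format only, at most 4 MB per file, and at most 8 MB per upload) before uploading it. The following sketch is a minimal, hypothetical pre-check that uses only the Python standard library; the local directory name is an assumption, and the script is not part of ModelArts.
+
+.. code-block::
+
+   import os
+   import wave
+
+   MAX_FILE_BYTES = 4 * 1024 * 1024     # per-file limit described in "Adding Audio Files"
+   MAX_BATCH_BYTES = 8 * 1024 * 1024    # per-upload limit described in "Adding Audio Files"
+
+   def precheck_audio_batch(local_dir):
+       """Report files that would be rejected before they are uploaded to OBS."""
+       total_size = 0
+       for name in sorted(os.listdir(local_dir)):
+           path = os.path.join(local_dir, name)
+           if not os.path.isfile(path):
+               continue
+           size = os.path.getsize(path)
+           total_size += size
+           if not name.lower().endswith(".wav"):
+               print(f"{name}: only WAV files are supported")
+               continue
+           if size > MAX_FILE_BYTES:
+               print(f"{name}: {size} bytes exceeds the 4 MB per-file limit")
+           # wave.open raises wave.Error if the file is not a valid WAV file
+           with wave.open(path, "rb") as w:
+               print(f"{name}: {w.getframerate()} Hz, {w.getnframes() / w.getframerate():.1f} s")
+       if total_size > MAX_BATCH_BYTES:
+           print("Total size exceeds the 8 MB per-upload limit; split the batch.")
+
+   precheck_audio_batch("./audio_to_upload")    # hypothetical local directory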
+ +To quickly obtain the latest data in the OBS bucket, click **Synchronize Data Source** on the **Unlabeled** tab page of the dataset details page to add the data uploaded using OBS to the dataset. + +Labeling Audio Files +-------------------- + +The dataset details page displays the labeled and unlabeled audio files. The **Unlabeled** tab page is displayed by default. Click |image1| on the left of the audio to preview the audio. + +#. On the **Unlabeled** tab page, select the audio files to be labeled. + + - Manual selection: In the audio list, click the target audio. If the blue check box is displayed in the upper right corner, the audio is selected. You can select multiple audio files of the same type and label them together. + - Batch selection: If all audio files of the current page belong to one type, you can click **Select Images on Current Page** in the upper right corner of the list to select all the audio files on the page. + +#. Add labels. + + a. In the right pane, set a label name in the **Label** text box. + + Method 1 (the required label already exists): In the right pane, select a shortcut from the **Shortcut** drop-down list, select an existing label name from the **Label** text box, and click **OK**. + + Method 2 (adding a label): In the right pane, select a shortcut from the **Shortcut** drop-down list, and enter a new label name in the **Label** text box. + + b. The selected audio files are automatically moved to the **Labeled** tab page. On the **Unlabeled** tab page, the labeling information is updated along with the labeling process, including the added label names and the number of audio files corresponding to each label. + + .. note:: + + **Shortcut key description**: After specifying a shortcut key for a label, you can select an audio file and press the shortcut key to add a label for the audio file. Example: Specify **1** as the shortcut key for the **aa** label. Select one or more files and press **1** during data labeling. A message is displayed, asking you whether to label the files with **aa**. Click **OK**. + + Each label has a shortcut key. A shortcut key cannot be specified for different labels. Shortcut keys can greatly improve the labeling efficiency. + +Viewing the Labeled Audio Files +------------------------------- + +On the dataset details page, click the **Labeled** tab to view the list of the labeled audio files. Click an audio file. You can view the label information about the audio file in the **File Labels** area on the right. + +Modifying Labels +---------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +- **Modifying based on audio** + + On the data labeling page, click the **Labeled** tab. Select one or more audio files to be modified from the audio list. Modify the label in the label details area on the right. + + - Modifying a label: In the **File Labels** area, click the edit icon in the **Operation** column, enter the correct label name in the text box, and click the check mark to complete the modification. + - Deleting a label: In the **File Labels** area, click the delete icon in the **Operation** column to delete the label. + +- **Modifying based on labels** + + On the dataset details page, click the **Labeled** tab. The information about all labels is displayed on the right. + + .. 
figure:: /_static/images/en-us_image_0000001110761044.png + :alt: **Figure 1** Information about all labels + + + **Figure 1** Information about all labels + + - Modifying a label: Click the editing icon in the **Operation** column. In the dialog box that is displayed, enter the new label name and click **OK**. After the modification, the new label applies to the audio files that contain the original label. + - Deleting a label: Click the deletion icon in the **Operation** column. In the displayed dialog box, select the object to be deleted as prompted and click **OK**. + +Adding Audio Files +------------------ + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add audio files on ModelArts for data labeling. + +#. On the dataset details page, click the **Unlabeled** tab. Then click **Add Audio** in the upper left corner. + +#. In the **Add Audio** dialog box that is displayed, click **Add Audio**. + + Select the audio files to be uploaded in the local environment. Only WAV audio files are supported. The size of an audio file cannot exceed 4 MB. The total size of audio files uploaded at a time cannot exceed 8 MB. + +#. In the **Add Audio** dialog box, click **OK**. + + The audio files you add will be automatically displayed on the **Unlabeled** tab page. In addition, the audio files are automatically saved to the OBS directory specified by **Input Dataset Path**. + +Deleting Audio Files +-------------------- + +You can quickly delete the audio files you want to discard. + +On the **Unlabeled** or **Labeled** tab page, select the audio files to be deleted one by one or tick **Select Images on Current Page** to select all audio files on the page, and then click **Delete File** in the upper left corner. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the audio files. + +If a tick is displayed in the upper right corner of an audio file, the audio file is selected. If no audio file is selected on the page, the **Delete File** button is unavailable. + +.. note:: + + If you select **Delete source files**, audio files stored in the corresponding OBS directory will be deleted when you delete the selected audio files. Deleting source files may affect other dataset versions or datasets using those files. As a result, the page display, training, or inference is abnormal. Deleted data cannot be recovered. Exercise caution when performing this operation. + + + +.. |image1| image:: /_static/images/en-us_image_0000001157080893.png + diff --git a/umn/source/data_management/labeling_data/speech_labeling.rst b/umn/source/data_management/labeling_data/speech_labeling.rst new file mode 100644 index 0000000..f1ba90a --- /dev/null +++ b/umn/source/data_management/labeling_data/speech_labeling.rst @@ -0,0 +1,82 @@ +Speech Labeling +=============== + +Model training requires a large amount of labeled data. Therefore, before the model training, label the unlabeled audio files. ModelArts enables you to label audio files in batches by one click. In addition, you can modify the labels of audio files, or remove their labels and label the audio files again. + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. 
In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing the Data Source +----------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. + +To quickly obtain the latest data in the OBS bucket, click **Synchronize Data Source** on the **Unlabeled** tab page of the dataset details page to add the data uploaded using OBS to the dataset. + +Labeling Audio Files +-------------------- + +The dataset details page displays the labeled and unlabeled audio files. The **Unlabeled** tab page is displayed by default. + +#. In the audio file list on the **Unlabeled** tab page, click the target audio file. In the area on the right, the audio file is displayed. Click |image1| below the audio file to play the audio. + +#. In **Speech Content**, enter the speech content. + +#. After entering the content, click **OK** to complete the labeling. The audio file is automatically moved to the **Labeled** tab page. + + .. figure:: /_static/images/en-us_image_0000001110920914.png + :alt: **Figure 1** Labeling an audio file + + + **Figure 1** Labeling an audio file + +Viewing the Labeled Audio Files +------------------------------- + +On the dataset details page, click the **Labeled** tab to view the list of the labeled audio files. Click the audio file to view the audio content in the **Speech Content** text box on the right. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +On the data labeling page, click the **Labeled** tab, and select the audio file to be modified from the audio file list. In the label information area on the right, modify the content of the **Speech Content** text box, and click **OK** to complete the modification. + +Adding Audio Files +------------------ + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add audio files on ModelArts for data labeling. + +#. On the dataset details page, click the **Unlabeled** tab. Then click **Add Audio** in the upper left corner. + +#. In the **Add Audio** dialog box that is displayed, click **Add Audio**. + + Select the audio files to be uploaded in the local environment. Only WAV audio files are supported. The size of an audio file cannot exceed 4 MB. The total size of audio files uploaded at a time cannot exceed 8 MB. + +#. In the **Add Audio** dialog box, click **OK**. + + The audio files you add will be automatically displayed on the **Unlabeled** tab page. In addition, the audio files are automatically saved to the OBS directory specified by **Input Dataset Path**. + +Deleting Audio Files +-------------------- + +You can quickly delete the audio files you want to discard. 
+ +On the **Unlabeled** or **Labeled** tab page, select the audio files to be deleted, and then click **Delete File** in the upper left corner. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the audio files. + +.. note:: + + If you select **Delete source files**, audio files stored in the corresponding OBS directory will be deleted when you delete the selected audio files. Deleting source files may affect other dataset versions or datasets using those files. As a result, the page display, training, or inference is abnormal. Deleted data cannot be recovered. Exercise caution when performing this operation. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110761012.png + diff --git a/umn/source/data_management/labeling_data/speech_paragraph_labeling.rst b/umn/source/data_management/labeling_data/speech_paragraph_labeling.rst new file mode 100644 index 0000000..cf0725a --- /dev/null +++ b/umn/source/data_management/labeling_data/speech_paragraph_labeling.rst @@ -0,0 +1,85 @@ +Speech Paragraph Labeling +========================= + +Model training requires a large amount of labeled data. Therefore, before the model training, label the unlabeled audio files. ModelArts enables you to label audio files. In addition, you can modify the labels of audio files, or remove their labels and label the audio files again. + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing the Data Source +----------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. + +To quickly obtain the latest data in the OBS bucket, click **Synchronize Data Source** on the **Unlabeled** tab page of the dataset details page to add the data uploaded using OBS to the dataset. + +Labeling Audio Files +-------------------- + +The dataset details page displays the labeled and unlabeled audio files. The **Unlabeled** tab page is displayed by default. + +#. In the audio file list on the **Unlabeled** tab page, click the target audio file. In the area on the right, the audio file is displayed. Click |image1| below the audio file to play the audio. + +#. Select an audio segment based on the content being played, and enter the audio file label and content in the **Speech Content** text box. + + .. figure:: /_static/images/en-us_image_0000001157080965.png + :alt: **Figure 1** Labeling an audio file + + + **Figure 1** Labeling an audio file + +#. After entering the content, click **OK** to complete the labeling. The audio file is automatically moved to the **Labeled** tab page. 
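+
+A long recording may contain several segments worth labeling, so it can help to check a file's duration and sample rate before selecting segments on the console. The sketch below is a minimal, hypothetical helper that reads this information from a WAV header using only the Python standard library; the file name is an assumption, and the script is not part of ModelArts.
+
+.. code-block::
+
+   import wave
+
+   def wav_summary(path):
+       """Print the sample rate, channel count, and total duration of a WAV file."""
+       with wave.open(path, "rb") as w:
+           rate = w.getframerate()
+           seconds = w.getnframes() / rate
+           print(f"{path}: {rate} Hz, {w.getnchannels()} channel(s), {seconds:.1f} s")
+
+   wav_summary("meeting_recording.wav")    # hypothetical file name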
+ +Viewing the Labeled Audio Files +------------------------------- + +On the dataset details page, click the **Labeled** tab to view the list of the labeled audio files. Click the audio file to view the audio content in the **Speech Content** text box on the right. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +- Modifying a label: On the dataset details page, click the **Labeled** tab, and select the audio file to be modified from the audio file list. In the **Speech Content** area, modify **Label** and **Content**, and click **OK** to complete the modification. +- Deleting a label: Click |image2| in the **Operation** column of the target number to delete the label of the audio segment. Alternatively, you can click the cross (x) icon above the labeled audio file to delete the label. Then click **OK**. + +Adding Audio Files +------------------ + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add audio files on ModelArts for data labeling. + +#. On the dataset details page, click the **Unlabeled** tab. Then click **Add Audio** in the upper left corner. + +#. In the **Add Audio** dialog box that is displayed, click **Add Audio**. + + Select the audio files to be uploaded in the local environment. Only WAV audio files are supported. The size of an audio file cannot exceed 4 MB. The total size of audio files uploaded at a time cannot exceed 8 MB. + +#. In the **Add Audio** dialog box, click **OK**. + + The audio files you add will be automatically displayed on the **Unlabeled** tab page. In addition, the audio files are automatically saved to the OBS directory specified by **Input Dataset Path**. + +Deleting Audio Files +-------------------- + +You can quickly delete the audio files you want to discard. + +On the **Unlabeled** or **Labeled** tab page, select the audio files to be deleted, and then click **Delete File** in the upper left corner. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the audio files. + +.. note:: + + If you select **Delete source files**, audio files stored in the corresponding OBS directory will be deleted when you delete the selected audio files. Deleting source files may affect other dataset versions or datasets using those files. As a result, the page display, training, or inference is abnormal. Deleted data cannot be recovered. Exercise caution when performing this operation. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110761012.png + +.. |image2| image:: /_static/images/en-us_image_0000001156920989.png + diff --git a/umn/source/data_management/labeling_data/text_classification.rst b/umn/source/data_management/labeling_data/text_classification.rst new file mode 100644 index 0000000..bbec22a --- /dev/null +++ b/umn/source/data_management/labeling_data/text_classification.rst @@ -0,0 +1,111 @@ +Text Classification +=================== + +Model training requires a large amount of labeled data. Therefore, before the model training, add labels to the files that are not labeled. In addition, you can modify, delete, and re-label the labeled text. + +Text classification classifies text content based on labels. Before labeling text content, you need to understand the following: + +- Text labeling supports multiple labels. That is, you can add multiple labels to a labeling object. 
+- A label name can contain a maximum of 32 characters, including Chinese characters, letters, digits, hyphens (-), and underscores (_). + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Labeling Content +---------------- + +The dataset details page displays the labeled and unlabeled text files in the dataset. The **Unlabeled** tab page is displayed by default. + +#. On the **Unlabeled** tab page, the objects to be labeled are listed in the left pane. In the list, click the text object to be labeled, and select a label in the **Label Set** area in the right pane. Multiple labels can be added to a labeling object. + + You can repeat this operation to select objects and add labels to the objects. + + .. figure:: /_static/images/en-us_image_0000001110760906.png + :alt: **Figure 1** Labeling for text classification + + + **Figure 1** Labeling for text classification + +#. After all objects are labeled, click **Save Current Page** at the bottom of the page to complete labeling text files on the **Unlabeled** tab page. + +Adding Labels +------------- + +- Adding labels on the **Unlabeled** tab page: Click the plus sign (+) next to **Label Set**. On the **Add Label** page that is displayed, add a label name, select a label color, and click **OK**. + + .. figure:: /_static/images/en-us_image_0000001157080759.png + :alt: **Figure 2** Adding a label (1) + + + **Figure 2** Adding a label (1) + +- Adding labels on the **Labeled** tab page: Click the plus sign (+) next to **All Labels**. On the **Add Label** page that is displayed, add a label name, select a label color, and click **OK**. + + .. figure:: /_static/images/en-us_image_0000001110760912.png + :alt: **Figure 3** Adding a label (2) + + + **Figure 3** Adding a label (2) + +Viewing the Labeled Text +------------------------ + +On the dataset details page, click the **Labeled** tab to view the list of the labeled text. You can also view all labels supported by the dataset in the **All Labels** area on the right. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +- **Modifying based on texts** + + On the dataset details page, click the **Labeled** tab, and select the text to be modified from the text list. + + In the text list, click the text. When the text background turns blue, the text is selected. If a text file has multiple labels, you can click |image1| above a label to delete the label. + +- **Modifying based on labels** + + On the dataset details page, click the **Labeled** tab. The information about all labels is displayed on the right. 
+ + - Batch modification: In the **All Labels** area, click the editing icon in the **Operation** column, modify the label name in the text box, select a label color, and click **OK**. + - Batch deletion: In the **All Labels** area, click the deletion icon in the **Operation** column to delete the label. In the dialog box that is displayed, select **Delete label** or **Delete label and objects with only the label**, and click **OK**. + +Adding Files +------------ + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add text files on ModelArts for data labeling. + +#. On the dataset details page, click the **Unlabeled** tab. Then click **Add File**. + +#. In the displayed **Add File** dialog box, set the parameters as required and then select the file to be uploaded. + + Select one or more files to be uploaded in the local environment. Only **.txt** and **.csv** files are supported. The total size of files uploaded at a time cannot exceed 8 MB. **Text and Label Separator** and **Label Separator** must be different. + + - **Pattern**: Select **Merge text objects and labels** or **Separate text objects and labels**. An example is provided. Determine the mode of the file to be added by referring to the example. + - **Text and Label Separator**: Select **Tab**, **Space**, **Semicolon**, **Comma**, or **Other**. If you select **Other**, enter a separator in the text box on the right. + - **Label Separator**: Select **Tab**, **Space**, **Semicolon**, **Comma**, or **Other**. If you select **Other**, enter a separator in the text box on the right. + +#. In the **Add File** dialog box, click **Upload**. The files you add will be automatically displayed on the **Unlabeled** or **Labeled** tab page. + +Deleting a File +--------------- + +You can quickly delete the files you want to discard. + +- On the **Unlabeled** tab page, select the text to be deleted, and click **Delete** in the upper left corner to delete the text. +- On the **Labeled** tab page, select the text to be deleted and click **Delete**. Alternatively, you can tick **Select Images on Current Page** to select all text objects on the current page and click **Delete** in the upper left corner. + +The background of the selected text is blue. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110760908.png + diff --git a/umn/source/data_management/labeling_data/text_triplet.rst b/umn/source/data_management/labeling_data/text_triplet.rst new file mode 100644 index 0000000..1f97e03 --- /dev/null +++ b/umn/source/data_management/labeling_data/text_triplet.rst @@ -0,0 +1,88 @@ +Text Triplet +============ + +Triplet labeling is suitable for scenarios where structured information, such as subjects, predicates, and objects, needs to be labeled in statements. With this function, not only entities in statements, but also relationships between entities can be labeled. Triplet labeling is often used in natural language processing tasks such as dependency syntax analysis and information extraction. + +Text triplet labeling involves two classes of important labels: **Entity Label** and **Relationship Label**. For the **Relationship Label**, you need to set its **Source entity** and **Target entity**. + +- You can define multiple entity and relationship labels for a text object. +- The **Entity Label** defined during dataset creation cannot be deleted. + +Precautions +----------- + +Before labeling, ensure that the **Entity Label** and **Relationship Label** of a dataset have been defined. 
For the **Relationship Label**, you need to set its **Source entity** and **Target entity**. The **Relationship Label** must be between the defined **Source entity** and **Target entity**. + +For example, if two entities are labeled as **Place**, you cannot add any relationship label between them. If a relationship label cannot be added, a red cross is displayed. + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Labeling Content +---------------- + +The dataset details page displays the labeled and unlabeled text objects in the dataset. The **Unlabeled** tab page is displayed by default. + +#. On the **Unlabeled** tab page, the objects to be labeled are listed in the left pane. In the list, click a text object, select the corresponding text content on the right pane, and select an entity name from the displayed entity list to label the content. + + .. figure:: /_static/images/en-us_image_0000001110760968.png + :alt: **Figure 1** Labeling an entity + + + **Figure 1** Labeling an entity + +#. After labeling multiple entities, click the source entity and target entity in sequence and select a relationship type from the displayed relationship list to label the relationship. + + .. figure:: /_static/images/en-us_image_0000001110920874.png + :alt: **Figure 2** Labeling a relationship + + + **Figure 2** Labeling a relationship + +#. After all objects are labeled, click **Save Current Page** at the bottom of the page. + +.. note:: + + You cannot modify the labels of a dataset in the text triplet type on the labeling page. Instead, click **Edit** to enter the **Modify Dataset** page and modify the **Entity Label** and **Relationship Label**. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +On the dataset details page, click the **Labeled** tab. Select a text object in the left pane and the right pane displays the detailed label information. You can move your cursor to the entity or relationship label, and right-click to delete it. You can also click the source entity and target entity in sequence to add a relationship label. + +You can click **Delete Labels on Current Item** at the bottom of the page to delete all labels in the selected text object. + +Adding a File +------------- + +In addition to automatically synchronizing data from **Input Dataset Path**, you can directly add text files on ModelArts for data labeling. + +#. On the dataset details page, click the **Unlabeled** tab. Then click **Add File**. + +#. In the **Add File** dialog box that is displayed, select the files to be uploaded. + + Select one or more files to be uploaded in the local environment. 
Only **.txt** and **.csv** files are supported. The total size of files uploaded at a time cannot exceed 8 MB. + +#. In the **Add File** dialog box, click **Upload**. The files you add will be automatically displayed in the **Labeling Objects** list on the **Unlabeled** tab page. + +Deleting a File +--------------- + +You can quickly delete the files you want to discard. + +- On the **Unlabeled** tab page, select the text to be deleted, and click **Delete** in the upper left corner to delete the text. +- On the **Labeled** tab page, select the text to be deleted and click **Delete**. Alternatively, you can tick **Select Images on Current Page** to select all text objects on the current page and click **Delete** in the upper left corner. + +The background of the selected text is blue. If no text is selected on the page, the **Delete** button is unavailable. + + diff --git a/umn/source/data_management/labeling_data/video_labeling.rst b/umn/source/data_management/labeling_data/video_labeling.rst new file mode 100644 index 0000000..54d7cb3 --- /dev/null +++ b/umn/source/data_management/labeling_data/video_labeling.rst @@ -0,0 +1,89 @@ +Video Labeling +============== + +Model training requires a large amount of labeled video data. Therefore, before the model training, label the unlabeled video files. ModelArts enables you to label video files. In addition, you can modify the labels of video files, or remove their labels and label the video files again. + +Starting Labeling +----------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, select the dataset to be labeled based on the labeling type, and click the dataset name to go to the **Dashboard** tab page of the dataset. + + By default, the **Dashboard** tab page of the current dataset version is displayed. If you need to label the dataset of another version, click the **Versions** tab and then click **Set to Current Version** in the right pane. For details, see `Managing Dataset Versions <../../data_management/managing_dataset_versions.html>`__. + +#. On the **Dashboard** page of the dataset, click **Label** in the upper right corner. The dataset details page is displayed. By default, all data of the dataset is displayed on the dataset details page. + +Synchronizing Data Sources +-------------------------- + +ModelArts automatically synchronizes data and labeling information from **Input Dataset Path** to the dataset details page. + +To quickly obtain the latest data in the OBS bucket, click **Synchronize Data Source** on the **Unlabeled** tab page of the dataset details page to add the data uploaded using OBS to the dataset. + +Labeling Video Files +-------------------- + +On the dataset details page, both unlabeled and labeled video files in the dataset are displayed. + +#. On the **Unlabeled** tab page, click the target video file in the video list on the left. The labeling page is displayed. + +#. Play the video. When the video is played to the time point to be labeled, click the pause button in the progress bar to pause the video to a specific image. + +#. In the left pane, select a bounding box. By default, a rectangular box is selected. Drag the mouse to select an object in the video image, enter a new label name in the displayed **Add Label** text box, select a label color, and click **Add** to label the object. 
Alternatively, select an existing label from the drop-down list and click **Add** to label the object. Label all objects in the image. Multiple labels can be added to an image. + + The supported bounding boxes are the same as those supported by Object Detection. For details, see `Table 1 <../../data_management/labeling_data/object_detection.html#modelarts230012enustopic0170889732table165201739119>`__ in `Object Detection <../../data_management/labeling_data/object_detection.html>`__. + + .. figure:: /_static/images/en-us_image_0000001110761112.png + :alt: **Figure 1** Labeling video files + + + **Figure 1** Labeling video files + +#. After the previous image is labeled, click the play button on the progress bar to resume the playback. Then, repeat `3 <#modelarts230282enustopic0257844727li993163014399>`__ to complete labeling on the entire video. + + The labeled time points of the current video are displayed on the right of the page. + + .. figure:: /_static/images/en-us_image_0000001156920985.png + :alt: **Figure 2** File labels + + + **Figure 2** File labels + +#. Click **Back to Data Labeling Preview** in the upper left corner of the page. The dataset details page is displayed, and the labeled video file is displayed on the **Labeled** tab page. + +Modifying Labeled Data +---------------------- + +After labeling data, you can delete labeled data on the **Labeled** tab page. + +- Click |image1| in the **Operation** column of the target number to delete the label of the video segment. Alternatively, you can click the cross (x) icon above the labeled video file to delete the label. Then click **OK**. + +On the **Labeled** tab page, click the target video file. In the **All Labels** area on the right of the labeling page, click the triangle icon on the right of the time point to view details. You can modify or delete a label. + +- Modifying a label: Click the edit icon on the right of a label to modify the label name. +- Deleting a label: Click the delete icon on the right of a label to delete the label. If you click the delete icon on the right of the image time, all labels on the image are deleted. + +.. figure:: /_static/images/en-us_image_0000001156920983.png + :alt: **Figure 3** Modifying labeled data + + + **Figure 3** Modifying labeled data + +Deleting a Video File +--------------------- + +You can quickly delete the video files you want to discard. + +On the **All**, **Unlabeled**, or **Labeled** tab page, select the video files to be deleted or click **Select Images on Current Page** to select all video files on the page, and click **Delete** in the upper left corner to delete the video files. In the displayed dialog box, select or deselect **Delete source files** as required. After confirmation, click **OK** to delete the videos. + +If a tick is displayed in the upper left corner of a video file, the video file is selected. If no video file is selected on the page, the **Delete File** button is unavailable. + +.. note:: + + If you select **Delete source files**, video files stored in the corresponding OBS directory will be deleted when you delete the selected video files. Deleting source files may affect other dataset versions or datasets using those files. As a result, the page display, training, or inference is abnormal. Deleted data cannot be recovered. Exercise caution when performing this operation. + + + +.. 
|image1| image:: /_static/images/en-us_image_0000001110921012.png + diff --git a/umn/source/data_management/managing_dataset_versions.rst b/umn/source/data_management/managing_dataset_versions.rst new file mode 100644 index 0000000..c946eba --- /dev/null +++ b/umn/source/data_management/managing_dataset_versions.rst @@ -0,0 +1,39 @@ +Managing Dataset Versions +========================= + +After labeling data, you can publish the dataset to multiple versions for management. For the published versions, you can view the dataset version updates, set the current version, and delete versions. For details about dataset versions, see `About Dataset Versions <../data_management/publishing_a_dataset.html#about-dataset-versions>`__. + +For details about how to publish a new version, see `Publishing a Dataset <../data_management/publishing_a_dataset.html>`__. + +Viewing Dataset Version Updates +------------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, choose **More > Manage Version** in the **Operation** column. The **Manage Version** tab page is displayed. + + You can view basic information about the dataset, and view the versions and publish time on the left. + +Setting to Current Version +-------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. +#. In the dataset list, choose **More > Manage Version** in the **Operation** column. The **Manage Version** tab page is displayed. +#. On the **Manage Version** tab page, select the desired dataset version, and click **Set to Current Version** in the basic information area on the right. After the setting is complete, **Current version** is displayed to the right of the version name. + + .. note:: + + Only the version in **Normal** status can be set to the current version. + +Deleting a Dataset Version +-------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. +#. In the dataset list, choose **More > Manage Version** in the **Operation** column. The **Manage Version** tab page is displayed. +#. Locate the row that contains the target version, and click **Delete** in the **Operation** column. In the dialog box that is displayed, click **OK**. + + .. note:: + + Deleting a dataset version does not remove the original data. Data and its labeling information are still stored in the OBS directory. However, if it is deleted, you cannot manage the dataset versions on the ModelArts management console. Exercise caution when performing this operation. + + diff --git a/umn/source/data_management/modifying_a_dataset.rst b/umn/source/data_management/modifying_a_dataset.rst new file mode 100644 index 0000000..bbb7e27 --- /dev/null +++ b/umn/source/data_management/modifying_a_dataset.rst @@ -0,0 +1,36 @@ +Modifying a Dataset +=================== + +For a created dataset, you can modify its basic information to match service changes. + +Prerequisites +------------- + +You have created a dataset. + +Modifying the Basic Information About a Dataset +----------------------------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. 
In the dataset list, choose **More > Modify** in the **Operation** column. + + Alternatively, you can click the dataset name to go to the **Dashboard** tab page of the dataset, and click **Modify** in the upper right corner. + +#. Modify basic information about the dataset and then click **OK**. Refer to `Table 1 <#modelarts230020enustopic0170886811table151481125214>`__ for details. + +.. _modelarts230020enustopic0170886811table151481125214: + + .. table:: **Table 1** Parameters + + +-------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=============+==========================================================================================================================================================================================================================+ + | Name | Enter the name of the dataset. A dataset name can contain only letters, digits, underscores (_), and hyphens (-). | + +-------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Enter a brief description for the dataset. | + +-------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set | The label set varies depending on the dataset type. For details about how to modify the label set, see the parameters of different dataset types in `Creating a Dataset <../data_management/creating_a_dataset.html>`__. | + +-------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/data_management/publishing_a_dataset.rst b/umn/source/data_management/publishing_a_dataset.rst new file mode 100644 index 0000000..ba431a9 --- /dev/null +++ b/umn/source/data_management/publishing_a_dataset.rst @@ -0,0 +1,89 @@ +Publishing a Dataset +==================== + +ModelArts distinguishes data of the same source according to versions labeled at different time, which facilitates the selection of dataset versions during subsequent model building and development. After labeling the data, you can publish the dataset to generate a new dataset version. + +About Dataset Versions +---------------------- + +- For a newly created dataset (before publishing), there is no dataset version information. The dataset must be published before being used for model development or training. +- The default naming rules of dataset versions are V001 and V002 in ascending order. You can customize the version number during publishing. +- You can set any version to the current directory. Then the details of the version are displayed on the dataset details page. +- You can obtain the dataset in the manifest file format corresponding to each dataset version based on the value of **Storage Path**. The dataset can be used when you import data or filter hard examples. +- The version of a table dataset cannot be changed. + +.. 
_publishing-a-dataset-1: + +Publishing a Dataset +-------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. The **Datasets** page is displayed. + +#. In the dataset list, click **Publish** in the **Operation** column. + + Alternatively, you can click the dataset name to go to the **Dashboard** tab page of the dataset, and click **Publish** in the upper right corner. + +#. In the displayed dialog box, set the parameters and click **OK**. + +.. _modelarts230018enustopic0170886812table856411819131: + + .. table:: **Table 1** Parameters for publishing a dataset + + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==================================================================================================================================================================================================================================================+ + | Version Name | The naming rules of V001 and V002 in ascending order are used by default. A version name can be customized. Only letters, digits, hyphens (-), and underscores (_) are allowed. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Format | Only table datasets support version format setting. Available values are **CSV** and **CarbonData**. | + | | | + | | .. note:: | + | | | + | | If the exported CSV file contains any command starting with =, +, -, or @, ModelArts automatically adds the Tab setting and escapes the double quotation marks (") for security purposes. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Splitting | Only image classification, object detection, text classification, and sound classification datasets support data splitting. | + | | | + | | By default, this function is disabled. After this function is enabled, you need to set the training and validation ratios. | + | | | + | | Enter a value ranging from 0 to 1 for **Training Set Ratio**. After the training set ratio is set, the validation set ratio is determined. The sum of the training set ratio and the validation set ratio is 1. | + | | | + | | The training set ratio is the ratio of sample data used for model training. The validation set ratio is the ratio of the sample data used for model validation. The training and validation ratios affect the performance of training templates. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Description of the current dataset version. 
| + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + After the version is published, you can go to the **Version Manager** tab page to view the detailed information. By default, the system sets the latest version to the current directory. + +Directory Structure of Related Files After the Dataset Is Published +------------------------------------------------------------------- + +Datasets are managed based on OBS directories. After a new version is published, the directory is generated based on the new version in the output dataset path. + +Take an image classification dataset as an example. After the dataset is published, the directory structure of related files generated in OBS is as follows: + +.. code-block:: + + |-- user-specified-output-path + |-- DatasetName-datasetId + |-- annotation + |-- VersionMame1 + |-- VersionMame1.manifest + |-- VersionMame2 + ... + |-- ... + +The following uses object detection as an example. If a manifest file is imported to the dataset, the following provides the directory structure of related files after the dataset is published: + +.. code-block:: + + |-- user-specified-output-path + |-- DatasetName-datasetId + |-- annotation + |-- VersionMame1 + |-- VersionMame1.manifest + |-- annotation + |-- file1.xml + |-- VersionMame2 + ... + |-- ... + + diff --git a/umn/source/data_management/team_labeling/index.rst b/umn/source/data_management/team_labeling/index.rst new file mode 100644 index 0000000..aca1524 --- /dev/null +++ b/umn/source/data_management/team_labeling/index.rst @@ -0,0 +1,11 @@ +============= +Team Labeling +============= + +.. toctree:: + :maxdepth: 1 + + introduction_to_team_labeling + team_management + member_management + managing_team_labeling_tasks diff --git a/umn/source/data_management/team_labeling/introduction_to_team_labeling.rst b/umn/source/data_management/team_labeling/introduction_to_team_labeling.rst new file mode 100644 index 0000000..b47bc5a --- /dev/null +++ b/umn/source/data_management/team_labeling/introduction_to_team_labeling.rst @@ -0,0 +1,48 @@ +Introduction to Team Labeling +============================= + +Generally, a small data labeling task can be completed by an individual. However, team work is required to label a large dataset. ModelArts provides the team labeling function. A labeling team can be formed to manage labeling for the same dataset. + +.. note:: + + The team labeling function supports only datasets for image classification, object detection, text classification, named entity recognition, text triplet, and speech paragraph labeling. + +How to Enable Team Labeling +--------------------------- + +- When creating a dataset, enable **Team Labeling** and select a team or task manager. + + .. figure:: /_static/images/en-us_image_0000001157080899.png + :alt: **Figure 1** Enabling during dataset creation + + + **Figure 1** Enabling during dataset creation + +- If team labeling is not enabled for a dataset that has been created, create a team labeling task to enable team labeling. For details about how to create a team labeling task, see `Creating Team Labeling Tasks <../../data_management/team_labeling/managing_team_labeling_tasks.html#creating-team-labeling-tasks>`__. + + .. 
figure:: /_static/images/en-us_image_0000001156921451.png + :alt: **Figure 2** Creating a team labeling task in a dataset list + + + **Figure 2** Creating a team labeling task in a dataset list + + .. figure:: /_static/images/en-us_image_0000001110761582.png + :alt: **Figure 3** Creating a team labeling task + + + **Figure 3** Creating a team labeling task + + .. figure:: /_static/images/en-us_image_0000001110761054.png + :alt: **Figure 4** Creating a team labeling task on the dataset details page + + + **Figure 4** Creating a team labeling task on the dataset details page + +Operations Related to Team Labeling +----------------------------------- + +- `Team Management <../../data_management/team_labeling/team_management.html>`__ +- `Member Management <../../data_management/team_labeling/member_management.html>`__ +- `Managing Team Labeling Tasks <../../data_management/team_labeling/managing_team_labeling_tasks.html>`__ + + diff --git a/umn/source/data_management/team_labeling/managing_team_labeling_tasks.rst b/umn/source/data_management/team_labeling/managing_team_labeling_tasks.rst new file mode 100644 index 0000000..5892030 --- /dev/null +++ b/umn/source/data_management/team_labeling/managing_team_labeling_tasks.rst @@ -0,0 +1,120 @@ +Managing Team Labeling Tasks +============================ + +For datasets with team labeling enabled, you can create team labeling tasks and assign the labeling tasks to different teams so that team members can complete the labeling tasks together. During data labeling, members can initiate acceptance, continue acceptance, and view acceptance reports. + +Creating Team Labeling Tasks +---------------------------- + +If you enable team labeling when creating a dataset and assign a team to label the dataset, the system creates a labeling task based on the team by default. After the dataset is created, you can view the labeling task on the **Labeling Progress** tab page of the dataset. + +You can also create a team marking task and assign it to different members in the same team or to other labeling teams. + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Data Management** > **Datasets**. A dataset list is displayed. +#. In the dataset list, select a dataset that supports team labeling, and click the dataset name to go to the **Dashboard** tab page of the dataset. +#. Click the **Labeling Progress** tab to view existing labeling tasks of the dataset. Click **Create Team Labeling Task** in the upper right corner to create a task. +#. In the displayed **Create Team Labeling Task** dialog box, set related parameters and click **OK**. + + - **Name**: Enter a task name. + + - **Type**: Select a task type, **Team** or **Task Manager**. + + - **Select Team**: If **Type** is set to **Team**, you need to select a team and members for labeling. The **Select Team** drop-down list lists the labeling teams and members created by the current account. For details about team management, see `Introduction to Team Labeling <../../data_management/team_labeling/introduction_to_team_labeling.html>`__. + + - **Select Task Manager**: If **Type** is set to **Task Manager**, you need to select one **Team Manager** member from all teams as the task manager. + + - **Label Set**: All existing labels and label attributes of the dataset are displayed. You can also select **Automatically synchronize new images to the team labeling task** or **Automatically load the intelligent labeling results to images that need to be labeled** under **Label Set**. 
+ + The process of loading auto labeling results to a team labeling task is as follows: + + - If you set **Type** to **Team**, you are required to create a team labeling task before executing the task. + - If you set **Type** to **Task Manager**, you are required to log in to the data labeling console and assign a labeling task before executing the task. + + After the task is created, you can view the new task on the **Labeling Progress** tab page. + +Labeling (Team Member) +---------------------- + +After a labeling task is created, the team member to which the task is assigned receives a labeling notification email. + +In the email details, click the labeling task link and use your email address and initial password to log in to the labeling platform. After login, change the password. After logging in to the labeling platform, you can view the assigned labeling task and click the task name to go to the labeling page. The labeling method varies depending on the dataset type. For details, see the following: + +- `Image Classification <../../data_management/labeling_data/image_classification.html#labeling-images-(manually)>`__ +- `Object Detection <../../data_management/labeling_data/object_detection.html#labeling-images-(manually)>`__ +- `Text Classification <../../data_management/labeling_data/text_classification.html#labeling-content>`__ +- `Named Entity Recognition <../../data_management/labeling_data/named_entity_recognition.html#labeling-content>`__ +- `Text Triplet <../../data_management/labeling_data/text_triplet.html#labeling-content>`__ + +On the labeling platform, each member can view the images that are not labeled, to be corrected, rejected, to be reviewed, approved, and accepted. Pay attention to the images rejected by the administrator and the images to be corrected. + +If the Reviewer role is assigned for a team labeling task, the labeling result needs to be reviewed. After the labeling result is reviewed, it is submitted to the administrator for acceptance. + +.. figure:: /_static/images/en-us_image_0000001110760934.png + :alt: **Figure 1** Labeling platform + + + **Figure 1** Labeling platform + +Task Acceptance (Administrator) +------------------------------- + +- **Initiating acceptance** + + After team members complete data labeling, the dataset creator can initiate acceptance to check labeling results. The acceptance can be initiated only when a labeling member has labeled data. Otherwise, the acceptance initiation button is unavailable. + + #. On the **Labeling Progress** tab page, click **Initiate Acceptance** to accept tasks. + + #. In the displayed dialog box, set **Sample Policy** to **By percentage** or **By quantity**. Click **OK** to start the acceptance. + + **By percentage**: Sampling is performed based on a percentage for acceptance. + + **By quantity**: Sampling is performed based on quantity for acceptance. + + #. After the acceptance is initiated, an acceptance report is displayed on the console in real time. In the **Acceptance Result** area on the right, select **Pass** or **Reject**. + + If you select **Pass**, set **Rating** to **A**, **B**, **C**, or **D**. Option **A** indicates the highest score. If you select **Reject**, enter your rejection reasons in the text box. + +- **Continuing acceptance** + + You can continue accepting tasks whose acceptance is not completed. For tasks for which an acceptance process is not initiated, the **Continue Acceptance** button is unavailable. 
+ + On the **Labeling Progress** tab page, click **Continue Acceptance** to continue accepting tasks. The **Real-Time Acceptance Report** page is displayed. You can continue to accept the images that are not accepted. + +- **Finishing acceptance** + + In the acceptance completion window, you can view dataset acceptance details, such as the number of sample files, set the following parameters, and perform acceptance. The labeling information is synchronized to the **Labeled** tab page of the dataset only after the acceptance is complete. + + Once the labeled data is accepted, team members cannot modify the labeling information. Only the dataset creator can modify the labeling information. + + + +.. _modelarts230210enustopic0209053802table1372918217370: + + .. table:: **Table 1** Parameters for finishing acceptance + + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=====================================================================================================================================================================================================================+ + | Modifying Labeled Data | - **Not overwrite**: For the same data, do not overwrite the existing data with the labeling result of the current team. | + | | - **Overlays**: For the same data, overwrite the existing data with the labeling result of the current team. Overwritten data cannot be recovered. Exercise caution when performing this operation. | + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Acceptance Scope | - All: all data that has been labeled by the current team, including **Accepted**, **Pending Acceptance**, and **Rejected** data. It refers to all sample files in the dataset. | + | | | + | | - All rejects: rejects all data that has been labeled by the current team. That is, all labeled data is rejected to the labeling personnel. | + | | | + | | - Accepted and pending acceptance: accepts the data that passes the acceptance or is in the Pending Acceptance state in the sample files and rejects the data that fails the acceptance to the labeling personnel. | + | | | + | | - Accepted: accepts the data that has passed the acceptance in the sample files and rejects the data that is in the Pending Acceptance state or fails the acceptance to the labeling personnel. | + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Viewing an Acceptance Report +---------------------------- + +You can view the acceptance report of an ongoing or finished labeling task. On the **Labeling Progress** tab page, click **Acceptance Report**. In the displayed **Acceptance Report** dialog box, view report details. + +Deleting a Labeling Task +------------------------ + +On the **Labeling Progress** tab page, click **Delete** in the row where a labeling task to be deleted. After a task is deleted, the labeling details that are not accepted will be lost. 
Exercise caution when performing this operation. However, the original data in the dataset and the labeled data that has been accepted are still stored in the corresponding OBS bucket. + + diff --git a/umn/source/data_management/team_labeling/member_management.rst b/umn/source/data_management/team_labeling/member_management.rst new file mode 100644 index 0000000..1b5f204 --- /dev/null +++ b/umn/source/data_management/team_labeling/member_management.rst @@ -0,0 +1,61 @@ +Member Management +================= + +There is no member in a new team. You need to add members who will participate in a team labeling task. + +A maximum of 100 members can be added to a team. If there are more than 100 members, add them to different teams for better management. + +Adding a Member +--------------- + +#. In the left navigation pane of the ModelArts management console, choose **Data Management > Labeling Teams**. The **Labeling Teams** page is displayed. + +#. On the **Labeling Teams** page, select a team from the team list on the left and click a team name. The team details are displayed in the right pane. + +#. In the **Team Details** area, click **Add Member**. + +#. In the displayed **Add Member** dialog box, enter an email address, description, and a role for a member and click **OK**. + + An email address uniquely identifies a team member. Different members cannot use the same email address. The email address you enter will be recorded and saved in ModelArts. It is used only for ModelArts team labeling. After a member is deleted, the email address will also be deleted. + + Possible values of **Role** are **Labeler**, **Reviewer**, and **Team Manager**. Only one **Team Manager** can be set. + + .. figure:: /_static/images/en-us_image_0000001156920939.png + :alt: **Figure 1** Adding a member + + + **Figure 1** Adding a member + + .. figure:: /_static/images/en-us_image_0000001157081267.png + :alt: **Figure 2** Adding a member + + + **Figure 2** Adding a member + + Information about the added member is displayed in the **Team Details** area. + +Modifying Member Information +---------------------------- + +You can modify member information if it is changed. + +#. In the **Team Details** area, select the desired member. + +#. In the row containing the desired member, click **Modify** in the **Operation** column. In the displayed dialog box, modify the description or role. + + The email address of a member cannot be changed. To change the email address of a member, delete the member, and set a new email address when adding a member. + + Possible values of **Role** are **Labeler**, **Reviewer**, and **Team Manager**. Only one **Team Manager** can be set. + +Deleting Members +---------------- + +- **Deleting a single member** + + In the **Team Details** area, select the desired member, and click **Delete** in the **Operation** column. In the dialog box that is displayed, click **OK**. + +- **Batch Deletion** + + In the **Team Details** area, select members to be deleted and click **Delete**. In the dialog box that is displayed, click **OK**. + + diff --git a/umn/source/data_management/team_labeling/team_management.rst b/umn/source/data_management/team_labeling/team_management.rst new file mode 100644 index 0000000..5cde3f2 --- /dev/null +++ b/umn/source/data_management/team_labeling/team_management.rst @@ -0,0 +1,30 @@ +Team Management +=============== + +Team labeling is managed in a unit of teams. To enable team labeling for a dataset, a team must be specified. Multiple members can be added to a team. 
+ +Background +---------- + +- An account can have a maximum of 10 teams. +- An account must have at least one team to enable team labeling for datasets. If the account has no team, add a team by referring to `Adding a Team <#adding-a-team>`__. + +Adding a Team +------------- + +#. In the left navigation pane of the ModelArts management console, choose **Data Management > Labeling Teams**. The **Labeling Teams** page is displayed. + +#. On the **Labeling Teams** page, click **Add Team**. + +#. In the displayed **Add Team** dialog box, enter a team name and description and click **OK**. The labeling team is added. + + The new team is displayed on the **Labeling Teams** page. You can view team details in the right pane. There is no member in the new team. Add members to the new team by referring to `Adding a Member <../../data_management/team_labeling/member_management.html#adding-a-member>`__. + +Deleting a Team +--------------- + +You can delete a team that is no longer used. + +On the **Labeling Teams** page, select the target team and click **Delete**. In the dialog box that is displayed, click **OK**. + + diff --git a/umn/source/devenviron_(notebook)/index.rst b/umn/source/devenviron_(notebook)/index.rst new file mode 100644 index 0000000..e410c87 --- /dev/null +++ b/umn/source/devenviron_(notebook)/index.rst @@ -0,0 +1,11 @@ +===================== +DevEnviron (Notebook) +===================== + +.. toctree:: + :maxdepth: 1 + + introduction_to_notebook + managing_notebook_instances/index + using_jupyter_notebook/index + using_jupyterlab/index diff --git a/umn/source/devenviron_(notebook)/introduction_to_notebook.rst b/umn/source/devenviron_(notebook)/introduction_to_notebook.rst new file mode 100644 index 0000000..28f0259 --- /dev/null +++ b/umn/source/devenviron_(notebook)/introduction_to_notebook.rst @@ -0,0 +1,60 @@ +Introduction to Notebook +======================== + +ModelArts integrates the open-source Jupyter Notebook and JupyterLab to provide you with online interactive development and debugging environments. You can use the Notebook on the ModelArts management console to compile and debug code and train models based on the code, without concerning installation and configurations. + +- Jupyter Notebook is an interactive notebook. For details about how to perform operations on Jupyter Notebook, see `Jupyter Notebook Documentation `__. +- JupyterLab is an interactive development environment. It is a next-generation product of Jupyter Notebook. JupyterLab enables you to compile notebooks, operate terminals, edit MarkDown text, open interaction modes, and view CSV files and images. For details about how to perform operations on JupyterLab, see `JupyterLab Documentation `__. + +Supported AI Engines +-------------------- + +Each development environment supports multiple AI engines that run independently. All supported AI engines can be used in the same notebook instance, and these engines can be switched quickly and conveniently. + +.. note:: + + - Each ModelArts notebook instance can use all supported engines. + + + +.. _modelarts230033enustopic0162690357table13949522712: + +.. 
table:: **Table 1** AI engines + + +------------------------------------------+--------------------------------+----------------+ + | Work Environment | Built-in AI Engine and Version | Supported Chip | + +==========================================+================================+================+ + | Multi-Engine 1.0 (Python 3, Recommended) | MXNet-1.2.1 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | PySpark-2.3.2 | CPU | + +------------------------------------------+--------------------------------+----------------+ + | | Pytorch-1.0.0 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | TensorFlow-1.13.1 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | XGBoost-Sklearn | CPU | + +------------------------------------------+--------------------------------+----------------+ + | Multi-Engine 2.0 (Python3) | Pytorch-1.4.0 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | TensorFlow-2.1.0 | CPU/GPU | + +------------------------------------------+--------------------------------+----------------+ + | Ascend-Powered-Engine 1.0 (Python3) | MindSpore-1.1.1 | Ascend 910 | + +------------------------------------------+--------------------------------+----------------+ + | | TensorFlow-1.15.0 | Ascend 910 | + +------------------------------------------+--------------------------------+----------------+ + +Constraints +----------- + +- For security purposes, the root permission is not granted to the notebook instances integrated in ModelArts. You can use the non-privileged user **jovyan** or **ma-user** (using **Multi-Engine**) to perform operations. Therefore, you cannot use **apt-get** to install the OS software. +- Notebook instances support only standalone training under the current AI engine framework. If you need to use distributed training, use ModelArts training jobs and specify multiple nodes in the resource pool. +- ModelArts DevEnviron does not support apt-get. You can use a `custom image <../custom_images/introduction_to_custom_images.html>`__ to train a model. +- Notebook instances do not support GUI-related libraries, such as PyQt. +- Notebook instances created using Ascend specifications cannot be attached to EVS disks. +- Notebook instances cannot be connected to DWS and database services. +- Notebook instances cannot directly read files in OBS. You need to download the files to the local host. To access data in OBS, use Moxing or SDK for interaction. +- DevEnviron does not support TensorBoard. Use the visualization job function under **Training Jobs**. +- After a notebook instance is created, you cannot modify its specifications. For example, you cannot change the CPU specifications to GPU specifications or change the work environment. Therefore, select the specifications required by the service when creating a notebook instance, or save your code and data to OBS in a timely manner during development so that you can quickly upload the code and data to a new notebook instance. +- If the code output is still displayed after you close the page and open it again, use Terminal. 
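
The constraints above note that notebook instances cannot read OBS files directly and that MoXing or the SDK should be used for interaction. The following is a minimal sketch of copying data between OBS and the local **~/work** directory of a notebook instance with MoXing; the bucket name and paths are placeholders, and the exact MoXing file APIs available depend on the engine image you selected.

.. code-block::

   import moxing as mox

   # Copy a single object from OBS to the notebook instance (bucket and paths are placeholders).
   mox.file.copy('obs://my-bucket/data/train.csv', '/home/ma-user/work/train.csv')

   # Copy a whole OBS directory, for example a published dataset version, in parallel.
   mox.file.copy_parallel('obs://my-bucket/output/my-dataset/annotation/V001', '/home/ma-user/work/V001')

   # Copy local results back to OBS after processing.
   mox.file.copy_parallel('/home/ma-user/work/results', 'obs://my-bucket/results')

Once the data has been copied to the local directory, it can be read with standard file operations, the same as any other file in the notebook instance.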
+ + diff --git a/umn/source/devenviron_(notebook)/managing_notebook_instances/creating_a_notebook_instance.rst b/umn/source/devenviron_(notebook)/managing_notebook_instances/creating_a_notebook_instance.rst new file mode 100644 index 0000000..cc297fc --- /dev/null +++ b/umn/source/devenviron_(notebook)/managing_notebook_instances/creating_a_notebook_instance.rst @@ -0,0 +1,95 @@ +Creating a Notebook Instance +============================ + +Before developing a model, create a notebook instance, open it, and perform encoding. + +Background +---------- + +- Only notebook instances in the **Running** state can be started. +- A maximum of 10 notebook instances can be created for an account. By default, the notebook instances created by the current user are displayed. If **Display Only My Instances** is disabled, all notebook instances created by the current account and its IAM users are displayed. +- If OBS storage is used, ensure that the OBS directory you use and ModelArts are in the same region. +- Before creating a notebook instance, learn about the `AI engines supported by ModelArts <../../devenviron_(notebook)/introduction_to_notebook.html#supported-ai-engines>`__ and their versions. + +.. _creating-a-notebook-instance-1: + +Creating a Notebook Instance +---------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **DevEnviron > Notebooks** to switch to the **Notebooks** page. + +#. Click **Create**. On the displayed page, set the required parameters. + + a. Enter the basic information about the notebook instance, including the name, description, and whether to automatically stop the notebook instance. For details about the parameters, see `Table 1 <#modelarts230034enustopic0162690358table1669535791517>`__. + + .. figure:: /_static/images/en-us_image_0000001156920885.png + :alt: **Figure 1** Basic information about a notebook instance + + + **Figure 1** Basic information about a notebook instance + + + +.. _modelarts230034enustopic0162690358table1669535791517: + + .. table:: **Table 1** Parameters of basic information + + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=======================================================================================================================================================================================+ + | Name | Name of a notebook instance, which contains a maximum of 64 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed. This parameter is mandatory. | + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of a notebook instance. | + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Auto Stop | This function is enabled by default. The default value is **1 hour later**, indicating that the notebook instance automatically stops after running for 1 hour and its billing stops. 
| + | | | + | | The options are **1 hour later**, **2 hours later**, **4 hours later**, **6 hours later**, and **Custom**. You can select **Custom** to specify any integer from 1 to 24 hours. | + +-----------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + b. Set notebook parameters, such as the work environment and instance flavor. For details, see `Table 2 <#modelarts230034enustopic0162690358table4606194015227>`__. + +.. _modelarts230034enustopic0162690358table4606194015227: + + .. table:: **Table 2** Notebook instance parameters + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | Work Environment | Only public images, which are the AI frameworks built in ModelArts are supported. | + | | | + | | All supported AI engines can be used in the same notebook instance. Different engines can be switched quickly and conveniently, and run in independent development environments. After the notebook instance is created, go to the **Jupyter** page to create the development environment with the desired AI engine. The AI engine varies depending on the operating environment. For details, see `Supported AI Engines <../../devenviron_(notebook)/introduction_to_notebook.html#supported-ai-engines>`__. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Resource Pool | Select **Public resource pools** or **Dedicated resource pools**. For details about the dedicated resource pools and how to buy them, see `Resource Pools <../..//resource_pools.html>`__. | + | | | + | | Public resource pools are available immediately after being provisioned. Dedicated resource pools are queue-free. When a large number of users use the public resource pool, they may queue for resources. Purchase a dedicated resource pool to improve development efficiency. 
| + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Type | The CPU and GPU types are supported. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Instance Flavor | If you select a public resource pool, available flavors vary depending on the selected type. | + | | | + | | - If you select **CPU** for **Type**, available options include **2 vCPUs \| 8 GiB** and **8 vCPUs \| 32 GiB**. | + | | - If you select **GPU** for **Type**, the available option is **GPU: 1 x v100NV32 CPU: 8 vCPUs \| 64 GiB**. | + | | - If you select **Ascend** for **Type**, available options include **Ascend: 1 x Ascend 910 CPU: 24 vCPUs \| 96 GiB** and **Ascend: 8 x Ascend 910 CPU: 192 vCPUs \| 720 GiB**. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Storage | The **EVS** and **OBS** options are available. | + | | | + | | - Selecting **EVS** | + | | | + | | Set **Disk Space** based on the actual usage. The default value of **Disk Space** is 5 GB. ModelArts provides 5 GB disk space for you to use for free. If the disk space exceeds 5 GB, the additional space is billed by GB according to pricing of **ultra-high I/O** disks. The value of **Disk Space** ranges from 5 to 4096. | + | | | + | | If you select this storage mode, all read and write operations on files on the notebook instances take effect on the data stored in your notebook instances. There is no data loss after you restart notebook instances. | + | | | + | | - Selecting **OBS** | + | | | + | | Click **Select** next to the **Storage Path** text box to set the OBS path for storing notebook instance data. If you want to use existing files or data, upload the files or data to the corresponding OBS path in advance. **Storage Path** must be set to a specific directory in an OBS bucket rather than the root directory of the OBS bucket. | + | | | + | | If you select this storage mode, all read and write operations on files on the notebook instances take effect on the data stored in your selected OBS path. To synchronize data in a file stored in OBS to a notebook instance, select the file and click **Sync OBS**. 
For details, see `Synchronizing Files with OBS <../../devenviron_(notebook)/using_jupyter_notebook/synchronizing_files_with_obs.html>`__. There is no data loss after you restart this notebook instance. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. Click **Next**. + +#. After confirming the parameter configurations, click **Submit**. + + Switch to the notebook instance list. **Status** of the notebook instance being created is **Starting**. If **Status** of the notebook instance changes to **Running**, the notebook instance has been created. + + diff --git a/umn/source/devenviron_(notebook)/managing_notebook_instances/deleting_a_notebook_instance.rst b/umn/source/devenviron_(notebook)/managing_notebook_instances/deleting_a_notebook_instance.rst new file mode 100644 index 0000000..784ae99 --- /dev/null +++ b/umn/source/devenviron_(notebook)/managing_notebook_instances/deleting_a_notebook_instance.rst @@ -0,0 +1,13 @@ +Deleting a Notebook Instance +============================ + +You can delete notebook instances that are no longer used to release resources. + +#. Log in to the ModelArts management console. In the left navigation pane, choose **DevEnviron > Notebooks** to switch to the **Notebooks** page. +#. In the notebook instance list, locate the row where the target notebook instance resides and click **Delete** in the **Operation** column. In the dialog box that is displayed, click **OK**. + + .. note:: + + Deleted notebook instances cannot be recovered. Therefore, exercise caution when performing this operation. However, the files created in notebook instances are still stored in OBS specified during creation of the notebook instances. + + diff --git a/umn/source/devenviron_(notebook)/managing_notebook_instances/index.rst b/umn/source/devenviron_(notebook)/managing_notebook_instances/index.rst new file mode 100644 index 0000000..a2bba1c --- /dev/null +++ b/umn/source/devenviron_(notebook)/managing_notebook_instances/index.rst @@ -0,0 +1,11 @@ +=========================== +Managing Notebook Instances +=========================== + +.. toctree:: + :maxdepth: 1 + + creating_a_notebook_instance + opening_a_notebook_instance + starting_or_stopping_a_notebook_instance + deleting_a_notebook_instance diff --git a/umn/source/devenviron_(notebook)/managing_notebook_instances/opening_a_notebook_instance.rst b/umn/source/devenviron_(notebook)/managing_notebook_instances/opening_a_notebook_instance.rst new file mode 100644 index 0000000..20d3616 --- /dev/null +++ b/umn/source/devenviron_(notebook)/managing_notebook_instances/opening_a_notebook_instance.rst @@ -0,0 +1,25 @@ +Opening a Notebook Instance +=========================== + +You can open a created notebook instance (that is, an instance in the **Running** state) and start coding in the development environment. + +Instance Opening +---------------- + +- Go to the **Jupyter Notebook** page. 
+ + In the notebook instance list, locate the row where the target notebook instance resides and click **Open** in the **Operation** column or click the notebook instance name. + +- Go to the **JupyterLab** page. + + In the notebook instance list, select the notebook instance to be opened and click **Open JupyterLab** in the **Operation** column. + +Code Development +---------------- + +ModelArts provides two environments for code development: Jupyter Notebook and JupyterLab. + +- `Jupyter Notebook <../../devenviron_(notebook)/using_jupyter_notebook/introduction_to_jupyter_notebook.html>`__: a web-based application for interactive computing. It can be applied to full-process computing: development, documentation, running code, and presenting results. +- `JupyterLab <../../devenviron_(notebook)/using_jupyterlab/introduction_to_jupyterlab_and_common_operations.html>`__: an interactive development environment. It is a next-generation product of Jupyter Notebook. JupyterLab enables you to compile notebooks, operate terminals, edit MarkDown text, open interaction modes, and view CSV files and images. + + diff --git a/umn/source/devenviron_(notebook)/managing_notebook_instances/starting_or_stopping_a_notebook_instance.rst b/umn/source/devenviron_(notebook)/managing_notebook_instances/starting_or_stopping_a_notebook_instance.rst new file mode 100644 index 0000000..c42d986 --- /dev/null +++ b/umn/source/devenviron_(notebook)/managing_notebook_instances/starting_or_stopping_a_notebook_instance.rst @@ -0,0 +1,11 @@ +Starting or Stopping a Notebook Instance +======================================== + +You can stop unwanted notebook instances to prevent unnecessary fees. You can also start a notebook instance that is in the **Stopped** state to use it again. + +Log in to the ModelArts management console. In the left navigation pane, choose **DevEnviron > Notebooks** to switch to the **Notebooks** page. Perform the following operations to stop or start a notebook instance: + +- To stop a notebook instance, locate the row where the notebook instance resides and click **Stop** in the **Operation** column. Only notebook instances in the **Running** state can be stopped. +- To start a notebook instance, locate the row where the notebook instance resides and click **Start** in the **Operation** column. Only notebook instances in the **Stopped** state can be started. + + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/common_operations_on_jupyter_notebook.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/common_operations_on_jupyter_notebook.rst new file mode 100644 index 0000000..5573285 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/common_operations_on_jupyter_notebook.rst @@ -0,0 +1,101 @@ +Common Operations on Jupyter Notebook +===================================== + +This section describes common operations on Jupyter Notebook. + +Opening Jupyter Notebook +------------------------ + +In the notebook instance list, locate the row where the target notebook instance resides and click **Open** in the **Operation** column to switch to the **Jupyter Notebook** page. + +Two tab pages are available on the **Jupyter Notebook** page: **Files** and **Running**. + +.. 
figure:: /_static/images/en-us_image_0000001110761034.png + :alt: **Figure 1** Jupyter Notebook + + + **Figure 1** Jupyter Notebook + +Selecting Different AI Engines to Create Files +---------------------------------------------- + +Open a notebook instance and go to the **Jupyter Notebook** page. On the **Files** tab page, click **New** in the upper right corner, select the required AI engine, and create a file for encoding. + +.. figure:: /_static/images/en-us_image_0000001157080885.png + :alt: **Figure 2** Selecting different AI engines + + + **Figure 2** Selecting different AI engines + +Uploading a File +---------------- + +Open a notebook instance and go to the **Jupyter Notebook** page. On the **Files** tab page, click **Upload** in the upper right corner to select a file from the local PC and upload it. + +.. figure:: /_static/images/en-us_image_0000001110920940.png + :alt: **Figure 3** Uploading a file + + + **Figure 3** Uploading a file + +Compiling a File +---------------- + +After a file is created, click the file name to go to the file compilation page. + +.. figure:: /_static/images/en-us_image_0000001110920938.png + :alt: **Figure 4** Compiling a file + + + **Figure 4** Compiling a file + + + +.. _modelarts230120enustopic0188347008table9727162374411: + +.. table:: **Table 1** Introduction to the file compilation page + + +-----------------------+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | No. | Area | Description | + +=======================+==============================+=============================================================================================================================================================================================================================================================================================================================================================================================================================+ + | 1 | File name | You can enter a user-defined file name in this area. After the file name is changed and saved, the new file name is updated to the file list accordingly. | + +-----------------------+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | 2 | Menu bar | The menu bar provides rich functions such as File, Edit, View, Insert, Cell, Kernel, and Help. For details, see `Jupyter Notebook Documentation `__. The following toolbar provides common functions for compiling common Python running files. 
| + +-----------------------+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | 3 | Toolbar | The toolbar lists the common shortcut operations. From left to right, the shortcut operations are as follows: saving a file, adding a new cell, cutting a selected cell, copying a selected cell, pasting a selected cell, moving a selected cell upwards, moving a selected cell downwards, running a selected cell, terminating the kernel, restarting the kernel, and restarting the kernel and running all cells again. | + | | | | + | | | The **Code** drop-down list contains the following options: | + | | | | + | | | - Code: Write Python code. | + | | | - MarkDown: Write MarkDown code, which is usually used for comments. | + | | | - Raw NBConvert: conversion tool. | + | | | - Heading: Quickly add a MarkDown title. | + +-----------------------+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | 4 | AI engine and Python version | Displays the AI engine and Python version corresponding to the current file. For details about all AI engines and Python versions supported by ModelArts, see `Supported AI Engines <../../devenviron_(notebook)/introduction_to_notebook.html#supported-ai-engines>`__. | + +-----------------------+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | 5 | Code cell | Each cell has two modes: command mode and editing mode. | + | | | | + | | | The blue bar on the left indicates the command mode, and the green bar indicates the editing mode (in this mode, the cursor exists in the cell and you can write code). In command mode, you can press **Enter** or click the code box to enter the editing mode. In editing mode, you can press **ESC** or click the left area of the code box to enter the command mode. 
| + | | | | + | | | |image1| | + +-----------------------+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Deleting a File or Folder +------------------------- + +To delete a file or folder from Jupyter Notebook, select the file or folder in the **Files** list and click the **Delete** button. + +.. note:: + + After the file or folder is deleted, click the **Refresh** button in the upper right corner to refresh the Jupyter page and clear the cache. + +.. figure:: /_static/images/en-us_image_0000001110761038.png + :alt: **Figure 5** Jupyter page + + + **Figure 5** Jupyter page + + + +.. |image1| image:: /_static/images/en-us_image_0000001110920936.png + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/index.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/index.rst new file mode 100644 index 0000000..13cbdda --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/index.rst @@ -0,0 +1,10 @@ +============================================ +Configuring the Jupyter Notebook Environment +============================================ + +.. toctree:: + :maxdepth: 1 + + using_the_notebook_terminal_function + switching_the_cuda_version_on_the_terminal_page_of_a_gpu-based_notebook_instance + installing_external_libraries_and_kernels_in_notebook_instances diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/installing_external_libraries_and_kernels_in_notebook_instances.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/installing_external_libraries_and_kernels_in_notebook_instances.rst new file mode 100644 index 0000000..d0ce432 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/installing_external_libraries_and_kernels_in_notebook_instances.rst @@ -0,0 +1,50 @@ +Installing External Libraries and Kernels in Notebook Instances +=============================================================== + +Multiple environments have been installed in ModelArts notebook instances, including TensorFlow. You can use **pip install** to install external libraries from a Jupyter notebook or terminal to facilitate use. + +Installing an External Library from a Jupyter Notebook +------------------------------------------------------ + +Assume that you want to install Shapely from a notebook instance. Follow the following instructions: + +#. In the left navigation pane of the ModelArts management console, choose **DevEnviron > Notebooks**. Open a notebook instance in the displayed notebook instance list. + +#. In the **Jupyter Notebook** page that is displayed, click **New** and select the required AI engine from the drop-down list. + +#. 
In the displayed window, type the following command in the code input bar to install Shapely: + + **pip install shapely** + +Installing an External Library from a Terminal +---------------------------------------------- + +Assume that you want to install Shapely from the terminal of a notebook instance by using pip. Perform the following steps: + +#. In the left navigation pane of the ModelArts management console, choose **DevEnviron > Notebooks**. Open a notebook instance in the displayed notebook instance list. + +#. In the displayed Jupyter dashboard, click **New** and choose **Terminal** from the shortcut menu. + +#. For a notebook instance that does not use the AI engine of the **Multi-Engine** type, enter the following command in the terminal to install Shapely: + + **/opt/conda/envs/python27_tf/bin/pip install Shapely** + +#. A **Multi-Engine** notebook instance can use multiple engines. Refer to the **README** file in the **/home/ma-user/** path to switch to the target engine environment, and then install Shapely. For example, to install Shapely in the TensorFlow-1.13.1 environment, run the following commands: + + .. code-block:: + + source /home/ma-user/anaconda3/bin/activate TensorFlow-1.13.1 + pip install shapely + +.. note:: + + When you create a ModelArts training job, a new independent running environment is started, which is not associated with the packages installed in the notebook environment. Therefore, add **os.system('pip install xxx')** to the startup code before importing the package. + + For example, if you need to use the Shapely dependency in the training job, add the following code to the startup code: + + .. code-block:: + + os.system('pip install Shapely') + import shapely + + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/switching_the_cuda_version_on_the_terminal_page_of_a_gpu-based_notebook_instance.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/switching_the_cuda_version_on_the_terminal_page_of_a_gpu-based_notebook_instance.rst new file mode 100644 index 0000000..df9d8a0 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/switching_the_cuda_version_on_the_terminal_page_of_a_gpu-based_notebook_instance.rst @@ -0,0 +1,30 @@ +Switching the CUDA Version on the Terminal Page of a GPU-based Notebook Instance +================================================================================ + +For a GPU-based notebook instance, you can switch between different CUDA versions on the **Terminal** page of Jupyter. + +CPU-based notebook instances do not use CUDA. Therefore, the following operations apply only to GPU-based notebook instances. + +#. Create and open a notebook instance or open an existing notebook instance in the notebook instance list. + +#. On the **Files** tab page of the Jupyter page, click **New** and select **Terminal**. The **Terminal** page is displayed. + +#. Run the following command to go to **/usr/local**: + + .. code-block:: + + cd /usr/local + +#. For example, to switch to CUDA 10, run the following command: + + .. code-block:: + + sudo ln -snf /usr/local/cuda-10.0 cuda + + ..
figure:: /_static/images/en-us_image_0000001156920929.png + :alt: **Figure 1** Example of switching the CUDA version + + + **Figure 1** Example of switching the CUDA version + + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/using_the_notebook_terminal_function.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/using_the_notebook_terminal_function.rst new file mode 100644 index 0000000..f68a0f5 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/configuring_the_jupyter_notebook_environment/using_the_notebook_terminal_function.rst @@ -0,0 +1,39 @@ +Using the Notebook Terminal Function +==================================== + +For developers who are used to coding, the terminal function is very convenient and practical. This section describes how to enable the terminal function in a notebook instance and switch the engine environment in the terminal. + +Enabling the Notebook Terminal Function +--------------------------------------- + +#. In the notebook instance list, click **Open** in the **Operation** column of the target notebook instance to go to the **Jupyter Notebook** page. + +#. On the **Files** tab page of the Jupyter page, click **New** and select **Terminal**. The **Terminal** page is displayed. + + .. figure:: /_static/images/en-us_image_0000001110920980.png + :alt: **Figure 1** Going to the Terminal page + + + **Figure 1** Going to the Terminal page + +Switching Engine Environments on the Terminal +--------------------------------------------- + +You can switch to another AI engine environment in the terminal environment of Jupyter. + +#. Create and open a notebook instance or open an existing notebook instance in the notebook instance list. + +#. On the **Files** tab page of the Jupyter page, click **New** and select **Terminal**. The **Terminal** page is displayed. + +#. + + .. container:: + + + .. figure:: /_static/images/en-us_image_0000001110761076.png + :alt: **Figure 2** Output after command execution + + + **Figure 2** Output after command execution + + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/index.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/index.rst new file mode 100644 index 0000000..9e51f8a --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/index.rst @@ -0,0 +1,12 @@ +====================== +Using Jupyter Notebook +====================== + +.. toctree:: + :maxdepth: 1 + + introduction_to_jupyter_notebook + common_operations_on_jupyter_notebook + configuring_the_jupyter_notebook_environment/index + synchronizing_files_with_obs + using_the_convert_to_python_file_function diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/introduction_to_jupyter_notebook.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/introduction_to_jupyter_notebook.rst new file mode 100644 index 0000000..8a8980e --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/introduction_to_jupyter_notebook.rst @@ -0,0 +1,20 @@ +Introduction to Jupyter Notebook +================================ + +Jupyter Notebook is a web-based application for interactive computing. It can be applied to full-process computing: development, documentation, running code, and presenting results. + +ModelArts integrates the open-source Jupyter Notebook. 
After creating a notebook instance, you can open the instance for development without the need for installation and configuration. + +Notebook Kernel +--------------- + +- A notebook kernel is an independent code execution environment. ModelArts Notebook supports multiple kernel types, such as TensorFlow 1.13.1 and PyTorch 1.0. A code execution environment contains the pre-installed and commissioned AI engines and dependencies. +- When a kernel is selected to open a notebook instance, an IPython process is started at the backend of the notebook instance as the running environment to execute the code and command input on the page. +- Each kernel type contains an independent Conda running environment to ensure that the AI engines are independent of each other. For example, if the Keras library is updated in a kernel of the TensorFlow type, the kernel of the MindSpore type will not be affected. + +Differences Between Notebook Kernels and Common Interactive Python Interpreters +------------------------------------------------------------------------------- + +A notebook kernel is an IPython running environment, which can be considered as an enhanced Python shell. Compared with a Python interpreter, a notebook kernel can execute shell scripts and integrate more visualized tools and magic commands. For details, see `IPython Documentation `__. + + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/synchronizing_files_with_obs.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/synchronizing_files_with_obs.rst new file mode 100644 index 0000000..5d77097 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/synchronizing_files_with_obs.rst @@ -0,0 +1,28 @@ +Synchronizing Files with OBS +============================ + +If you specify **Storage Path** during notebook instance creation, your compiled code will be automatically stored in your specified OBS bucket. If code invocation among different **.ipynb** files is required, you can use the Sync OBS function. + +The Sync OBS function is used to synchronize the objects selected in the list of notebook instance files from the OBS bucket to the current container directory **~/work**. + +Precautions +----------- + +- The maximum size of files to be synchronized at a time is 500 MB, and the maximum number of files to be synchronized at a time is 1,024. +- The total size of objects to be synchronized cannot exceed 5 GB. For example, if 2 GB files exist in the **~/work** container directory, you can use Sync OBS to synchronize a maximum of 3 GB files. +- The Sync OBS function only takes effect on notebook instances for which **Storage** is **OBS**. For notebook instances whose **Storage** is not **OBS**, all files are read and written in the **~/work** container directory. + +Procedure +--------- + +The Sync OBS function can be used in notebook instances. The following describes how to use the function. + +For example, if the **Example1.ipynb** file needs to call **module** in the **Example2.ipynb** file, select both files and click **Sync OBS**. + +.. 
figure:: /_static/images/en-us_image_0000001156920981.png + :alt: **Figure 1** Using the Sync OBS function + + + **Figure 1** Using the Sync OBS function + + diff --git a/umn/source/devenviron_(notebook)/using_jupyter_notebook/using_the_convert_to_python_file_function.rst b/umn/source/devenviron_(notebook)/using_jupyter_notebook/using_the_convert_to_python_file_function.rst new file mode 100644 index 0000000..95b7f96 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyter_notebook/using_the_convert_to_python_file_function.rst @@ -0,0 +1,28 @@ +Using the Convert to Python File Function +========================================= + +After code compiling is finished, you can save the entered code as a **.py** file which can be used for starting training jobs. + +#. Create and open a notebook instance or open an existing notebook instance in the notebook instance list. + +#. On the **Files** tab page, click **New** and choose the required AI engine from the drop-down list to access the code development page. + +#. After code compiling is complete, click the save button in the upper left corner. Then, click **Convert to Python File** to convert the current **ipynb** file into a **Python** file. This function can be used to directly save your entered code as a **.py** file to the working directory. + + The generated **.py** file can be used to start ModelArts training jobs. + + .. figure:: /_static/images/en-us_image_0000001156920943.png + :alt: **Figure 1** Convert to Python File + + + **Figure 1** Convert to Python File + +#. In the dialog box that is displayed, enter the file name as required, and select or deselect **Force overwrite if file already exists**. By default, the item is not selected, indicating that the file will not be overwritten when a file with the same name exists in the directory. Click **Convert**. + + .. figure:: /_static/images/en-us_image_0000001110761072.png + :alt: **Figure 2** Setting and saving the configuration + + + **Figure 2** Setting and saving the configuration + + diff --git a/umn/source/devenviron_(notebook)/using_jupyterlab/index.rst b/umn/source/devenviron_(notebook)/using_jupyterlab/index.rst new file mode 100644 index 0000000..5daf6bf --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyterlab/index.rst @@ -0,0 +1,10 @@ +================ +Using JupyterLab +================ + +.. toctree:: + :maxdepth: 1 + + introduction_to_jupyterlab_and_common_operations + uploading_and_downloading_data/index + using_modelarts_sdks diff --git a/umn/source/devenviron_(notebook)/using_jupyterlab/introduction_to_jupyterlab_and_common_operations.rst b/umn/source/devenviron_(notebook)/using_jupyterlab/introduction_to_jupyterlab_and_common_operations.rst new file mode 100644 index 0000000..ca3f554 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyterlab/introduction_to_jupyterlab_and_common_operations.rst @@ -0,0 +1,210 @@ +Introduction to JupyterLab and Common Operations +================================================ + +JupyterLab is an interactive development environment. It is a next-generation product of Jupyter Notebook. JupyterLab enables you to compile notebooks, operate terminals, edit MarkDown text, open interaction modes, and view CSV files and images. + +JupyterLab will be a mainstream development environment for developers. JupyterLab supports more flexible and powerful project operations, but has the same components as Jupyter Notebook. + +ModelArts supports Jupyter Notebook and JupyterLab. 
You can use different tools to develop code in the same notebook instance. + +Opening JupyterLab +------------------ + +#. Log in to the ModelArts management console. In the left navigation pane, choose **DevEnviron > Notebooks** to switch to the **Notebooks** page. + +#. Select a notebook instance in the **Running** state and click **Open** in the **Operation** column to access the notebook instance. + +#. On the **Jupyter** page, click **Open JupyterLab** in the upper right corner to access the JupyterLab page of the notebook instance. + +#. The **Launcher** page is automatically displayed. You can use all open-source functions. For details, see `JupyterLab Documentation `__. + + .. figure:: /_static/images/en-us_image_0000001110920930.png + :alt: **Figure 1** JupyterLab homepage + + + **Figure 1** JupyterLab homepage + +Creating and Opening a Notebook Instance +---------------------------------------- + +On the JupyterLab homepage, click an applicable AI engine in the **Notebook** area to create a notebook file with the selected framework. + +The AI framework supported by each notebook instance varies according to the working environment. The following figure is only an example. Select an AI framework based on the site requirements. For details about all framework versions and Python versions supported by ModelArts, see `Supported AI Engines <../../devenviron_(notebook)/introduction_to_notebook.html#supported-ai-engines>`__. + +.. figure:: /_static/images/en-us_image_0000001157080871.png + :alt: **Figure 2** Selecting an AI engine and creating a notebook instance + + + **Figure 2** Selecting an AI engine and creating a notebook instance + +The created notebook file is displayed in the navigation pane on the left. + +.. figure:: /_static/images/en-us_image_0000001110920924.png + :alt: **Figure 3** Creating a notebook file + + + **Figure 3** Creating a notebook file + +Creating a Notebook File and Opening the Console +------------------------------------------------ + +A console is essentially a Python terminal, which is similar to the native IDE of Python, displaying the output after a statement is entered. + +On the JupyterLab homepage, click an applicable AI engine in the **Console** area to create a notebook file with the selected framework. + +The AI framework supported by each notebook instance varies according to the working environment. The following figure is only an example. Select an AI framework based on the site requirements. + +.. figure:: /_static/images/en-us_image_0000001156920897.png + :alt: **Figure 4** Selecting an AI engine and creating a console + + + **Figure 4** Selecting an AI engine and creating a console + +After the file is created, the console page is displayed. + +.. figure:: /_static/images/en-us_image_0000001110761020.png + :alt: **Figure 5** Creating a notebook file (console) + + + **Figure 5** Creating a notebook file (console) + +Uploading a File +---------------- + +On the JupyterLab page, you can click **Upload File** in the upper left corner and select a local file to upload. + +The size of the file to be uploaded using this method is limited. If the file size exceeds the limit, use other methods to upload the file. For details, see `Uploading Data to JupyterLab <../../devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/uploading_data_to_jupyterlab.html>`__. + +.. 
figure:: /_static/images/en-us_image_0000001110920918.png + :alt: **Figure 6** Uploading a file + + + **Figure 6** Uploading a file + +Editing a File +-------------- + +JupyterLab allows you to open multiple notebook instances or files (such as HTML, TXT, and Markdown files) in the same window and displays them on different tab pages. + +Using JupyterLab, you can customize the display of multiple files. In the file display area on the right, you can drag a file to adjust its position. Multiple files can be concurrently displayed. + +.. figure:: /_static/images/en-us_image_0000001157080869.png + :alt: **Figure 7** Customized display of multiple files + + + **Figure 7** Customized display of multiple files + +When writing code in a notebook instance, you can create multiple views of a file to synchronously edit the file and view the execution result in real time. + +To open multiple views, open the file and choose **File** > **New View for Notebook**. + +.. figure:: /_static/images/en-us_image_0000001110920916.png + :alt: **Figure 8** Multiple views of a file + + + **Figure 8** Multiple views of a file + +Downloading a File to a Local Computer +-------------------------------------- + +Files created in JupyterLab can be directly downloaded to a local computer. The size of the file to be downloaded using this method is limited. If the file size exceeds the limit, use other methods to download the file. For details, see `Downloading a File from JupyterLab <../../devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/downloading_a_file_from_jupyterlab.html>`__. + +In the JupyterLab file list, right-click the file to be downloaded and choose **Download** from the shortcut menu. The file is downloaded to the directory set for your browser. + +.. figure:: /_static/images/en-us_image_0000001157080879.png + :alt: **Figure 9** Downloading a file + + + **Figure 9** Downloading a file + +Common Icons and Plug-ins of JupyterLab +--------------------------------------- + +.. figure:: /_static/images/en-us_image_0000001110761018.png + :alt: **Figure 10** Common icons and plug-ins of JupyterLab + + + **Figure 10** Common icons and plug-ins of JupyterLab + + + +.. _modelarts230209enustopic0208766071table17325391430: + +.. table:: **Table 1** Icon description + + +----------+----------------------------------------------------------------------------------------------------+ + | Icon | Description | + +==========+====================================================================================================+ + | |image5| | Opens the Launcher page. Then you can quickly create notebook instances, consoles, or other files. | + +----------+----------------------------------------------------------------------------------------------------+ + | |image6| | Creates a folder. | + +----------+----------------------------------------------------------------------------------------------------+ + | |image7| | Uploads a file. For details, see `Uploading a File <#uploading-a-file>`__. | + +----------+----------------------------------------------------------------------------------------------------+ + | |image8| | Updates a folder. | + +----------+----------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230209enustopic0208766071table8147032134415: + +.. 
table:: **Table 2** Common plug-ins in the plug-in area + + +-----------+-------------------------------------------------------------------------------------------------------+ + | Plug-in | Description | + +===========+=======================================================================================================+ + | |image15| | Lists files. You can click here to display the list of all files in the notebook instance. | + +-----------+-------------------------------------------------------------------------------------------------------+ + | |image16| | Lists ModelArts examples. You can click any example in the list to view its code and version mapping. | + +-----------+-------------------------------------------------------------------------------------------------------+ + | |image17| | Displays the terminals and kernels that are running in the current instance. | + +-----------+-------------------------------------------------------------------------------------------------------+ + | |image18| | Quick start command. | + +-----------+-------------------------------------------------------------------------------------------------------+ + | |image19| | Displays the tab page listing the files that are being opened. | + +-----------+-------------------------------------------------------------------------------------------------------+ + | |image20| | Document organization. | + +-----------+-------------------------------------------------------------------------------------------------------+ + + + +.. |image1| image:: /_static/images/en-us_image_0000001110920920.png + +.. |image2| image:: /_static/images/en-us_image_0000001157080875.png + +.. |image3| image:: /_static/images/en-us_image_0000001156920903.png + +.. |image4| image:: /_static/images/en-us_image_0000001156920893.png + +.. |image5| image:: /_static/images/en-us_image_0000001110920920.png + +.. |image6| image:: /_static/images/en-us_image_0000001157080875.png + +.. |image7| image:: /_static/images/en-us_image_0000001156920903.png + +.. |image8| image:: /_static/images/en-us_image_0000001156920893.png + +.. |image9| image:: /_static/images/en-us_image_0000001110920934.png + +.. |image10| image:: /_static/images/en-us_image_0000001110761016.png + +.. |image11| image:: /_static/images/en-us_image_0000001157080873.png + +.. |image12| image:: /_static/images/en-us_image_0000001156920899.png + +.. |image13| image:: /_static/images/en-us_image_0000001156920901.png + +.. |image14| image:: /_static/images/en-us_image_0000001156920887.png + +.. |image15| image:: /_static/images/en-us_image_0000001110920934.png + +.. |image16| image:: /_static/images/en-us_image_0000001110761016.png + +.. |image17| image:: /_static/images/en-us_image_0000001157080873.png + +.. |image18| image:: /_static/images/en-us_image_0000001156920899.png + +.. |image19| image:: /_static/images/en-us_image_0000001156920901.png + +.. 
|image20| image:: /_static/images/en-us_image_0000001156920887.png + diff --git a/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/downloading_a_file_from_jupyterlab.rst b/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/downloading_a_file_from_jupyterlab.rst new file mode 100644 index 0000000..4956821 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/downloading_a_file_from_jupyterlab.rst @@ -0,0 +1,31 @@ +Downloading a File from JupyterLab +================================== + +Only files within 100 MB in JupyterLab can be downloaded to a local PC. You can perform operations in different scenarios based on the storage location selected when creating a notebook instance. + +Notebook Instances with EVS Attached +------------------------------------ + +For notebook instances with EVS attached, you can perform the following operations to download large files to the local PC: + +#. In the notebook instance, create an **ipynb** file. Use MoXing to upload the large files from notebook instances to OBS. The sample code is as follows: + + +-----------------------------------+---------------------------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | import moxing as mox | + | 2 | mox.file.copy('/home/ma-user/work/obs_file.txt', 'obs://bucket_name/obs_file.txt') | + +-----------------------------------+---------------------------------------------------------------------------------------+ + + In the preceding code, **/home/ma-user/work/obs_file.txt** indicates a file storage path in a notebook instance, and **obs://bucket_name/obs_file.txt** indicates a file storage path on OBS. + +#. Use OBS or the ModelArts SDKs to download the files from OBS to the local PC. + +Notebook Instances Using OBS Storage +------------------------------------ + +For notebook instances that use OBS storage, you can use OBS or the ModelArts SDK to download files from OBS to a local PC. + +Use OBS for download. + + diff --git a/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/index.rst b/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/index.rst new file mode 100644 index 0000000..779afb1 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/index.rst @@ -0,0 +1,9 @@ +============================== +Uploading and Downloading Data +============================== + +.. toctree:: + :maxdepth: 1 + + uploading_data_to_jupyterlab + downloading_a_file_from_jupyterlab diff --git a/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/uploading_data_to_jupyterlab.rst b/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/uploading_data_to_jupyterlab.rst new file mode 100644 index 0000000..f76f2d0 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyterlab/uploading_and_downloading_data/uploading_data_to_jupyterlab.rst @@ -0,0 +1,57 @@ +Uploading Data to JupyterLab +============================ + +On the **JupyterLab** page, click **Upload Files** to upload a file. For details, see `Uploading a File <../../../devenviron_(notebook)/using_jupyterlab/introduction_to_jupyterlab_and_common_operations.html#uploading-a-file>`__ in `Introduction to JupyterLab and Common Operations <../../../devenviron_(notebook)/using_jupyterlab/introduction_to_jupyterlab_and_common_operations.html>`__. 
If a message is displayed indicating that the size of the files to be uploaded exceeds the upper limit when uploading files to notebook instances or JupyterLab, you can upload the files to OBS and then download them to notebook instances. + +Step 1: Uploading Files to OBS +------------------------------ + +Use the OBS API to upload large files because OBS Console has restrictions on the file size and quantity. + +Step 2: Downloading Files from OBS to Notebook Instances +-------------------------------------------------------- + +A notebook instance can be mounted to OBS or EVS as the storage location. The operation method varies depending on the instance types. + +- Downloading files to notebook instances with EVS attached + + - Use the following MoXing API to synchronize files from OBS to notebook instances. + + Read an OBS file. For example, if you read the **obs://bucket_name/obs_file.txt** file, the content is returned as strings. + + +-----------------------------------+---------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | file_str = mox.file.read('obs://bucket_name/obs_file.txt') | + +-----------------------------------+---------------------------------------------------------------+ + + You can also open the file object and read data from it. Both methods are equivalent. + + +-----------------------------------+--------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | with mox.file.File('obs://bucket_name/obs_file.txt', 'r') as f: | + | 2 | file_str = f.read() | + +-----------------------------------+--------------------------------------------------------------------+ + + - Use the OBS API in the ModelArts SDK to download data from OBS to notebook instances. + + .. note:: + + If the size of a single file exceeds 5 GB, the file cannot be uploaded in this mode. Use the MoXing API to upload large files. + + Sample code: + + +-----------------------------------+--------------------------------------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | from modelarts.session import Session | + | 2 | session = Session() | + | 3 | session.download_data(bucket_path="/bucket-name/dir1/sdk.txt", path="/home/user/sdk/obs.txt") | + +-----------------------------------+--------------------------------------------------------------------------------------------------+ + +- Downloading files to notebook instances using OBS for data storage + + Upload files to the OBS path specified during notebook instance creation and synchronize the files from OBS to the notebook instances using Sync OBS. + + diff --git a/umn/source/devenviron_(notebook)/using_jupyterlab/using_modelarts_sdks.rst b/umn/source/devenviron_(notebook)/using_jupyterlab/using_modelarts_sdks.rst new file mode 100644 index 0000000..2cb5931 --- /dev/null +++ b/umn/source/devenviron_(notebook)/using_jupyterlab/using_modelarts_sdks.rst @@ -0,0 +1,64 @@ +Using ModelArts SDKs +==================== + +In notebook instances, you can use ModelArts SDKs to manage OBS, training jobs, models, and real-time services. + +For details about how to use ModelArts SDKs, see *ModelArts SDK Reference*. + +Notebooks carry the authentication (AK/SK) and region information about login users. Therefore, SDK session authentication can be completed without entering parameters. 
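+
+As a minimal illustration of this behavior (a sketch only; it assumes the ModelArts SDK is preinstalled in the notebook image, as the examples below also rely on), a session can be created inside a notebook instance without passing any credentials:
+
+.. code-block::
+
+   from modelarts.session import Session
+
+   # In a notebook instance, the AK/SK and region information of the login
+   # user are carried by the environment, so Session() needs no parameters.
+   session = Session()
+
+Outside a notebook instance, credentials have to be supplied explicitly; see the *ModelArts SDK Reference* for the required parameters.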
+ +Example Code +------------ + +- Creating a training job + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | from modelarts.session import Session | + | 2 | from modelarts.estimator import Estimator | + | 3 | session = Session() | + | 4 | estimator = Estimator( | + | 5 | modelarts_session=session, | + | 6 | framework_type='PyTorch', # AI engine name | + | 7 | framework_version='PyTorch-1.0.0-python3.6', # AI engine version | + | 8 | code_dir='/obs-bucket-name/src/', # Training script directory | + | 9 | boot_file='/obs-bucket-name/src/pytorch_sentiment.py', # Training startup script directory | + | 10 | log_url='/obs-bucket-name/log/', # Training log directory | + | 11 | hyperparameters=[ | + | 12 | {"label":"classes", | + | 13 | "value": "10"}, | + | 14 | {"label":"lr", | + | 15 | "value": "0.001"} | + | 16 | ], | + | 17 | output_path='/obs-bucket-name/output/', # Training output directory | + | 18 | train_instance_type='modelarts.vm.gpu.p100', # Training environment specifications | + | 19 | train_instance_count=1, # Number of training nodes | + | 20 | job_description='pytorch-sentiment with ModelArts SDK') # Training job description | + | 21 | job_instance = estimator.fit(inputs='/obs-bucket-name/data/train/', wait=False, job_name='my_training_job') | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------+ + +- Querying a model list + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | from modelarts.session import Session | + | 2 | from modelarts.model import Model | + | 3 | session = Session() | + | 4 | model_list_resp = Model.get_model_list(session, model_status="published", model_name="digit", order="desc") | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------+ + +- Querying service details + + +-----------------------------------+--------------------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | from modelarts.session import Session | + | 2 | from modelarts.model import Predictor | + | 3 | session = Session() | + | 4 | predictor_instance = Predictor(session, service_id="input your service_id") | + | 5 | predictor_info_resp = predictor_instance.get_service_info() | + +-----------------------------------+--------------------------------------------------------------------------------+ + + diff --git a/umn/source/examples_of_custom_scripts/caffe.rst b/umn/source/examples_of_custom_scripts/caffe.rst new file mode 100644 index 0000000..f7a1d24 --- /dev/null +++ b/umn/source/examples_of_custom_scripts/caffe.rst @@ -0,0 +1,413 @@ +Caffe +===== + +Training and Saving a Model +--------------------------- + +**lenet_train_test.prototxt** file + ++-----------------------------------+--------------------------------------------------+ +| :: | :: | +| | | +| 1 | name: "LeNet" | +| 2 | layer { | +| 3 | name: "mnist" | +| 4 | type: "Data" | +| 5 | top: "data" | +| 6 | top: "label" | +| 7 | include { | +| 8 | phase: TRAIN | +| 9 | } | +| 10 | transform_param { | +| 11 | scale: 0.00390625 | +| 12 | } | +| 13 | data_param { | +| 14 | source: 
"examples/mnist/mnist_train_lmdb" | +| 15 | batch_size: 64 | +| 16 | backend: LMDB | +| 17 | } | +| 18 | } | +| 19 | layer { | +| 20 | name: "mnist" | +| 21 | type: "Data" | +| 22 | top: "data" | +| 23 | top: "label" | +| 24 | include { | +| 25 | phase: TEST | +| 26 | } | +| 27 | transform_param { | +| 28 | scale: 0.00390625 | +| 29 | } | +| 30 | data_param { | +| 31 | source: "examples/mnist/mnist_test_lmdb" | +| 32 | batch_size: 100 | +| 33 | backend: LMDB | +| 34 | } | +| 35 | } | +| 36 | layer { | +| 37 | name: "conv1" | +| 38 | type: "Convolution" | +| 39 | bottom: "data" | +| 40 | top: "conv1" | +| 41 | param { | +| 42 | lr_mult: 1 | +| 43 | } | +| 44 | param { | +| 45 | lr_mult: 2 | +| 46 | } | +| 47 | convolution_param { | +| 48 | num_output: 20 | +| 49 | kernel_size: 5 | +| 50 | stride: 1 | +| 51 | weight_filler { | +| 52 | type: "xavier" | +| 53 | } | +| 54 | bias_filler { | +| 55 | type: "constant" | +| 56 | } | +| 57 | } | +| 58 | } | +| 59 | layer { | +| 60 | name: "pool1" | +| 61 | type: "Pooling" | +| 62 | bottom: "conv1" | +| 63 | top: "pool1" | +| 64 | pooling_param { | +| 65 | pool: MAX | +| 66 | kernel_size: 2 | +| 67 | stride: 2 | +| 68 | } | +| 69 | } | +| 70 | layer { | +| 71 | name: "conv2" | +| 72 | type: "Convolution" | +| 73 | bottom: "pool1" | +| 74 | top: "conv2" | +| 75 | param { | +| 76 | lr_mult: 1 | +| 77 | } | +| 78 | param { | +| 79 | lr_mult: 2 | +| 80 | } | +| 81 | convolution_param { | +| 82 | num_output: 50 | +| 83 | kernel_size: 5 | +| 84 | stride: 1 | +| 85 | weight_filler { | +| 86 | type: "xavier" | +| 87 | } | +| 88 | bias_filler { | +| 89 | type: "constant" | +| 90 | } | +| 91 | } | +| 92 | } | +| 93 | layer { | +| 94 | name: "pool2" | +| 95 | type: "Pooling" | +| 96 | bottom: "conv2" | +| 97 | top: "pool2" | +| 98 | pooling_param { | +| 99 | pool: MAX | +| 100 | kernel_size: 2 | +| 101 | stride: 2 | +| 102 | } | +| 103 | } | +| 104 | layer { | +| 105 | name: "ip1" | +| 106 | type: "InnerProduct" | +| 107 | bottom: "pool2" | +| 108 | top: "ip1" | +| 109 | param { | +| 110 | lr_mult: 1 | +| 111 | } | +| 112 | param { | +| 113 | lr_mult: 2 | +| 114 | } | +| 115 | inner_product_param { | +| 116 | num_output: 500 | +| 117 | weight_filler { | +| 118 | type: "xavier" | +| 119 | } | +| 120 | bias_filler { | +| 121 | type: "constant" | +| 122 | } | +| 123 | } | +| 124 | } | +| 125 | layer { | +| 126 | name: "relu1" | +| 127 | type: "ReLU" | +| 128 | bottom: "ip1" | +| 129 | top: "ip1" | +| 130 | } | +| 131 | layer { | +| 132 | name: "ip2" | +| 133 | type: "InnerProduct" | +| 134 | bottom: "ip1" | +| 135 | top: "ip2" | +| 136 | param { | +| 137 | lr_mult: 1 | +| 138 | } | +| 139 | param { | +| 140 | lr_mult: 2 | +| 141 | } | +| 142 | inner_product_param { | +| 143 | num_output: 10 | +| 144 | weight_filler { | +| 145 | type: "xavier" | +| 146 | } | +| 147 | bias_filler { | +| 148 | type: "constant" | +| 149 | } | +| 150 | } | +| 151 | } | +| 152 | layer { | +| 153 | name: "accuracy" | +| 154 | type: "Accuracy" | +| 155 | bottom: "ip2" | +| 156 | bottom: "label" | +| 157 | top: "accuracy" | +| 158 | include { | +| 159 | phase: TEST | +| 160 | } | +| 161 | } | +| 162 | layer { | +| 163 | name: "loss" | +| 164 | type: "SoftmaxWithLoss" | +| 165 | bottom: "ip2" | +| 166 | bottom: "label" | +| 167 | top: "loss" | +| 168 | } | ++-----------------------------------+--------------------------------------------------+ + +**lenet_solver.prototxt** file + 
++-----------------------------------+---------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | # The train/test net protocol buffer definition | +| 2 | net: "examples/mnist/lenet_train_test.prototxt" | +| 3 | # test_iter specifies how many forward passes the test should carry out. | +| 4 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, | +| 5 | # covering the full 10,000 testing images. | +| 6 | test_iter: 100 | +| 7 | # Carry out testing every 500 training iterations. | +| 8 | test_interval: 500 | +| 9 | # The base learning rate, momentum and the weight decay of the network. | +| 10 | base_lr: 0.01 | +| 11 | momentum: 0.9 | +| 12 | weight_decay: 0.0005 | +| 13 | # The learning rate policy | +| 14 | lr_policy: "inv" | +| 15 | gamma: 0.0001 | +| 16 | power: 0.75 | +| 17 | # Display every 100 iterations | +| 18 | display: 100 | +| 19 | # The maximum number of iterations | +| 20 | max_iter: 1000 | +| 21 | # snapshot intermediate results | +| 22 | snapshot: 5000 | +| 23 | snapshot_prefix: "examples/mnist/lenet" | +| 24 | # solver mode: CPU or GPU | +| 25 | solver_mode: CPU | ++-----------------------------------+---------------------------------------------------------------------------------+ + +Train the model. + +.. code-block:: + + ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt + +The **caffemodel** file is generated after model training. Rewrite the **lenet_train_test.prototxt** file to the **lenet_deploy.prototxt** file used for deployment by modifying input and output layers. + ++-----------------------------------+-----------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | name: "LeNet" | +| 2 | layer { | +| 3 | name: "data" | +| 4 | type: "Input" | +| 5 | top: "data" | +| 6 | input_param { shape: { dim: 1 dim: 1 dim: 28 dim: 28 } } | +| 7 | } | +| 8 | layer { | +| 9 | name: "conv1" | +| 10 | type: "Convolution" | +| 11 | bottom: "data" | +| 12 | top: "conv1" | +| 13 | param { | +| 14 | lr_mult: 1 | +| 15 | } | +| 16 | param { | +| 17 | lr_mult: 2 | +| 18 | } | +| 19 | convolution_param { | +| 20 | num_output: 20 | +| 21 | kernel_size: 5 | +| 22 | stride: 1 | +| 23 | weight_filler { | +| 24 | type: "xavier" | +| 25 | } | +| 26 | bias_filler { | +| 27 | type: "constant" | +| 28 | } | +| 29 | } | +| 30 | } | +| 31 | layer { | +| 32 | name: "pool1" | +| 33 | type: "Pooling" | +| 34 | bottom: "conv1" | +| 35 | top: "pool1" | +| 36 | pooling_param { | +| 37 | pool: MAX | +| 38 | kernel_size: 2 | +| 39 | stride: 2 | +| 40 | } | +| 41 | } | +| 42 | layer { | +| 43 | name: "conv2" | +| 44 | type: "Convolution" | +| 45 | bottom: "pool1" | +| 46 | top: "conv2" | +| 47 | param { | +| 48 | lr_mult: 1 | +| 49 | } | +| 50 | param { | +| 51 | lr_mult: 2 | +| 52 | } | +| 53 | convolution_param { | +| 54 | num_output: 50 | +| 55 | kernel_size: 5 | +| 56 | stride: 1 | +| 57 | weight_filler { | +| 58 | type: "xavier" | +| 59 | } | +| 60 | bias_filler { | +| 61 | type: "constant" | +| 62 | } | +| 63 | } | +| 64 | } | +| 65 | layer { | +| 66 | name: "pool2" | +| 67 | type: "Pooling" | +| 68 | bottom: "conv2" | +| 69 | top: "pool2" | +| 70 | pooling_param { | +| 71 | pool: MAX | +| 72 | kernel_size: 2 | +| 73 | stride: 2 | +| 74 | } | +| 75 | } | +| 76 | layer { | +| 77 | name: "ip1" | +| 78 | type: "InnerProduct" | +| 79 | bottom: "pool2" | +| 80 | top: "ip1" | +| 81 | param { | +| 82 | lr_mult: 1 | +| 83 | } | +| 84 | param { | +| 85 | lr_mult: 2 | +| 86 | } | +| 87 
| inner_product_param { | +| 88 | num_output: 500 | +| 89 | weight_filler { | +| 90 | type: "xavier" | +| 91 | } | +| 92 | bias_filler { | +| 93 | type: "constant" | +| 94 | } | +| 95 | } | +| 96 | } | +| 97 | layer { | +| 98 | name: "relu1" | +| 99 | type: "ReLU" | +| 100 | bottom: "ip1" | +| 101 | top: "ip1" | +| 102 | } | +| 103 | layer { | +| 104 | name: "ip2" | +| 105 | type: "InnerProduct" | +| 106 | bottom: "ip1" | +| 107 | top: "ip2" | +| 108 | param { | +| 109 | lr_mult: 1 | +| 110 | } | +| 111 | param { | +| 112 | lr_mult: 2 | +| 113 | } | +| 114 | inner_product_param { | +| 115 | num_output: 10 | +| 116 | weight_filler { | +| 117 | type: "xavier" | +| 118 | } | +| 119 | bias_filler { | +| 120 | type: "constant" | +| 121 | } | +| 122 | } | +| 123 | } | +| 124 | layer { | +| 125 | name: "prob" | +| 126 | type: "Softmax" | +| 127 | bottom: "ip2" | +| 128 | top: "prob" | +| 129 | } | ++-----------------------------------+-----------------------------------------------------------------+ + +Inference Code +-------------- + ++-----------------------------------+-----------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from model_service.caffe_model_service import CaffeBaseService | +| 2 | | +| 3 | import numpy as np | +| 4 | | +| 5 | import os, json | +| 6 | | +| 7 | import caffe | +| 8 | | +| 9 | from PIL import Image | +| 10 | | +| 11 | | +| 12 | class LenetService(CaffeBaseService): | +| 13 | | +| 14 | def __init__(self, model_name, model_path): | +| 15 | # Call the inference method of the parent class. | +| 16 | super(LenetService, self).__init__(model_name, model_path) | +| 17 | | +| 18 | # Configure preprocessing information. | +| 19 | transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape}) | +| 20 | # Transform to NCHW. | +| 21 | transformer.set_transpose('data', (2, 0, 1)) | +| 22 | # Perform normalization. | +| 23 | transformer.set_raw_scale('data', 255.0) | +| 24 | | +| 25 | # If the batch size is set to 1, inference is supported for only one image. | +| 26 | self.net.blobs['data'].reshape(1, 1, 28, 28) | +| 27 | self.transformer = transformer | +| 28 | | +| 29 | # Define the class labels. | +| 30 | self.label = [0,1,2,3,4,5,6,7,8,9] | +| 31 | | +| 32 | | +| 33 | def _preprocess(self, data): | +| 34 | | +| 35 | for k, v in data.items(): | +| 36 | for file_name, file_content in v.items(): | +| 37 | im = caffe.io.load_image(file_content, color=False) | +| 38 | # Pre-process the images. | +| 39 | self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im) | +| 40 | | +| 41 | return | +| 42 | | +| 43 | def _postprocess(self, data): | +| 44 | | +| 45 | data = data['prob'][0, :] | +| 46 | predicted = np.argmax(data) | +| 47 | predicted = {"predicted" : str(predicted) } | +| 48 | | +| 49 | return predicted | ++-----------------------------------+-----------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/examples_of_custom_scripts/index.rst b/umn/source/examples_of_custom_scripts/index.rst new file mode 100644 index 0000000..e02fe2c --- /dev/null +++ b/umn/source/examples_of_custom_scripts/index.rst @@ -0,0 +1,13 @@ +========================== +Examples of Custom Scripts +========================== + +.. 
toctree:: + :maxdepth: 1 + + tensorflow + pytorch + caffe + xgboost + pyspark + scikit_learn diff --git a/umn/source/examples_of_custom_scripts/pyspark.rst b/umn/source/examples_of_custom_scripts/pyspark.rst new file mode 100644 index 0000000..20f7ea6 --- /dev/null +++ b/umn/source/examples_of_custom_scripts/pyspark.rst @@ -0,0 +1,90 @@ +PySpark +======= + +Training and Saving a Model +--------------------------- + ++-----------------------------------+------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from pyspark.ml import Pipeline, PipelineModel | +| 2 | from pyspark.ml.linalg import Vectors | +| 3 | from pyspark.ml.classification import LogisticRegression | +| 4 | | +| 5 | # Prepare training data using tuples. | +| 6 | # Prepare training data from a list of (label, features) tuples. | +| 7 | training = spark.createDataFrame([ | +| 8 | (1.0, Vectors.dense([0.0, 1.1, 0.1])), | +| 9 | (0.0, Vectors.dense([2.0, 1.0, -1.0])), | +| 10 | (0.0, Vectors.dense([2.0, 1.3, 1.0])), | +| 11 | (1.0, Vectors.dense([0.0, 1.2, -0.5]))], ["label", "features"]) | +| 12 | | +| 13 | # Create a training instance. The logistic regression algorithm is used for training. | +| 14 | # Create a LogisticRegression instance. This instance is an Estimator. | +| 15 | lr = LogisticRegression(maxIter=10, regParam=0.01) | +| 16 | | +| 17 | # Train the logistic regression model. | +| 18 | # Learn a LogisticRegression model. This uses the parameters stored in lr. | +| 19 | model = lr.fit(training) | +| 20 | | +| 21 | # Save the model to a local directory. | +| 22 | # Save model to local path. | +| 23 | model.save("/tmp/spark_model") | ++-----------------------------------+------------------------------------------------------------------------------------------+ + +After the model is saved, it must be uploaded to the OBS directory before being published. The **config.json** configuration and **customize_service.py** must be contained during publishing. For details about the definition method, see `Model Package Specifications <../model_package_specifications/model_package_specifications.html>`__. + +Inference Code +-------------- + ++-----------------------------------+------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | # coding:utf-8 | +| 2 | import collections | +| 3 | import json | +| 4 | import traceback | +| 5 | | +| 6 | import model_service.log as log | +| 7 | from model_service.spark_model_service import SparkServingBaseService | +| 8 | from pyspark.ml.classification import LogisticRegression | +| 9 | | +| 10 | logger = log.getLogger(__name__) | +| 11 | | +| 12 | | +| 13 | class user_Service(SparkServingBaseService): | +| 14 | # Pre-process data. | +| 15 | def _preprocess(self, data): | +| 16 | logger.info("Begin to handle data from user data...") | +| 17 | # Read data. | +| 18 | req_json = json.loads(data, object_pairs_hook=collections.OrderedDict) | +| 19 | try: | +| 20 | # Convert data to the spark dataframe format. | +| 21 | predict_spdf = self.spark.createDataFrame(pd.DataFrame(req_json["data"]["req_data"])) | +| 22 | except Exception as e: | +| 23 | logger.error("check your request data does meet the requirements ?") | +| 24 | logger.error(traceback.format_exc()) | +| 25 | raise Exception("check your request data does meet the requirements ?") | +| 26 | return predict_spdf | +| 27 | | +| 28 | # Perform model inference. 
| +| 29 | def _inference(self, data): | +| 30 | try: | +| 31 | # Load a model file. | +| 32 | predict_model = LogisticRegression.load(self.model_path) | +| 33 | # Perform data inference. | +| 34 | prediction_result = predict_model.transform(data) | +| 35 | except Exception as e: | +| 36 | logger.error(traceback.format_exc()) | +| 37 | raise Exception("Unable to load model and do dataframe transformation.") | +| 38 | return prediction_result | +| 39 | | +| 40 | # Post-process data. | +| 41 | def _postprocess(self, pre_data): | +| 42 | logger.info("Get new data to respond...") | +| 43 | predict_str = pre_data.toPandas().to_json(orient='records') | +| 44 | predict_result = json.loads(predict_str) | +| 45 | return predict_result | ++-----------------------------------+------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/examples_of_custom_scripts/pytorch.rst b/umn/source/examples_of_custom_scripts/pytorch.rst new file mode 100644 index 0000000..d43657b --- /dev/null +++ b/umn/source/examples_of_custom_scripts/pytorch.rst @@ -0,0 +1,210 @@ +PyTorch +======= + +Training a Model +---------------- + ++-----------------------------------+------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from __future__ import print_function | +| 2 | import argparse | +| 3 | import torch | +| 4 | import torch.nn as nn | +| 5 | import torch.nn.functional as F | +| 6 | import torch.optim as optim | +| 7 | from torchvision import datasets, transforms | +| 8 | | +| 9 | # Define a network structure. | +| 10 | class Net(nn.Module): | +| 11 | def __init__(self): | +| 12 | super(Net, self).__init__() | +| 13 | # The second dimension of the input must be 784. | +| 14 | self.hidden1 = nn.Linear(784, 5120, bias=False) | +| 15 | self.output = nn.Linear(5120, 10, bias=False) | +| 16 | | +| 17 | def forward(self, x): | +| 18 | x = x.view(x.size()[0], -1) | +| 19 | x = F.relu((self.hidden1(x))) | +| 20 | x = F.dropout(x, 0.2) | +| 21 | x = self.output(x) | +| 22 | return F.log_softmax(x) | +| 23 | | +| 24 | def train(model, device, train_loader, optimizer, epoch): | +| 25 | model.train() | +| 26 | for batch_idx, (data, target) in enumerate(train_loader): | +| 27 | data, target = data.to(device), target.to(device) | +| 28 | optimizer.zero_grad() | +| 29 | output = model(data) | +| 30 | loss = F.cross_entropy(output, target) | +| 31 | loss.backward() | +| 32 | optimizer.step() | +| 33 | if batch_idx % 10 == 0: | +| 34 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( | +| 35 | epoch, batch_idx * len(data), len(train_loader.dataset), | +| 36 | 100. * batch_idx / len(train_loader), loss.item())) | +| 37 | | +| 38 | def test( model, device, test_loader): | +| 39 | model.eval() | +| 40 | test_loss = 0 | +| 41 | correct = 0 | +| 42 | with torch.no_grad(): | +| 43 | for data, target in test_loader: | +| 44 | data, target = data.to(device), target.to(device) | +| 45 | output = model(data) | +| 46 | test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss | +| 47 | pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability | +| 48 | correct += pred.eq(target.view_as(pred)).sum().item() | +| 49 | | +| 50 | test_loss /= len(test_loader.dataset) | +| 51 | | +| 52 | print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( | +| 53 | test_loss, correct, len(test_loader.dataset), | +| 54 | 100. 
* correct / len(test_loader.dataset))) | +| 55 | | +| 56 | device = torch.device("cpu") | +| 57 | | +| 58 | batch_size=64 | +| 59 | | +| 60 | kwargs={} | +| 61 | | +| 62 | train_loader = torch.utils.data.DataLoader( | +| 63 | datasets.MNIST('.', train=True, download=True, | +| 64 | transform=transforms.Compose([ | +| 65 | transforms.ToTensor() | +| 66 | ])), | +| 67 | batch_size=batch_size, shuffle=True, **kwargs) | +| 68 | test_loader = torch.utils.data.DataLoader( | +| 69 | datasets.MNIST('.', train=False, transform=transforms.Compose([ | +| 70 | transforms.ToTensor() | +| 71 | ])), | +| 72 | batch_size=1000, shuffle=True, **kwargs) | +| 73 | | +| 74 | model = Net().to(device) | +| 75 | optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5) | +| 76 | optimizer = optim.Adam(model.parameters()) | +| 77 | | +| 78 | for epoch in range(1, 2 + 1): | +| 79 | train(model, device, train_loader, optimizer, epoch) | +| 80 | test(model, device, test_loader) | ++-----------------------------------+------------------------------------------------------------------------------------------------------+ + +Saving a Model +-------------- + ++-----------------------------------+-----------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | # The model must be saved using state_dict and can be deployed remotely. | +| 2 | torch.save(model.state_dict(), "pytorch_mnist/mnist_mlp.pt") | ++-----------------------------------+-----------------------------------------------------------------------------+ + +Inference Code +-------------- + ++-----------------------------------+----------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from PIL import Image | +| 2 | import log | +| 3 | from model_service.pytorch_model_service import PTServingBaseService | +| 4 | import torch.nn.functional as F | +| 5 | | +| 6 | import torch.nn as nn | +| 7 | import torch | +| 8 | import json | +| 9 | | +| 10 | import numpy as np | +| 11 | | +| 12 | logger = log.getLogger(__name__) | +| 13 | | +| 14 | import torchvision.transforms as transforms | +| 15 | | +| 16 | # Define model preprocessing. | +| 17 | infer_transformation = transforms.Compose([ | +| 18 | transforms.Resize((28,28)), | +| 19 | # Transform to a PyTorch tensor. | +| 20 | transforms.ToTensor() | +| 21 | ]) | +| 22 | | +| 23 | | +| 24 | import os | +| 25 | | +| 26 | | +| 27 | class PTVisionService(PTServingBaseService): | +| 28 | | +| 29 | def __init__(self, model_name, model_path): | +| 30 | # Call the constructor of the parent class. | +| 31 | super(PTVisionService, self).__init__(model_name, model_path) | +| 32 | # Call the customized function to load the model. | +| 33 | self.model = Mnist(model_path) | +| 34 | # Load tags. | +| 35 | self.label = [0,1,2,3,4,5,6,7,8,9] | +| 36 | # Labels can also be loaded by label file. | +| 37 | # Store the label.json file in the model directory. 
The following information is read: | +| 38 | dir_path = os.path.dirname(os.path.realpath(self.model_path)) | +| 39 | with open(os.path.join(dir_path, 'label.json')) as f: | +| 40 | self.label = json.load(f) | +| 41 | | +| 42 | | +| 43 | def _preprocess(self, data): | +| 44 | | +| 45 | preprocessed_data = {} | +| 46 | for k, v in data.items(): | +| 47 | input_batch = [] | +| 48 | for file_name, file_content in v.items(): | +| 49 | with Image.open(file_content) as image1: | +| 50 | # Gray processing | +| 51 | image1 = image1.convert("L") | +| 52 | if torch.cuda.is_available(): | +| 53 | input_batch.append(infer_transformation(image1).cuda()) | +| 54 | else: | +| 55 | input_batch.append(infer_transformation(image1)) | +| 56 | input_batch_var = torch.autograd.Variable(torch.stack(input_batch, dim=0), volatile=True) | +| 57 | print(input_batch_var.shape) | +| 58 | preprocessed_data[k] = input_batch_var | +| 59 | | +| 60 | return preprocessed_data | +| 61 | | +| 62 | def _postprocess(self, data): | +| 63 | results = [] | +| 64 | for k, v in data.items(): | +| 65 | result = torch.argmax(v[0]) | +| 66 | result = {k: self.label[result]} | +| 67 | results.append(result) | +| 68 | return results | +| 69 | | +| 70 | class Net(nn.Module): | +| 71 | def __init__(self): | +| 72 | super(Net, self).__init__() | +| 73 | self.hidden1 = nn.Linear(784, 5120, bias=False) | +| 74 | self.output = nn.Linear(5120, 10, bias=False) | +| 75 | | +| 76 | def forward(self, x): | +| 77 | x = x.view(x.size()[0], -1) | +| 78 | x = F.relu((self.hidden1(x))) | +| 79 | x = F.dropout(x, 0.2) | +| 80 | x = self.output(x) | +| 81 | return F.log_softmax(x) | +| 82 | | +| 83 | | +| 84 | | +| 85 | def Mnist(model_path, **kwargs): | +| 86 | # Generate a network. | +| 87 | model = Net() | +| 88 | # Load the model. | +| 89 | if torch.cuda.is_available(): | +| 90 | device = torch.device('cuda') | +| 91 | model.load_state_dict(torch.load(model_path, map_location="cuda:0")) | +| 92 | else: | +| 93 | device = torch.device('cpu') | +| 94 | model.load_state_dict(torch.load(model_path, map_location=device)) | +| 95 | # CPU or GPU mapping | +| 96 | model.to(device) | +| 97 | # Declare an inference mode. 
| +| 98 | model.eval() | +| 99 | | +| 100 | return model | ++-----------------------------------+----------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/examples_of_custom_scripts/scikit_learn.rst b/umn/source/examples_of_custom_scripts/scikit_learn.rst new file mode 100644 index 0000000..0a2c0e0 --- /dev/null +++ b/umn/source/examples_of_custom_scripts/scikit_learn.rst @@ -0,0 +1,68 @@ +Scikit Learn +============ + +Training and Saving a Model +--------------------------- + ++-----------------------------------+----------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | import json | +| 2 | import pandas as pd | +| 3 | from sklearn.datasets import load_iris | +| 4 | from sklearn.model_selection import train_test_split | +| 5 | from sklearn.linear_model import LogisticRegression | +| 6 | from sklearn.externals import joblib | +| 7 | iris = pd.read_csv('/data/iris.csv') | +| 8 | X = iris.drop(['virginica'],axis=1) | +| 9 | y = iris[['virginica']] | +| 10 | # Create a LogisticRegression instance and train model | +| 11 | logisticRegression = LogisticRegression(C=1000.0, random_state=0) | +| 12 | logisticRegression.fit(X,y) | +| 13 | # Save model to local path | +| 14 | joblib.dump(logisticRegression, '/tmp/sklearn.m') | ++-----------------------------------+----------------------------------------------------------------------+ + +After the model is saved, it must be uploaded to the OBS directory before being published. The **config.json** and **customize_service.py** files must be contained during publishing. For details about the definition method, see `Model Package Specifications <../model_package_specifications/model_package_specifications.html>`__. + +Inference Code +-------------- + ++-----------------------------------+------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | # coding:utf-8 | +| 2 | import collections | +| 3 | import json | +| 4 | from sklearn.externals import joblib | +| 5 | from model_service.python_model_service import XgSklServingBaseService | +| 6 | | +| 7 | class user_Service(XgSklServingBaseService): | +| 8 | | +| 9 | # request data preprocess | +| 10 | def _preprocess(self, data): | +| 11 | list_data = [] | +| 12 | json_data = json.loads(data, object_pairs_hook=collections.OrderedDict) | +| 13 | for element in json_data["data"]["req_data"]: | +| 14 | array = [] | +| 15 | for each in element: | +| 16 | array.append(element[each]) | +| 17 | list_data.append(array) | +| 18 | return list_data | +| 19 | | +| 20 | # predict | +| 21 | def _inference(self, data): | +| 22 | sk_model = joblib.load(self.model_path) | +| 23 | pre_result = sk_model.predict(data) | +| 24 | pre_result = pre_result.tolist() | +| 25 | return pre_result | +| 26 | | +| 27 | # predict result process | +| 28 | def _postprocess(self,data): | +| 29 | resp_data = [] | +| 30 | for element in data: | +| 31 | resp_data.append({"predictresult": element}) | +| 32 | return resp_data | ++-----------------------------------+------------------------------------------------------------------------------------+ + + diff --git a/umn/source/examples_of_custom_scripts/tensorflow.rst b/umn/source/examples_of_custom_scripts/tensorflow.rst new file mode 100644 index 0000000..729a9c1 --- /dev/null +++ b/umn/source/examples_of_custom_scripts/tensorflow.rst @@ -0,0 +1,405 @@ +TensorFlow +========== + +TensorFlow has two types of APIs: Keras and tf. 
Keras and tf use different code for training and saving models, but the same code for inference. + +Training a Model (Keras API) +---------------------------- + ++-----------------------------------+-----------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from keras.models import Sequential | +| 2 | model = Sequential() | +| 3 | from keras.layers import Dense | +| 4 | import tensorflow as tf | +| 5 | | +| 6 | # Import a training dataset. | +| 7 | mnist = tf.keras.datasets.mnist | +| 8 | (x_train, y_train),(x_test, y_test) = mnist.load_data() | +| 9 | x_train, x_test = x_train / 255.0, x_test / 255.0 | +| 10 | | +| 11 | print(x_train.shape) | +| 12 | | +| 13 | from keras.layers import Dense | +| 14 | from keras.models import Sequential | +| 15 | import keras | +| 16 | from keras.layers import Dense, Activation, Flatten, Dropout | +| 17 | | +| 18 | # Define a model network. | +| 19 | model = Sequential() | +| 20 | model.add(Flatten(input_shape=(28,28))) | +| 21 | model.add(Dense(units=5120,activation='relu')) | +| 22 | model.add(Dropout(0.2)) | +| 23 | | +| 24 | model.add(Dense(units=10, activation='softmax')) | +| 25 | | +| 26 | # Define an optimizer and loss functions. | +| 27 | model.compile(optimizer='adam', | +| 28 | loss='sparse_categorical_crossentropy', | +| 29 | metrics=['accuracy']) | +| 30 | | +| 31 | model.summary() | +| 32 | # Train the model. | +| 33 | model.fit(x_train, y_train, epochs=2) | +| 34 | # Evaluate the model. | +| 35 | model.evaluate(x_test, y_test) | ++-----------------------------------+-----------------------------------------------------------------+ + +Saving a Model (Keras API) +-------------------------- + ++-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from keras import backend as K | +| 2 | | +| 3 | # K.get_session().run(tf.global_variables_initializer()) | +| 4 | | +| 5 | # Define the inputs and outputs of the prediction API. | +| 6 | # The key values of the inputs and outputs dictionaries are used as the index keys for the input and output tensors of the model. | +| 7 | # The input and output definitions of the model must match the custom inference script. | +| 8 | predict_signature = tf.saved_model.signature_def_utils.predict_signature_def( | +| 9 | inputs={"images" : model.input}, | +| 10 | outputs={"scores" : model.output} | +| 11 | ) | +| 12 | | +| 13 | # Define a save path. | +| 14 | builder = tf.saved_model.builder.SavedModelBuilder('./mnist_keras/') | +| 15 | | +| 16 | builder.add_meta_graph_and_variables( | +| 17 | | +| 18 | sess = K.get_session(), | +| 19 | # The tf.saved_model.tag_constants.SERVING tag needs to be defined for inference and deployment. 
| +| 20 | tags=[tf.saved_model.tag_constants.SERVING], | +| 21 | """ | +| 22 | signature_def_map: Only single items can exist, or the corresponding key needs to be defined as follows: | +| 23 | tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY | +| 24 | """ | +| 25 | signature_def_map={ | +| 26 | tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: | +| 27 | predict_signature | +| 28 | } | +| 29 | | +| 30 | ) | +| 31 | builder.save() | ++-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+ + +Training a Model (tf API) +------------------------- + ++-----------------------------------+--------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from __future__ import print_function | +| 2 | | +| 3 | import gzip | +| 4 | import os | +| 5 | import urllib | +| 6 | | +| 7 | import numpy | +| 8 | import tensorflow as tf | +| 9 | from six.moves import urllib | +| 10 | | +| 11 | # Training data is obtained from the Yann LeCun official website http://yann.lecun.com/exdb/mnist/. | +| 12 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' | +| 13 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' | +| 14 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' | +| 15 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz' | +| 16 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz' | +| 17 | VALIDATION_SIZE = 5000 | +| 18 | | +| 19 | | +| 20 | def maybe_download(filename, work_directory): | +| 21 | """Download the data from Yann's website, unless it's already here.""" | +| 22 | if not os.path.exists(work_directory): | +| 23 | os.mkdir(work_directory) | +| 24 | filepath = os.path.join(work_directory, filename) | +| 25 | if not os.path.exists(filepath): | +| 26 | filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath) | +| 27 | statinfo = os.stat(filepath) | +| 28 | print('Successfully downloaded %s %d bytes.' 
% (filename, statinfo.st_size)) | +| 29 | return filepath | +| 30 | | +| 31 | | +| 32 | def _read32(bytestream): | +| 33 | dt = numpy.dtype(numpy.uint32).newbyteorder('>') | +| 34 | return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] | +| 35 | | +| 36 | | +| 37 | def extract_images(filename): | +| 38 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" | +| 39 | print('Extracting %s' % filename) | +| 40 | with gzip.open(filename) as bytestream: | +| 41 | magic = _read32(bytestream) | +| 42 | if magic != 2051: | +| 43 | raise ValueError( | +| 44 | 'Invalid magic number %d in MNIST image file: %s' % | +| 45 | (magic, filename)) | +| 46 | num_images = _read32(bytestream) | +| 47 | rows = _read32(bytestream) | +| 48 | cols = _read32(bytestream) | +| 49 | buf = bytestream.read(rows * cols * num_images) | +| 50 | data = numpy.frombuffer(buf, dtype=numpy.uint8) | +| 51 | data = data.reshape(num_images, rows, cols, 1) | +| 52 | return data | +| 53 | | +| 54 | | +| 55 | def dense_to_one_hot(labels_dense, num_classes=10): | +| 56 | """Convert class labels from scalars to one-hot vectors.""" | +| 57 | num_labels = labels_dense.shape[0] | +| 58 | index_offset = numpy.arange(num_labels) * num_classes | +| 59 | labels_one_hot = numpy.zeros((num_labels, num_classes)) | +| 60 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 | +| 61 | return labels_one_hot | +| 62 | | +| 63 | | +| 64 | def extract_labels(filename, one_hot=False): | +| 65 | """Extract the labels into a 1D uint8 numpy array [index].""" | +| 66 | print('Extracting %s' % filename) | +| 67 | with gzip.open(filename) as bytestream: | +| 68 | magic = _read32(bytestream) | +| 69 | if magic != 2049: | +| 70 | raise ValueError( | +| 71 | 'Invalid magic number %d in MNIST label file: %s' % | +| 72 | (magic, filename)) | +| 73 | num_items = _read32(bytestream) | +| 74 | buf = bytestream.read(num_items) | +| 75 | labels = numpy.frombuffer(buf, dtype=numpy.uint8) | +| 76 | if one_hot: | +| 77 | return dense_to_one_hot(labels) | +| 78 | return labels | +| 79 | | +| 80 | | +| 81 | class DataSet(object): | +| 82 | """Class encompassing test, validation and training MNIST data set.""" | +| 83 | | +| 84 | def __init__(self, images, labels, fake_data=False, one_hot=False): | +| 85 | """Construct a DataSet. one_hot arg is used only if fake_data is true.""" | +| 86 | | +| 87 | if fake_data: | +| 88 | self._num_examples = 10000 | +| 89 | self.one_hot = one_hot | +| 90 | else: | +| 91 | assert images.shape[0] == labels.shape[0], ( | +| 92 | 'images.shape: %s labels.shape: %s' % (images.shape, | +| 93 | labels.shape)) | +| 94 | self._num_examples = images.shape[0] | +| 95 | | +| 96 | # Convert shape from [num examples, rows, columns, depth] | +| 97 | # to [num examples, rows*columns] (assuming depth == 1) | +| 98 | assert images.shape[3] == 1 | +| 99 | images = images.reshape(images.shape[0], | +| 100 | images.shape[1] * images.shape[2]) | +| 101 | # Convert from [0, 255] -> [0.0, 1.0]. 
| +| 102 | images = images.astype(numpy.float32) | +| 103 | images = numpy.multiply(images, 1.0 / 255.0) | +| 104 | self._images = images | +| 105 | self._labels = labels | +| 106 | self._epochs_completed = 0 | +| 107 | self._index_in_epoch = 0 | +| 108 | | +| 109 | @property | +| 110 | def images(self): | +| 111 | return self._images | +| 112 | | +| 113 | @property | +| 114 | def labels(self): | +| 115 | return self._labels | +| 116 | | +| 117 | @property | +| 118 | def num_examples(self): | +| 119 | return self._num_examples | +| 120 | | +| 121 | @property | +| 122 | def epochs_completed(self): | +| 123 | return self._epochs_completed | +| 124 | | +| 125 | def next_batch(self, batch_size, fake_data=False): | +| 126 | """Return the next `batch_size` examples from this data set.""" | +| 127 | if fake_data: | +| 128 | fake_image = [1] * 784 | +| 129 | if self.one_hot: | +| 130 | fake_label = [1] + [0] * 9 | +| 131 | else: | +| 132 | fake_label = 0 | +| 133 | return [fake_image for _ in range(batch_size)], [ | +| 134 | fake_label for _ in range(batch_size) | +| 135 | ] | +| 136 | start = self._index_in_epoch | +| 137 | self._index_in_epoch += batch_size | +| 138 | if self._index_in_epoch > self._num_examples: | +| 139 | # Finished epoch | +| 140 | self._epochs_completed += 1 | +| 141 | # Shuffle the data | +| 142 | perm = numpy.arange(self._num_examples) | +| 143 | numpy.random.shuffle(perm) | +| 144 | self._images = self._images[perm] | +| 145 | self._labels = self._labels[perm] | +| 146 | # Start next epoch | +| 147 | start = 0 | +| 148 | self._index_in_epoch = batch_size | +| 149 | assert batch_size <= self._num_examples | +| 150 | end = self._index_in_epoch | +| 151 | return self._images[start:end], self._labels[start:end] | +| 152 | | +| 153 | | +| 154 | def read_data_sets(train_dir, fake_data=False, one_hot=False): | +| 155 | """Return training, validation and testing data sets.""" | +| 156 | | +| 157 | class DataSets(object): | +| 158 | pass | +| 159 | | +| 160 | data_sets = DataSets() | +| 161 | | +| 162 | if fake_data: | +| 163 | data_sets.train = DataSet([], [], fake_data=True, one_hot=one_hot) | +| 164 | data_sets.validation = DataSet([], [], fake_data=True, one_hot=one_hot) | +| 165 | data_sets.test = DataSet([], [], fake_data=True, one_hot=one_hot) | +| 166 | return data_sets | +| 167 | | +| 168 | local_file = maybe_download(TRAIN_IMAGES, train_dir) | +| 169 | train_images = extract_images(local_file) | +| 170 | | +| 171 | local_file = maybe_download(TRAIN_LABELS, train_dir) | +| 172 | train_labels = extract_labels(local_file, one_hot=one_hot) | +| 173 | | +| 174 | local_file = maybe_download(TEST_IMAGES, train_dir) | +| 175 | test_images = extract_images(local_file) | +| 176 | | +| 177 | local_file = maybe_download(TEST_LABELS, train_dir) | +| 178 | test_labels = extract_labels(local_file, one_hot=one_hot) | +| 179 | | +| 180 | validation_images = train_images[:VALIDATION_SIZE] | +| 181 | validation_labels = train_labels[:VALIDATION_SIZE] | +| 182 | train_images = train_images[VALIDATION_SIZE:] | +| 183 | train_labels = train_labels[VALIDATION_SIZE:] | +| 184 | | +| 185 | data_sets.train = DataSet(train_images, train_labels) | +| 186 | data_sets.validation = DataSet(validation_images, validation_labels) | +| 187 | data_sets.test = DataSet(test_images, test_labels) | +| 188 | return data_sets | +| 189 | | +| 190 | training_iteration = 1000 | +| 191 | | +| 192 | modelarts_example_path = './modelarts-mnist-train-save-deploy-example' | +| 193 | | +| 194 | export_path = 
modelarts_example_path + '/model/' | +| 195 | data_path = './' | +| 196 | | +| 197 | print('Training model...') | +| 198 | mnist = read_data_sets(data_path, one_hot=True) | +| 199 | sess = tf.InteractiveSession() | +| 200 | serialized_tf_example = tf.placeholder(tf.string, name='tf_example') | +| 201 | feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32), } | +| 202 | tf_example = tf.parse_example(serialized_tf_example, feature_configs) | +| 203 | x = tf.identity(tf_example['x'], name='x') # use tf.identity() to assign name | +| 204 | y_ = tf.placeholder('float', shape=[None, 10]) | +| 205 | w = tf.Variable(tf.zeros([784, 10])) | +| 206 | b = tf.Variable(tf.zeros([10])) | +| 207 | sess.run(tf.global_variables_initializer()) | +| 208 | y = tf.nn.softmax(tf.matmul(x, w) + b, name='y') | +| 209 | cross_entropy = -tf.reduce_sum(y_ * tf.log(y)) | +| 210 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy) | +| 211 | values, indices = tf.nn.top_k(y, 10) | +| 212 | table = tf.contrib.lookup.index_to_string_table_from_tensor( | +| 213 | tf.constant([str(i) for i in range(10)])) | +| 214 | prediction_classes = table.lookup(tf.to_int64(indices)) | +| 215 | for _ in range(training_iteration): | +| 216 | batch = mnist.train.next_batch(50) | +| 217 | train_step.run(feed_dict={x: batch[0], y_: batch[1]}) | +| 218 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) | +| 219 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float')) | +| 220 | print('training accuracy %g' % sess.run( | +| 221 | accuracy, feed_dict={ | +| 222 | x: mnist.test.images, | +| 223 | y_: mnist.test.labels | +| 224 | })) | +| 225 | print('Done training!') | ++-----------------------------------+--------------------------------------------------------------------------------------------------------+ + +Saving a Model (tf API) +----------------------- + ++-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | # Export the model. | +| 2 | # The model needs to be saved using the saved_model API. | +| 3 | print('Exporting trained model to', export_path) | +| 4 | builder = tf.saved_model.builder.SavedModelBuilder(export_path) | +| 5 | | +| 6 | tensor_info_x = tf.saved_model.utils.build_tensor_info(x) | +| 7 | tensor_info_y = tf.saved_model.utils.build_tensor_info(y) | +| 8 | | +| 9 | # Define the inputs and outputs of the prediction API. | +| 10 | # The key values of the inputs and outputs dictionaries are used as the index keys for the input and output tensors of the model. | +| 11 | # The input and output definitions of the model must match the custom inference script. | +| 12 | prediction_signature = ( | +| 13 | tf.saved_model.signature_def_utils.build_signature_def( | +| 14 | inputs={'images': tensor_info_x}, | +| 15 | outputs={'scores': tensor_info_y}, | +| 16 | method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)) | +| 17 | | +| 18 | legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op') | +| 19 | builder.add_meta_graph_and_variables( | +| 20 | # Set tag to serve/tf.saved_model.tag_constants.SERVING. 
| +| 21 | sess, [tf.saved_model.tag_constants.SERVING], | +| 22 | signature_def_map={ | +| 23 | 'predict_images': | +| 24 | prediction_signature, | +| 25 | }, | +| 26 | legacy_init_op=legacy_init_op) | +| 27 | | +| 28 | builder.save() | +| 29 | | +| 30 | print('Done exporting!') | ++-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+ + +Inference Code (Keras and tf APIs) +---------------------------------- + ++-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | from PIL import Image | +| 2 | import numpy as np | +| 3 | from model_service.tfserving_model_service import TfServingBaseService | +| 4 | | +| 5 | | +| 6 | class mnist_service(TfServingBaseService): | +| 7 | | +| 8 | # Match the model input with the user's HTTPS API input during preprocessing. | +| 9 | # The model input corresponding to the preceding training part is {"images":}. | +| 10 | def _preprocess(self, data): | +| 11 | | +| 12 | preprocessed_data = {} | +| 13 | images = [] | +| 14 | # Iterate the input data. | +| 15 | for k, v in data.items(): | +| 16 | for file_name, file_content in v.items(): | +| 17 | image1 = Image.open(file_content) | +| 18 | image1 = np.array(image1, dtype=np.float32) | +| 19 | image1.resize((1,784)) | +| 20 | images.append(image1) | +| 21 | # Return the numpy array. | +| 22 | images = np.array(images,dtype=np.float32) | +| 23 | # Perform batch processing on multiple input samples and ensure that the shape is the same as that inputted during training. | +| 24 | images.resize((len(data), 784)) | +| 25 | preprocessed_data['images'] = images | +| 26 | return preprocessed_data | +| 27 | | +| 28 | # Processing logic of the inference for invoking the parent class. | +| 29 | | +| 30 | # The output corresponding to model saving in the preceding training part is {"scores":}. | +| 31 | # Postprocess the HTTPS output. | +| 32 | def _postprocess(self, data): | +| 33 | infer_output = {"mnist_result": []} | +| 34 | # Iterate the model output. 
| +| 35 | for output_name, results in data.items(): | +| 36 | for result in results: | +| 37 | infer_output["mnist_result"].append(result.index(max(result))) | +| 38 | return infer_output | ++-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/examples_of_custom_scripts/xgboost.rst b/umn/source/examples_of_custom_scripts/xgboost.rst new file mode 100644 index 0000000..aee3abd --- /dev/null +++ b/umn/source/examples_of_custom_scripts/xgboost.rst @@ -0,0 +1,81 @@ +XGBoost +======= + +Training and Saving a Model +--------------------------- + ++-----------------------------------+---------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | import pandas as pd | +| 2 | import xgboost as xgb | +| 3 | from sklearn.model_selection import train_test_split | +| 4 | | +| 5 | # Prepare training data and setting parameters | +| 6 | iris = pd.read_csv('/data/iris.csv') | +| 7 | X = iris.drop(['virginica'],axis=1) | +| 8 | y = iris[['virginica']] | +| 9 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234565) | +| 10 | params = { | +| 11 | 'booster': 'gbtree', | +| 12 | 'objective': 'multi:softmax', | +| 13 | 'num_class': 3, | +| 14 | 'gamma': 0.1, | +| 15 | 'max_depth': 6, | +| 16 | 'lambda': 2, | +| 17 | 'subsample': 0.7, | +| 18 | 'colsample_bytree': 0.7, | +| 19 | 'min_child_weight': 3, | +| 20 | 'silent': 1, | +| 21 | 'eta': 0.1, | +| 22 | 'seed': 1000, | +| 23 | 'nthread': 4, | +| 24 | } | +| 25 | plst = params.items() | +| 26 | dtrain = xgb.DMatrix(X_train, y_train) | +| 27 | num_rounds = 500 | +| 28 | model = xgb.train(plst, dtrain, num_rounds) | +| 29 | model.save_model('/tmp/xgboost.m') | ++-----------------------------------+---------------------------------------------------------------------------------------------------+ + +After the model is saved, it must be uploaded to the OBS directory before being published. The **config.json** and **customize_service.py** files must be contained during publishing. For details about the definition method, see `Model Package Specifications <../model_package_specifications/model_package_specifications.html>`__. + +Inference Code +-------------- + +.. 
code-block:: + + # coding:utf-8 + import collections + import json + import xgboost as xgb + from model_service.python_model_service import XgSklServingBaseService + class user_Service(XgSklServingBaseService): + + # request data preprocess + def _preprocess(self, data): + list_data = [] + json_data = json.loads(data, object_pairs_hook=collections.OrderedDict) + for element in json_data["data"]["req_data"]: + array = [] + for each in element: + array.append(element[each]) + list_data.append(array) + return list_data + + # predict + def _inference(self, data): + xg_model = xgb.Booster(model_file=self.model_path) + pre_data = xgb.DMatrix(data) + pre_result = xg_model.predict(pre_data) + pre_result = pre_result.tolist() + return pre_result + + # predict result process + def _postprocess(self,data): + resp_data = [] + for element in data: + resp_data.append({"predictresult": element}) + return resp_data + + diff --git a/umn/source/exeml/image_classification/creating_a_project.rst b/umn/source/exeml/image_classification/creating_a_project.rst new file mode 100644 index 0000000..89e91f5 --- /dev/null +++ b/umn/source/exeml/image_classification/creating_a_project.rst @@ -0,0 +1,56 @@ +Creating a Project +================== + +ModelArts ExeML supports image classification and object detection projects. You can create any of them based on your needs. Perform the following operations to create an ExeML project. + +Procedure +--------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **ExeML**. The **ExeML** page is displayed. + +#. Click **Create Project** in the box of your desired project. The page for creating an ExeML project is displayed. + +#. On the displayed page, set the parameters by referring to `Table 1 <#modelarts210004enustopic0284258833enustopic0169446153table14961618163816>`__. + +.. _modelarts210004enustopic0284258833enustopic0169446153table14961618163816: + + .. table:: **Table 1** Parameters + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==============================================================================================================================================================================================================================================================================================+ + | Name | Name of an ExeML project | + | | | + | | - Enter a maximum of 20 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed. This parameter is mandatory. | + | | - The name must start with a letter. 
| + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of a project | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Dataset Source | You can create a dataset or specify an existing dataset. | + | | | + | | - **Create**: Configure parameters such as **Dataset Name**, **Input Dataset Path**, **Output Dataset Path**, and **Label Set**. | + | | - **Specify**: Select a dataset of the same type from ModelArts Data Management to create an ExeML project. Only datasets of the same type are displayed in the **Dataset Name** drop-down list. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Dataset Name | If you select **Create** for **Dataset Source**, enter a dataset name based on required rules in the text box on the right. If you select **Specify** for **Dataset Source**, select one from available datasets of the same type under the current account displayed in the drop-down list. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Input Dataset Path | Select the OBS path to the input dataset. For details about dataset input specifications, see `Preparing Data <../../exeml/image_classification/preparing_data.html>`__. | + | | | + | | - Except the files and folders described in **Preparing Data > Requirements for Files Uploaded to OBS**, no other files or folders can be saved in the training data path. Otherwise, an error will be reported. | + | | - Do not modify the files in the training data path. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Dataset Path | Select the OBS path for storing the output dataset. | + | | | + | | .. note:: | + | | | + | | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. It is a good practice to select an empty directory in **Output Dataset Path**. 
| + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set | - **Label Name**: Enter a label name. The label name can contain only Chinese characters, letters, digits, underscores (_), and hyphens (-), which contains 1 to 32 characters. | + | | | + | | - **Add Label**: Click **Add Label** to add one or more labels. | + | | | + | | - Set the label color: You need to set label colors for object detection datasets, but you do not need to set label colors for image classification datasets. Select a color from the color palette on the right of a label, or enter the hexadecimal color code to set the color. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. Click **Create Project**. The system displays a message indicating that the project has been created. Then, the **Label Data** tab page is displayed. Alternatively, view the created project on the **ExeML** page and click the project name to go to the **Label Data** page. + + diff --git a/umn/source/exeml/image_classification/deploying_a_model_as_a_service.rst b/umn/source/exeml/image_classification/deploying_a_model_as_a_service.rst new file mode 100644 index 0000000..4ddb8bb --- /dev/null +++ b/umn/source/exeml/image_classification/deploying_a_model_as_a_service.rst @@ -0,0 +1,57 @@ +Deploying a Model as a Service +============================== + +Deploying a Model +----------------- + +You can deploy a model as a real-time service that provides a real-time test UI and monitoring capabilities. After model training is complete, you can deploy a version with the ideal accuracy and in the **Successful** status as a service. The procedure is as follows: + +#. On the **Train Model** tab page, wait until the training status changes to **Successful**. Click **Deploy** in the **Version Manager** pane to deploy the model as a real-time service. + +#. In the **Deploy** dialog box, select resource flavor, set the **Auto Stop** function, and click **OK** to start the deployment. + + - **Specifications**: The GPU specifications are better, and the CPU specifications are more cost-effective. + - **Compute Nodes**: The default value is **1** and cannot be changed. + - **Auto Stop**: After this function is enabled and the auto stop time is set, a service automatically stops at the specified time. + + The options are **1 hour later**, **2 hours later**, **4 hours later**, **6 hours later**, and **Custom**. If you select **Custom**, you can enter any integer from 1 to 24 hours in the text box on the right. + +#. After the model deployment is started, view the deployment status on the **Service Deployment** page. + + It takes a certain period of time to deploy a model. When the status in the **Version Manager** pane changes from **Deploying** to **Running**, the deployment is complete. + + .. note:: + + On the **ExeML** page, trained models can only be deployed as real-time services. For details about how to deploy them as batch services, see `Where Are Models Generated by ExeML Stored? 
What Other Operations Are Supported? <../../exeml/tips/where_are_models_generated_by_exeml_stored_what_other_operations_are_supported.html>`__ + +Testing a Service +----------------- + +- On the **Service Deployment** page, select a service type. For example, on the ExeML page, the image classification model is deployed as a real-time service by default. On the **Real-Time Services** page, click **Prediction** in the **Operation** column of the target service to perform a service test. For details, see "Testing a Service". +- You can also use code to test a service. For details, see "Accessing a Real-Time Service". +- The following describes the procedure for performing a service test after the image classification model is deployed as a service on the ExeML page. + + #. After the model is deployed, test the service using an image. On the **ExeML** page, click the target project, go to the **Deploy Service** tab page, select the service version in the **Running** status, click **Upload** in the service test area, and upload a local image to perform the test. + + #. Click **Prediction** to conduct the test. After the prediction is complete, label **sunflowers** and its detection score are displayed in the prediction result area on the right. If the model accuracy does not meet your expectation, add images on the **Label Data** tab page, label the images, and train and deploy the model again. `Table 1 <#modelarts210007enustopic0284258836enustopic0169446156table27341946101510>`__ describes the parameters in the prediction result. If you are satisfied with the model prediction result, call the API to access the real-time service as prompted. For details, see Accessing a Real-Time Service. + + Currently, only JPG, JPEG, BMP, and PNG images are supported. + + + +.. _modelarts210007enustopic0284258836enustopic0169446156table27341946101510: + + .. table:: **Table 1** Parameters in the prediction result + + ============= ===================================== + Parameter Description + ============= ===================================== + predict_label Image prediction label + scores Prediction confidence of top 5 labels + ============= ===================================== + + .. note:: + + A running real-time service keeps consuming resources. If you do not need to use the real-time service, click **Stop** in the **Version Manager** pane to stop the service. If you want to use the service again, click **Start**. + + diff --git a/umn/source/exeml/image_classification/index.rst b/umn/source/exeml/image_classification/index.rst new file mode 100644 index 0000000..439ef08 --- /dev/null +++ b/umn/source/exeml/image_classification/index.rst @@ -0,0 +1,12 @@ +==================== +Image Classification +==================== + +.. toctree:: + :maxdepth: 1 + + preparing_data + creating_a_project + labeling_data + training_a_model + deploying_a_model_as_a_service diff --git a/umn/source/exeml/image_classification/labeling_data.rst b/umn/source/exeml/image_classification/labeling_data.rst new file mode 100644 index 0000000..4a796c2 --- /dev/null +++ b/umn/source/exeml/image_classification/labeling_data.rst @@ -0,0 +1,53 @@ +Labeling Data +============= + +Model training requires a large number of labeled images. Therefore, before model training, add labels to the images that are not labeled. ModelArts allows you to add labels in batches by one click. You can also modify or delete labels that have been added to images. Prepare at least two classes of images for training. 
Each class contains at least five images. To achieve better effect, prepare at least 50 images for each class. If the image classes are similar, more images are required. + +Labeling Images +--------------- + +#. On the **Label Data** tab page, click the **Unlabeled** tab. All unlabeled images are displayed. Select the images to be labeled in sequence, or tick **Select Current Page** to select all images on the page, and then add labels to the images in the right pane. +#. After selecting an image, input a label in the **Label** text box, or select an existing label from the drop-down list. Click **OK**. The selected image is labeled. For example, you can select multiple images containing tulips and add label **tulips** to them. Then select other unlabeled images and label them as **sunflowers** and **roses**. After the labeling is complete, the images are saved on the **Labeled** tab page. + + a. You can add multiple labels to an image. + b. A label name can contain a maximum of 32 characters, including Chinese characters, letters, digits, hyphens (-), and underscores (_). + +#. After all the images are labeled, view them on the **Labeled** tab page or view **All Labels** in the right pane to check the name and quantity of the labels. + +Synchronizing or Adding Images +------------------------------ + +On the **ExeML** page, click the project name. The **Label Data** tab page is displayed. When creating a project, you can add images from a local PC or synchronize image data from OBS. + +- **Add Image**: You can quickly add images on a local PC to ModelArts and synchronize the images to the OBS path specified during project creation. Click **Add Image**. In the dialog box that is displayed, click **Add Image** and add images. The total size of all images uploaded in one attempt cannot exceed 8 MB. The size of a single image cannot exceed 5 MB. +- **Synchronize Data Source**: You can upload images to the OBS directory specified during project creation and click **Synchronize Data Source** to quickly add the images in the OBS directory to ModelArts. +- **Delete Image**: You can delete images one by one, or tick **Select Current Page** to delete all images on the page. + + .. note:: + + The deleted images cannot be recovered. Exercise caution when performing this operation. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify the labeled data on the **Labeled** tab page. + +- **Modifying based on images** + + On the data labeling page, click the **Labeled** tab, and select one or more images to be modified from the image list. Modify the image information in the label information area on the right. + + - Adding a label: In the **Label** text box, select an existing label, or enter a new label name and click **OK** to add the label to the selected image. + - Modifying a label: In the **File Labels** area, click the editing icon in the **Operation** column, enter the correct label name in the text box, and click the check mark icon to complete the modification. + - Deleting a label: In the **Labels of Selected Image** area, click |image1| in the **Operation** column to delete the label. + +- **Modifying based on labels** + + On the dataset labeling page, click the **Labeled** tab. The information about all labels is displayed on the right. + + - Modifying a label: Click the editing icon in the **Operation** column. In the dialog box that is displayed, enter the new label name and click **OK**. 
After the modification, the images that have been added with the label use the new label name. + - Deleting a label: Click the deletion icon in the **Operation** column. In the displayed dialog box, select **Delete label**, **Delete label and images with only the label (Do not delete source files)**, or **Delete label and images with only the label (Delete source files)**, and click **OK**. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110760936.png + diff --git a/umn/source/exeml/image_classification/preparing_data.rst b/umn/source/exeml/image_classification/preparing_data.rst new file mode 100644 index 0000000..4557717 --- /dev/null +++ b/umn/source/exeml/image_classification/preparing_data.rst @@ -0,0 +1,61 @@ +Preparing Data +============== + +Before using ModelArts ExeML to build a model, upload data to an OBS bucket. + +Uploading Data to OBS +--------------------- + +This operation uses the OBS console to upload data. For more information about how to create a bucket and upload files, see Creating a Bucket and Uploading an Object. + +Perform the following operations to import data to the dataset for model training and building. + +#. Log in to OBS Console and create a bucket. +#. Upload the local data to the OBS bucket. If you have a large amount of data, use OBS Browser+ to upload data or folders. The uploaded data must meet the dataset requirements of the ExeML project. + +Requirements on Datasets +------------------------ + +- The name of files in a dataset cannot contain Chinese characters, plus signs (+), spaces, or tabs. +- Ensure that no damaged image exists. The supported image formats include JPG, JPEG, BMP, and PNG. +- Do not store data of different projects in the same dataset. +- Prepare sufficient data and balance each class of data. To achieve better results, prepare at least 100 images of each class in a training set for image classification. +- To ensure the prediction accuracy of models, the training samples must be similar to the actual application scenarios. +- To ensure the generalization capability of models, datasets should cover all possible scenarios. + +Requirements for Files Uploaded to OBS +-------------------------------------- + +- If you do not need to upload training data in advance, create an empty folder to store files generated in the future, for example, **/bucketName/data-cat**. +- If you need to upload images to be labeled in advance, create an empty folder and save the images in the folder. An example of the image directory structure is **/bucketName/data-cat/cat.jpg**. +- If you want to upload labeled images to the OBS bucket, upload them according to the following specifications: + + - The dataset for image classification requires storing labeled objects and their label files (in one-to-one relationship with the labeled objects) in the same directory. For example, if the name of the labeled object is **10.jpg**, the name of the label file must be **10.txt**. + + Example of data files: + + .. code-block:: + + ├─ + │ 10.jpg + │ 10.txt + │ 11.jpg + │ 11.txt + │ 12.jpg + │ 12.txt + + - Images in JPG, JPEG, PNG, and BMP formats are supported. When uploading images on the ModelArts management console, ensure that the size of an image does not exceed 5 MB and the total size of images to be uploaded in one attempt does not exceed 8 MB. If the data volume is large, use OBS Browser+ to upload images. + + - A label name can contain a maximum of 32 characters, including Chinese characters, letters, digits, hyphens (-), and underscores (_). 
+ + - Image classification label file (**.txt**) rule: + + Each row contains only one label. + + .. code-block:: + + cat + dog + ... + + diff --git a/umn/source/exeml/image_classification/training_a_model.rst b/umn/source/exeml/image_classification/training_a_model.rst new file mode 100644 index 0000000..aa70f50 --- /dev/null +++ b/umn/source/exeml/image_classification/training_a_model.rst @@ -0,0 +1,67 @@ +Training a Model +================ + +After labeling the images, you can train a model. You can perform model training to obtain the required image classification model. Training images must be classified into at least two classes, and each class must contain at least five images. Before training, ensure that the labeled images meet the requirements. Otherwise, the **Train** button is unavailable. + +Procedure +--------- + +#. On the **ExeML** page, click the name of the project that is successfully created. The **Label Data** tab page is displayed. + +#. On the **Label Data** tab page, click **Train** in the upper right corner. In the displayed **Training Configuration** dialog box, set related parameters. `Table 1 <#modelarts210006enustopic0284258835enustopic0169446155table56110116164>`__ describes the parameters. + +.. _modelarts210006enustopic0284258835enustopic0169446155table56110116164: + + .. table:: **Table 1** Parameter description + + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Parameter | Description | Default Value | + +=================================+=======================================================================================================================================================================================================================================================================================================+=================================+ + | Dataset Version | This version is the one when the dataset is published in **Data Management**. In an ExeML project, when a training job is started, the dataset is published as a version based on the previous data labeling. | Randomly provided by the system | + | | | | + | | The system automatically provides a version number. You can change it to the version number that you want. | | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Training and Validation Ratios | The labeled sample is randomly divided into a training set and a validation set. By default, the ratio for the training set is 0.8, and that for the validation set is 0.2. The **usage** field in the manifest file records the set type. The value ranges from 0 to 1. 
| 0.8 | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Incremental Training Version | Select the version with the highest precision to perform training again. This accelerates model convergence and improves training precision. | None | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Max. Training Duration (Minute) | If training is not completed within the maximum training duration, the model is saved and training stops. To prevent the model from exiting before convergence, set this parameter to a large value. The value ranges from 6 to 6000. It is a good practice to properly extend the training duration. | 60 | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Training Preference | - **performance_first**: performance first. The training duration is short and the generated model is small. | balance | + | | - **balance**: balanced performance and precision | | + | | - **accuracy_first**: precision first. The training duration is long and the generated model is large. | | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Instance Flavor | Select the resource specifications used for training. By default, the following specifications are supported: | **ExeML (GPU)** | + | | | | + | | - **Compute-intensive instance (GPU)** | | + | | | | + | | The compute flavors are for reference only. Obtain the flavors on the management console. | | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + +#. After configuring training parameters, click **Next** to go to the configuration page, confirm the specifications, and click **Submit** to start auto model training. The training takes a certain period of time. Wait until the training is complete. If you close or exit this page, the system still performs the training operation. + +#. On the **Train Model** tab page, wait until the training status changes from **Running** to **Completed**. + +#. 
View the training details, such as **Accuracy**, **Evaluation Result**, **Training Parameters**, and **Classification Statistics**. For details about the evaluation result parameters, see `Table 2 <#modelarts210006enustopic0284258835enustopic0169446155table19888201216>`__. + +.. _modelarts210006enustopic0284258835enustopic0169446155table19888201216: + + .. table:: **Table 2** Evaluation result parameters + + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===========+==================================================================================================================================================+ + | Recall | Fraction of correctly predicted samples over all samples of a class. It shows the ability of a model to distinguish positive samples. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + | Precision | Fraction of correctly predicted samples over all samples predicted as a class. It shows the ability of a model to distinguish negative samples. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + | Accuracy | Fraction of correctly predicted samples over all samples. It shows the general ability of a model to recognize samples. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + | F1 Score | Harmonic mean of the precision and recall of a model. It is used to evaluate the quality of a model. A high F1 score indicates a good model. | + +-----------+--------------------------------------------------------------------------------------------------------------------------------------------------+ + +.. note:: + + An ExeML project supports multiple rounds of training, and each round generates a version. For example, the first training version is **V001 (**\ *xxx*\ **)**, and the next version is **V002 (**\ *xxx*\ **)**. The trained models can be managed by training version. After the trained model meets your requirements, deploy the model as a service. + + diff --git a/umn/source/exeml/index.rst b/umn/source/exeml/index.rst new file mode 100644 index 0000000..8510bec --- /dev/null +++ b/umn/source/exeml/index.rst @@ -0,0 +1,11 @@ +===== +ExeML +===== + +.. toctree:: + :maxdepth: 1 + + introduction_to_exeml + image_classification/index + object_detection/index + tips/index diff --git a/umn/source/exeml/introduction_to_exeml.rst b/umn/source/exeml/introduction_to_exeml.rst new file mode 100644 index 0000000..540288b --- /dev/null +++ b/umn/source/exeml/introduction_to_exeml.rst @@ -0,0 +1,36 @@ +Introduction to ExeML +===================== + +ExeML +----- + +ModelArts ExeML is a customized code-free model development tool that helps users start AI application development from scratch with high flexibility. ExeML automates model design, parameter tuning and training, and model compression and deployment based on the labeled data. Developers do not need basic development or coding capabilities; they only need to upload data and complete model training and deployment as prompted by ExeML. + +Currently, you can use ExeML to quickly create image classification and object detection models.
It can be widely used in industrial, retail, and security fields. + +- Image classification classifies and identifies objects in images. +- Object detection identifies the position and class of each object in images. + +ExeML Usage Process +------------------- + +With ModelArts ExeML, you can develop AI models without coding. You only need to upload data, create a project, label the data, start training, and deploy the trained model. Up to 100 ExeML projects can be created. For details, see `Figure 1 <#modelarts210001enustopic0284258830enustopic0169445434fig3917183328>`__. + +.. figure:: /_static/images/en-us_image_0000001110921482.png + :alt: **Figure 1** Usage process of ExeML + + + **Figure 1** Usage process of ExeML + +ExeML Projects +-------------- + +- **Image Classification** + + An image classification project aims to classify images. You only need to add and label images. After the images are labeled, an image classification model can be quickly generated. It can automatically classify offerings, vehicle types, and defective goods. For example, in the quality check scenario, you can upload a product image, label the image as qualified or unqualified, and train and deploy a model to inspect product quality. + +- **Object Detection** + + An object detection project aims to identify the class and location of objects in images. You only need to add images and label objects in the images with proper bounding boxes. The labeled images will be used as the training set for creating a model. The model can identify multiple objects and count the number of objects in a single image, as well as inspect employees' dress code and perform unattended inspection of article placement. + + diff --git a/umn/source/exeml/object_detection/creating_a_project.rst b/umn/source/exeml/object_detection/creating_a_project.rst new file mode 100644 index 0000000..f465186 --- /dev/null +++ b/umn/source/exeml/object_detection/creating_a_project.rst @@ -0,0 +1,56 @@ +Creating a Project +================== + +ModelArts ExeML supports image classification and object detection projects. You can create either of them based on your needs. Perform the following operations to create an ExeML project. + +Procedure +--------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **ExeML**. The **ExeML** page is displayed. + +#. Click **Create Project** in the box of your desired project. The page for creating an ExeML project is displayed. + +#. On the displayed page, set the parameters by referring to `Table 1 <#modelarts210010enustopic0284258839enustopic0169446159enustopic0169446153table14961618163816>`__. + +.. _modelarts210010enustopic0284258839enustopic0169446159enustopic0169446153table14961618163816: + + ..
table:: **Table 1** Parameters + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==============================================================================================================================================================================================================================================================================================+ + | Name | Name of an ExeML project | + | | | + | | - Enter a maximum of 20 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed. This parameter is mandatory. | + | | - The name must start with a letter. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of a project | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Dataset Source | You can create a dataset or specify an existing dataset. | + | | | + | | - **Create**: Configure parameters such as **Dataset Name**, **Input Dataset Path**, **Output Dataset Path**, and **Label Set**. | + | | - **Specify**: Select a dataset of the same type from ModelArts Data Management to create an ExeML project. Only datasets of the same type are displayed in the **Dataset Name** drop-down list. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Dataset Name | If you select **Create** for **Dataset Source**, enter a dataset name based on required rules in the text box on the right. If you select **Specify** for **Dataset Source**, select one from available datasets of the same type under the current account displayed in the drop-down list. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Input Dataset Path | Select the OBS path to the input dataset. For details about dataset input specifications, see `Preparing Data <../../exeml/image_classification/preparing_data.html>`__. | + | | | + | | - Except the files and folders described in **Preparing Data > Requirements for Files Uploaded to OBS**, no other files or folders can be saved in the training data path. Otherwise, an error will be reported. | + | | - Do not modify the files in the training data path. 
| + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Dataset Path | Select the OBS path for storing the output dataset. | + | | | + | | .. note:: | + | | | + | | The output dataset path cannot be the same as the input dataset path or cannot be the subdirectory of the input dataset path. It is a good practice to select an empty directory in **Output Dataset Path**. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label Set | - **Label Name**: Enter a label name. The label name can contain only Chinese characters, letters, digits, underscores (_), and hyphens (-), which contains 1 to 32 characters. | + | | | + | | - **Add Label**: Click **Add Label** to add one or more labels. | + | | | + | | - Set the label color: You need to set label colors for object detection datasets, but you do not need to set label colors for image classification datasets. Select a color from the color palette on the right of a label, or enter the hexadecimal color code to set the color. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. Click **Create Project**. The system displays a message indicating that the project has been created. Then, the **Label Data** tab page is displayed. Alternatively, view the created project on the **ExeML** page and click the project name to go to the **Label Data** page. + + diff --git a/umn/source/exeml/object_detection/deploying_a_model_as_a_service.rst b/umn/source/exeml/object_detection/deploying_a_model_as_a_service.rst new file mode 100644 index 0000000..d240550 --- /dev/null +++ b/umn/source/exeml/object_detection/deploying_a_model_as_a_service.rst @@ -0,0 +1,68 @@ +Deploying a Model as a Service +============================== + +Deploying a Model +----------------- + +You can deploy a model as a real-time service that provides a real-time test UI and monitoring capabilities. After model training is complete, you can deploy a version with the ideal accuracy and in the **Successful** status as a service. The procedure is as follows: + +#. On the **Train Model** tab page, wait until the training status changes to **Successful**. Click **Deploy** in the **Version Manager** pane to deploy the model as a real-time service. + +#. In the **Deploy** dialog box, select resource flavor, set the **Auto Stop** function, and click **OK** to start the deployment. + + - **Specifications**: The GPU specifications are better, and the CPU specifications are more cost-effective. + - **Compute Nodes**: The default value is **1** and cannot be changed. + - **Auto Stop**: After this function is enabled and the auto stop time is set, a service automatically stops at the specified time. 
+ + The options are **1 hour later**, **2 hours later**, **4 hours later**, **6 hours later**, and **Custom**. If you select **Custom**, you can enter any integer from 1 to 24 hours in the text box on the right. + +#. After the model deployment is started, view the deployment status on the **Service Deployment** page. + + It takes a certain period of time to deploy a model. When the status in the **Version Manager** pane changes from **Deploying** to **Running**, the deployment is complete. + + .. note:: + + On the **ExeML** page, trained models can only be deployed as real-time services. For details about how to deploy them as batch services, see `Where Are Models Generated by ExeML Stored? What Other Operations Are Supported? <../../exeml/tips/where_are_models_generated_by_exeml_stored_what_other_operations_are_supported.html>`__ + +Testing a Service +----------------- + +- On the **Service Deployment** page, select a service type. For example, on the ExeML page, the object detection model is deployed as a real-time service by default. On the **Real-Time Services** page, click **Prediction** in the **Operation** column of the target service to perform a service test. For details, see "Testing a Service". +- You can also use code to test a service. For details, see "Accessing a Real-Time Service". +- The following describes the procedure for performing a service test after the object detection model is deployed as a service on the ExeML page. + + #. After the model is deployed, test the service using an image. On the **ExeML** page, click the target project, go to the **Deploy Service** tab page, select the service version in the **Running** status, click **Upload** in the service test area, and upload a local image to perform the test. + + #. Click **Predict** to perform the test. After the prediction is complete, the result is displayed in the **Test Result** pane on the right. If the model accuracy does not meet your expectation, add images on the **Label Data** tab page, label the images, and train and deploy the model again. `Table 1 <#modelarts210013enustopic0284258842enustopic0169446262table27971626122015>`__ describes the parameters in the prediction result. If you are satisfied with the model prediction result, call the API to access the real-time service as prompted. For details, see "Accessing a Real-Time Service". + + Currently, only JPG, JPEG, BMP, and PNG images are supported. + + + +.. _modelarts210013enustopic0284258842enustopic0169446262table27971626122015: + + .. 
table:: **Table 1** Parameters in the prediction result + + +-------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================+======================================================================================================================================================================================+ + | detection_classes | Label of each detection box | + +-------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | detection_boxes | Coordinates of four points (y_min, x_min, y_max, and x_max) of each detection box, as shown in `Figure 1 <#modelarts210013enustopic0284258842enustopic0169446262fig8987811133012>`__ | + +-------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | detection_scores | Confidence of each detection box | + +-------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + .. figure:: /_static/images/en-us_image_0000001157080853.png + :alt: **Figure 1** Illustration for coordinates of four points of a detection box + + + **Figure 1** Illustration for coordinates of four points of a detection box + + .. note:: + + A running real-time service keeps consuming resources. If you do not need to use the real-time service, click **Stop** in the **Version Manager** pane to stop the service. If you want to use the service again, click **Start**. + + If you enable the auto stop function, the service automatically stops after the specified time and no fee is generated. + + diff --git a/umn/source/exeml/object_detection/index.rst b/umn/source/exeml/object_detection/index.rst new file mode 100644 index 0000000..e76cb47 --- /dev/null +++ b/umn/source/exeml/object_detection/index.rst @@ -0,0 +1,12 @@ +================ +Object Detection +================ + +.. toctree:: + :maxdepth: 1 + + preparing_data + creating_a_project + labeling_data + training_a_model + deploying_a_model_as_a_service diff --git a/umn/source/exeml/object_detection/labeling_data.rst b/umn/source/exeml/object_detection/labeling_data.rst new file mode 100644 index 0000000..6175cb2 --- /dev/null +++ b/umn/source/exeml/object_detection/labeling_data.rst @@ -0,0 +1,61 @@ +Labeling Data +============= + +Before data labeling, consider how to design labels. The labels must correspond to the distinct characteristics of the detected images and are easy to identify (the detected object in an image is highly distinguished from the background). Each label specifies the expected recognition result of the detected images. After the label design is complete, prepare images based on the designed labels. It is recommended that the number of all images to be detected be greater than 100. If the labels of some images are similar, prepare more images. + +- During labeling, the variance of a class should be as small as possible. That is, the labeled objects of the same class should be as similar as possible. The labeled objects of different classes should be as different as possible. 
+- The contrast between the labeled objects and the image background should be as stark as possible. +- In object detection labeling, a target object must be entirely contained within a labeling box. If there are multiple objects in an image, do not relabel or miss any objects. + +Labeling Images +--------------- + +#. On the **Label Data** tab page, click the **Unlabeled** tab. All unlabeled images are displayed. Click an image to go to the labeling page. + +#. Left-click and drag the mouse to select the area where the target object is located. In the dialog box that is displayed, select the label color, enter the label name, and press **Enter**. After the labeling is complete, the status of the images changes to **Labeled**. + + More descriptions of data labeling are as follows: + + - You can click the arrow keys in the upper and lower parts of the image, or press the left and right arrow keys on the keyboard to select another image. Then, repeat the preceding operations to label the image. If an image contains more than one object, you can label all the objects. + - You can add multiple labels with different colors for an object detection ExeML project for easy identification. After selecting an object, select a new color and enter a new label name in the dialog box that is displayed to add a new label. + - In an ExeML project, object detection supports only rectangular labeling boxes. In the **Data Management** function, more types of labeling boxes are supported for object detection datasets. + - In the **Label Data** window, you can scroll the mouse to zoom in or zoom out on the image to quickly locate the object. + +#. After all images in the image directory are labeled, click **ExeML** in the upper left corner. In the dialog box that is displayed, click **OK** to save the labeling information. The **Label Data** page is displayed. On the **Labeled** tab page, you can view the labeled images or view the label names and quantity in the right pane. + +Synchronizing or Adding Images +------------------------------ + +On the **ExeML** page, click the project name. The **Label Data** tab page is displayed. When creating a project, you can add images from a local PC or synchronize image data from OBS. + +- **Add**: You can quickly add images on a local PC to ModelArts and synchronize the images to the OBS path specified during project creation. Click **Add**. In the dialog box that is displayed, click **Add Image** and add images. The total size of all images uploaded in one attempt cannot exceed 8 MB. The size of a single image cannot exceed 5 MB. +- **Synchronize Data Source**: You can upload images to the OBS directory specified during project creation and click **Synchronize Data Source** to quickly add the images in the OBS directory to ModelArts. +- **Delete**: You can delete images one by one, or select **Select Images on Current Page** to delete all images on the page. + + .. note:: + + The deleted images cannot be recovered. Exercise caution when performing this operation. + +Modifying Labeled Data +---------------------- + +After labeling data, you can modify labeled data on the **Labeled** tab page. + +- **Modifying based on images** + + On the dataset details page, click the **Labeled** tab, click the image to be modified. The labeling page is displayed. Modify the image information in the label information area on the right. 
+ + - Modifying a label: In the **Labeling** area, click the edit icon, enter the correct label name in the text box, and click the check mark to complete the modification. The label color cannot be modified. + + - Deleting a label: In the **Labeling** area, click the deletion icon to delete a label from the image. + + After the label is deleted, click the project name in the upper left corner of the page to exit the labeling page. The image will be returned to the **Unlabeled** tab page. + +- **Modifying based on labels** + + On the dataset details page, click the **Labeled** tab. The information about all labels is displayed on the right. + + - Modifying a label: Click the edit icon in the **Operation** column. In the dialog box that is displayed, enter the new label name and click **OK**. After the modification, the images that have been added with the label use the new label name. + - Deleting a label: Click the deletion icon in the **Operation** column. In the displayed dialog box, select the object to be deleted as prompted and click **OK**. + + diff --git a/umn/source/exeml/object_detection/preparing_data.rst b/umn/source/exeml/object_detection/preparing_data.rst new file mode 100644 index 0000000..7fcc11a --- /dev/null +++ b/umn/source/exeml/object_detection/preparing_data.rst @@ -0,0 +1,131 @@ +Preparing Data +============== + +Before using ModelArts ExeML to build a model, upload data to an OBS bucket. + +Uploading Data to OBS +--------------------- + +This operation uses the OBS console to upload data. For more information about how to create a bucket and upload files, see Creating a Bucket and Uploading an Object. + +Perform the following operations to import data to the dataset for model training and building. + +#. Log in to OBS Console and create a bucket. +#. Upload the local data to the OBS bucket. If you have a large amount of data, use OBS Browser+ to upload data or folders. The uploaded data must meet the dataset requirements of the ExeML project. + +Requirements on Datasets +------------------------ + +- The name of files in a dataset cannot contain Chinese characters, plus signs (+), spaces, or tabs. +- Ensure that no damaged image exists. The supported image formats include JPG, JPEG, BMP, and PNG. +- Do not store data of different projects in the same dataset. +- To ensure the prediction accuracy of models, the training samples must be similar to the actual application scenarios. +- To ensure the generalization capability of models, datasets should cover all possible scenarios. +- In an object detection dataset, if the coordinates of the bounding box exceed the boundaries of an image, the image cannot be identified as a labeled image. + +Requirements for Files Uploaded to OBS +-------------------------------------- + +- If you do not need to upload training data in advance, create an empty folder to store files generated in the future, for example, **/bucketName/data-cat**. +- If you need to upload images to be labeled in advance, create an empty folder and save the images in the folder. An example of the image directory structure is **/bucketName/data-cat/cat.jpg**. +- If you want to upload labeled images to the OBS bucket, upload them according to the following specifications: + + - The dataset for object detection requires storing labeled objects and their label files (in one-to-one relationship with the labeled objects) in the same directory. 
For example, if the name of the labeled object is **IMG_20180919_114745.jpg**, the name of the label file must be **IMG_20180919_114745.xml**. + + The label files for object detection must be in PASCAL VOC format. For details about the format, see `Table 1 <#modelarts210009enustopic0284258838enustopic0169446158table18220153119617>`__. + + Example of data files: + + .. code-block:: + + ├─ + │ IMG_20180919_114732.jpg + │ IMG_20180919_114732.xml + │ IMG_20180919_114745.jpg + │ IMG_20180919_114745.xml + │ IMG_20180919_114945.jpg + │ IMG_20180919_114945.xml + + - Images in JPG, JPEG, PNG, and BMP formats are supported. When uploading images on the ModelArts console, ensure that the size of an image does not exceed 5 MB and the total size of images to be uploaded in one attempt does not exceed 8 MB. If the data volume is large, use OBS Browser+ to upload images. + + - A label name can contain a maximum of 32 characters, including letters, digits, hyphens (-), and underscores (_). + +.. _modelarts210009enustopic0284258838enustopic0169446158table18220153119617: + + .. table:: **Table 1** PASCAL VOC format description + + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Mandatory | Description | + +=======================+=======================+==================================================================================================================================================================================================+ + | folder | Yes | Directory where the data source is located | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | filename | Yes | Name of the file to be labeled | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | size | Yes | Image pixel | + | | | | + | | | - **width**: image width. This parameter is mandatory. | + | | | - **height**: image height. This parameter is mandatory. | + | | | - **depth**: number of image channels. This parameter is mandatory. | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | segmented | Yes | Segmented or not | + +-----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | object | Yes | Object detection information. Multiple **object{}** functions are generated for multiple objects. | + | | | | + | | | - **name**: class of the labeled object. This parameter is mandatory. | + | | | - **pose**: shooting angle of the labeled object. This parameter is mandatory. | + | | | - **truncated**: whether the labeled object is truncated (**0** indicates that the object is not truncated). This parameter is mandatory. 
|
+   |                       |                       | - **occluded**: whether the labeled object is occluded (**0** indicates that the object is not occluded). This parameter is mandatory. |
+   |                       |                       | - **difficult**: whether the labeled object is difficult to identify (**0** indicates that the object is easy to identify). This parameter is mandatory. |
+   |                       |                       | - **confidence**: confidence score of the labeled object. The value range is 0 to 1. This parameter is optional. |
+   |                       |                       | - **bndbox**: bounding box type. This parameter is mandatory. For details about the possible values, see `Table 2 <#modelarts210009enustopic0284258838enustopic0169446158table102211311866>`__. |
+   +-----------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+
+
+.. _modelarts210009enustopic0284258838enustopic0169446158table102211311866:
+
+   .. table:: **Table 2** Description of bounding box types
+
+      +-----------------------+-----------------------+-------------------------------------------------------+
+      | type                  | Shape                 | Labeling Information                                  |
+      +=======================+=======================+=======================================================+
+      | bndbox                | Rectangle             | Coordinates of the upper left and lower right points  |
+      |                       |                       |                                                       |
+      |                       |                       | <xmin>100</xmin>                                      |
+      |                       |                       |                                                       |
+      |                       |                       | <ymin>100</ymin>                                      |
+      |                       |                       |                                                       |
+      |                       |                       | <xmax>200</xmax>                                      |
+      |                       |                       |                                                       |
+      |                       |                       | <ymax>200</ymax>                                      |
+      +-----------------------+-----------------------+-------------------------------------------------------+
+
+   Example of a label file in PASCAL VOC format:
+
+   .. code-block::
+
+      <annotation>
+         <folder>test_data</folder>
+         <filename>260730932.jpg</filename>
+         <size>
+            <width>767</width>
+            <height>959</height>
+            <depth>3</depth>
+         </size>
+         <segmented>0</segmented>
+         <object>
+            <name>bag</name>
+            <pose>Unspecified</pose>
+            <truncated>0</truncated>
+            <occluded>0</occluded>
+            <difficult>0</difficult>
+            <bndbox>
+               <xmin>108</xmin>
+               <ymin>101</ymin>
+               <xmax>251</xmax>
+               <ymax>238</ymax>
+            </bndbox>
+         </object>
+      </annotation>
+
+
diff --git a/umn/source/exeml/object_detection/training_a_model.rst b/umn/source/exeml/object_detection/training_a_model.rst
new file mode 100644
index 0000000..8de2c7d
--- /dev/null
+++ b/umn/source/exeml/object_detection/training_a_model.rst
@@ -0,0 +1,67 @@
+Training a Model
+================
+
+After labeling the images, perform auto training to obtain an appropriate model version.
+
+Procedure
+---------
+
+#. On the **ExeML** page, click the name of the project that is successfully created. The **Label Data** tab page is displayed.
+
+#. On the **Label Data** tab page, click **Train** in the upper right corner. In the displayed **Training Configuration** dialog box, set related parameters. `Table 1 <#modelarts210012enustopic0284258841enustopic0169446261table56110116164>`__ describes the parameters.
+
+.. _modelarts210012enustopic0284258841enustopic0169446261table56110116164:
+
+   .. 
table:: **Table 1** Parameter description + + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Parameter | Description | Default Value | + +=================================+=====================================================================================================================================================================================================================================================================================================================================================================================+=================================+ + | Dataset Version | This version is the one when the dataset is published in **Data Management**. In an ExeML project, when a training job is started, the dataset is published as a version based on the previous data labeling. | Randomly provided by the system | + | | | | + | | The system automatically provides a version number. You can change it to the version number that you want. | | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Training and Validation Ratios | The labeled sample is randomly divided into a training set and a validation set. By default, the ratio for the training set is 0.8, and that for the validation set is 0.2. The **usage** field in the manifest file records the set type. The value ranges from 0 to 1. | 0.8 | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Incremental Training Version | Select the version with the highest precision to perform training again. This accelerates model convergence and improves training precision. | None | + +---------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---------------------------------+ + | Max. Training Duration (Minute) | If training is not completed within the maximum training duration, the model is saved and training stops. To prevent the model from exiting before convergence, set this parameter to a large value. The value ranges from 6 to 6000. You are advised to properly extend the training duration. 
Set the training duration to more than 1 hour for a training set with 2,000 images. | 60 |
+   +---------------------------------+----------------------------------------------------------------------------------------------------------------+---------------------------------+
+   | Training Preference             | - **performance_first**: performance first. The training duration is short and the generated model is small.  | balance                         |
+   |                                 | - **balance**: balanced performance and precision                                                              |                                 |
+   |                                 | - **accuracy_first**: precision first. The training duration is long and the generated model is large.        |                                 |
+   +---------------------------------+----------------------------------------------------------------------------------------------------------------+---------------------------------+
+   | Instance Flavor                 | Select the resource specifications used for training. By default, the following specifications are supported: | **ExeML (GPU)**                 |
+   |                                 |                                                                                                                |                                 |
+   |                                 | - **Compute-intensive instance (GPU)**                                                                         |                                 |
+   |                                 |                                                                                                                |                                 |
+   |                                 | The compute flavors are for reference only. Obtain the flavors on the management console.                     |                                 |
+   +---------------------------------+----------------------------------------------------------------------------------------------------------------+---------------------------------+
+
+#. After configuring training parameters, click **Next** to go to the configuration page, confirm the specifications, and click **Submit** to start auto model training. The training takes a certain period of time. Wait until the training is complete. If you close or exit this page, the system still performs the training operation.
+
+#. On the **Train Model** tab page, wait until the training status changes from **Running** to **Completed**.
+
+#. View the training details, such as **Accuracy**, **Evaluation Result**, **Training Parameters**, and **Classification Statistics**. For details about the evaluation result parameters, see `Table 2 <#modelarts210012enustopic0284258841enustopic0169446261table15870125755817>`__.
+
+.. _modelarts210012enustopic0284258841enustopic0169446261table15870125755817:
+
+   .. table:: **Table 2** Evaluation result parameters
+
+      +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------+
+      | Parameter | Description                                                                                                                                     |
+      +===========+=================================================================================================================================================+
+      | Recall    | Fraction of correctly predicted samples over all samples that actually belong to a class. It shows the ability of a model to distinguish positive samples. 
| + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------+ + | Precision | Fraction of correctly predicted samples over all samples predicted as a class. It shows the ability of a model to distinguish negative samples. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------+ + | Accuracy | Fraction of correctly predicted samples over all samples. It shows the general ability of a model to recognize samples. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------+ + | F1 Score | Harmonic average of the precision and recall of a model. It is used to evaluate the quality of a model. A high F1 score indicates a good model. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------+ + +.. note:: + + An ExeML project supports multiple rounds of training, and each round generates a version. For example, the first training version is **V001 (**\ *xxx*\ **)**, and the next version is **V002 (**\ *xxx*\ **)**. The trained models can be managed by training version. After the trained model meets your requirements, deploy the model as a service. + + diff --git a/umn/source/exeml/tips/how_do_i_perform_incremental_training_in_an_exeml_project.rst b/umn/source/exeml/tips/how_do_i_perform_incremental_training_in_an_exeml_project.rst new file mode 100644 index 0000000..c6bfd1e --- /dev/null +++ b/umn/source/exeml/tips/how_do_i_perform_incremental_training_in_an_exeml_project.rst @@ -0,0 +1,25 @@ +How Do I Perform Incremental Training in an ExeML Project? +========================================================== + +Each round of training generates a training version in an ExeML project. If a training result is unsatisfactory (for example, if the precision is not good enough), you can add high-quality data or add or delete labels, and perform training again. + +.. note:: + + - For better training results, use high-quality data for incremental training to improve data labeling performance. + +Incremental Training Procedure +------------------------------ + +#. Log in to the ModelArts console, and click **ExeML** in the left navigation pane. + +#. On the **ExeML** page, click a project name. The ExeML details page of the project is displayed. + +#. On the **Label Data** page, click the **Unlabeled** tab. On the **Unlabeled** tab page, you can add images or add or delete labels. + + If you add images, label the added images again. If you add or delete labels, check all images and label them again. You also need to check whether new labels need to be added for the labeled data. + +#. After all images are labeled, click **Train** in the upper right corner. In the **Training Configuration** dialog box that is displayed, set **Incremental Training Version** to the training version that has been completed to perform incremental training based on this version. Set other parameters as prompted. + + After the settings are complete, click **Yes** to start incremental training. The system automatically switches to the **Train Model** page. After the training is complete, you can view the training details, such as training precision, evaluation result, and training parameters. 
+ + diff --git a/umn/source/exeml/tips/how_do_i_quickly_create_an_obs_bucket_and_a_folder_when_creating_a_project.rst b/umn/source/exeml/tips/how_do_i_quickly_create_an_obs_bucket_and_a_folder_when_creating_a_project.rst new file mode 100644 index 0000000..1b88df5 --- /dev/null +++ b/umn/source/exeml/tips/how_do_i_quickly_create_an_obs_bucket_and_a_folder_when_creating_a_project.rst @@ -0,0 +1,26 @@ +How Do I Quickly Create an OBS Bucket and a Folder When Creating a Project? +=========================================================================== + +When creating a project, select a training data path. This section describes how to quickly create an OBS bucket and folder when you select the training data path. + +#. On the page for creating an ExeML project, click |image1| on the right of **Input Dataset Path**. The **Input Dataset Path** dialog box is displayed. + +#. Click **Create Bucket**. The **Create Bucket** page is displayed. For details about how to create a bucket, see **Creating a Bucket** in the *Object Storage Service Console Operation Guide*. + + .. figure:: /_static/images/en-us_image_0000001157080895.png + :alt: **Figure 1** Creating an OBS bucket + + + **Figure 1** Creating an OBS bucket + +#. Select the bucket, and click **Create Folder**. In the dialog box that is displayed, enter the folder name and click **OK**. + + - The name cannot contain the following special characters: \\/:\*?"<>\| + - The name cannot start or end with a period (.) or slash (/). + - The absolute path of a folder cannot exceed 1,023 characters. + - Any single slash (/) separates and creates multiple levels of folders at once. + + + +.. |image1| image:: /_static/images/en-us_image_0000001157080897.png + diff --git a/umn/source/exeml/tips/how_do_i_view_the_added_data_in_an_exeml_project.rst b/umn/source/exeml/tips/how_do_i_view_the_added_data_in_an_exeml_project.rst new file mode 100644 index 0000000..dc99e17 --- /dev/null +++ b/umn/source/exeml/tips/how_do_i_view_the_added_data_in_an_exeml_project.rst @@ -0,0 +1,30 @@ +How Do I View the Added Data in an ExeML Project? +================================================= + +To add data for an existing project, perform the following operations. The operations described in this section apply only to object detection and image classification projects. + +Obtaining the Data Source of an ExeML Project +--------------------------------------------- + +#. Log in to the ModelArts management console and choose **ExeML** from the left navigation pane. +#. In the ExeML project list, you can view the data source corresponding to the project in the **Data Source** column. Click your desired data source link to go to the dataset selected or created during project creation. + +Uploading New Data to OBS +------------------------- + +Log in to OBS Console, access the data storage path, and upload new data to OBS. + +For details about how to upload files to OBS, see **Uploading an Object**. + +Synchronizing Data to ModelArts +------------------------------- + +#. After data is uploaded to OBS, go to the **ExeML** page on the ModelArts management console. + +#. In the ExeML project list, select the project to which data is to be added and click the project name. The **Label Data** page is displayed. + +#. On the **Label Data** page, click **Synchronize Data Source**. + + It takes several minutes to complete data synchronization. After the synchronization is complete, the new data is synchronized to the **Unlabeled** or **Labeled** tab page. 
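+
+If you prefer a command-line workflow over the OBS console, you can also upload the new images with the **obsutil** tool and then synchronize them as described above. The following is only a sketch: the bucket name, folder, and local path are placeholders, and the available options may vary with the obsutil version you use.
+
+.. code-block::
+
+   # Upload a local folder of new images to the OBS path used by the ExeML project (recursive upload, placeholder paths).
+   obsutil cp ./new-images obs://bucket-name/data-cat/ -r -f
+
+After the upload is complete, click **Synchronize Data Source** on the **Label Data** page so that the new images are added to the project.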
+ + diff --git a/umn/source/exeml/tips/index.rst b/umn/source/exeml/tips/index.rst new file mode 100644 index 0000000..0c03c14 --- /dev/null +++ b/umn/source/exeml/tips/index.rst @@ -0,0 +1,11 @@ +==== +Tips +==== + +.. toctree:: + :maxdepth: 1 + + how_do_i_quickly_create_an_obs_bucket_and_a_folder_when_creating_a_project + how_do_i_view_the_added_data_in_an_exeml_project + how_do_i_perform_incremental_training_in_an_exeml_project + where_are_models_generated_by_exeml_stored_what_other_operations_are_supported diff --git a/umn/source/exeml/tips/where_are_models_generated_by_exeml_stored_what_other_operations_are_supported.rst b/umn/source/exeml/tips/where_are_models_generated_by_exeml_stored_what_other_operations_are_supported.rst new file mode 100644 index 0000000..e0f39c9 --- /dev/null +++ b/umn/source/exeml/tips/where_are_models_generated_by_exeml_stored_what_other_operations_are_supported.rst @@ -0,0 +1,28 @@ +Where Are Models Generated by ExeML Stored? What Other Operations Are Supported? +================================================================================ + +Unified Model Management +------------------------ + +For an ExeML project, after the model training is complete, the generated model is automatically displayed on the **Model Management > Models** page. The model name is automatically generated by the system. Its prefix is the same as the name of the ExeML project for easy identification. + +.. caution:: + + Models generated by ExeML cannot be downloaded. + +What Other Operations Are Supported for Models Generated by ExeML? +------------------------------------------------------------------ + +- **Deploying models as real-time and batch services** + + On the **ExeML** page, models can only be deployed as real-time services. You can deploy models as batch services on the **Model Management > Models** page. + + It should be noted that resources with other specifications can be used when you create a model deployment task on the **Model Management > Models** page. On the ExeML project page, only **Compute-intensive 2 instance (NPU)** can be used to deploy models. + +- **Creating a version** + + When creating a new version, you can select a meta model only from a ModelArts training job, OBS, model template, or custom image. You cannot create a version from the original ExeML project. + +- **Deleting a model or its version** + + diff --git a/umn/source/faqs/data_management/index.rst b/umn/source/faqs/data_management/index.rst new file mode 100644 index 0000000..998738d --- /dev/null +++ b/umn/source/faqs/data_management/index.rst @@ -0,0 +1,8 @@ +=============== +Data Management +=============== + +.. toctree:: + :maxdepth: 1 + + why_does_data_fail_to_be_imported_using_the_manifest_file diff --git a/umn/source/faqs/data_management/why_does_data_fail_to_be_imported_using_the_manifest_file.rst b/umn/source/faqs/data_management/why_does_data_fail_to_be_imported_using_the_manifest_file.rst new file mode 100644 index 0000000..40d0bbd --- /dev/null +++ b/umn/source/faqs/data_management/why_does_data_fail_to_be_imported_using_the_manifest_file.rst @@ -0,0 +1,20 @@ +Why Does Data Fail to Be Imported Using the Manifest File? +========================================================== + +Symptom +------- + +Failed to use the manifest file of the published dataset to import data again. + +Possible Cause +-------------- + +Data has been changed in the OBS directory of the published dataset, for example, images have been deleted. 
Therefore, the manifest file is inconsistent with data in the OBS directory. As a result, an error occurs when the manifest file is used to import data again. + +Solution +-------- + +- Method 1 (recommended): Publish a new version of the dataset again and use the new manifest file to import data. +- Method 2: Modify the manifest file on your local PC, search for data changes in the OBS directory, and modify the manifest file accordingly. Ensure that the manifest file is consistent with data in the OBS directory, and then import data using the new manifest file. + + diff --git a/umn/source/faqs/development_environment/index.rst b/umn/source/faqs/development_environment/index.rst new file mode 100644 index 0000000..910a064 --- /dev/null +++ b/umn/source/faqs/development_environment/index.rst @@ -0,0 +1,8 @@ +======================= +Development Environment +======================= + +.. toctree:: + :maxdepth: 1 + + notebook/index diff --git a/umn/source/faqs/development_environment/notebook/do_files_in__cache_still_exist_after_a_notebook_instance_is_stopped_or_restarted_how_do_i_avoid_a_restart.rst b/umn/source/faqs/development_environment/notebook/do_files_in__cache_still_exist_after_a_notebook_instance_is_stopped_or_restarted_how_do_i_avoid_a_restart.rst new file mode 100644 index 0000000..7436342 --- /dev/null +++ b/umn/source/faqs/development_environment/notebook/do_files_in__cache_still_exist_after_a_notebook_instance_is_stopped_or_restarted_how_do_i_avoid_a_restart.rst @@ -0,0 +1,8 @@ +Do Files in /cache Still Exist After a Notebook Instance is Stopped or Restarted? How Do I Avoid a Restart? +=========================================================================================================== + +**/cache** is a temporary directory and will not be saved. After an instance using OBS storage is stopped, data in the **~work** directory will be deleted. After a notebook instance is restarted, all cached data except the data in the OBS bucket is lost, and your model or code is unavailable. + +To avoid a restart, do not train heavy-load jobs that consume large amounts of CPU, GPU, or memory resources in DevEnviron. + + diff --git a/umn/source/faqs/development_environment/notebook/how_do_i_enable_the_terminal_function_in_devenviron_of_modelarts.rst b/umn/source/faqs/development_environment/notebook/how_do_i_enable_the_terminal_function_in_devenviron_of_modelarts.rst new file mode 100644 index 0000000..1908e6b --- /dev/null +++ b/umn/source/faqs/development_environment/notebook/how_do_i_enable_the_terminal_function_in_devenviron_of_modelarts.rst @@ -0,0 +1,16 @@ +How Do I Enable the Terminal Function in DevEnviron of ModelArts? +================================================================= + +#. Log in to the ModelArts management console, and choose **DevEnviron > Notebooks**. + +#. In the notebook list, click **Open** in the **Operation** column of the target notebook instance to go to the **Jupyter** page. + +#. On the **Files** tab page of the Jupyter page, click **New** and select **Terminal**. The **Terminal** page is displayed. + + .. 
figure:: /_static/images/en-us_image_0000001110760910.png + :alt: **Figure 1** Going to the **Terminal** page + + + **Figure 1** Going to the **Terminal** page + + diff --git a/umn/source/faqs/development_environment/notebook/index.rst b/umn/source/faqs/development_environment/notebook/index.rst new file mode 100644 index 0000000..db8ca9f --- /dev/null +++ b/umn/source/faqs/development_environment/notebook/index.rst @@ -0,0 +1,11 @@ +======== +Notebook +======== + +.. toctree:: + :maxdepth: 1 + + how_do_i_enable_the_terminal_function_in_devenviron_of_modelarts + where_will_the_data_be_uploaded_to + do_files_in__cache_still_exist_after_a_notebook_instance_is_stopped_or_restarted_how_do_i_avoid_a_restart + where_is_data_stored_after_the_sync_obs_function_is_used diff --git a/umn/source/faqs/development_environment/notebook/where_is_data_stored_after_the_sync_obs_function_is_used.rst b/umn/source/faqs/development_environment/notebook/where_is_data_stored_after_the_sync_obs_function_is_used.rst new file mode 100644 index 0000000..e8f1493 --- /dev/null +++ b/umn/source/faqs/development_environment/notebook/where_is_data_stored_after_the_sync_obs_function_is_used.rst @@ -0,0 +1,20 @@ +Where Is Data Stored After the Sync OBS Function Is Used? +========================================================= + +#. Log in to the ModelArts management console, and choose **DevEnviron > Notebooks**. + +#. In the **Operation** column of the target notebook instance in the notebook list, click **Open** to go to the **Jupyter** page. + +#. On the **Files** tab page of the **Jupyter** page, select the target file and click **Sync OBS** in the upper part of the page to synchronize the file. The file is stored in the **~/work** directory of the instance. + +#. On the **Files** tab page of the **Jupyter** page, click **New** and select **Terminal**. The **Terminal** page is displayed. + +#. Run the following command to go to the **~/work** directory. + + .. code-block:: + + cd work + +#. Run the **ls** command in the **~/work** directory to view the files. + + diff --git a/umn/source/faqs/development_environment/notebook/where_will_the_data_be_uploaded_to.rst b/umn/source/faqs/development_environment/notebook/where_will_the_data_be_uploaded_to.rst new file mode 100644 index 0000000..ceeeefa --- /dev/null +++ b/umn/source/faqs/development_environment/notebook/where_will_the_data_be_uploaded_to.rst @@ -0,0 +1,14 @@ +Where Will the Data Be Uploaded to? +=================================== + +Data may be stored in OBS or EVS, depending on which kind of storage you have configured for your Notebook instances: + +- OBS + + After you click **upload**, the data is directly uploaded to the target OBS path specified when the notebook instance was created. + +- EVS + + After you click **upload**, the data is uploaded to the instance container, that is, the **~/work** directory on the **Terminal** page. + + diff --git a/umn/source/faqs/exeml/can_i_add_multiple_labels_to_an_image_for_an_object_detection_project.rst b/umn/source/faqs/exeml/can_i_add_multiple_labels_to_an_image_for_an_object_detection_project.rst new file mode 100644 index 0000000..864c09c --- /dev/null +++ b/umn/source/faqs/exeml/can_i_add_multiple_labels_to_an_image_for_an_object_detection_project.rst @@ -0,0 +1,6 @@ +Can I Add Multiple Labels to an Image for an Object Detection Project? +====================================================================== + +Yes. You can add multiple labels to an image. 
+ + diff --git a/umn/source/faqs/exeml/index.rst b/umn/source/faqs/exeml/index.rst new file mode 100644 index 0000000..9a174bb --- /dev/null +++ b/umn/source/faqs/exeml/index.rst @@ -0,0 +1,13 @@ +===== +ExeML +===== + +.. toctree:: + :maxdepth: 1 + + what_is_exeml + what_are_image_classification_and_object_detection + what_should_i_do_when_the_train_button_is_unavailable_after_i_create_an_image_classification_project_and_label_the_images + can_i_add_multiple_labels_to_an_image_for_an_object_detection_project + what_type_of_service_is_deployed_in_exeml + what_formats_of_images_are_supported_by_object_detection_or_image_classification_projects diff --git a/umn/source/faqs/exeml/what_are_image_classification_and_object_detection.rst b/umn/source/faqs/exeml/what_are_image_classification_and_object_detection.rst new file mode 100644 index 0000000..f0cda34 --- /dev/null +++ b/umn/source/faqs/exeml/what_are_image_classification_and_object_detection.rst @@ -0,0 +1,20 @@ +What Are Image Classification and Object Detection? +=================================================== + +Image classification is an image processing method that separates different classes of targets according to the features reflected in the images. With quantitative analysis on images, it classifies an image or each pixel or area in an image into different categories to replace human visual interpretation. In general, image classification aims to identify a class, status, or scene in an image. It is applicable to scenarios where an image contains only one object. `Figure 1 <#modelarts050018enustopic0000001096467407enustopic0285164820enustopic0147657895fig630464819155>`__ shows an example of identifying a car in an image. + +.. figure:: /_static/images/en-us_image_0000001156920931.png + :alt: **Figure 1** Image classification + + + **Figure 1** Image classification + +Object detection is one of the classical problems in computer vision. It intends to label objects with frames and identify the object classes in an image. Generally, if an image contains multiple objects, object detection can identify the location, quantity, and name of each object in the image. It is suitable for scenarios where an image contains multiple objects. `Figure 2 <#modelarts050018enustopic0000001096467407enustopic0285164820enustopic0147657895fig522176141613>`__ shows an example of identifying a tree and a car in an image. + +.. figure:: /_static/images/en-us_image_0000001110920962.png + :alt: **Figure 2** Object detection + + + **Figure 2** Object detection + + diff --git a/umn/source/faqs/exeml/what_formats_of_images_are_supported_by_object_detection_or_image_classification_projects.rst b/umn/source/faqs/exeml/what_formats_of_images_are_supported_by_object_detection_or_image_classification_projects.rst new file mode 100644 index 0000000..77ae6d1 --- /dev/null +++ b/umn/source/faqs/exeml/what_formats_of_images_are_supported_by_object_detection_or_image_classification_projects.rst @@ -0,0 +1,6 @@ +What Formats of Images Are Supported by Object Detection or Image Classification Projects? +========================================================================================== + +Images in JPG, JPEG, PNG, or BMP format are supported. + + diff --git a/umn/source/faqs/exeml/what_is_exeml.rst b/umn/source/faqs/exeml/what_is_exeml.rst new file mode 100644 index 0000000..c08086f --- /dev/null +++ b/umn/source/faqs/exeml/what_is_exeml.rst @@ -0,0 +1,8 @@ +What Is ExeML? 
+==============
+
+ExeML automates model design, parameter tuning, model training, model compression, and model deployment based on labeled data. The process is free of coding and does not require developers' experience in model development.
+
+Users who do not have coding experience can use the labeling, one-click model training, and model deployment functions of ExeML to build AI models.
+
+
diff --git a/umn/source/faqs/exeml/what_should_i_do_when_the_train_button_is_unavailable_after_i_create_an_image_classification_project_and_label_the_images.rst b/umn/source/faqs/exeml/what_should_i_do_when_the_train_button_is_unavailable_after_i_create_an_image_classification_project_and_label_the_images.rst
new file mode 100644
index 0000000..7c022a6
--- /dev/null
+++ b/umn/source/faqs/exeml/what_should_i_do_when_the_train_button_is_unavailable_after_i_create_an_image_classification_project_and_label_the_images.rst
@@ -0,0 +1,6 @@
+What Should I Do When the Train Button Is Unavailable After I Create an Image Classification Project and Label the Images?
+===========================================================================================================================
+
+The **Train** button becomes available only after the training images for an image classification project are classified into at least two categories and each category contains at least five images.
+
+
diff --git a/umn/source/faqs/exeml/what_type_of_service_is_deployed_in_exeml.rst b/umn/source/faqs/exeml/what_type_of_service_is_deployed_in_exeml.rst
new file mode 100644
index 0000000..0d2cc32
--- /dev/null
+++ b/umn/source/faqs/exeml/what_type_of_service_is_deployed_in_exeml.rst
@@ -0,0 +1,8 @@
+What Type of Service Is Deployed in ExeML?
+==========================================
+
+Models created in ExeML are deployed as real-time services. You can add images or write code to test the services, and you can call the APIs using the service URLs.
+
+After model development is successful, you can choose **Service Deployment** > **Real-Time Services** in the left navigation pane of the ModelArts console to view running services, and stop or delete services.
+
+
diff --git a/umn/source/faqs/general_issues/how_do_i_obtain_access_keys.rst b/umn/source/faqs/general_issues/how_do_i_obtain_access_keys.rst
new file mode 100644
index 0000000..9721b16
--- /dev/null
+++ b/umn/source/faqs/general_issues/how_do_i_obtain_access_keys.rst
@@ -0,0 +1,11 @@
+How Do I Obtain Access Keys?
+============================
+
+Obtaining an Access Key
+-----------------------
+
+#. Log in to the console, go to the **My Credentials** page, and choose **Access Keys** > **Create Access Key**.
+#. In the **Create Access Key** dialog box that is displayed, enter the login password for verification.
+#. Click **OK** and save the key file as prompted. The **credentials.csv** key file is saved to the default download folder of the browser and contains the access key (**Access Key Id** and **Secret Access Key**).
+
+
diff --git a/umn/source/faqs/general_issues/how_do_i_upload_data_to_obs.rst b/umn/source/faqs/general_issues/how_do_i_upload_data_to_obs.rst
new file mode 100644
index 0000000..e931fae
--- /dev/null
+++ b/umn/source/faqs/general_issues/how_do_i_upload_data_to_obs.rst
@@ -0,0 +1,6 @@
+How Do I Upload Data to OBS?
+============================
+
+Before using ModelArts to develop AI models, data needs to be uploaded to an OBS bucket.
You can log in to the OBS console to create an OBS bucket, create a folder, and upload data. For details about how to upload data, see *Object Storage Service Getting Started*. + + diff --git a/umn/source/faqs/general_issues/index.rst b/umn/source/faqs/general_issues/index.rst new file mode 100644 index 0000000..8e318ad --- /dev/null +++ b/umn/source/faqs/general_issues/index.rst @@ -0,0 +1,12 @@ +============== +General Issues +============== + +.. toctree:: + :maxdepth: 1 + + what_is_modelarts + what_are_the_relationships_between_modelarts_and_other_services + how_do_i_obtain_access_keys + how_do_i_upload_data_to_obs + which_ai_frameworks_does_modelarts_support diff --git a/umn/source/faqs/general_issues/what_are_the_relationships_between_modelarts_and_other_services.rst b/umn/source/faqs/general_issues/what_are_the_relationships_between_modelarts_and_other_services.rst new file mode 100644 index 0000000..127dcfc --- /dev/null +++ b/umn/source/faqs/general_issues/what_are_the_relationships_between_modelarts_and_other_services.rst @@ -0,0 +1,24 @@ +What Are the Relationships Between ModelArts and Other Services +=============================================================== + +OBS +--- + +ModelArts uses Object Storage Service (OBS) to store data and model backups and snapshots. OBS provides secure, reliable, low-cost storage. For more details, see *Object Storage Service Console Function Overview*. + +CCE +--- + +ModelArts uses Cloud Container Engine (CCE) to deploy models as real-time services. CCE enables high concurrency and provides elastic scaling. For more information about CCE, see *Cloud Container Engine User Guide*. + +SWR +--- + +To use an AI framework that is not supported by ModelArts, use SoftWare Repository for Container (SWR) to customize an image and import the image to ModelArts for training or inference. For more details, see . + +Cloud Eye +--------- + +ModelArts uses Cloud Eye to monitor online services and model loads in real time and send alarms and notifications automatically. For details about Cloud Eye, see *Cloud Eye User Guide*. + + diff --git a/umn/source/faqs/general_issues/what_is_modelarts.rst b/umn/source/faqs/general_issues/what_is_modelarts.rst new file mode 100644 index 0000000..8c40860 --- /dev/null +++ b/umn/source/faqs/general_issues/what_is_modelarts.rst @@ -0,0 +1,10 @@ +What Is ModelArts? +================== + +ModelArts is a one-stop development platform for AI developers. With data preprocessing, semi-automated data labeling, distributed training, automated model building, and model deployment, ModelArts helps you build models quickly and manage the lifecycle of AI development. + +The one-stop ModelArts platform covers all stages of AI development, including data processing and model training and deployment. The underlying layer of ModelArts supports various heterogeneous computing resources. You can flexibly select and use the resources without having to consider the underlying technologies. In addition, ModelArts supports popular open-source AI development frameworks such as TensorFlow. Developers can also use self-developed algorithm frameworks to match their usage habits. + +ModelArts aims to simplify AI development. 
+ + diff --git a/umn/source/faqs/general_issues/which_ai_frameworks_does_modelarts_support.rst b/umn/source/faqs/general_issues/which_ai_frameworks_does_modelarts_support.rst new file mode 100644 index 0000000..9bd18a8 --- /dev/null +++ b/umn/source/faqs/general_issues/which_ai_frameworks_does_modelarts_support.rst @@ -0,0 +1,142 @@ +Which AI Frameworks Does ModelArts Support? +=========================================== + +Supported AI frameworks and versions of ModelArts vary slightly based on the development environment, training jobs, and model inference (model management and deployment). The following describes the AI frameworks supported by each module. + +Development Environment +----------------------- + +Notebook instances in the development environment support different AI engines and versions based on specific work environments (that is, different Python versions). After creating a notebook instance in the corresponding work environment, create a file based on the corresponding version in `Table 1 <#modelarts050128enustopic0246510446table4362414101>`__. ModelArts notebook instances support multiple engines. That is, a notebook instance can use all supported engines. Different engines can be switched quickly and conveniently. + + + +.. _modelarts050128enustopic0246510446table4362414101: + +.. table:: **Table 1** AI engines + + +------------------------------------------+--------------------------------+----------------+ + | Work Environment | Built-in AI Engine and Version | Supported Chip | + +==========================================+================================+================+ + | Multi-Engine 1.0 (Python 3, Recommended) | MXNet-1.2.1 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | PySpark-2.3.2 | CPU | + +------------------------------------------+--------------------------------+----------------+ + | | Pytorch-1.0.0 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | TensorFlow-1.13.1 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | XGBoost-Sklearn | CPU | + +------------------------------------------+--------------------------------+----------------+ + | Multi-Engine 2.0 (Python3) | Pytorch-1.4.0 | GPU | + +------------------------------------------+--------------------------------+----------------+ + | | TensorFlow-2.1.0 | CPU/GPU | + +------------------------------------------+--------------------------------+----------------+ + | Ascend-Powered-Engine 1.0 (Python3) | MindSpore-1.1.1 | Ascend 910 | + +------------------------------------------+--------------------------------+----------------+ + | | TensorFlow-1.15.0 | Ascend 910 | + +------------------------------------------+--------------------------------+----------------+ + +Training Jobs +------------- + +Supported AI engines and versions when creating training jobs are as follows: + + + +.. _modelarts050128enustopic0246510446table97515527121: + +.. 
table:: **Table 2** AI engines supported by training jobs + + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Environment | Supported Chip | System Architecture | System Version | AI Engine and Version | Supported CUDA or Ascend Version | + +=======================+================+=====================+================+===================================+==================================+ + | TensorFlow | CPU and GPU | x86_64 | Ubuntu 16.04 | TF-1.13.1-python3.6 | CUDA 10.0 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | | | | | TF-2.1.0-python3.6 | CUDA 10.1 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Caffe | CPU and GPU | x86_64 | Ubuntu 16.04 | Caffe-1.0.0-python2.7 | CUDA 8.0 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Spark_MLlib | CPU | x86_64 | Ubuntu 16.04 | Spark-2.3.2-python3.6 | N/A | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | XGBoost-Sklearn | CPU | x86_64 | Ubuntu 16.04 | Scikit_Learn-0.18.1-python3.6 | N/A | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | PyTorch | CPU and GPU | x86_64 | Ubuntu 16.04 | PyTorch-1.3.0-python3.6 | CUDA 10.0 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | | | | | PyTorch-1.4.0-python3.6 | CUDA 10.1 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Ascend-Powered-Engine | Ascend 910 | AArch64 | EulerOS 2.8 | Mindspore-1.1.1-python3.7-aarch64 | C76 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | | | | | TF-1.15-python3.7-aarch64 | C76 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | MindSpore-GPU | CPU and GPU | x86_64 | Ubuntu 18.04 | MindSpore-1.1.0-python3.7 | CUDA 10.1 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + +Model Inference +--------------- + +For imported models and model inference is completed on ModelArts, supported engines and their runtime are as follows: + + + +.. _modelarts050128enustopic0246510446table195551745191318: + +.. 
table:: **Table 3** Supported AI engines and their runtime + + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Engine | Runtime | Precautions | + +=======================+=========================+============================================================================================================================================================================================================================================================================================+ + | TensorFlow | python3.6 | - TensorFlow 1.8.0 is used in **python2.7** and **python3.6**. | + | | | - **python3.6**, **python2.7**, and **tf2.1-python3.7** indicate that the model can run on both CPUs and GPUs. For other runtime values, if the suffix contains **cpu** or **gpu**, the model can run only on CPUs or GPUs. | + | | python2.7 | - The default runtime is **python2.7**. | + | | | | + | | tf1.13-python2.7-gpu | | + | | | | + | | tf1.13-python2.7-cpu | | + | | | | + | | tf1.13-python3.6-gpu | | + | | | | + | | tf1.13-python3.6-cpu | | + | | | | + | | tf1.13-python3.7-cpu | | + | | | | + | | tf1.13-python3.7-gpu | | + | | | | + | | tf2.1-python3.7 | | + | | | | + | | tf1.15-aarch64-c76-d910 | | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | MXNet | python3.7 | - MXNet 1.2.1 is used in **python3.6** and **python3.7**. | + | | | - **python3.6** and **python3.7** indicate that the model can run on both CPUs and GPUs. | + | | python3.6 | - The default runtime is **python3.6**. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Caffe | python3.6 | - Caffe 1.0.0 is used in **python3.6**, **python3.7**, **python3.6-gpu**, **python3.7-gpu**, **python3.6-cpu**, and **python3.7-cpu**. | + | | | - **python 3.6** and **python3.7** can only be used to run models on CPUs. For other runtime values, if the suffix contains **cpu** or **gpu**, the model can run only on CPUs or GPUs. Use the runtime of **python3.6-gpu**, **python3.7-gpu**, **python3.6-cpu**, or **python3.7-cpu**. | + | | python3.7 | - The default runtime is **python3.6**. | + | | | | + | | python3.6-gpu | | + | | | | + | | python3.7-gpu | | + | | | | + | | python3.6-cpu | | + | | | | + | | python3.7-cpu | | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Spark_MLlib | python3.6 | - Spark_MLlib 2.3.2 is used in **python3.6**. | + | | | - **python 3.6** can only be used to run models on CPUs. 
| + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Scikit_Learn | python3.6 | - Scikit_Learn 0.18.1 is used in **python3.6**. | + | | | - **python 3.6** can only be used to run models on CPUs. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | XGBoost | python3.6 | - XGBoost 0.80 is used in **python3.6**. | + | | | - **python 3.6** can only be used to run models on CPUs. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | PyTorch | python3.6 | - PyTorch 1.0 is used in **python3.6** and **python3.7**. | + | | | - **python3.6**, **python3.7**, and **pytorch1.4-python3.7** indicate that the model can run on both CPUs and GPUs. | + | | python3.7 | - The default runtime is **python3.6**. | + | | | | + | | pytorch1.4-python3.7 | | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | MindSpore | ms1.1-python3.7-c76 | MindSpore 1.1.1 is used. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/faqs/index.rst b/umn/source/faqs/index.rst new file mode 100644 index 0000000..c0b5cce --- /dev/null +++ b/umn/source/faqs/index.rst @@ -0,0 +1,14 @@ +==== +FAQs +==== + +.. toctree:: + :maxdepth: 1 + + general_issues/index + exeml/index + data_management/index + development_environment/index + training_jobs/index + model_management/index + service_deployment/index diff --git a/umn/source/faqs/model_management/how_do_i_import_a_model_downloaded_from_obs_to_modelarts.rst b/umn/source/faqs/model_management/how_do_i_import_a_model_downloaded_from_obs_to_modelarts.rst new file mode 100644 index 0000000..a371fa3 --- /dev/null +++ b/umn/source/faqs/model_management/how_do_i_import_a_model_downloaded_from_obs_to_modelarts.rst @@ -0,0 +1,8 @@ +How Do I Import a Model Downloaded from OBS to ModelArts? +========================================================= + +ModelArts allows you to upload local models to OBS or import models stored in OBS directly into ModelArts. + +For details about how to import a model from OBS, see Importing a Meta Model from OBS. 
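+
+For illustration only (this is not part of the official import procedure), the following minimal sketch shows one way to copy a locally saved model package to OBS from a ModelArts notebook instance before importing it. It assumes the MoXing library available in ModelArts notebook instances; the local directory, bucket name, and folder are placeholders that you must replace with your own paths.
+
+.. code-block::
+
+   import moxing as mox
+
+   # Placeholder paths; replace with your own local model directory and OBS folder.
+   local_model_dir = '/home/ma-user/work/model'
+   obs_model_dir = 'obs://my-bucket/models/my-model/'
+
+   # Copy the model package (model file, config.json, and customize_service.py,
+   # if any) to OBS so that it can be selected when importing a meta model from OBS.
+   mox.file.copy_parallel(local_model_dir, obs_model_dir)
+
+   # Confirm that the files are visible in OBS before starting the import.
+   print(mox.file.exists(obs_model_dir + 'config.json'))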
+ + diff --git a/umn/source/faqs/model_management/index.rst b/umn/source/faqs/model_management/index.rst new file mode 100644 index 0000000..3b1b6e0 --- /dev/null +++ b/umn/source/faqs/model_management/index.rst @@ -0,0 +1,8 @@ +================ +Model Management +================ + +.. toctree:: + :maxdepth: 1 + + how_do_i_import_a_model_downloaded_from_obs_to_modelarts diff --git a/umn/source/faqs/service_deployment/how_do_i_describe_the_dependencies_between_installation_packages_and_model_configuration_files_when_a_model_is_imported.rst b/umn/source/faqs/service_deployment/how_do_i_describe_the_dependencies_between_installation_packages_and_model_configuration_files_when_a_model_is_imported.rst new file mode 100644 index 0000000..dfa43c7 --- /dev/null +++ b/umn/source/faqs/service_deployment/how_do_i_describe_the_dependencies_between_installation_packages_and_model_configuration_files_when_a_model_is_imported.rst @@ -0,0 +1,61 @@ +How Do I Describe the Dependencies Between Installation Packages and Model Configuration Files When a Model Is Imported? +======================================================================================================================== + +Symptom +------- + +When importing a model from OBS or a container image, compile a model configuration file. The model configuration file describes the model usage, computing framework, precision, inference code dependency package, and model API. The configuration file must be in JSON format. In the configuration file, **dependencies** indicates the packages that the model inference code depends on. Model developers need to provide the package name, installation mode, and version constraints. For details about the parameters, see . The dependency structure array needs to be set for the **dependencies** parameter. + +Solution +-------- + +When the installation package has dependency relationships, the **dependencies** parameter in the model configuration file supports multiple **dependency** structure arrays, which are entered in list format. + +The dependencies in list format must be installed in sequence. For example, install **Cython**, **pytest-runner**, and **pytest** before installing **mmcv-full**. When entering the installation packages in list format in the configuration file, write **Cython**, **pytest-runner**, and **pytest** in front of the **mmcv-full** structure array. + +Example: + +.. code-block:: + + "dependencies": [ + { + "installer": "pip", + "packages": [ + { + "package_name": "Cython" + }, + { + "package_name": "pytest-runner" + }, + { + "package_name": "pytest" + }] + }, + + { + "installer": "pip", + "packages": [ + { + "restraint": "ATLEAST", + "package_version": "5.0.0", + "package_name": "Pillow" + }, + { + "restraint": "ATLEAST", + "package_version": "1.4.0", + "package_name": "torch" + }, + { + "restraint": "ATLEAST", + "package_version": "1.19.1", + "package_name": "numpy" + }, + { + "restraint": "ATLEAST", + "package_version": "1.2.0", + "package_name": "mmcv-full" + }] + } + ] + + diff --git a/umn/source/faqs/service_deployment/index.rst b/umn/source/faqs/service_deployment/index.rst new file mode 100644 index 0000000..67130c4 --- /dev/null +++ b/umn/source/faqs/service_deployment/index.rst @@ -0,0 +1,10 @@ +================== +Service Deployment +================== + +.. 
toctree:: + :maxdepth: 1 + + what_types_of_services_can_models_be_deployed_as_on_modelarts + what_should_i_do_if_a_conflict_occurs_when_deploying_a_model_as_a_real-time_service + how_do_i_describe_the_dependencies_between_installation_packages_and_model_configuration_files_when_a_model_is_imported diff --git a/umn/source/faqs/service_deployment/what_should_i_do_if_a_conflict_occurs_when_deploying_a_model_as_a_real-time_service.rst b/umn/source/faqs/service_deployment/what_should_i_do_if_a_conflict_occurs_when_deploying_a_model_as_a_real-time_service.rst new file mode 100644 index 0000000..27fc090 --- /dev/null +++ b/umn/source/faqs/service_deployment/what_should_i_do_if_a_conflict_occurs_when_deploying_a_model_as_a_real-time_service.rst @@ -0,0 +1,8 @@ +What Should I Do If a Conflict Occurs When Deploying a Model As a Real-Time Service? +==================================================================================== + +Before importing a model, you need to place the corresponding inference code and configuration file in the model folder. When coding in Python, you are advised to use a relative import (Python import) to import custom packages. + +If the relative import mode is not used, a conflict will occur once a package with the same name exists in a real-time service. As a result, model deployment or prediction fails. + + diff --git a/umn/source/faqs/service_deployment/what_types_of_services_can_models_be_deployed_as_on_modelarts.rst b/umn/source/faqs/service_deployment/what_types_of_services_can_models_be_deployed_as_on_modelarts.rst new file mode 100644 index 0000000..ee4cb63 --- /dev/null +++ b/umn/source/faqs/service_deployment/what_types_of_services_can_models_be_deployed_as_on_modelarts.rst @@ -0,0 +1,6 @@ +What Types of Services Can Models Be Deployed as on ModelArts? +============================================================== + +Currently, models can only be deployed as real-time services and batch services. + + diff --git a/umn/source/faqs/training_jobs/error_message_no_such_file_or_directory_displayed_in_training_job_logs.rst b/umn/source/faqs/training_jobs/error_message_no_such_file_or_directory_displayed_in_training_job_logs.rst new file mode 100644 index 0000000..3f7aae1 --- /dev/null +++ b/umn/source/faqs/training_jobs/error_message_no_such_file_or_directory_displayed_in_training_job_logs.rst @@ -0,0 +1,42 @@ +Error Message "No such file or directory" Displayed in Training Job Logs +======================================================================== + +Issue Analysis +-------------- + +When you use ModelArts, your data is stored in an OBS bucket. The data has a corresponding OBS path, for example, **bucket_name/dir/image.jpg**. ModelArts training jobs run in containers, and if they need to access OBS data, they need to know what path to access it from. If ModelArts cannot find the configured path, it is possible that the selected data storage path was configured incorrectly when the training job was created or that the OBS path in the code file is incorrect. + +Solution +-------- + +#. Confirm that the OBS path in the log exists. + + Locate the incorrect OBS path in the log, for example, **obs-test/ModelArts/examples/**. There are two methods to check whether it exists. + + - On the OBS console, check whether the OBS path exists. + + Log in to the OBS console using the current account, and check whether the OBS buckets, folders, and files exist in the OBS path displayed in the log.
For example, you can confirm that a given bucket exists and then check whether that bucket contains the folder you are looking for, based on the configured path. + + - If the file path exists, go to `2 <#modelarts050032enustopic0000001096606439enustopic0285164857enustopic0166743701li77081222112915>`__. + - If it does not exist, change the path configured for the training job to an OBS bucket path that exists. + + - Create a notebook instance, and use an API to check whether the directory exists. In an existing notebook instance or after creating a new notebook instance, run the following command to check whether the directory exists: + + .. code-block:: + + import moxing as mox + mox.file.exists('obs://obs-test/ModelArts/examples/') + + - If it exists, go to `2 <#modelarts050032enustopic0000001096606439enustopic0285164857enustopic0166743701li77081222112915>`__. + - If it does not exist, change it to an available OBS bucket path in the training job. + +#. After confirming that the path exists, check whether OBS and ModelArts are in the same region and whether the OBS bucket belongs to another account. + + Log in to the ModelArts console and view the region where ModelArts resides. Log in to the OBS console and view the region where the OBS bucket resides. Check whether they reside in the same region and whether the OBS bucket belongs to another account. + + - If they are in the same region and the OBS bucket does not belong to another account, go to `3 <#modelarts050032enustopic0000001096606439enustopic0285164857enustopic0166743701li166204369185>`__. + - If they are not in the same region or the OBS bucket belongs to another account, create a bucket and a folder in OBS that is in the same region as ModelArts using the same account, and upload data to the bucket. + +#. In the script of the training job, check whether the API for reading the OBS path in the code file is correct. + + diff --git a/umn/source/faqs/training_jobs/how_do_i_create_a_training_job_when_a_dependency_package_is_referenced_in_a_model.rst b/umn/source/faqs/training_jobs/how_do_i_create_a_training_job_when_a_dependency_package_is_referenced_in_a_model.rst new file mode 100644 index 0000000..b0fbe72 --- /dev/null +++ b/umn/source/faqs/training_jobs/how_do_i_create_a_training_job_when_a_dependency_package_is_referenced_in_a_model.rst @@ -0,0 +1,50 @@ +How Do I Create a Training Job When a Dependency Package Is Referenced in a Model? +================================================================================== + +When a model references a dependency package, select a frequently-used framework to create training jobs. In addition, place the required file or installation package in the code directory. The requirements vary based on the dependency package that you use. + +- **Open-source installation package** + + .. note:: + + Installing a package from GitHub source code is not supported. + + Create a file named **pip-requirements.txt** in the code directory. In this file, specify the name and version of the dependency package in the format of *Package name*\ **==**\ *Version*. + + For example, the OBS path specified by **Code Directory** contains model files and the **pip-requirements.txt** file. The following shows the code directory structure: + + ..
code-block:: + + |---OBS path to the model boot file + |---model.py #Model boot file + |---pip-requirements.txt #Customized configuration file, which specifies the name and version of the dependency package + + The following shows the content of the **pip-requirements.txt** file: + + .. code-block:: + + alembic==0.8.6 + bleach==1.4.3 + click==6.6 + +- **Customized WHL file** + + When you use a customized .whl file, the system cannot automatically download and install the file. Place the .whl file in the code directory, create a file named **pip-requirements.txt**, and specify the name of the .whl file in the created file. The dependency package must be a .whl file. + + For example, the OBS path specified by **Code Directory** contains model files, .whl file, and **pip-requirements.txt** file. The following shows the code directory structure: + + .. code-block:: + + |---OBS path to the model boot file + |---model.py #Model boot file + |---XXX.whl #Dependency package. If multiple dependencies are required, place all of them here. + |---pip-requirements.txt #Customized configuration file, which specifies the name of the dependency package + + The following shows the content of the **pip-requirements.txt** file: + + .. code-block:: + + numpy-1.15.4-cp36-cp36m-manylinux1_x86_64.whl + tensorflow-1.8.0-cp36-cp36m-manylinux1_x86_64.whl + + diff --git a/umn/source/faqs/training_jobs/index.rst b/umn/source/faqs/training_jobs/index.rst new file mode 100644 index 0000000..6e8a448 --- /dev/null +++ b/umn/source/faqs/training_jobs/index.rst @@ -0,0 +1,11 @@ +============= +Training Jobs +============= + +.. toctree:: + :maxdepth: 1 + + what_can_i_do_if_the_message_object_directory_size_quantity_exceeds_the_limit_is_displayed_when_i_create_a_training_job + error_message_no_such_file_or_directory_displayed_in_training_job_logs + how_do_i_create_a_training_job_when_a_dependency_package_is_referenced_in_a_model + what_are_sizes_of_the__cache_directories_for_different_resource_specifications_in_the_training_environment diff --git a/umn/source/faqs/training_jobs/what_are_sizes_of_the__cache_directories_for_different_resource_specifications_in_the_training_environment.rst b/umn/source/faqs/training_jobs/what_are_sizes_of_the__cache_directories_for_different_resource_specifications_in_the_training_environment.rst new file mode 100644 index 0000000..14b50b2 --- /dev/null +++ b/umn/source/faqs/training_jobs/what_are_sizes_of_the__cache_directories_for_different_resource_specifications_in_the_training_environment.rst @@ -0,0 +1,47 @@ +What Are Sizes of the /cache Directories for Different Resource Specifications in the Training Environment? +=========================================================================================================== + +When creating a training job, you can select CPU, GPU, or Ascend resources based on the size of the training job. + +ModelArts mounts the disk to the **/cache** directory. You can use this directory to store temporary files. The **/cache** directory shares resources with the code directory. The directory has different capacities for different resource specifications. + +- GPU resources + +.. _modelarts050090enustopic0000001096855431enustopic0200343601table9533182215915: + + .. 
table:: **Table 1** Capacities of the cache directories for GPU resources + + ================== ======================== + GPU Specifications cache Directory Capacity + ================== ======================== + V100 800G + 8*V100 3T + P100 800G + ================== ======================== + +- CPU resources + +.. _modelarts050090enustopic0000001096855431enustopic0200343601table2764175317167: + + .. table:: **Table 2** Capacities of the cache directories for CPU resources + + ================== ======================== + CPU Specifications cache Directory Capacity + ================== ======================== + 2 vCPUs \| 8 GiB 50G + 8 vCPUs \| 32 GiB 50G + ================== ======================== + +- Ascend resources + +.. _modelarts050090enustopic0000001096855431enustopic0200343601table1652218538206: + + .. table:: **Table 3** Capacities of the cache directories for Ascend resources + + ===================== ======================== + Ascend Specifications cache Directory Capacity + ===================== ======================== + Ascend 910 3T + ===================== ======================== + + diff --git a/umn/source/faqs/training_jobs/what_can_i_do_if_the_message_object_directory_size_quantity_exceeds_the_limit_is_displayed_when_i_create_a_training_job.rst b/umn/source/faqs/training_jobs/what_can_i_do_if_the_message_object_directory_size_quantity_exceeds_the_limit_is_displayed_when_i_create_a_training_job.rst new file mode 100644 index 0000000..dd63331 --- /dev/null +++ b/umn/source/faqs/training_jobs/what_can_i_do_if_the_message_object_directory_size_quantity_exceeds_the_limit_is_displayed_when_i_create_a_training_job.rst @@ -0,0 +1,14 @@ +What Can I Do If the Message "Object directory size/quantity exceeds the limit" Is Displayed When I Create a Training Job? +========================================================================================================================== + +Issue Analysis +-------------- + +The code directory for creating a training job has limits on the size and number of files. + +Solution +-------- + +Delete the files except the code from the code directory or save the files in other directories. Ensure that the size of the code directory does not exceed 128 MB and the number of files does not exceed 4,096. + + diff --git a/umn/source/index.rst b/umn/source/index.rst new file mode 100644 index 0000000..cb29d52 --- /dev/null +++ b/umn/source/index.rst @@ -0,0 +1,24 @@ +========== +Main Index +========== + +.. toctree:: + :maxdepth: 1 + + service_overview/index + preparations/index + exeml/index + data_management/index + devenviron_(notebook)/index + training_management/index + model_management/index + model_deployment/index + resource_pools + custom_images/index + model_package_specifications/index + model_templates/index + examples_of_custom_scripts/index + permissions_management/index + monitoring/index + faqs/index + change_history diff --git a/umn/source/model_deployment/batch_services/deploying_a_model_as_a_batch_service.rst b/umn/source/model_deployment/batch_services/deploying_a_model_as_a_batch_service.rst new file mode 100644 index 0000000..c7561e1 --- /dev/null +++ b/umn/source/model_deployment/batch_services/deploying_a_model_as_a_batch_service.rst @@ -0,0 +1,244 @@ +Deploying a Model as a Batch Service +==================================== + +After a model is prepared, you can deploy it as a batch service. The **Service Deployment > Batch Services** page lists all batch services. 
You can enter a service name in the search box in the upper right corner and click |image1| to query the service. + +Prerequisites +------------- + +- Data has been prepared. Specifically, you have created a model in the **Normal** state in ModelArts. +- Data to be batch processed is ready and has been upload to an OBS directory. +- At least one empty folder has been created on OBS for storing the output. + +Background +---------- + +- A maximum of 1,000 batch services can be created. +- Based on the input request (JSON or other file) defined by the model, different parameter are entered. If the model input is a JSON file, a configuration file is required to generate a mapping file. If the model input is other file, no mapping file is required. +- Batch services can only be deployed in a public resource pool, but not a dedicated resource pool. + +Procedure +--------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Service Deployment** > **Batch Services**. By default, the **Batch Services** page is displayed. + +#. In the batch service list, click **Deploy** in the upper left corner. The **Deploy** page is displayed. + +#. Set parameters for a batch service. + + a. Set the basic information, including **Name** and **Description**. The name is generated by default, for example, **service-bc0d**. You can specify **Name** and **Description** according to actual requirements. + + b. Set other parameters, including model configurations. For details, see `Table 1 <#modelarts230066enustopic0171858292table1029041641314>`__. + +.. _modelarts230066enustopic0171858292table1029041641314: + + .. table:: **Table 1** Parameters + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+======================================================================================================================================================================================================================================================================================================================+ + | Model and Version | Select the model and version that are in the **Normal** state. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Input Path | Select the OBS directory where the data is to be uploaded. Select a folder or a **.manifest** file. For details about the specifications of the **.manifest** file, see `Manifest File Specifications <#manifest-file-specifications>`__. | + | | | + | | .. note:: | + | | | + | | - If the input data is an image, ensure that the size of a single image is less than 10 MB. | + | | - If the input data is in CSV format, ensure that no Chinese character is included. To use Chinese, set the file encoding format to UTF-8. 
| + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Request Path | API URI of a batch service. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Mapping Relationship | If the model input is in JSON format, the system automatically generates the mapping based on the configuration file corresponding to the model. If the model input is other file, mapping is not required. | + | | | + | | Automatically generated mapping file. Enter the field index corresponding to each parameter in the CSV file. The index starts from 0. | + | | | + | | Mapping rule: The mapping rule comes from the input parameter (**request**) in the model configuration file **config.json**. When **type** is set to **string/number/integer/boolean**, you are required to set the index parameter. For details about the mapping rule, see `Example Mapping <#example-mapping>`__. | + | | | + | | The index must be a positive integer starting from 0. If the value of index does not comply with the rule, this parameter is ignored in the request. After the mapping rule is configured, the corresponding CSV data must be separated by commas (,). | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Output Path | Select the path for saving the batch prediction result. You can select the empty folder that you create. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Specifications | Select available specifications based on the list displayed on the console. The specifications in gray cannot be used at the current site. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Compute Nodes | Set the number of instances for the current model version. If you set **Instances** to **1**, the standalone computing mode is used. If you set **Instances** to a value greater than 1, the distributed computing mode is used. Select a computing mode based on the actual requirements. 
| + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Environment Variable | Set environment variables and inject them to the container instance. To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. After setting the parameters, deploy the model as a batch service as prompted. Generally, service deployment jobs run for a period of time, which may be several minutes or tens of minutes depending on the amount of your selected data and resources. + + You can go to the batch service list to view the basic information about the batch service. In the batch service list, after the status of the newly deployed service changes from **Deploying** to **Running**, the service is deployed successfully. + +Manifest File Specifications +---------------------------- + +Batch services of the inference platform support the manifest file. The manifest file describes the input and output of data. + +**Example input manifest file** + +- File name: **test.manifest** + +- File content: + + .. code-block:: + + {"source": "/test/data/1.jpg"} + {"source": "https://infers-data.obs.xxx.com:443/xgboosterdata/data.csv?AccessKeyId=2Q0V0TQ461N26DDL18RB&Expires=1550611914&Signature=wZBttZj5QZrReDhz1uDzwve8GpY%3D&x-obs-security-token=gQpzb3V0aGNoaW5hixvY8V9a1SnsxmGoHYmB1SArYMyqnQT-ZaMSxHvl68kKLAy5feYvLDM..."} + +- File requirements: + + #. The file name extension must be **.manifest**. + #. The file content is in JSON format. Each row describes a piece of input data, which must be accurate to a file instead of a folder. + +**Example output manifest file** + +If you use an input manifest file, the output directory will contain an output manifest file. + +- Assume that the output path is **//test-bucket/test/**. The result is stored in the following path: + + .. code-block:: + + OBS bucket/directory name + ├── test-bucket + │ ├── test + │ │ ├── infer-result-0.manifest + │ │ ├── infer-result + │ │ │ ├── 1.jpg_result.txt + │ │ │ ├── 2.jpg_result.txt + +- Content of the **infer-result-0.manifest** file: + + .. code-block:: + + {"source": "/obs-data-bucket/test/data/1.jpg", "inference-loc": "/test-bucket/test/infer-result/1.jpg_result.txt"} + {"source ": "https://infers-data.obs.xxx.com:443/xgboosterdata/2.jpg?AccessKeyId=2Q0V0TQ461N26DDL18RB&Expires=1550611914&Signature=wZBttZj5QZrReDhz1uDzwve8GpY%3D&x-obs-security-token=gQpzb3V0aGNoaW5hixvY8V9a1SnsxmGoHYmB1SArYMyqnQT-ZaMSxHvl68kKLAy5feYvLDMNZWxzhBZ6Q-3HcoZMh9gISwQOVBwm4ZytB_m8sg1fL6isU7T3CnoL9jmvDGgT9VBC7dC1EyfSJrUcqfB...", "inference-loc": "obs://test-bucket/test/infer-result/2.jpg_result.txt"} + +- File format: + + #. The file name is **infer-result-{{index}}.manifest**, where **index** is the instance ID. Each running instance of a batch service generates a manifest file. + #. 
The **infer-result** directory is created in the manifest directory to store the result. + #. The file content is in JSON format. Each row describes the output result of a piece of input data. + #. The content contains two fields: + + a. **source**: input data description, which is the same as that of the input manifest file + b. **inference-loc**: output result path in the format of **/{{Bucket name}}/{{Object name}}** + +Example Mapping +--------------- + +The following example shows the relationship between the configuration file, mapping rule, CSV data, and inference request. + +Assume that the **apis** parameter in the configuration file used by your model is as follows: + ++-----------------------------------+-----------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | [ | +| 2 | { | +| 3 | "protocol": "http", | +| 4 | "method": "post", | +| 5 | "url": "/", | +| 6 | "request": { | +| 7 | "type": "object", | +| 8 | "properties": { | +| 9 | "data": { | +| 10 | "type": "object", | +| 11 | "properties": { | +| 12 | "req_data": { | +| 13 | "type": "array", | +| 14 | "items": [ | +| 15 | { | +| 16 | "type": "object", | +| 17 | "properties": { | +| 18 | "input_1": { | +| 19 | "type": "number" | +| 20 | }, | +| 21 | "input_2": { | +| 22 | "type": "number" | +| 23 | }, | +| 24 | "input_3": { | +| 25 | "type": "number" | +| 26 | }, | +| 27 | "input_4": { | +| 28 | "type": "number" | +| 29 | } | +| 30 | } | +| 31 | } | +| 32 | ] | +| 33 | } | +| 34 | } | +| 35 | } | +| 36 | } | +| 37 | } | +| 38 | } | +| 39 | ] | ++-----------------------------------+-----------------------------------------------------------------+ + +At this point, the corresponding mapping relationship is shown below. The ModelArts management console automatically resolves the mapping relationship from the configuration file. When calling a ModelArts API, write the mapping relationship by yourself according to the rule. + +.. code-block:: + + { + "type": "object", + "properties": { + "data": { + "type": "object", + "properties": { + "req_data": { + "type": "array", + "items": [ + { + "type": "object", + "properties": { + "input_1": { + "type": "number", + "index": 0 + }, + "input_2": { + "type": "number", + "index": 1 + }, + "input_3": { + "type": "number", + "index": 2 + }, + "input_4": { + "type": "number", + "index": 3 + } + } + } + ] + } + } + } + } + } + +The data for inference, that is, the CSV data, is in the following format. The data must be separated by commas (,). + +.. code-block:: + + 5.1,3.5,1.4,0.2 + 4.9,3.0,1.4,0.2 + 4.7,3.2,1.3,0.2 + +Depending on the defined mapping relationship, the inference request is shown below. The format is similar to the format used by the real-time service. + +.. code-block:: + + { + "data": { + "req_data": [{ + "input_1": 5.1, + "input_2": 3.5, + "input_3": 1.4, + "input_4": 0.2 + }] + } + } + + + +.. |image1| image:: /_static/images/en-us_image_0000001110760970.png + diff --git a/umn/source/model_deployment/batch_services/index.rst b/umn/source/model_deployment/batch_services/index.rst new file mode 100644 index 0000000..84cb599 --- /dev/null +++ b/umn/source/model_deployment/batch_services/index.rst @@ -0,0 +1,9 @@ +============== +Batch Services +============== + +.. 
toctree:: + :maxdepth: 1 + + deploying_a_model_as_a_batch_service + viewing_the_batch_service_prediction_result diff --git a/umn/source/model_deployment/batch_services/viewing_the_batch_service_prediction_result.rst b/umn/source/model_deployment/batch_services/viewing_the_batch_service_prediction_result.rst new file mode 100644 index 0000000..63ed397 --- /dev/null +++ b/umn/source/model_deployment/batch_services/viewing_the_batch_service_prediction_result.rst @@ -0,0 +1,24 @@ +Viewing the Batch Service Prediction Result +=========================================== + +When deploying a batch service, you can select the location of the output data directory. You can view the running result of the batch service that is in the **Running completed** status. + +Procedure +--------- + +#. Log in to the ModelArts management console and choose **Service Deployment** > **Batch Services**. +#. Click the name of the target service in the **Running completed** status. The service details page is displayed. + + - You can view the service name, status, ID, input path, output path, and description. + - You can click |image1| in the **Description** area to edit the description. + +#. Obtain the detailed OBS path next to **Output Path**, switch to the path and obtain the batch service prediction result. + + - If images are entered, a result file is generated for each image in the *Image name*\ **\__result.txt** format, for example, **IMG_20180919_115016.jpg_result.txt**. + - If audio files are entered, a result file is generated for each audio file in the *Audio file name*\ **\__result.txt** format, for example, **1-36929-A-47.wav_result.txt**. + - If table data is entered, the result file is generated in the *Table name*\ **\__result.txt** format, for example, **train.csv_result.txt**. + + + +.. |image1| image:: /_static/images/en-us_image_0000001157080919.png + diff --git a/umn/source/model_deployment/deleting_a_service.rst b/umn/source/model_deployment/deleting_a_service.rst new file mode 100644 index 0000000..0f32232 --- /dev/null +++ b/umn/source/model_deployment/deleting_a_service.rst @@ -0,0 +1,15 @@ +Deleting a Service +================== + +If a service is no longer in use, you can delete it to release resources. + +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. + + a. For a real-time service, choose **More > Delete** in the **Operation** column to delete it. + b. For a batch service, click **Delete** in the **Operation** column to delete it. + + .. note:: + + A deleted service cannot be recovered. Exercise caution when performing this operation. + + diff --git a/umn/source/model_deployment/index.rst b/umn/source/model_deployment/index.rst new file mode 100644 index 0000000..f9717c7 --- /dev/null +++ b/umn/source/model_deployment/index.rst @@ -0,0 +1,13 @@ +================ +Model Deployment +================ + +.. 
toctree:: + :maxdepth: 1 + + introduction_to_model_deployment + real-time_services/index + batch_services/index + modifying_a_service + starting_or_stopping_a_service + deleting_a_service diff --git a/umn/source/model_deployment/introduction_to_model_deployment.rst b/umn/source/model_deployment/introduction_to_model_deployment.rst new file mode 100644 index 0000000..ca01710 --- /dev/null +++ b/umn/source/model_deployment/introduction_to_model_deployment.rst @@ -0,0 +1,14 @@ +Introduction to Model Deployment +================================ + +After a training job is complete and a model is generated, you can deploy the model on the **Service Deployment** page. You can also deploy the model imported from OBS. ModelArts supports the following deployment types: + +- `Real-Time Services <../model_deployment/real-time_services/deploying_a_model_as_a_real-time_service.html>`__ + + Deploy a model as a web service to provide real-time test UI and monitoring capabilities. + +- `Batch Services <../model_deployment/batch_services/deploying_a_model_as_a_batch_service.html>`__ + + A batch service can perform inference on batch data. After data processing is complete, the batch service automatically stops. + + diff --git a/umn/source/model_deployment/modifying_a_service.rst b/umn/source/model_deployment/modifying_a_service.rst new file mode 100644 index 0000000..0ba496e --- /dev/null +++ b/umn/source/model_deployment/modifying_a_service.rst @@ -0,0 +1,38 @@ +Modifying a Service +=================== + +For a deployed service, you can modify its basic information to match service changes. You can modify the basic information about a service in either of the following ways: + +`Method 1: Modify Service Information on the Service Management Page <#method-1:-modify-service-information-on-the-service-management-page>`__ + +`Method 2: Modify Service Information on the Service Details Page <#method-2:-modify-service-information-on-the-service-details-page>`__ + +Prerequisites +------------- + +A service has been deployed. + +Method 1: Modify Service Information on the Service Management Page +------------------------------------------------------------------- + +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. +#. In the service list, click **Modify** in the **Operation** column of the target service, modify basic service information, and click **OK**. + + - For details about the real-time service parameters, see `Deploying a Model as a Real-Time Service <../model_deployment/real-time_services/deploying_a_model_as_a_real-time_service.html>`__. + - For details about the batch service parameters, see `Deploying a Model as a Batch Service <../model_deployment/batch_services/deploying_a_model_as_a_batch_service.html>`__. + + .. note:: + + Services in the **Deploying** status cannot be modified. + +Method 2: Modify Service Information on the Service Details Page +---------------------------------------------------------------- + +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. +#. Click the name of the target service. The service details page is displayed. +#. Click **Modify** in the upper right corner of the page, modify the service details, and click **OK**. 
+ + - For details about the real-time service parameters, see `Deploying a Model as a Real-Time Service <../model_deployment/real-time_services/deploying_a_model_as_a_real-time_service.html>`__. + - For details about the batch service parameters, see `Deploying a Model as a Batch Service <../model_deployment/batch_services/deploying_a_model_as_a_batch_service.html>`__. + + diff --git a/umn/source/model_deployment/real-time_services/accessing_a_real-time_service_(token-based_authentication).rst b/umn/source/model_deployment/real-time_services/accessing_a_real-time_service_(token-based_authentication).rst new file mode 100644 index 0000000..2dbf847 --- /dev/null +++ b/umn/source/model_deployment/real-time_services/accessing_a_real-time_service_(token-based_authentication).rst @@ -0,0 +1,83 @@ +Accessing a Real-Time Service (Token-based Authentication) +========================================================== + +If a real-time service is in the **Running** state, the real-time service has been deployed successfully. This service provides a standard RESTful API for users to call. Before integrating the API to the production environment, commission the API. You can use either of the following methods to send an inference request to the real-time service: + +`Method 1: Use GUI-based Software for Inference (Postman) <#method-1:-use-gui-based-software-for-inference-(postman)>`__ (Postman is recommended for Windows.) + +`Method 2: Run the cURL Command to Send an Inference Request <#method-2:-run-the-curl-command-to-send-an-inference-request>`__ (curl commands are recommended for Linux.) + +Method 1: Use GUI-based Software for Inference (Postman) +-------------------------------------------------------- + +#. Download Postman and install it, or install the Postman Chrome extension. Alternatively, use other software that can send POST requests. Postman 7.24.0 is recommended. +#. Open Postman. +#. Set parameters on Postman. The following uses image classification as an example. + + - Select a POST task and copy the API URL to the POST text box. To obtain the API URL of the real-time service, switch to the **Usage Guides** tab on the page providing details about the real-time service. On the **Headers** tab page, set **Key** to **X-Auth-Token** and **Value** to the obtained token. + - On the **Body** tab page, file input and text input are available. + + - **File input** + + Select **form-data**. Set **KEY** to the input parameter of the model, for example, **images**. Set **VALUE** to an image to be inferred (only one image can be inferred). + + - **Text input** + + Select **raw** and then **JSON(application/json)**. Enter the request body in the text box below. An example request body is as follows: + + .. code-block:: + + { + "meta": { + "uuid": "10eb0091-887f-4839-9929-cbc884f1e20e" + }, + "data": { + "req_data": [ + { + "sepal_length": 3, + "sepal_width": 1, + "petal_length": 2.2, + "petal_width": 4 + } + ] + } + } + + **meta** can carry a universally unique identifier (UUID). When you call an API, the system provides a UUID. When the inference result is returned, the UUID is returned to trace the request. If you do not need this function, leave **meta** blank. **data** contains a **req_data** array for one or multiple pieces of input data. The parameters of each piece of data are determined by the model, such as **sepal_length** and **sepal_width** in this example. + +#. After setting the parameters, click **Send** to send the request. The result is displayed in the response. 
+ + - Inference result using file input: The field values in the return result vary with the model. + - Inference result using text input: The request body contains **meta** and **data**. If the request contains **uuid**, **uuid** will be returned in the response. Otherwise, **uuid** is left blank. **data** contains a **resp_data** array for the inference results of one or multiple pieces of input data. The parameters of each result are determined by the model, for example, **sepal_length** and **predictresult** in this example. + +Method 2: Run the cURL Command to Send an Inference Request +----------------------------------------------------------- + +The command for sending inference requests can be input as a file or text. + +#. File input + + .. code-block:: + + curl -k -F 'images=@Image path' -H 'X-Auth-Token:Token value' -X POST Real-time service URL + + - **-k** indicates that SSL websites can be accessed without using a security certificate. + - **-F** indicates file input. In this example, the parameter name is **images**, which can be changed as required. The image storage path follows **@**. + - **-H** indicates the header of the POST command. **X-Auth-Token** is the **KEY** value on the **Headers** page. *Token value* indicates the obtained token. For details about how to obtain the token, see . + - **POST** is followed by the API URL of the real-time service. + + The following is an example of the cURL command for inference with file input: + + .. code-block:: + + curl -k -F 'images=@/home/data/test.png' -H 'X-Auth-Token:MIISkAY***80T9wHQ==' -X POST https://modelarts-infers-1.xxx/v1/infers/eb3e0c54-3dfa-4750-af0c-95c45e5d3e83 + +#. Text input + + .. code-block:: + + curl -k -d '{"data":{"req_data":[{"sepal_length":3,"sepal_width":1,"petal_length":2.2,"petal_width":4}]}}' -H 'X-Auth-Token:MIISkAY***80T9wHQ==' -H 'Content-type: application/json' -X POST https://modelarts-infers-1.xxx/v1/infers/eb3e0c54-3dfa-4750-af0c-95c45e5d3e83 + + **-d** indicates the text input of the request body. + + diff --git a/umn/source/model_deployment/real-time_services/deploying_a_model_as_a_real-time_service.rst b/umn/source/model_deployment/real-time_services/deploying_a_model_as_a_real-time_service.rst new file mode 100644 index 0000000..3b37546 --- /dev/null +++ b/umn/source/model_deployment/real-time_services/deploying_a_model_as_a_real-time_service.rst @@ -0,0 +1,112 @@ +Deploying a Model as a Real-Time Service +======================================== + +After a model is prepared, you can deploy the model as a real-time service and predict and call the service. + +.. note:: + + A maximum of 20 real-time services can be deployed by a user. + +Prerequisites +------------- + +- Data has been prepared. Specifically, you have created a model in the **Normal** state in ModelArts. + +Procedure +--------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Service Deployment** > **Real-Time Services**. By default, the system switches to the **Real-Time Services** page. + +#. In the real-time service list, click **Deploy** in the upper left corner. The **Deploy** page is displayed. + +#. Set parameters for a real-time service. + + a. Set basic information about model deployment. For details about the parameters, see `Table 1 <#modelarts230060enustopic0165025304table16373156155613>`__. + +.. _modelarts230060enustopic0165025304table16373156155613: + + .. 
table:: **Table 1** Basic parameters of model deployment + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+================================================================================================================================================================================================================+ + | Name | Name of the real-time service. Set this parameter as prompted. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Auto Stop | After this parameter is enabled and the auto stop time is set, a service automatically stops at the specified time. The auto stop function is enabled by default, and the default value is **1 hour later**. | + | | | + | | The options are **1 hour later**, **2 hours later**, **4 hours later**, **6 hours later**, and **Custom**. If you select **Custom**, you can enter any integer from 1 to 24 hours in the textbox on the right. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of the real-time service. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + b. Enter key information including the resource pool and model configurations. For details, see `Table 2 <#modelarts230060enustopic0165025304table10352134481117>`__. + +.. _modelarts230060enustopic0165025304table10352134481117: + + .. table:: **Table 2** Parameters + + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Sub-Parameter | Description | + +=========================+=============================+============================================================================================================================================================================================================================================================================================+ + | Resource Pool | Public resource pools | Instances in the public resource pool can be of the CPU or GPU type. 
| + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Resource Pool | Dedicated resource pools | For details about how to create a dedicated resource pool, see `Creating a Dedicated Resource Pool <../..//resource_pools.html#creating-a-dedicated-resource-pool>`__. You can select a specification from the resource pool specifications. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Model and Configuration | Model Source | You can select **My Models** or **My Subscriptions** based on site requirements. The models that match the model sources are displayed. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Model | The system automatically associates with the list of available models. Select a model in the **Normal** status and its version. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Traffic Ratio (%) | Set the traffic proportion of the current instance node. Service calling requests are allocated to the current version based on this proportion. | + | | | | + | | | If you deploy only one version of a model, set this parameter to **100%**. If you select multiple versions for gated launch, ensure that the sum of the traffic ratios of multiple versions is **100%**. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Specifications | If you select **Public resource pools**, you can select the CPU or GPU resources based on site requirements. For details, see `Table 3 <#modelarts230060enustopic0165025304table117211414482>`__. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Compute Nodes | Set the number of instances for the current model version. If you set **Instances** to **1**, the standalone computing mode is used. If you set **Instances** to a value greater than 1, the distributed computing mode is used. 
Select a computing mode based on the actual requirements. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Environment Variable | Set environment variables and inject them to the container instance. To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Add Model and Configuration | ModelArts supports multiple model versions and flexible traffic policies. You can use gated launch to smoothly upgrade the model version. | + | | | | + | | | .. note:: | + | | | | + | | | If the selected model has only one version, the system does not display **Add Model Version and Configuration**. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Traffic Limit | N/A | Maximum number of times a service can be accessed within a second. You can set this parameter as needed. | + +-------------------------+-----------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230060enustopic0165025304table117211414482: + + .. table:: **Table 3** Supported specifications + + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | Specifications | Description | + +================================================+===========================================================================================+ + | ExeML specifications (CPU) | Only be used by models trained in ExeML projects. | + | | | + | ExeML specifications (GPU) | | + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | CPU: 2 vCPUs \| 8 GiB | Suitable for models with only CPU loads. | + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | CPU: 8 vCPUs \| 64 GiB GPU: 1 x V100 | Suitable for running GPU models. | + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | CPU: 8 vCPUs \| 32 GiB GPU: 1 x T4 | Suitable for models requiring CPU and GPU (NVIDIA T4) resources. 
| + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | Arm: 3 vCPUs \| 6 GiB Ascend: 1 x Ascend 310   | Carrying one Ascend 310 chip, suitable for models requiring Ascend 310 chip resources.    | + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | Ascend: 1 x Ascend 910 CPU: 24 vCPUs \| 96 GiB | Carrying one Ascend 910 chip, suitable for models requiring Ascend 910 chip resources.    | + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + | Ascend: 8 x Ascend 910 CPU: 192 vCPUs \| 720 GiB | Carrying eight Ascend 910 chips, suitable for models requiring Ascend 910 chip resources. | + +------------------------------------------------+-------------------------------------------------------------------------------------------+ + +#. After confirming the entered information, complete service deployment as prompted. Generally, service deployment jobs run for a period of time, which may be several minutes or tens of minutes depending on the amount of your selected data and resources. + + .. note:: + + After a real-time service is deployed, it is started immediately. + + You can go to the real-time service list to view the basic information about the real-time service. In the real-time service list, after the status of the newly deployed service changes from **Deploying** to **Running**, the service is deployed successfully. + + diff --git a/umn/source/model_deployment/real-time_services/index.rst b/umn/source/model_deployment/real-time_services/index.rst new file mode 100644 index 0000000..d0a305c --- /dev/null +++ b/umn/source/model_deployment/real-time_services/index.rst @@ -0,0 +1,11 @@ +================== +Real-Time Services +================== + +.. toctree:: + :maxdepth: 1 + + deploying_a_model_as_a_real-time_service + viewing_service_details + testing_a_service + accessing_a_real-time_service_(token-based_authentication) diff --git a/umn/source/model_deployment/real-time_services/testing_a_service.rst b/umn/source/model_deployment/real-time_services/testing_a_service.rst new file mode 100644 index 0000000..89c1bb6 --- /dev/null +++ b/umn/source/model_deployment/real-time_services/testing_a_service.rst @@ -0,0 +1,45 @@ +Testing a Service +================= + +After a model is deployed as a real-time service, you can debug code or add files for testing on the **Prediction** tab page. Based on the input request (JSON text or file) defined by the model, the service can be tested in either of the following ways: + +#. `JSON Text Prediction <#json-text-prediction>`__: If the input of the deployed model is JSON text, that is, the input does not contain files, you can enter the JSON code on the **Prediction** tab page for service testing. +#. `File Prediction (Images and Audios) <#file-prediction-(images-and-audios)>`__: If the input of the deployed model is a file, including images, audios, and videos, you can upload files on the **Prediction** tab page for service testing. + +.. note:: + + - If the input type is image, the size of a single image must be less than 10 MB. + - The following image types are supported: png, psd, jpg, jpeg, bmp, gif, webp, svg, and tiff.
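+
+For illustration, if the deployed model takes JSON text as input, the content entered on the **Prediction** tab page might look like the following sketch. The field names and values here are hypothetical; the actual request structure depends on your model, as described in `Input Parameters <#input-parameters>`__ below.
+
+.. code-block::
+
+   {
+       "instances": [
+           {
+               "feature_a": 5.1,
+               "feature_b": 3.5
+           }
+       ]
+   }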
+ +Input Parameters +---------------- + +For the service that you have deployed, you can view its input parameters, that is, the input request type mentioned above, on the **Usage Guides** tab page of the service details page. + +The input parameters displayed on the **Usage Guides** tab page depend on the model source that you select. + +- If your model comes from ExeML or a built-in algorithm, the input and output parameters are defined by ModelArts. For details, see the **Usage Guides** tab page. On the **Prediction** tab page, enter the corresponding JSON text or file for service testing. + +- If you use a custom model with the inference code and configuration file compiled by yourself (`Specifications for Compiling the Model Configuration File <../../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html>`__), the **Usage Guides** tab page only visualizes the data defined in your configuration file. The following figure shows the mapping between the input parameters displayed on the **Usage Guides** tab page and the configuration file. + + .. figure:: /_static/images/en-us_image_0000001156920823.png + :alt: **Figure 1** Mapping between the configuration file and Usage Guides + + + **Figure 1** Mapping between the configuration file and Usage Guides + +- If your model is imported using a model template, the input and output parameters vary with the template. For details, see `Introduction to Model Templates <../../model_templates/introduction_to_model_templates.html>`__. + +JSON Text Prediction +-------------------- + +#. Log in to the ModelArts management console and choose **Service Deployment** > **Real-Time Services**. +#. On the **Real-Time Services** page, click the name of the target service. The service details page is displayed. On the **Prediction** tab page, enter the prediction code and click **Predict** to perform prediction. + +File Prediction (Images and Audios) +----------------------------------- + +#. Log in to the ModelArts management console and choose **Service Deployment** > **Real-Time Services**. +#. On the **Real-Time Services** page, click the name of the target service. The service details page is displayed. On the **Prediction** tab page, click **Upload** and select a test file. After the file is uploaded successfully, click **Predict** to perform a prediction test. + + diff --git a/umn/source/model_deployment/real-time_services/viewing_service_details.rst b/umn/source/model_deployment/real-time_services/viewing_service_details.rst new file mode 100644 index 0000000..2fa419d --- /dev/null +++ b/umn/source/model_deployment/real-time_services/viewing_service_details.rst @@ -0,0 +1,234 @@ +Viewing Service Details +======================= + +After a model is deployed as a real-time service, you can access the service page to view its details. + +#. Log in to the ModelArts management console and choose **Service Deployment** > **Real-Time Services**. + +#. On the **Real-Time Services** page, click the name of the target service. The service details page is displayed. + + You can view the service name and status. For details, see `Table 1 <#modelarts230061enustopic0165025305table54131529105213>`__. + + + +.. _modelarts230061enustopic0165025305table54131529105213: + + ..
table:: **Table 1** Real-time service parameters + + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+===========================================================================================================================================================================================================================================================================================================+ + | Name | Name of the real-time service. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Status | Current status of the real-time service. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Source | Model source of the real-time service. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Failed Calls/Total Calls | Number of service calls, which is counted from the time when the service was created. | + | | | + | | If the number of models is changed or a service is invoked when a model is not ready, the number of calls is not counted. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Network Configuration | Customized network configuration of the used dedicated resource pool. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Service description, which can be edited after you click the edit button on the right side. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Custom Settings | Customized configurations based on real-time service versions. 
This allows version-based traffic distribution policies and configurations. Enable this option and click **View Settings** to customize the settings. For details, see `Modifying Customized Settings <#modifying-customized-settings>`__. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Data Collection | Enable this option to store the data generated when the real-time service is invoked to a specified OBS path. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Filter | Enable this option so that the system automatically identifies hard examples in all sample data. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Synchronize Data | Synchronize the collected data to a dataset for centralized management and utilization. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Traffic Limit | Maximum number of times a service can be accessed within a second. | + +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. You can switch between tabs on the details page of a real-time service to view more details. For details, see `Table 2 <#modelarts230061enustopic0165025305table62441712183917>`__. + +.. _modelarts230061enustopic0165025305table62441712183917: + + .. table:: **Table 2** Service details + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=====================================================================================================================================================================================+ + | Usage Guides | Displays the API address, model information, input parameters, and output parameters. You can click |image1| to copy the API address to call the service. 
| + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Prediction | Performs a prediction test on the real-time service. For details, see `Testing a Service <../../model_deployment/real-time_services/testing_a_service.html>`__. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Configuration Updates | Displays **Existing Configuration** and **Historical Updates**. | + | | | + | | - **Existing Configuration**: includes information such as the model name, version, status, and traffic ratio. | + | | - **Historical Updates**: displays historical model information. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Monitoring | Displays **Resource Usage** and **Model Calls**. | + | | | + | | - **Resource Usage**: includes the used and available CPU, memory, and GPU resources. | + | | - **Model Calls**: indicates the number of model calls. The statistics collection starts after the model status changes to **Ready**. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Logs | Displays the log information about each model in the service. You can view logs generated in the latest 5 minutes, latest 30 minutes, latest 1 hour, or a user-defined time segment. | + | | | + | | - You can select the start time and end time when defining the time segment. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Modifying Customized Settings +----------------------------- + +A customized configuration rule consists of the configuration condition (**Setting**), access version (**Version**), and customized running parameters (including **Setting Name** and **Setting Value**). + +You can configure different settings with customized running parameters for different versions of a real-time service. + +Customized configuration rules are matched in descending order of priority. You can change the priorities by dragging the rules to reorder them. + +After a rule is matched, the system will no longer match subsequent rules. A maximum of 10 configuration rules can be configured. + + + +.. _modelarts230061enustopic0165025305table569619576249: + +.. table:: **Table 3** Parameters for **Custom Settings** + + +-----------------------+-----------------------+----------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Description | + +=======================+=======================+========================================================================================================================================+ + | Setting | Yes | Expression of the Spring Expression Language (SPEL) rule. Only the equal and matches expressions of the character type are supported.
| + +-----------------------+-----------------------+----------------------------------------------------------------------------------------------------------------------------------------+ + | Version | Yes | Access version for a customized service configuration rule. When a rule is matched, the real-time service of the version is requested. | + +-----------------------+-----------------------+----------------------------------------------------------------------------------------------------------------------------------------+ + | Setting Name | No | Key of a customized running parameter, consisting of a maximum of 128 characters. | + | | | | + | | | Configure this parameter if the HTTP message header is used to carry customized running parameters to a real-time service. | + +-----------------------+-----------------------+----------------------------------------------------------------------------------------------------------------------------------------+ + | Setting Value | No | Value of a customized running parameter, consisting of a maximum of 256 characters. | + | | | | + | | | Configure this parameter if the HTTP message header is used to carry customized running parameters to a real-time service. | + +-----------------------+-----------------------+----------------------------------------------------------------------------------------------------------------------------------------+ + +Customized settings can be used in the following scenarios: + +- If multiple versions of a real-time service are deployed for dark launch, customized settings can be used to distribute traffic by user. + +.. _modelarts230061enustopic0165025305table19377505490: + + .. table:: **Table 4** Built-in variables + + +-------------------+-----------------------------------------------------------+ + | Built-in Variable | Description | + +===================+===========================================================+ + | DOMAIN_NAME | Account name that is used to invoke the inference request | + +-------------------+-----------------------------------------------------------+ + | DOMAIN_ID | Account ID that is used to invoke the inference request | + +-------------------+-----------------------------------------------------------+ + | PROJECT_NAME | Project name that is used to invoke the inference request | + +-------------------+-----------------------------------------------------------+ + | PROJECT_ID | Project ID that invokes the inference request | + +-------------------+-----------------------------------------------------------+ + | USER_NAME | Username that is used to invoke the inference request | + +-------------------+-----------------------------------------------------------+ + | USER_ID | User ID that is used to invoke the inference request | + +-------------------+-----------------------------------------------------------+ + + Pound key (#) indicates that a variable is referenced. The matched character string must be enclosed in single quotation marks. + + .. code-block:: + + #{Built-in variable} == 'Character string' + #{Built-in variable} matches 'Regular expression' + + - Example 1: + + If the account name for invoking the inference request is **User A**, the specified version is matched. + + .. code-block:: + + #DOMAIN_NAME == 'User A' + + - Example 2: + + If the account name in the inference request starts with **op**, the specified version is matched. + + .. code-block:: + + #DOMAIN_NAME matches 'op.*' + + + +.. _modelarts230061enustopic0165025305table52770525547: + + .. 
table:: **Table 5** Common regular expressions + + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Character | Description | + +===========+=============================================================================================================================================================+ + | . | Match any single character except **\\n**. To match any character including **\\n**, use **(.|\n)**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | \* | Match the subexpression that it follows for zero or multiple times. For example, **zo\*** can match **z** and **zoo**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | + | Match the subexpression that it follows for once or multiple times. For example, **zo+** can match **zo** and **zoo**, but cannot match **z**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | ? | Match the subexpression that it follows for zero or one time. For example, **do(es)?** can match **does** or **do** in **does**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | ^ | Match the start of the input string. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | $ | Match the end of the input string. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | {n} | Match for the number specified by *n*, a non-negative integer. For example, **o{2}** cannot match **o** in **Bob**, but can match two **o**\ s in **food**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | x|y | Match x or y. For example, **z|food** can match **z** or **food**, and **(z|f)ood** can match **zood** or **food**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | [xyz] | Match any single character contained in a character set. For example, **[abc]** can match **a** in **plain**. | + +-----------+-------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + .. figure:: /_static/images/en-us_image_0000001157080859.png + :alt: **Figure 1** Traffic distribution by user + + + **Figure 1** Traffic distribution by user + +- If multiple versions of a real-time service are deployed for dark launch, customized settings can be used to access different versions through the header. + + Start with **#HEADER\_**, indicating that the header is referenced as a condition. + + .. 
code-block:: + + #HEADER_{key} == '{value}' + #HEADER_{key} matches '{value}' + + - Example 1: + + If the header of an inference HTTP request contains a version and the value is **0.0.1**, the condition is met. Otherwise, the condition is not met. + + .. code-block:: + + #HEADER_version == '0.0.1' + + - Example 2: + + If the header of an inference HTTP request contains **testheader** and the value starts with **mock**, the rule is matched. + + .. code-block:: + + #HEADER_testheader matches 'mock.*' + + .. figure:: /_static/images/en-us_image_0000001110920910.png + :alt: **Figure 2** Using the header to access different versions + + + **Figure 2** Using the header to access different versions + +- If a real-time service version supports different running configurations, you can use **Setting Name** and **Setting Value** to specify customized running parameters so that different users can use different running configurations. + + Example: + + When user A accesses the model, the user uses configuration A. When user B accesses the model, the user uses configuration B. When matching a running configuration, ModelArts adds a header to the request and also the customized running parameters specified by **Setting Name** and **Setting Value**. + + .. figure:: /_static/images/en-us_image_0000001110761010.png + :alt: **Figure 3** Customized running parameters added for a customized configuration rule + + + **Figure 3** Customized running parameters added for a customized configuration rule + + + +.. |image1| image:: /_static/images/en-us_image_0000001110920912.png + diff --git a/umn/source/model_deployment/starting_or_stopping_a_service.rst b/umn/source/model_deployment/starting_or_stopping_a_service.rst new file mode 100644 index 0000000..f92c571 --- /dev/null +++ b/umn/source/model_deployment/starting_or_stopping_a_service.rst @@ -0,0 +1,20 @@ +Starting or Stopping a Service +============================== + +Starting a Service +------------------ + +You can start services in the **Successful**, **Abnormal**, or **Stopped** status. Services in the **Deploying** status cannot be started. You can start a service in either of the following ways: + +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. Click **Start** in the **Operation** column to start the target service. +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. Click the name of the target service. The service details page is displayed. Click **Start** in the upper right corner of the page to start the service. + +Stopping a Service +------------------ + +You can stop services in the **Running** or **Alarm** status. Services in the **Deploying** status cannot be stopped. After a service is stopped, ModelArts stops charging. You can stop a service in either of the following ways: + +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. Click **Stop** in the **Operation** column to stop the target service. +#. Log in to the ModelArts management console and choose **Service Deployment** from the left navigation pane. Go to the service management page of the target service. Click the name of the target service. The service details page is displayed. 
Click **Stop** in the upper right corner of the page to stop the service. + + diff --git a/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_container_image.rst b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_container_image.rst new file mode 100644 index 0000000..28e0946 --- /dev/null +++ b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_container_image.rst @@ -0,0 +1,81 @@ +Importing a Meta Model from a Container Image +============================================= + +For AI engines that are not supported by ModelArts, you can import the model you compile to ModelArts from custom images. + +Prerequisites +------------- + +- For details about the specifications and description of custom images, see `Importing a Model Using a Custom Image <../../custom_images/for_importing_models/importing_a_model_using_a_custom_image.html>`__. +- The configuration must be provided for a model that you have developed and trained. The file must comply with ModelArts specifications. For details about the specifications, see `Specifications for Compiling the Model Configuration File <../../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html>`__. After the compilation is complete, upload the file to the specified OBS directory. +- The OBS directory you use and ModelArts are in the same region. + +Procedure +--------- + +#. Log in to the ModelArts management console, and choose **Model Management** > **Models** in the left navigation pane. The **Models** page is displayed. +#. Click **Import** in the upper left corner. The **Import** page is displayed. +#. On the **Import** page, set related parameters. + + a. Set basic information about the model. For details about the parameters, see `Table 1 <#modelarts230206enustopic0207629477table19428112584211>`__. + +.. _modelarts230206enustopic0207629477table19428112584211: + + .. table:: **Table 1** Parameters of basic model information + + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=============+=====================================================================================================================================================================================+ + | Name | Model name. The value can contain 1 to 64 visible characters, including Chinese characters. Only letters, Chinese characters, digits, hyphens (-), and underscores (_) are allowed. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Version | Version of the model to be created. For the first import, the default value is **0.0.1**. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label | Model label. A maximum of five model labels are supported. 
| + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of the model | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + b. Select the meta model source and set related parameters. **Meta Model Source** has four options based on the scenario. For details, see `Methods of Importing a Model <../../model_management/introduction_to_model_management.html#methods-of-importing-a-model>`__. Set **Meta Model Source** to **Container image**. For details about the parameters, see `Table 2 <#modelarts230206enustopic0207629477table104931647171713>`__. + +.. _modelarts230206enustopic0207629477table104931647171713: + + .. table:: **Table 2** Parameters of the meta model source + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | Container Image Path | Click |image1| to import the model image from the container image. The model is of the Image type, and you do not need to use **swr_location** in the configuration file to specify the image location. | + | | | + | | For details about how to create a custom image, see `Introduction to Custom Images <../../custom_images/introduction_to_custom_images.html>`__. | + | | | + | | .. note:: | + | | | + | | The model image you select will be shared with the administrator, so ensure you have the permission to share the image (images shared with other accounts are unsupported). When you deploy a service, ModelArts deploys the image as an inference service. Ensure that your image can be properly started and provide an inference interface. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Deployment Type | After the model is imported, select the service type that the model is deployed. When deploying a service, you can only deploy the service type selected here. For example, if you only select **Real-time services** here, you can only deploy real-time services after importing the model. 
| + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Configuration File | The **Import from OBS** and **Edit online** methods are available. The configuration file must comply with certain specifications in `Model Package Specifications <../../model_package_specifications/model_package_specifications.html>`__. If you select **Import from OBS**, you need to specify the OBS path for storing the configuration file. You can enable **View Configuration File** to view or edit the configuration file online. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter Configuration | Click |image2| on the right to view the input and output parameters of the model. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + c. Set the inference specifications and model description. + + - **Min. Inference Specs**: If your model requires certain resources to complete inference, you can configure this parameter to set the minimum specifications required for normal inference after the model is deployed as a service. In later versions, the system will allocate resources based on the inference specifications in service deployment. You can also modify the specifications as required during deployment. Note that the specifications configured here are valid only when real-time services are deployed and the dedicated resource pool is used. + - **Model Description**: To help other model developers better understand and use your models, provide model descriptions. Click **Add Model Description** and then set the document name and URL. A maximum of three model descriptions are supported. + + d. Check the information and click **Next**. The model is imported. + + In the model list, you can view the imported model and its version. When the model status changes to **Normal**, the model is successfully imported. On this page, you can create new versions, quickly deploy models, publish models to the market, and perform other operations. + +Follow-Up Procedure +------------------- + +- `Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__: On the **Models** page, click the triangle next to a model name to view all versions of the model. 
Locate the row that contains the target version, click **Deploy** in the **Operation** column, and select the deployment type configured when importing the model from the drop-down list. On the **Deploy** page, set parameters by referring to `Introduction to Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__. + + + +.. |image1| image:: /_static/images/en-us_image_0000001157081003.png + +.. |image2| image:: /_static/images/en-us_image_0000001157081001.png + diff --git a/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_template.rst b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_template.rst new file mode 100644 index 0000000..6822c64 --- /dev/null +++ b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_template.rst @@ -0,0 +1,79 @@ +Importing a Meta Model from a Template +====================================== + +Because the configurations of models with the same functions are similar, ModelArts integrates the configurations of such models into a common template. By using this template, you can easily and quickly import models without compiling the **config.json** configuration file. + +Background +---------- + +- Because the configurations of models with the same functions are similar, ModelArts integrates the configurations of such models into a common template. By using this template, you can easily and quickly import the model. For details about the template, see `Introduction to Model Templates <../../model_templates/introduction_to_model_templates.html>`__. +- For details about the supported templates, see `Supported Templates <../../model_templates/introduction_to_model_templates.html#supported-templates>`__. For details about the input and output modes of each template, see `Supported Input and Output Modes <../../model_templates/introduction_to_model_templates.html#supported-input-and-output-modes>`__. +- Ensure that you have uploaded the model to OBS based on the model package specifications of the corresponding template. +- The OBS directory you use and ModelArts are in the same region. + +Procedure +--------- + +#. Log in to the ModelArts management console, and choose **Model Management** > **Models** in the left navigation pane. The **Models** page is displayed. +#. Click **Import** in the upper left corner. The **Import** page is displayed. +#. On the **Import** page, set related parameters. + + a. Set basic information about the model. For details about the parameters, see `Table 1 <#modelarts230205enustopic0207629476table83985217130>`__. + +.. _modelarts230205enustopic0207629476table83985217130: + + .. table:: **Table 1** Parameters of basic model information + + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=============+=====================================================================================================================================================================================+ + | Name | Model name. The value can contain 1 to 64 visible characters, including Chinese characters. Only letters, Chinese characters, digits, hyphens (-), and underscores (_) are allowed. 
| + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Version | Version of the model to be created. For the first import, the default value is **0.0.1**. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label | Model label. A maximum of five model labels are supported. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of the model | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + b. Select the meta model source and set related parameters. Set **Meta Model Source** based on your application scenario. For details, see `Methods of Importing a Model <../../model_management/introduction_to_model_management.html#methods-of-importing-a-model>`__.If **Meta Model Source** is set to **Template**, set other parameters by referring to `Table 2 <#modelarts230205enustopic0207629476table104931647171713>`__. + +.. _modelarts230205enustopic0207629476table104931647171713: + + .. table:: **Table 2** Parameters of the meta model source + + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | Model Template | Select a template from the existing ModelArts template list . | + | | | + | | ModelArts also provides three filter criteria: **Type**, **Engine**, and **Environment**, helping you quickly find the desired template. If the three filter criteria cannot meet your requirements, you can enter keywords to search for the target template. For details about the supported templates, see `Supported Templates <../../model_templates/introduction_to_model_templates.html#supported-templates>`__. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Model Directory | OBS path where a model is saved. 
Select an OBS path for storing the model based on the input requirements of the selected model template. | + | | | + | | .. note:: | + | | | + | | If a training job is executed for multiple times, different version directories are generated, such as V001 and V002, and the generated models are stored in the **model** folder in different version directories. When selecting model files, specify the **model** folder in the corresponding version directory. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Input and Output Mode | If the default input and output mode of the selected template can be overwritten, you can select an input and output mode based on the model function or application scenario. **Input and Output Mode** is an abstract of the API (**apis**) in **config.json**. It describes the interface provided by the model for external inference. An input and output mode describes one or more APIs, and corresponds to a template. | + | | | + | | For details about the supported input and output modes, see `Supported Input and Output Modes <../../model_templates/introduction_to_model_templates.html#supported-input-and-output-modes>`__. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Deployment Type | After the model is imported, select the service type that the model is deployed. When deploying a service, you can only deploy the service type selected here. For example, if you only select **Real-time services** here, you can only deploy real-time services after importing the model. | + +-----------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + c. Set the inference specifications and model description. + + - **Min. Inference Specs**: If your model requires certain resources to complete inference, you can configure this parameter to set the minimum specifications required for normal inference after the model is deployed as a service. In later versions, the system will allocate resources based on the inference specifications in service deployment. You can also modify the specifications as required during deployment. Note that the specifications configured here are valid only when real-time services are deployed and the dedicated resource pool is used. + - **Model Description**: To help other model developers better understand and use your models, provide model descriptions. 
Click **Add Model Description** and then set the document name and URL. A maximum of three model descriptions are supported. + + d. Check the information and click **Next**. The model is imported. + + In the model list, you can view the imported model and its version. When the model status changes to **Normal**, the model is successfully imported. On this page, you can create new versions, quickly deploy models, publish models to the market, and perform other operations. + +Follow-Up Procedure +------------------- + +- `Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__: On the **Models** page, click the triangle next to a model name to view all versions of the model. Locate the row that contains the target version, click **Deploy** in the **Operation** column, and select the deployment type configured when importing the model from the drop-down list. On the **Deploy** page, set parameters by referring to `Introduction to Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__. + + diff --git a/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_training_job.rst b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_training_job.rst new file mode 100644 index 0000000..d670c14 --- /dev/null +++ b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_a_training_job.rst @@ -0,0 +1,79 @@ +Importing a Meta Model from a Training Job +========================================== + +You can create a training job on ModelArts and perform training to obtain a satisfactory model. Then import the model to **Model Management** for unified management. In addition, you can quickly deploy the model as a service. + +Background +---------- + +- If a model generated by the ModelArts training job is used, ensure that the training job has been successfully executed and the model has been stored in the corresponding OBS directory. +- If a model is generated from a training job that uses built-in algorithms, the model can be directly imported to ModelArts without using the inference code and configuration file. +- If a model is generated from a training job that uses a frequently-used framework or custom image, upload the inference code and configuration file to the storage directory of the model by referring to `Model Package Specifications <../../model_package_specifications/model_package_specifications.html>`__. +- The OBS directory you use and ModelArts are in the same region. +- ModelArts of the Arm version does not support model import from training. + +Procedure +--------- + +#. Log in to the ModelArts management console, and choose **Model Management** > **Models** in the left navigation pane. The **Models** page is displayed. +#. Click **Import** in the upper left corner. The **Import** page is displayed. +#. On the **Import** page, set related parameters. + + a. Set basic information about the model. For details about the parameters, see `Table 1 <#modelarts230054enustopic0207629475table19428112584211>`__. + +.. _modelarts230054enustopic0207629475table19428112584211: + + .. 
table:: **Table 1** Parameters of basic model information + + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=============+=====================================================================================================================================================================================+ + | Name | Model name. The value can contain 1 to 64 visible characters, including Chinese characters. Only letters, Chinese characters, digits, hyphens (-), and underscores (_) are allowed. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Version | Version of the model to be created. For the first import, the default value is **0.0.1**. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label | Model label. A maximum of five model labels are supported. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of the model | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + b. Select the meta model source and set related parameters. Set **Meta Model Source** based on your application scenario. For details, see `Methods of Importing a Model <../../model_management/introduction_to_model_management.html#methods-of-importing-a-model>`__. If **Meta Model Source** is set to **Training job**, set other parameters by referring to `Table 2 <#modelarts230054enustopic0207629475table104931647171713>`__. + +.. _modelarts230054enustopic0207629475table104931647171713: + + .. table:: **Table 2** Parameters of the meta model source + + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=========================+===============================================================================================================================================================================================================================================================================================+ + | Meta Model Source | Select **Training job**, and select a specified training job that has completed training under the current account and its version from the drop-down lists on the right of **Training Job** and **Version** respectively. 
| + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Deployment Type | Select the type of the service that the model can be deployed as after it is imported. When deploying a service, you can only deploy the service type selected here. For example, if you only select **Real-time services** here, you can only deploy real-time services after importing the model. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Inference Code | Displays the model inference code URL. You can copy this URL directly. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter Configuration | Click |image2| on the right to view the input and output parameters of the model. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Runtime Dependency | Lists the dependencies of the selected model on the environment. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + c. Set the inference specifications and model description. + + - **Min. Inference Specs**: If your model requires certain resources to complete inference, you can configure this parameter to set the minimum specifications required for normal inference after the model is deployed as a service. In later versions, the system will allocate resources based on the inference specifications in service deployment. You can also modify the specifications as required during deployment. Note that the specifications configured here are valid only when real-time services are deployed and the dedicated resource pool is used. + - **Model Description**: To help other model developers better understand and use your models, provide model descriptions. Click **Add Model Description** and then set the document name and URL. A maximum of three model descriptions are supported. + + d. Check the information and click **Next**. The model is imported. + + In the model list, you can view the imported model and its version. When the model status changes to **Normal**, the model is successfully imported. On this page, you can create new versions, quickly deploy models, publish models to the market, and perform other operations.
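+
+For reference, the dependencies shown under **Runtime Dependency** are typically declared in the **dependencies** field of the model's configuration file. The snippet below is only an illustrative sketch with hypothetical package names and versions; the authoritative format is described in `Model Package Specifications <../../model_package_specifications/model_package_specifications.html>`__.
+
+.. code-block::
+
+   "dependencies": [
+       {
+           "installer": "pip",
+           "packages": [
+               {
+                   "package_name": "numpy",
+                   "package_version": "1.17.0",
+                   "restraint": "EXACT"
+               }
+           ]
+       }
+   ]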
+ +Follow-Up Procedure +------------------- + +- `Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__: On the **Models** page, click the triangle next to a model name to view all versions of the model. Locate the row that contains the target version, click **Deploy** in the **Operation** column, and select the deployment type configured when importing the model from the drop-down list. On the **Deploy** page, set parameters by referring to `Introduction to Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110761092.png + +.. |image2| image:: /_static/images/en-us_image_0000001110761092.png + diff --git a/umn/source/model_management/importing_a_model/importing_a_meta_model_from_obs.rst b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_obs.rst new file mode 100644 index 0000000..4b3b11e --- /dev/null +++ b/umn/source/model_management/importing_a_model/importing_a_meta_model_from_obs.rst @@ -0,0 +1,150 @@ +Importing a Meta Model from OBS +=============================== + +In scenarios where frequently-used frameworks are used for model development and training, you can import the model to ModelArts for unified management. + +Prerequisites +------------- + +- The model has been developed and trained, and the type and version of the AI engine it uses is supported by ModelArts. Common engines supported by ModelArts and their runtime ranges are described as follows: + +.. _modelarts230207enustopic0207629478table108792813184: + + .. table:: **Table 1** Supported AI engines and their runtime + + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Engine | Runtime | Precautions | + +=======================+=========================+============================================================================================================================================================================================================================================================================================+ + | TensorFlow | python3.6 | - TensorFlow 1.8.0 is used in **python2.7** and **python3.6**. | + | | | - **python3.6**, **python2.7**, and **tf2.1-python3.7** indicate that the model can run on both CPUs and GPUs. For other runtime values, if the suffix contains **cpu** or **gpu**, the model can run only on CPUs or GPUs. | + | | python2.7 | - The default runtime is **python2.7**. | + | | | | + | | tf1.13-python2.7-gpu | | + | | | | + | | tf1.13-python2.7-cpu | | + | | | | + | | tf1.13-python3.6-gpu | | + | | | | + | | tf1.13-python3.6-cpu | | + | | | | + | | tf1.13-python3.7-cpu | | + | | | | + | | tf1.13-python3.7-gpu | | + | | | | + | | tf2.1-python3.7 | | + | | | | + | | tf1.15-aarch64-c76-d910 | | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | MXNet | python3.7 | - MXNet 1.2.1 is used in **python3.6** and **python3.7**. 
| + | | | - **python3.6** and **python3.7** indicate that the model can run on both CPUs and GPUs. | + | | python3.6 | - The default runtime is **python3.6**. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Caffe | python3.6 | - Caffe 1.0.0 is used in **python3.6**, **python3.7**, **python3.6-gpu**, **python3.7-gpu**, **python3.6-cpu**, and **python3.7-cpu**. | + | | | - **python 3.6** and **python3.7** can only be used to run models on CPUs. For other runtime values, if the suffix contains **cpu** or **gpu**, the model can run only on CPUs or GPUs. Use the runtime of **python3.6-gpu**, **python3.7-gpu**, **python3.6-cpu**, or **python3.7-cpu**. | + | | python3.7 | - The default runtime is **python3.6**. | + | | | | + | | python3.6-gpu | | + | | | | + | | python3.7-gpu | | + | | | | + | | python3.6-cpu | | + | | | | + | | python3.7-cpu | | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Spark_MLlib | python3.6 | - Spark_MLlib 2.3.2 is used in **python3.6**. | + | | | - **python 3.6** can only be used to run models on CPUs. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Scikit_Learn | python3.6 | - Scikit_Learn 0.18.1 is used in **python3.6**. | + | | | - **python 3.6** can only be used to run models on CPUs. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | XGBoost | python3.6 | - XGBoost 0.80 is used in **python3.6**. | + | | | - **python 3.6** can only be used to run models on CPUs. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | PyTorch | python3.6 | - PyTorch 1.0 is used in **python3.6** and **python3.7**. | + | | | - **python3.6**, **python3.7**, and **pytorch1.4-python3.7** indicate that the model can run on both CPUs and GPUs. | + | | python3.7 | - The default runtime is **python3.6**. 
| + | | | | + | | pytorch1.4-python3.7 | | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | MindSpore | ms1.1-python3.7-c76 | MindSpore 1.1.1 is used. | + +-----------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +- The imported model, inference code, and configuration file must comply with the requirements of ModelArts. For details, see `Model Package Specifications <../../model_package_specifications/model_package_specifications.html>`__, `Specifications for Compiling the Model Configuration File <../../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html>`__, and `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The model package that has completed training, inference code, and configuration file have been uploaded to the OBS directory. + +- The OBS directory you use and ModelArts are in the same region. + +- ModelArts of the Arm version does not support model import from OBS. + +Procedure +--------- + +#. Log in to the ModelArts management console, and choose **Model Management** > **Models** in the left navigation pane. The **Models** page is displayed. +#. Click **Import** in the upper left corner. The **Import** page is displayed. +#. On the **Import** page, set related parameters. + + a. Set basic information about the model. For details about the parameters, see `Table 2 <#modelarts230207enustopic0207629478table19428112584211>`__. + +.. _modelarts230207enustopic0207629478table19428112584211: + + .. table:: **Table 2** Parameters of basic model information + + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=============+=====================================================================================================================================================================================+ + | Name | Model name. The value can contain 1 to 64 visible characters, including Chinese characters. Only letters, Chinese characters, digits, hyphens (-), and underscores (_) are allowed. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Version | Version of the model to be created. For the first import, the default value is **0.0.1**. | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Label | Model label. A maximum of five model labels are supported. 
| + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of the model | + +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + b. Select the meta model source and set related parameters. **Meta Model Source** has four options based on the scenario. For details, see `Methods of Importing a Model <../../model_management/introduction_to_model_management.html#methods-of-importing-a-model>`__. Set **Meta Model Source** to **OBS**. For details about the parameters, see `Table 3 <#modelarts230207enustopic0207629478table1631162916535>`__. + + For the meta model imported from OBS, you need to compile the inference code and configuration file by referring to `Model Package Specifications <../../model_package_specifications/model_package_specifications.html>`__ and place the inference code and configuration files in the **model** folder storing the meta model. If the selected directory does not contain the corresponding inference code and configuration files, the model cannot be imported. + + + +.. _modelarts230207enustopic0207629478table1631162916535: + + .. table:: **Table 3** Parameters of the meta model source + + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=========================+===============================================================================================================================================================================================================================================================================================+ + | Meta Model | Select the model storage path. This path is the training output path specified in the training job. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | AI Engine | The corresponding AI engine is automatically associated based on the selected meta model storage path. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Deployment Type | After the model is imported, select the service type that the model is deployed. When deploying a service, you can only deploy the service type selected here. For example, if you only select **Real-time services** here, you can only deploy real-time services after importing the model. 
| + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Configuration File | By default, the system associates the configuration file stored in OBS. Enable the function to view, edit, or import the model configuration file from OBS. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter Configuration | Click |image2| on the right to view the input and output parameters of the model. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Runtime Dependency | List the dependencies of the selected model on the environment. | + +-------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + c. Set the inference specifications and model description. + + - **Min. Inference Specs**: If your model requires certain resources to complete inference, you can configure this parameter to set the minimum specifications required for normal inference after the model is deployed as a service. In later versions, the system will allocate resources based on the inference specifications in service deployment. You can also modify the specifications as required during deployment. Note that the specifications configured here are valid only when real-time services are deployed and the dedicated resource pool is used. + - **Model Description**: To help other model developers better understand and use your models, provide model descriptions. Click **Add Model Description** and then set the document name and URL. A maximum of three model descriptions are supported. + + d. Check the information and click **Next**. The model is imported. + + In the model list, you can view the imported model and its version. When the model status changes to **Normal**, the model is successfully imported. On this page, you can create new versions, quickly deploy models, publish models to the market, and perform other operations. + +Follow-Up Procedure +------------------- + +- `Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__: On the **Models** page, click the triangle next to a model name to view all versions of the model. Locate the row that contains the target version, click **Deploy** in the **Operation** column, and select the deployment type configured when importing the model from the drop-down list. On the **Deploy** page, set parameters by referring to `Introduction to Model Deployment <../../model_deployment/introduction_to_model_deployment.html>`__. + + + +.. |image1| image:: /_static/images/en-us_image_0000001156920973.png + +.. 
|image2| image:: /_static/images/en-us_image_0000001156920973.png + diff --git a/umn/source/model_management/importing_a_model/index.rst b/umn/source/model_management/importing_a_model/index.rst new file mode 100644 index 0000000..9e05372 --- /dev/null +++ b/umn/source/model_management/importing_a_model/index.rst @@ -0,0 +1,11 @@ +================= +Importing a Model +================= + +.. toctree:: + :maxdepth: 1 + + importing_a_meta_model_from_a_training_job + importing_a_meta_model_from_a_template + importing_a_meta_model_from_a_container_image + importing_a_meta_model_from_obs diff --git a/umn/source/model_management/index.rst b/umn/source/model_management/index.rst new file mode 100644 index 0000000..6b441dc --- /dev/null +++ b/umn/source/model_management/index.rst @@ -0,0 +1,11 @@ +================ +Model Management +================ + +.. toctree:: + :maxdepth: 1 + + introduction_to_model_management + importing_a_model/index + managing_model_versions + model_compression_and_conversion/index diff --git a/umn/source/model_management/introduction_to_model_management.rst b/umn/source/model_management/introduction_to_model_management.rst new file mode 100644 index 0000000..cdf9024 --- /dev/null +++ b/umn/source/model_management/introduction_to_model_management.rst @@ -0,0 +1,44 @@ +Introduction to Model Management +================================ + +AI model development and optimization require frequent iterations and debugging. Changes in datasets, training code, or parameters may affect the quality of models. If the metadata of the development process cannot be managed in a unified manner, the optimal model may fail to be reproduced. + +ModelArts model management allows you to import models generated with all training versions to manage all iterated and debugged models in a unified manner. + +Usage Restrictions +------------------ + +- In an automatic learning project, after a model is deployed, the model is automatically uploaded to the model management list. However, models generated by automatic learning cannot be downloaded and can be used only for deployment and rollout. + +Methods of Importing a Model +---------------------------- + +- `Importing from Trained Models <../model_management/importing_a_model/importing_a_meta_model_from_a_training_job.html>`__: You can create a training job on ModelArts and complete model training. After obtaining a satisfactory model, import the model to the **Model Management** page for model deployment. +- `Importing from a Template <../model_management/importing_a_model/importing_a_meta_model_from_a_template.html>`__: Because the configurations of models with the same functions are similar, ModelArts integrates the configurations of such models into a common template. By using this template, you can easily and quickly import models without compiling the **config.json** configuration file. +- `Importing from a Container Image <../model_management/importing_a_model/importing_a_meta_model_from_a_container_image.html>`__: For AI engines that are not supported by ModelArts, you can import the model you compile to ModelArts using custom images. +- `Importing from OBS <../model_management/importing_a_model/importing_a_meta_model_from_obs.html>`__: If you use a frequently-used framework to develop and train a model locally, you can import the model to ModelArts for model deployment. + +Model Management Functions +-------------------------- + + + +.. _modelarts230052enustopic0171858287table129381852171817: + +.. 
table:: **Table 1** Model management functions + + +-------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Supported Function | Description | + +=====================================================================================================================================+=============================================================================================================================================================================================================================================================+ + | `Importing a Model <../model_management/index.html>`__ | Import the trained models to ModelArts for unified management. You can import models using four methods. The following provides the operation guide for each method. | + | | | + | | - `Importing a Meta Model from a Training Job <../model_management/importing_a_model/importing_a_meta_model_from_a_training_job.html>`__ | + | | - `Importing a Meta Model from a Template <../model_management/importing_a_model/importing_a_meta_model_from_a_template.html>`__ | + | | - `Importing a Meta Model from a Container Image <../model_management/importing_a_model/importing_a_meta_model_from_a_container_image.html>`__ | + +-------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Managing Model Versions <../model_management/managing_model_versions.html>`__ | To facilitate source tracing and repeated model tuning, ModelArts provides the model version management function. You can manage models based on versions. | + +-------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | `Compressing and Converting Models <../model_management/model_compression_and_conversion/compressing_and_converting_models.html>`__ | To obtain higher and more economical computing power, you can deploy the models created on ModelArts or a local PC on the Ascend chip, Arm, or GPU. In this case, you need to compress or convert the models to the required formats before deploying them. 
| + +-------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/model_management/managing_model_versions.rst b/umn/source/model_management/managing_model_versions.rst new file mode 100644 index 0000000..84fbc63 --- /dev/null +++ b/umn/source/model_management/managing_model_versions.rst @@ -0,0 +1,25 @@ +Managing Model Versions +======================= + +To facilitate source tracing and repeated model tuning, ModelArts provides the model version management function. You can manage models based on versions. + +Prerequisites +------------- + +You have imported a model to ModelArts, + +Creating a New Version +---------------------- + +On the **Model Management > Models** page, click **Create New Version** in the **Operation** column. The **Create New Version** page is displayed. Set related parameters by following the instructions in **Importing Models** and click **Next**. + +Deleting a Version +------------------ + +On the **Model Management > Models** page, click the triangle on the left of the model name to expand a model version list. In the model version list, click **Delete** in the **Operation** column to delete the corresponding version. + +.. note:: + + A deleted version cannot be recovered. Exercise caution when performing this operation. + + diff --git a/umn/source/model_management/model_compression_and_conversion/compressing_and_converting_models.rst b/umn/source/model_management/model_compression_and_conversion/compressing_and_converting_models.rst new file mode 100644 index 0000000..2f8cd8a --- /dev/null +++ b/umn/source/model_management/model_compression_and_conversion/compressing_and_converting_models.rst @@ -0,0 +1,46 @@ +Compressing and Converting Models +================================= + +To obtain higher computing power, you can deploy the models created on ModelArts or a local PC on the Ascend chip, Arm, or GPU. In this case, you need to compress or convert the models to the required formats before deploying them. + +ModelArts supports model conversion, allowing you to convert a model to a required format before deploying the model on a chip with higher computing power and performance. + +Model conversion applies to the following scenarios: + +- If you use the TensorFlow framework (in **frozen_graph** or **saved_model** format) to train a model, you can convert the model to the **.om** format. The converted model can be deployed and run on Ascend chips. + +Constraints +----------- + +- Only Ascend chips are supported for model conversion. +- Only Caffe and TensorFlow models can be converted. For a TensorFlow model, the input data type is of the INT32, BOOL, UINT8, or FLOAT type. +- ModelArts provides conversion templates for you to choose. For details about the supported templates, see `Conversion Templates <../../model_management/model_compression_and_conversion/conversion_templates.html>`__. +- The **.tflite** and TensorRT formats support fewer operators and quantization operators. Therefore, some models may fail to be converted. If the conversion fails, view the log dialog box or check error logs in the conversion output directory. +- An OBS directory must be specified in compression/conversion tasks. 
Ensure that the OBS directory you use and ModelArts are in the same region. +- When importing the converted model to ModelArts, you need to use the `model template <../../model_management/importing_a_model/importing_a_meta_model_from_a_template.html>`__. +- For a TensorFlow model, the FrozenGraphDef and SavedModel formats are supported. If a model is in the SavedModel format, the model is converted to the FrozenGraphDef format and then to the OM format. +- Inputs with dynamic shapes are not supported, for example, NHWC = [?,?,?,3]. A fixed value needs to be specified during model conversion. +- The input can be up to 4-dimensional. Operators involving dimension changes (such as reshape and expanddim) cannot output five dimensions. +- Except the const operator, the input and output at all layers in a model must meet the condition **dim!=0**. +- Model conversion does not support models that contain training operators. +- A UINT8 quantized model cannot be converted. +- Model operators support only 2D convolution but do not support 3D convolution. The batch_normalization_1 and FusedBatchNorm operators cannot be converted in batches. + +Deleting a Model Compression/Conversion Task +-------------------------------------------- + +You can delete unnecessary conversion tasks. However, tasks in the **Running** or **Initializing** status cannot be deleted. + +.. note:: + + Deleted tasks cannot be recovered. Exercise caution when performing this operation. + +- Deleting a single task: + + On the **Compression/Conversion** page, click **Delete** in the **Operation** column of the target task. + +- Deleting a batch of tasks: + + On the **Compression/Conversion** page, select multiple tasks to be deleted and click **Delete** in the upper left corner. + + diff --git a/umn/source/model_management/model_compression_and_conversion/conversion_templates.rst b/umn/source/model_management/model_compression_and_conversion/conversion_templates.rst new file mode 100644 index 0000000..1101d6d --- /dev/null +++ b/umn/source/model_management/model_compression_and_conversion/conversion_templates.rst @@ -0,0 +1,62 @@ +Conversion Templates +==================== + +ModelArts provides the following conversion templates based on different AI frameworks: + +- `TF-FrozenGraph-To-Ascend-C32 <#tf-frozengraph-to-ascend-c32>`__ + +TF-FrozenGraph-To-Ascend-C32 +---------------------------- + +Convert the model trained by the TensorFlow framework and saved in **frozen_graph** format. The converted model can run on the Ascend. The custom operators (TBE operators) developed based on Tensor Based Engine (TBE) can be used for conversion. + + + +.. _modelarts230110enustopic0177612243table397415449135: + +.. 
table:: **Table 1** Advanced settings of the custom operator conversion template + + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +================+==========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | input_shape | Enter the shape of the input data of the model, for example, **input_name1:n1,c1,h1,w1;input_name2:n2,c2,h2,w2**. **input_name** must be the node name in the network model before model conversion. This parameter is mandatory when the model has dynamic shape input. For example, in **input_name1:? ,h,w,c**, the question mark (?) indicates the batch size, that is, the number of images processed at a time. It is used to convert the original model with a dynamic shape into an offline model with a fixed shape. The batch feature is not supported. The batch value of the **input_shape** can only be **1**. During the conversion, the system parses the input model to obtain the input tensor and prints it in the log. If you do not know the input tensor of the used model, refer to the parsing result in the log. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | input_format | **NCHW** and **NHWC** are supported. The default format is **NHWC**. 
For the TensorFlow framework, the default value is **NHWC**. To use the NCHW format, you need to specify **NCHW**. For the Caffe framework, only the NCHW format is supported. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | out_nodes | Specifies the output node, for example, **node_name1:0;node_name1:1;node_name2:0**. **node_name** must be the node name in the network model before model conversion. The digit after each colon (:) indicates the sequence number of the output. For example, **node_name1:0** indicates the 0th output of **node_name1**. If the output node is not specified, the output of the last operator layer serves as the model output by default. To check the parameters of a specific operator layer, specify the operator layer by using this parameter. During the conversion, the system parses the input model to obtain the output node and prints it in the log. If you do not know the input tensor of the used model, refer to the parsing result in the log. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | net_format | Specifies the preferred data format for network operators. Possible values are **ND** (N cannot be more than 4) and **5D**. This parameter only takes effect if the input data of operators on the network supports both **ND** and **5D** formats. **ND** indicates that operators in the model are converted into the NCHW format. **5D** indicates that operators in the model are converted into the 5D format. **5D** is the default value. 
| + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | fp16_high_prec | Specifies whether to generate a high-precision **FP16 Davinci** model. **0** is the default value, indicating that a common FP16 Da Vinci model with better inference performance is generated. The value **1** indicates that a high-precision FP16 Da Vinci model with better inference precision is generated. High-precision models support only Caffe operators (Convolution, Pooling, and FullConnection) and TensorFlow operators (tf.nn.conv2d and tf.nn.max_poo). | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | output_type | **FP32** is the default value and is recommended for classification and detection networks. For image super-resolution networks, UINT8 is recommended for better inference performance. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +TF-SavedModel-To-Ascend-C32 +--------------------------- + +Convert the model trained by the TensorFlow framework and saved in **saved_model** format. The converted model can run on the Ascend. The custom operators (TE operators) developed based on TE can be used for conversion. + + + +.. _modelarts230110enustopic0177612243table17573123151414: + +.. 
table:: **Table 2** Advanced settings of the custom operator conversion template + + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +================+==========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | input_shape | Enter the shape of the input data of the model, for example, **input_name1:n1,c1,h1,w1;input_name2:n2,c2,h2,w2**. **input_name** must be the node name in the network model before model conversion. This parameter is mandatory when the model has dynamic shape input. For example, in **input_name1:? ,h,w,c**, the question mark (?) indicates the batch size, that is, the number of images processed at a time. It is used to convert the original model with a dynamic shape into an offline model with a fixed shape. The batch feature is not supported. The batch value of the **input_shape** can only be **1**. During the conversion, the system parses the input model to obtain the input tensor and prints it in the log. If you do not know the input tensor of the used model, refer to the parsing result in the log. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | input_format | **NCHW** and **NHWC** are supported. The default format is **NHWC**. 
For the TensorFlow framework, the default value is **NHWC**. To use the NCHW format, you need to specify **NCHW**. For the Caffe framework, only the NCHW format is supported. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | out_nodes | Specifies the output node, for example, **node_name1:0;node_name1:1;node_name2:0**. **node_name** must be the node name in the network model before model conversion. The digit after each colon (:) indicates the sequence number of the output. For example, **node_name1:0** indicates the 0th output of **node_name1**. If the output node is not specified, the output of the last operator layer serves as the model output by default. To check the parameters of a specific operator layer, specify the operator layer by using this parameter. During the conversion, the system parses the input model to obtain the output node and prints it in the log. If you do not know the input tensor of the used model, refer to the parsing result in the log. | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | net_format | Specifies the preferred data format for network operators. Possible values are **ND** (N cannot be more than 4) and **5D**. This parameter only takes effect if the input data of operators on the network supports both **ND** and **5D** formats. **ND** indicates that operators in the model are converted into the NCHW format. **5D** indicates that operators in the model are converted into the 5D format. **5D** is the default value. 
| + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | fp16_high_prec | Specifies whether to generate a high-precision **FP16 Davinci** model. **0** is the default value, indicating that a common FP16 Da Vinci model with better inference performance is generated. The value **1** indicates that a high-precision FP16 Da Vinci model with better inference precision is generated. High-precision models support only Caffe operators (Convolution, Pooling, and FullConnection) and TensorFlow operators (tf.nn.conv2d and tf.nn.max_poo). | + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | output_type | **FP32** is the default value and is recommended for classification and detection networks. For image super-resolution networks, UINT8 is recommended for better inference performance. 
| + +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/model_management/model_compression_and_conversion/index.rst b/umn/source/model_management/model_compression_and_conversion/index.rst new file mode 100644 index 0000000..6577862 --- /dev/null +++ b/umn/source/model_management/model_compression_and_conversion/index.rst @@ -0,0 +1,11 @@ +================================ +Model Compression and Conversion +================================ + +.. toctree:: + :maxdepth: 1 + + compressing_and_converting_models + model_input_path_specifications + model_output_path_description + conversion_templates diff --git a/umn/source/model_management/model_compression_and_conversion/model_input_path_specifications.rst b/umn/source/model_management/model_compression_and_conversion/model_input_path_specifications.rst new file mode 100644 index 0000000..39884db --- /dev/null +++ b/umn/source/model_management/model_compression_and_conversion/model_input_path_specifications.rst @@ -0,0 +1,32 @@ +Model Input Path Specifications +=============================== + +Ascend Chip +----------- + +The requirements for converting the models run on the Ascend chip are as follows: + +- For TensorFlow-based models (in **frozen_graph** or **saved_model** format), the input path must comply with the following specifications during model conversion: + + **frozen_graph** format + + .. code-block:: + + | + |---xxxx.pb (Mandatory) Model network file. Only one model network file can exist in the input path. The model must be in frozen_graph or saved_model format. + |---insert_op_conf.cfg (Optional) Insertion operator configuration file. Only one insertion operator configuration file can exist in the input path. + |---plugin (Optional) Custom operator directory. The input directory can contain only one plugin folder. Only custom operators developed based on Tensor Engine (TE) are supported. + + **saved_model** format + + .. code-block:: + + | + |---saved_model.pb (Mandatory) Model network file. Only one model network file can exist in the input path. The model must be in frozen_graph or saved_model format. + |---variables (Mandatory) Fixed subdirectory name, including the model weight deviation. + |---variables.index Mandatory + |---variables.data-00000-of-00001 Mandatory + |---insert_op_conf.cfg (Optional) Insertion operator configuration file. Only one insertion operator configuration file can exist in the input path. + |---plugin (Optional) Custom operator directory. The input directory can contain only one plugin folder. Only custom operators developed based on Tensor Engine (TE) are supported. 
+
+
diff --git a/umn/source/model_management/model_compression_and_conversion/model_output_path_description.rst b/umn/source/model_management/model_compression_and_conversion/model_output_path_description.rst
new file mode 100644
index 0000000..261cc23
--- /dev/null
+++ b/umn/source/model_management/model_compression_and_conversion/model_output_path_description.rst
@@ -0,0 +1,17 @@
+Model Output Path Description
+=============================
+
+Ascend Chip
+-----------
+
+The following describes the output path of a model run on the Ascend chip after conversion:
+
+- For TensorFlow-based models, the output path must comply with the following specifications during model conversion:
+
+  .. code-block::
+
+     |
+     |---xxxx.om        Converted model that runs on the Ascend chip. The model file name extension is .om.
+     |---job_log.txt    Conversion log file
+
+
diff --git a/umn/source/model_package_specifications/index.rst b/umn/source/model_package_specifications/index.rst
new file mode 100644
index 0000000..f3f0cd8
--- /dev/null
+++ b/umn/source/model_package_specifications/index.rst
@@ -0,0 +1,10 @@
+============================
+Model Package Specifications
+============================
+
+.. toctree::
+   :maxdepth: 1
+
+   model_package_specifications
+   specifications_for_compiling_the_model_configuration_file
+   specifications_for_compiling_model_inference_code
diff --git a/umn/source/model_package_specifications/model_package_specifications.rst b/umn/source/model_package_specifications/model_package_specifications.rst
new file mode 100644
index 0000000..1758e49
--- /dev/null
+++ b/umn/source/model_package_specifications/model_package_specifications.rst
@@ -0,0 +1,146 @@
+Model Package Specifications
+============================
+
+When you import a model in **Model Management** and the meta model comes from OBS or a container image, the model package must meet the following specifications:
+
+- The model package must contain the **model** directory. The **model** directory stores the model file, model configuration file, and model inference code.
+- The model configuration file is mandatory and its name is fixed to **config.json**. Only one model configuration file can exist in the package. For details about how to compile the model configuration file, see `Specifications for Compiling the Model Configuration File <../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html>`__.
+- The model inference code file is optional. If it is provided, its name is fixed to **customize_service.py**, and only one such file can exist. For details about how to compile the model inference code, see `Specifications for Compiling Model Inference Code <../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__.
+
+  .. note::
+
+     - The **.py** files that **customize_service.py** depends on can be stored directly in the **model** directory and imported with standard Python import statements.
+     - Other files that **customize_service.py** depends on can also be stored in the **model** directory, but you must use absolute paths to access them. For more details, see `Obtaining an Absolute Path <../model_package_specifications/specifications_for_compiling_model_inference_code.html#modelarts230093enustopic0172466150li135956421288>`__.
+
+ModelArts also provides custom script examples of common AI engines. For details, see `Examples of Custom Scripts <../examples_of_custom_scripts/tensorflow.html>`__.
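+
+Because **config.json** must be valid JSON, it can be convenient to generate it from a script rather than editing it by hand. The snippet below is an illustrative sketch only; the field names and values shown (for example **model_algorithm**, **model_type**, **apis**, and **dependencies**) are examples, and `Specifications for Compiling the Model Configuration File <../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html>`__ remains the authoritative reference for the schema:
+
+.. code-block:: python
+
+   # Illustrative sketch: write a minimal config.json for a TensorFlow image
+   # classification model. Field names and values are examples only; check the
+   # model configuration file specification for the authoritative schema.
+   import json
+
+   config = {
+       "model_algorithm": "image_classification",  # example algorithm name
+       "model_type": "TensorFlow",                 # AI engine of the meta model
+       "runtime": "python3.6",                     # one of the supported runtimes
+       "apis": [{
+           "protocol": "http",
+           "url": "/",
+           "method": "post",
+           "request": {
+               "Content-type": "multipart/form-data",
+               "data": {"type": "object",
+                        "properties": {"images": {"type": "file"}}}
+           },
+           "response": {
+               "Content-type": "application/json",
+               "data": {"type": "object",
+                        "properties": {"predicted_label": {"type": "string"}}}
+           }
+       }],
+       "dependencies": [{
+           "installer": "pip",
+           "packages": [{"package_name": "numpy",
+                         "package_version": "1.17.0",
+                         "restraint": "EXACT"}]    # example version constraint
+       }]
+   }
+
+   with open("model/config.json", "w") as f:
+       json.dump(config, f, indent=4)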
+ +Model Package Example +--------------------- + +- Structure of the TensorFlow-based model package + + When publishing the model, you only need to specify the **ocr** directory. + + .. code-block:: + + OBS bucket/directory name + |── ocr + | ├── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | │ ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | │ ├── saved_model.pb (Mandatory) Protocol buffer file, which contains the diagram description of the model + | │ ├── variables Name of a fixed sub-directory, which contains the weight and deviation rate of the model. It is mandatory for the main file of the *.pb model. + | │ │ ├── variables.index Mandatory + | │ │ ├── variables.data-00000-of-00001 Mandatory + | │ ├──config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | │ ├──customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the MindSpore-based model package + + .. code-block:: + + OBS bucket/directory name + |── resnet + | ├── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | │ ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | │ ├── checkpoint_lenet_1-1_1875.ckpt (Mandatory) Model file in ckpt format trained using MindSpore + | │ ├──config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | │ ├──customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file is supported. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the MXNet-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | ├── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | │ ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | │ ├── resnet-50-symbol.json (Mandatory) Model definition file, which contains the neural network description of the model + | │ ├── resnet-50-0000.params (Mandatory) Model variable parameter file, which contains parameter and weight information + | │ ├──config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | │ ├──customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the Image-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | ├── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | │ ├──config.json (Mandatory) Model configuration file (the address of the SWR image must be configured). The file name is fixed to config.json. 
Only one model configuration file is supported. + +- Structure of the PySpark-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | ├── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | │ ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | │ ├── spark_model (Mandatory) Model directory, which contains the model content saved by PySpark + | │ ├──config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | │ ├──customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the PyTorch-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | ├── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | │ ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | │ ├── resnet50.pth (Mandatory) PyTorch model file, which contains variable and weight information and is saved as state_dict + | │ ├──config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | │ ├──customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the Caffe-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | |── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | | |── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | | |── deploy.prototxt (Mandatory) Caffe model file, which contains information such as the model network structure + | | |── resnet.caffemodel (Mandatory) Caffe model file, which contains variable and weight information + | | |── config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | | |── customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the XGBoost-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | |── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | | |── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | | |── *.m (Mandatory): Model file whose extension name is .m + | | |── config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. 
+ | | |── customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + +- Structure of the Scikit_Learn-based model package + + When publishing the model, you only need to specify the **resnet** directory. + + .. code-block:: + + OBS bucket/directory name + |── resnet + | |── model (Mandatory) Name of a fixed subdirectory, which is used to store model-related files + | | |── <> (Optional) User's Python package, which can be directly referenced in the model inference code + | | |── *.m (Mandatory): Model file whose extension name is .m + | | |── config.json (Mandatory) Model configuration file. The file name is fixed to config.json. Only one model configuration file is supported. + | | |── customize_service.py (Optional) Model inference code. The file name is fixed to customize_service.py. Only one model inference code file exists. The files on which customize_service.py depends can be directly stored in the model directory. + + diff --git a/umn/source/model_package_specifications/specifications_for_compiling_model_inference_code.rst b/umn/source/model_package_specifications/specifications_for_compiling_model_inference_code.rst new file mode 100644 index 0000000..05dcd61 --- /dev/null +++ b/umn/source/model_package_specifications/specifications_for_compiling_model_inference_code.rst @@ -0,0 +1,445 @@ +Specifications for Compiling Model Inference Code +================================================= + +This section describes how to compile model inference code in ModelArts. The following also provides an example of inference code for the TensorFlow engine and an example of customizing inference logic in an inference script. + +Specifications for Compiling Inference Code +------------------------------------------- + +#. All custom Python code must be inherited from the BaseService class. `Table 1 <#modelarts230093enustopic0172466150table55021545175412>`__ lists the import statements of different types of model parent classes. + +.. _modelarts230093enustopic0172466150table55021545175412: + + .. 
table:: **Table 1** Import statements of the **BaseService** class + + +--------------+-------------------------+------------------------------------------------------------------------+ + | Model Type | Parent Class | Import Statement | + +==============+=========================+========================================================================+ + | TensorFlow | TfServingBaseService | from model_service.tfserving_model_service import TfServingBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | MXNet | MXNetBaseService | from mms.model_service.mxnet_model_service import MXNetBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | PyTorch | PTServingBaseService | from model_service.pytorch_model_service import PTServingBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | Pyspark | SparkServingBaseService | from model_service.spark_model_service import SparkServingBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | Caffe | CaffeBaseService | from model_service.caffe_model_service import CaffeBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | XGBoost | XgSklServingBaseService | from model_service.python_model_service import XgSklServingBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | Scikit_Learn | XgSklServingBaseService | from model_service.python_model_service import XgSklServingBaseService | + +--------------+-------------------------+------------------------------------------------------------------------+ + | MindSpore | SingleNodeService | from model_service.model_service import SingleNodeService | + +--------------+-------------------------+------------------------------------------------------------------------+ + +#. The following methods can be rewritten: + +.. _modelarts230093enustopic0172466150table119897712529: + + .. table:: **Table 2** Methods to be rewritten + + +-----------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Method | Description | + +=========================================+==========================================================================================================================================================================================================================================================+ + | \__init__(self, model_name, model_path) | Initialization method, which is suitable for models created based on deep learning frameworks. Models and labels are loaded using this method. This method must be rewritten for models based on PyTorch and Caffe to implement the model loading logic. 
| + +-----------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | \__init__(self, model_path) | Initialization method, which is suitable for models created based on machine learning frameworks. The model path (**self.model_path**) is initialized using this method. In Spark_MLlib, this method also initializes SparkSession (**self.spark**). | + +-----------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | \_preprocess(self, data) | Preprocess method, which is called before an inference request and is used to convert the original request data of an API into the expected input data of a model | + +-----------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | \_inference(self, data) | Inference request method. You are not advised to rewrite the method because once the method is rewritten, the built-in inference process of ModelArts will be overwritten and the custom inference logic will run. | + +-----------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | \_postprocess(self, data) | Postprocess method, which is called after an inference request is complete and is used to convert the model output to the API output | + +-----------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + .. note:: + + - You can choose to rewrite the preprocess and postprocess methods to implement preprocessing of the API input and postprocessing of the inference output. + - Rewriting the init method of the BaseService inheritance class may cause a model to run abnormally. + +#. The attribute that can be used is the local path where the model resides. The attribute name is **self.model_path**. In addition, PySpark-based models can use **self.spark** to obtain the SparkSession object in **customize_service.py**. + + .. note:: + + An absolute path is required for reading files in the inference code. You can obtain the absolute path of the model from the **self.model_path** attribute. + + - When TensorFlow, Caffe, or MXNet is used, **self.model_path** indicates the path of the model file. See the following example: + + .. code-block:: + + # Store the label.json file in the model directory. The following information is read: + with open(os.path.join(self.model_path, 'label.json')) as f: + self.label = json.load(f) + + - When PyTorch, Scikit_Learn, or PySpark is used, **self.model_path** indicates the path of the model file. See the following example: + + .. 
code-block:: + + # Store the label.json file in the model directory. The following information is read: + dir_path = os.path.dirname(os.path.realpath(self.model_path)) + with open(os.path.join(dir_path, 'label.json')) as f: + self.label = json.load(f) + +#. Two types of **content-type** APIs can be used for inputting data: **multipart/form-data** and **application/json** + + - **multipart/form-data** request + + .. code-block:: + + curl -X POST \ + \ + -F image1=@cat.jpg \ + -F images2=@horse.jpg + + The corresponding input data is as follows: + + .. code-block:: + + [ + { + "image1":{ + "cat.jpg":"" + } + }, + { + "image2":{ + "horse.jpg":"" + } + } + ] + + - **application/json** request + + .. code-block:: + + curl -X POST \ + \ + -d '{ + "images":"base64 encode image" + }' + + The corresponding input data is **python dict**. + + .. code-block:: + + { + "images":"base64 encode image" + + } + +TensorFlow Inference Script Example +----------------------------------- + +The following is an example of TensorFlow MnistService. + +- Inference code + + +-----------------------------------+-------------------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | from PIL import Image | + | 2 | import numpy as np | + | 3 | from model_service.tfserving_model_service import TfServingBaseService | + | 4 | | + | 5 | class mnist_service(TfServingBaseService): | + | 6 | | + | 7 | def _preprocess(self, data): | + | 8 | preprocessed_data = {} | + | 9 | | + | 10 | for k, v in data.items(): | + | 11 | for file_name, file_content in v.items(): | + | 12 | image1 = Image.open(file_content) | + | 13 | image1 = np.array(image1, dtype=np.float32) | + | 14 | image1.resize((1, 784)) | + | 15 | preprocessed_data[k] = image1 | + | 16 | | + | 17 | return preprocessed_data | + | 18 | | + | 19 | def _postprocess(self, data): | + | 20 | | + | 21 | infer_output = {} | + | 22 | | + | 23 | for output_name, result in data.items(): | + | 24 | | + | 25 | infer_output["mnist_result"] = result[0].index(max(result[0])) | + | 26 | | + | 27 | return infer_output | + +-----------------------------------+-------------------------------------------------------------------------------+ + +- Request + + .. code-block:: + + curl -X POST \ Real-time service address \ -F images=@test.jpg + +- Response + + .. code-block:: + + {"mnist_result": 7} + +The preceding code example resizes images imported to the user's form to adapt to the model input shape. The **32×32** image is read from the Pillow library and resized to **1×784** to match the model input. In subsequent processing, convert the model output into a list for the RESTful API to display. + +XGBoost Inference Script Example +-------------------------------- + +.. 
code-block:: + + # coding:utf-8 + import collections + import json + import xgboost as xgb + from model_service.python_model_service import XgSklServingBaseService + + + class user_Service(XgSklServingBaseService): + + # request data preprocess + def _preprocess(self, data): + list_data = [] + json_data = json.loads(data, object_pairs_hook=collections.OrderedDict) + for element in json_data["data"]["req_data"]: + array = [] + for each in element: + array.append(element[each]) + list_data.append(array) + return list_data + + # predict + def _inference(self, data): + xg_model = xgb.Booster(model_file=self.model_path) + pre_data = xgb.DMatrix(data) + pre_result = xg_model.predict(pre_data) + pre_result = pre_result.tolist() + return pre_result + + # predict result process + def _postprocess(self, data): + resp_data = [] + for element in data: + resp_data.append({"predict_result": element}) + return resp_data + +Inference Script Example of the Custom Inference Logic +------------------------------------------------------ + +First, define a dependency package in the configuration file. For details, see `Example of a Model Configuration File Using a Custom Dependency Package <../model_package_specifications/specifications_for_compiling_the_model_configuration_file.html#example-of-a-model-configuration-file-using-a-custom-dependency-package>`__. Then, use the following code example to implement the loading and inference of the model in **saved_model** format. + ++-----------------------------------+--------------------------------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | # -*- coding: utf-8 -*- | +| 2 | import json | +| 3 | import os | +| 4 | import threading | +| 5 | | +| 6 | import numpy as np | +| 7 | import tensorflow as tf | +| 8 | from PIL import Image | +| 9 | | +| 10 | from model_service.tfserving_model_service import TfServingBaseService | +| 11 | import logging | +| 12 | | +| 13 | logger = logging.getLogger(__name__) | +| 14 | | +| 15 | | +| 16 | class MnistService(TfServingBaseService): | +| 17 | | +| 18 | def __init__(self, model_name, model_path): | +| 19 | self.model_name = model_name | +| 20 | self.model_path = model_path | +| 21 | self.model_inputs = {} | +| 22 | self.model_outputs = {} | +| 23 | | +| 24 | # The label file can be loaded here and used in the post-processing function. | +| 25 | # Directories for storing the label.txt file on OBS and in the model package | +| 26 | | +| 27 | # with open(os.path.join(self.model_path, 'label.txt')) as f: | +| 28 | # self.label = json.load(f) | +| 29 | | +| 30 | # Load the model in saved_model format in non-blocking mode to prevent blocking timeout. | +| 31 | thread = threading.Thread(target=self.get_tf_sess) | +| 32 | thread.start() | +| 33 | | +| 34 | def get_tf_sess(self): | +| 35 | # Load the model in saved_model format. | +| 36 | | +| 37 | # The session will be reused. Do not use the with statement. 
| +| 38 | sess = tf.Session(graph=tf.Graph()) | +| 39 | meta_graph_def = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], self.model_path) | +| 40 | signature_defs = meta_graph_def.signature_def | +| 41 | | +| 42 | self.sess = sess | +| 43 | | +| 44 | signature = [] | +| 45 | | +| 46 | # only one signature allowed | +| 47 | for signature_def in signature_defs: | +| 48 | signature.append(signature_def) | +| 49 | if len(signature) == 1: | +| 50 | model_signature = signature[0] | +| 51 | else: | +| 52 | logger.warning("signatures more than one, use serving_default signature") | +| 53 | model_signature = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY | +| 54 | | +| 55 | logger.info("model signature: %s", model_signature) | +| 56 | | +| 57 | for signature_name in meta_graph_def.signature_def[model_signature].inputs: | +| 58 | tensorinfo = meta_graph_def.signature_def[model_signature].inputs[signature_name] | +| 59 | name = tensorinfo.name | +| 60 | op = self.sess.graph.get_tensor_by_name(name) | +| 61 | self.model_inputs[signature_name] = op | +| 62 | | +| 63 | logger.info("model inputs: %s", self.model_inputs) | +| 64 | | +| 65 | for signature_name in meta_graph_def.signature_def[model_signature].outputs: | +| 66 | tensorinfo = meta_graph_def.signature_def[model_signature].outputs[signature_name] | +| 67 | name = tensorinfo.name | +| 68 | op = self.sess.graph.get_tensor_by_name(name) | +| 69 | | +| 70 | self.model_outputs[signature_name] = op | +| 71 | | +| 72 | logger.info("model outputs: %s", self.model_outputs) | +| 73 | | +| 74 | def _preprocess(self, data): | +| 75 | # Two request modes using HTTPS | +| 76 | # 1. The request in form-data file format is as follows: data = {"Request key value":{"File name":}} | +| 77 | # 2. 
Request in JSON format is as follows: data = json.loads("JSON body transferred by the API") | +| 78 | preprocessed_data = {} | +| 79 | | +| 80 | for k, v in data.items(): | +| 81 | for file_name, file_content in v.items(): | +| 82 | image1 = Image.open(file_content) | +| 83 | image1 = np.array(image1, dtype=np.float32) | +| 84 | image1.resize((1, 28, 28)) | +| 85 | preprocessed_data[k] = image1 | +| 86 | | +| 87 | return preprocessed_data | +| 88 | | +| 89 | def _inference(self, data): | +| 90 | | +| 91 | feed_dict = {} | +| 92 | for k, v in data.items(): | +| 93 | if k not in self.model_inputs.keys(): | +| 94 | logger.error("input key %s is not in model inputs %s", k, list(self.model_inputs.keys())) | +| 95 | raise Exception("input key %s is not in model inputs %s" % (k, list(self.model_inputs.keys()))) | +| 96 | feed_dict[self.model_inputs[k]] = v | +| 97 | | +| 98 | result = self.sess.run(self.model_outputs, feed_dict=feed_dict) | +| 99 | logger.info('predict result : ' + str(result)) | +| 100 | | +| 101 | return result | +| 102 | | +| 103 | def _postprocess(self, data): | +| 104 | infer_output = {"mnist_result": []} | +| 105 | for output_name, results in data.items(): | +| 106 | | +| 107 | for result in results: | +| 108 | infer_output["mnist_result"].append(np.argmax(result)) | +| 109 | | +| 110 | return infer_output | +| 111 | | +| 112 | def __del__(self): | +| 113 | self.sess.close() | ++-----------------------------------+--------------------------------------------------------------------------------------------------------------------------+ + +MindSpore Inference Script Example +---------------------------------- + ++-----------------------------------+-----------------------------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | import threading | +| 2 | | +| 3 | import mindspore | +| 4 | import mindspore.nn as nn | +| 5 | import numpy as np | +| 6 | import logging | +| 7 | from mindspore import Tensor, context | +| 8 | from mindspore.common.initializer import Normal | +| 9 | from mindspore.train.serialization import load_checkpoint, load_param_into_net | +| 10 | from model_service.model_service import SingleNodeService | +| 11 | from PIL import Image | +| 12 | | +| 13 | logger = logging.getLogger(__name__) | +| 14 | logger.setLevel(logging.INFO) | +| 15 | | +| 16 | | +| 17 | | +| 18 | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | +| 19 | | +| 20 | | +| 21 | class LeNet5(nn.Cell): | +| 22 | """Lenet network structure.""" | +| 23 | | +| 24 | # define the operator required | +| 25 | def __init__(self, num_class=10, num_channel=1): | +| 26 | super(LeNet5, self).__init__() | +| 27 | self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid') | +| 28 | self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid') | +| 29 | self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02)) | +| 30 | self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02)) | +| 31 | self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02)) | +| 32 | self.relu = nn.ReLU() | +| 33 | self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) | +| 34 | self.flatten = nn.Flatten() | +| 35 | | +| 36 | # use the preceding operators to construct networks | +| 37 | def construct(self, x): | +| 38 | x = self.max_pool2d(self.relu(self.conv1(x))) | +| 39 | x = self.max_pool2d(self.relu(self.conv2(x))) | +| 40 | x = self.flatten(x) | +| 41 | x = self.relu(self.fc1(x)) | +| 42 | x = self.relu(self.fc2(x)) | +| 43 | x = self.fc3(x) | +| 44 | return x | +| 45 | | +| 46 | | +| 
47 | class mnist_service(SingleNodeService): | +| 48 | def __init__(self, model_name, model_path): | +| 49 | self.model_name = model_name | +| 50 | self.model_path = model_path | +| 51 | logger.info("self.model_name:%s self.model_path: %s", self.model_name, | +| 52 | self.model_path) | +| 53 | self.network = None | +| 54 | # Load the model in non-blocking mode to prevent blocking timeout. | +| 55 | thread = threading.Thread(target=self.load_model) | +| 56 | thread.start() | +| 57 | | +| 58 | def load_model(self): | +| 59 | logger.info("load network ... \n") | +| 60 | self.network = LeNet5() | +| 61 | ckpt_file = self.model_path + "/checkpoint_lenet_1-1_1875.ckpt" | +| 62 | logger.info("ckpt_file: %s", ckpt_file) | +| 63 | param_dict = load_checkpoint(ckpt_file) | +| 64 | load_param_into_net(self.network, param_dict) | +| 65 | logger.info("load network successfully ! \n") | +| 66 | | +| 67 | def _preprocess(self, input_data): | +| 68 | preprocessed_result = {} | +| 69 | images = [] | +| 70 | for k, v in input_data.items(): | +| 71 | for file_name, file_content in v.items(): | +| 72 | image1 = Image.open(file_content) | +| 73 | image1 = image1.resize((1, 32 * 32)) | +| 74 | image1 = np.array(image1, dtype=np.float32) | +| 75 | images.append(image1) | +| 76 | | +| 77 | images = np.array(images, dtype=np.float32) | +| 78 | logger.info(images.shape) | +| 79 | images.resize([len(input_data), 1, 32, 32]) | +| 80 | logger.info("images shape: %s", images.shape) | +| 81 | inputs = Tensor(images, mindspore.float32) | +| 82 | preprocessed_result['images'] = inputs | +| 83 | | +| 84 | return preprocessed_result | +| 85 | | +| 86 | def _inference(self, preprocessed_result): | +| 87 | inference_result = self.network(preprocessed_result['images']) | +| 88 | return inference_result | +| 89 | | +| 90 | def _postprocess(self, inference_result): | +| 91 | return str(inference_result) | ++-----------------------------------+-----------------------------------------------------------------------------------+ + + diff --git a/umn/source/model_package_specifications/specifications_for_compiling_the_model_configuration_file.rst b/umn/source/model_package_specifications/specifications_for_compiling_the_model_configuration_file.rst new file mode 100644 index 0000000..5454f4a --- /dev/null +++ b/umn/source/model_package_specifications/specifications_for_compiling_the_model_configuration_file.rst @@ -0,0 +1,805 @@ +Specifications for Compiling the Model Configuration File +========================================================= + +A model developer needs to compile a configuration file when publishing a model. The model configuration file describes the model usage, computing framework, precision, inference code dependency package, and model API. + +Configuration File Format +------------------------- + +The configuration file is in JSON format. `Table 1 <#modelarts230092enustopic0172466149table7143191919436>`__ describes the parameters. + + + +.. _modelarts230092enustopic0172466149table7143191919436: + +.. 
table:: **Table 1** Parameters + + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Data Type | Description | + +=================+=================+===========================+=============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | model_algorithm | Yes | String | Model algorithm, which is set by the model developer to help model users understand the usage of the model. The value must start with a letter and contain no more than 36 characters. Chinese characters and special characters (&!'\"<>=) are not allowed. Common model algorithms include **image_classification** (image classification), **object_detection** (object detection), and **predict_analysis** (prediction analysis). | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | model_type | Yes | String | Model AI engine, which indicates the computing framework used by a model. The options are **TensorFlow**, **MXNet**, **Spark_MLlib**, **Caffe**, **Scikit_Learn**, **XGBoost**, **PyTorch**, **MindSpore**, and **Image**. | + | | | | | + | | | | **Image** is not a common AI framework. When **model_type** is set to **Image**, a model is imported from a custom image. In this case, **swr_location** is mandatory. For details about how to make Image images, see `Custom Image Specifications <../custom_images/introduction_to_custom_images.html>`__. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | runtime | No | String | Model runtime environment. Python 3.6 is used by default. 
The value of **runtime** depends on the value of **model_type**. If **model_type** is set to **Image**, you do not need to set **runtime**. If **model_type** is set to another frequently-used framework, select the engine and development environment. For details about the supported running environments, see `Table 1 <../model_management/importing_a_model/importing_a_meta_model_from_obs.html#modelarts230207enustopic0207629478table108792813184>`__. | + | | | | | + | | | | If your model needs to run on a specified CPU or GPU, select the runtime based on the suffix information. If the runtime does not contain the CPU or GPU information, read the description of each runtime in `Table 1 <../model_management/importing_a_model/importing_a_meta_model_from_obs.html#modelarts230207enustopic0207629478table108792813184>`__. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | swr_location | No | String | SWR image address. | + | | | | | + | | | | - If you import a custom image model from a container image, you do not need to set **swr_location**. | + | | | | - If you import a custom image model from OBS (not recommended) and set **model_type** to **Image**, you must set **swr_location**. **swr_location** indicates the address of the Docker image on SWR, indicating that the Docker image on SWR is used to publish the model. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | metrics | No | Object | Model precision information, including the average value, recall rate, precision, and accuracy. For details about the **metrics** object structure, see `Table 2 <#modelarts230092enustopic0172466149table81712704511>`__. | + | | | | | + | | | | This parameter is used only to display model information and is optional. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | apis | No | api array | Format of the requests received and returned by a model. The value is structure data. | + | | | | | + | | | | It is the RESTful API array provided by a model. 
For details about the API data structure, see `Table 3 <#modelarts230092enustopic0172466149table1683418482455>`__. | + | | | | | + | | | | - When **model_type** is set to **Image**, that is, in the model scenario of a custom image, APIs with different paths can be declared in **apis** based on the request path exposed by the image. | + | | | | - When **model_type** is not **Image**, only one API whose request path is **/** can be declared in **apis** because the preconfigured AI engine exposes only one inference API whose request path is **/**. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | dependencies | No | dependency array | Package on which the model inference code depends, which is structure data. | + | | | | | + | | | | Model developers need to provide the package name, installation mode, and version constraints. Only the pip installation mode is supported. `Table 6 <#modelarts230092enustopic0172466149table13709813144819>`__ describes the dependency array. | + | | | | | + | | | | If the model package does not contain the **customize_service.py** file, you do not need to set this parameter. Dependency packages cannot be installed for custom image models. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | health | No | **health** data structure | Configuration of an image health interface. This parameter is mandatory only when **model_type** is set to **Image**. For details about the health data structure, see `Table 8 <#modelarts230092enustopic0172466149table115896191852>`__. | + +-----------------+-----------------+---------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table81712704511: + +.. 
table:: **Table 2** **metrics** object description + + +-----------+-----------+-----------+---------------------------------------------------------+ + | Parameter | Mandatory | Data Type | Description | + +===========+===========+===========+=========================================================+ + | f1 | No | Number | F1 score. The value is rounded to 17 decimal places. | + +-----------+-----------+-----------+---------------------------------------------------------+ + | recall | No | Number | Recall rate. The value is rounded to 17 decimal places. | + +-----------+-----------+-----------+---------------------------------------------------------+ + | precision | No | Number | Precision. The value is rounded to 17 decimal places. | + +-----------+-----------+-----------+---------------------------------------------------------+ + | accuracy | No | Number | Accuracy. The value is rounded to 17 decimal places. | + +-----------+-----------+-----------+---------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table1683418482455: + +.. table:: **Table 3** **api** array + + +-----------+-----------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Data Type | Description | + +===========+===========+===========+================================================================================================================================================================================================================================================================================================+ + | protocol | No | String | Request protocol. The default value is **http**. Set the parameter value to **http** or **https** based on your custom image. For details about other parameter, see `Example of the Object Detection Model Configuration File <#example-of-the-object-detection-model-configuration-file>`__. | + +-----------+-----------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | url | No | String | Request path. The default value is a slash (**/**). For a custom image model (**model_type** is **Image**), set this parameter to the actual request path exposed in the image. For a non-custom image model (**model_type** is not **Image**), the URL can only be **/**. | + +-----------+-----------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | method | No | String | Request method. The default value is **POST**. 
| + +-----------+-----------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | request | No | Object | Request body. For details about the **request** structure, see `Table 4 <#modelarts230092enustopic0172466149table332913335466>`__. | + +-----------+-----------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | response | No | Object | Response body. For details about the **response** structure, see `Table 5 <#modelarts230092enustopic0172466149table17521240184711>`__. | + +-----------+-----------+-----------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table332913335466: + +.. table:: **Table 4** **request** description + + +-----------------+----------------------------+-----------------+----------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Data Type | Description | + +=================+============================+=================+========================================================================================+ + | Content-type | Yes for real-time services | String | Data is sent in a specified content format. The default value is **application/json**. | + | | | | | + | | No for batch services | | The options are as follows: | + | | | | | + | | | | - **application/json**: sends JSON data. | + | | | | - **multipart/form-data**: uploads a file. | + | | | | | + | | | | .. note:: | + | | | | | + | | | | For machine learning models, only **application/json** is supported. | + +-----------------+----------------------------+-----------------+----------------------------------------------------------------------------------------+ + | data | Yes for real-time services | String | The request body is described in JSON schema. | + | | | | | + | | No for batch services | | | + +-----------------+----------------------------+-----------------+----------------------------------------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table17521240184711: + +.. table:: **Table 5** **response** description + + +-----------------+----------------------------+-----------------+----------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Data Type | Description | + +=================+============================+=================+========================================================================================+ + | Content-type | Yes for real-time services | String | Data is sent in a specified content format. The default value is **application/json**. | + | | | | | + | | No for batch services | | The options are as follows: | + | | | | | + | | | | - **application/json**: sends JSON data. | + | | | | - **multipart/form-data**: uploads a file. 
| + | | | | | + | | | | .. note:: | + | | | | | + | | | | For machine learning models, only **application/json** is supported. | + +-----------------+----------------------------+-----------------+----------------------------------------------------------------------------------------+ + | data | Yes for real-time services | String | The response body is described in JSON schema. | + | | | | | + | | No for batch services | | | + +-----------------+----------------------------+-----------------+----------------------------------------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table13709813144819: + +.. table:: **Table 6** **dependency** array + + +-----------+-----------+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Data Type | Description | + +===========+===========+===============+======================================================================================================================================================+ + | installer | Yes | String | Installation method. Only **pip** is supported. | + +-----------+-----------+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | packages | Yes | package array | Dependency package collection. For details about the package structure array, see `Table 7 <#modelarts230092enustopic0172466149table47885356482>`__. | + +-----------+-----------+---------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table47885356482: + +.. table:: **Table 7** package array + + +-----------------+-----------------+-----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Type | Description | + +=================+=================+=================+=========================================================================================================================================================================================+ + | package_name | Yes | String | Dependency package name. Chinese characters and special characters (&!'"<>=) are not allowed. | + +-----------------+-----------------+-----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | package_version | No | String | Dependency package version. If the dependency package does not rely on the version number, leave this field blank. Chinese characters and special characters (&!'"<>=) are not allowed. | + +-----------------+-----------------+-----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | restraint | No | String | Version restriction. This parameter is mandatory only when **package_version** is configured. Possible values are **EXACT**, **ATLEAST**, and **ATMOST**. 
| + | | | | | + | | | | - **EXACT** indicates that a specified version is installed. | + | | | | - **ATLEAST** indicates that the version of the installation package is not earlier than the specified version. | + | | | | - **ATMOST** indicates that the version of the installation package is not later than the specified version. | + | | | | | + | | | | .. note:: | + | | | | | + | | | | - If there are specific requirements on the version, preferentially use **EXACT**. If **EXACT** conflicts with the system installation packages, you can select **ATLEAST**. | + | | | | - If there is no specific requirement on the version, retain only the **package_name** parameter and leave **restraint** and **package_version** blank. | + +-----------------+-----------------+-----------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230092enustopic0172466149table115896191852: + +.. table:: **Table 8** **health** data structure description + + +-----------------------+-----------+--------+------------------------------------------------------------------------------------------------------------+ + | Parameter | Mandatory | Type | Description | + +=======================+===========+========+============================================================================================================+ + | url | Yes | String | Request URL of the health check interface | + +-----------------------+-----------+--------+------------------------------------------------------------------------------------------------------------+ + | protocol | No | String | Request protocol of the health check interface. Only HTTP is supported. | + +-----------------------+-----------+--------+------------------------------------------------------------------------------------------------------------+ + | initial_delay_seconds | No | String | After an instance is started, a health check starts after seconds configured in **initial_delay_seconds**. | + +-----------------------+-----------+--------+------------------------------------------------------------------------------------------------------------+ + | timeout_seconds | No | String | Health check timeout | + +-----------------------+-----------+--------+------------------------------------------------------------------------------------------------------------+ + +Example of the Object Detection Model Configuration File +-------------------------------------------------------- + +The following code uses the TensorFlow engine as an example. You can modify the **model_type** parameter based on the actual engine type. 
+ +- Model input + + Key: images + + Value: image files + +- Model output + + +-----------------------------------+-----------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "detection_classes": [ | + | 4 | "face", | + | 5 | "arm" | + | 6 | ], | + | 7 | "detection_boxes": [ | + | 8 | [ | + | 9 | 33.6, | + | 10 | 42.6, | + | 11 | 104.5, | + | 12 | 203.4 | + | 13 | ], | + | 14 | [ | + | 15 | 103.1, | + | 16 | 92.8, | + | 17 | 765.6, | + | 18 | 945.7 | + | 19 | ] | + | 20 | ], | + | 21 | "detection_scores": [0.99, 0.73] | + | 22 | } | + | 23 | ``` | + +-----------------------------------+-----------------------------------------+ + +- Configuration file + + +-----------------------------------+-------------------------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "model_type": "TensorFlow", | + | 4 | "model_algorithm": "object_detection", | + | 5 | "metrics": { | + | 6 | "f1": 0.345294, | + | 7 | "accuracy": 0.462963, | + | 8 | "precision": 0.338977, | + | 9 | "recall": 0.351852 | + | 10 | }, | + | 11 | "apis": [{ | + | 12 | "protocol": "http", | + | 13 | "url": "/", | + | 14 | "method": "post", | + | 15 | "request": { | + | 16 | "Content-type": "multipart/form-data", | + | 17 | "data": { | + | 18 | "type": "object", | + | 19 | "properties": { | + | 20 | "images": { | + | 21 | "type": "file" | + | 22 | } | + | 23 | } | + | 24 | } | + | 25 | }, | + | 26 | "response": { | + | 27 | "Content-type": "multipart/form-data", | + | 28 | "data": { | + | 29 | "type": "object", | + | 30 | "properties": { | + | 31 | "detection_classes": { | + | 32 | "type": "array", | + | 33 | "items": [{ | + | 34 | "type": "string" | + | 35 | }] | + | 36 | }, | + | 37 | "detection_boxes": { | + | 38 | "type": "array", | + | 39 | "items": [{ | + | 40 | "type": "array", | + | 41 | "minItems": 4, | + | 42 | "maxItems": 4, | + | 43 | "items": [{ | + | 44 | "type": "number" | + | 45 | }] | + | 46 | }] | + | 47 | }, | + | 48 | "detection_scores": { | + | 49 | "type": "array", | + | 50 | "items": [{ | + | 51 | "type": "number" | + | 52 | }] | + | 53 | } | + | 54 | } | + | 55 | } | + | 56 | } | + | 57 | }], | + | 58 | "dependencies": [{ | + | 59 | "installer": "pip", | + | 60 | "packages": [{ | + | 61 | "restraint": "EXACT", | + | 62 | "package_version": "1.15.0", | + | 63 | "package_name": "numpy" | + | 64 | }, | + | 65 | { | + | 66 | "restraint": "EXACT", | + | 67 | "package_version": "5.2.0", | + | 68 | "package_name": "Pillow" | + | 69 | } | + | 70 | ] | + | 71 | }] | + | 72 | } | + | 73 | ``` | + +-----------------------------------+-------------------------------------------------------+ + +Example of the Image Classification Model Configuration File +------------------------------------------------------------ + +The following code uses the TensorFlow engine as an example. You can modify the **model_type** parameter based on the actual engine type. 
+ +- Model input + + Key: images + + Value: image files + +- Model output + + +-----------------------------------+-------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "predicted_label": "flower", | + | 4 | "scores": [ | + | 5 | ["rose", 0.99], | + | 6 | ["begonia", 0.01] | + | 7 | ] | + | 8 | } | + | 9 | ``` | + +-----------------------------------+-------------------------------------+ + +- Configuration file + + +-----------------------------------+---------------------------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "model_type": "TensorFlow", | + | 4 | "model_algorithm": "image_classification", | + | 5 | "metrics": { | + | 6 | "f1": 0.345294, | + | 7 | "accuracy": 0.462963, | + | 8 | "precision": 0.338977, | + | 9 | "recall": 0.351852 | + | 10 | }, | + | 11 | "apis": [{ | + | 12 | "protocol": "http", | + | 13 | "url": "/", | + | 14 | "method": "post", | + | 15 | "request": { | + | 16 | "Content-type": "multipart/form-data", | + | 17 | "data": { | + | 18 | "type": "object", | + | 19 | "properties": { | + | 20 | "images": { | + | 21 | "type": "file" | + | 22 | } | + | 23 | } | + | 24 | } | + | 25 | }, | + | 26 | "response": { | + | 27 | "Content-type": "multipart/form-data", | + | 28 | "data": { | + | 29 | "type": "object", | + | 30 | "properties": { | + | 31 | "predicted_label": { | + | 32 | "type": "string" | + | 33 | }, | + | 34 | "scores": { | + | 35 | "type": "array", | + | 36 | "items": [{ | + | 37 | "type": "array", | + | 38 | "minItems": 2, | + | 39 | "maxItems": 2, | + | 40 | "items": [ | + | 41 | { | + | 42 | "type": "string" | + | 43 | }, | + | 44 | { | + | 45 | "type": "number" | + | 46 | } | + | 47 | ] | + | 48 | }] | + | 49 | } | + | 50 | } | + | 51 | } | + | 52 | } | + | 53 | }], | + | 54 | "dependencies": [{ | + | 55 | "installer": "pip", | + | 56 | "packages": [{ | + | 57 | "restraint": "ATLEAST", | + | 58 | "package_version": "1.15.0", | + | 59 | "package_name": "numpy" | + | 60 | }, | + | 61 | { | + | 62 | "restraint": "", | + | 63 | "package_version": "", | + | 64 | "package_name": "Pillow" | + | 65 | } | + | 66 | ] | + | 67 | }] | + | 68 | } | + | 69 | ``` | + +-----------------------------------+---------------------------------------------------------+ + +Example of the Predictive Analytics Model Configuration File +------------------------------------------------------------ + +The following code uses the TensorFlow engine as an example. You can modify the **model_type** parameter based on the actual engine type. 
+ +- Model input + + +-----------------------------------+--------------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "data": { | + | 4 | "req_data": [ | + | 5 | { | + | 6 | "buying_price": "high", | + | 7 | "maint_price": "high", | + | 8 | "doors": "2", | + | 9 | "persons": "2", | + | 10 | "lug_boot": "small", | + | 11 | "safety": "low", | + | 12 | "acceptability": "acc" | + | 13 | }, | + | 14 | { | + | 15 | "buying_price": "high", | + | 16 | "maint_price": "high", | + | 17 | "doors": "2", | + | 18 | "persons": "2", | + | 19 | "lug_boot": "small", | + | 20 | "safety": "low", | + | 21 | "acceptability": "acc" | + | 22 | } | + | 23 | ] | + | 24 | } | + | 25 | } | + | 26 | ``` | + +-----------------------------------+--------------------------------------------+ + +- Model output + + +-----------------------------------+----------------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "data": { | + | 4 | "resp_data": [ | + | 5 | { | + | 6 | "predict_result": "unacc" | + | 7 | }, | + | 8 | { | + | 9 | "predict_result": "unacc" | + | 10 | } | + | 11 | ] | + | 12 | } | + | 13 | } | + | 14 | ``` | + +-----------------------------------+----------------------------------------------+ + +- Configuration file + + +-----------------------------------+------------------------------------------------------------------+ + | :: | :: | + | | | + | 1 | ``` | + | 2 | { | + | 3 | "model_type": "TensorFlow", | + | 4 | "model_algorithm": "predict_analysis", | + | 5 | "metrics": { | + | 6 | "f1": 0.345294, | + | 7 | "accuracy": 0.462963, | + | 8 | "precision": 0.338977, | + | 9 | "recall": 0.351852 | + | 10 | }, | + | 11 | "apis": [ | + | 12 | { | + | 13 | "protocol": "http", | + | 14 | "url": "/", | + | 15 | "method": "post", | + | 16 | "request": { | + | 17 | "Content-type": "application/json", | + | 18 | "data": { | + | 19 | "type": "object", | + | 20 | "properties": { | + | 21 | "data": { | + | 22 | "type": "object", | + | 23 | "properties": { | + | 24 | "req_data": { | + | 25 | "items": [ | + | 26 | { | + | 27 | "type": "object", | + | 28 | "properties": { | + | 29 | } | + | 30 | }], | + | 31 | "type": "array" | + | 32 | } | + | 33 | } | + | 34 | } | + | 35 | } | + | 36 | } | + | 37 | }, | + | 38 | "response": { | + | 39 | "Content-type": "multipart/form-data", | + | 40 | "data": { | + | 41 | "type": "object", | + | 42 | "properties": { | + | 43 | "data": { | + | 44 | "type": "object", | + | 45 | "properties": { | + | 46 | "resp_data": { | + | 47 | "type": "array", | + | 48 | "items": [ | + | 49 | { | + | 50 | "type": "object", | + | 51 | "properties": { | + | 52 | } | + | 53 | }] | + | 54 | } | + | 55 | } | + | 56 | } | + | 57 | } | + | 58 | } | + | 59 | } | + | 60 | }], | + | 61 | "dependencies": [ | + | 62 | { | + | 63 | "installer": "pip", | + | 64 | "packages": [ | + | 65 | { | + | 66 | "restraint": "EXACT", | + | 67 | "package_version": "1.15.0", | + | 68 | "package_name": "numpy" | + | 69 | }, | + | 70 | { | + | 71 | "restraint": "EXACT", | + | 72 | "package_version": "5.2.0", | + | 73 | "package_name": "Pillow" | + | 74 | }] | + | 75 | }] | + | 76 | } | + | 77 | ``` | + +-----------------------------------+------------------------------------------------------------------+ + +Example of the Custom Image Model Configuration File +---------------------------------------------------- + +The model input and output are similar to those in `Example of the Object Detection Model Configuration File 
<#example-of-the-object-detection-model-configuration-file>`__. + ++-----------------------------------+---------------------------------------------------------+ +| :: | :: | +| | | +| 1 | { | +| 2 | "model_algorithm": "image_classification", | +| 3 | "model_type": "Image", | +| 4 | | +| 5 | "metrics": { | +| 6 | "f1": 0.345294, | +| 7 | "accuracy": 0.462963, | +| 8 | "precision": 0.338977, | +| 9 | "recall": 0.351852 | +| 10 | }, | +| 11 | "apis": [{ | +| 12 | "protocol": "http", | +| 13 | "url": "/", | +| 14 | "method": "post", | +| 15 | "request": { | +| 16 | "Content-type": "multipart/form-data", | +| 17 | "data": { | +| 18 | "type": "object", | +| 19 | "properties": { | +| 20 | "images": { | +| 21 | "type": "file" | +| 22 | } | +| 23 | } | +| 24 | } | +| 25 | }, | +| 26 | "response": { | +| 27 | "Content-type": "multipart/form-data", | +| 28 | "data": { | +| 29 | "type": "object", | +| 30 | "required": [ | +| 31 | "predicted_label", | +| 32 | "scores" | +| 33 | ], | +| 34 | "properties": { | +| 35 | "predicted_label": { | +| 36 | "type": "string" | +| 37 | }, | +| 38 | "scores": { | +| 39 | "type": "array", | +| 40 | "items": [{ | +| 41 | "type": "array", | +| 42 | "minItems": 2, | +| 43 | "maxItems": 2, | +| 44 | "items": [{ | +| 45 | "type": "string" | +| 46 | }, | +| 47 | { | +| 48 | "type": "number" | +| 49 | } | +| 50 | ] | +| 51 | }] | +| 52 | } | +| 53 | } | +| 54 | } | +| 55 | } | +| 56 | }] | +| 57 | } | ++-----------------------------------+---------------------------------------------------------+ + +Example of the Machine Learning Model Configuration File +-------------------------------------------------------- + +The following uses XGBoost as an example: + +- Model input + +.. code-block:: + + { + "data": { + "req_data": [{ + "sepal_length": 5, + "sepal_width": 3.3, + "petal_length": 1.4, + "petal_width": 0.2 + }, { + "sepal_length": 5, + "sepal_width": 2, + "petal_length": 3.5, + "petal_width": 1 + }, { + "sepal_length": 6, + "sepal_width": 2.2, + "petal_length": 5, + "petal_width": 1.5 + }] + } + } + +- Model output + +.. code-block:: + + { + "data": { + "resp_data": [{ + "predict_result": "Iris-setosa" + }, { + "predict_result": "Iris-versicolor" + }] + } + } + +- Configuration file + +.. code-block:: + + { + "model_type": "XGBoost", + "model_algorithm": "xgboost_iris_test", + "runtime": "python2.7", + "metrics": { + "f1": 0.345294, + "accuracy": 0.462963, + "precision": 0.338977, + "recall": 0.351852 + }, + "apis": [ + { + "protocol": "http", + "url": "/", + "method": "post", + "request": { + "Content-type": "application/json", + "data": { + "type": "object", + "properties": { + "data": { + "type": "object", + "properties": { + "req_data": { + "items": [ + { + "type": "object", + "properties": {} + } + ], + "type": "array" + } + } + } + } + } + }, + "response": { + "Content-type": "applicaton/json", + "data": { + "type": "object", + "properties": { + "resp_data": { + "type": "array", + "items": [ + { + "type": "object", + "properties": { + "predict_result": { + "type": "number" + } + } + } + ] + } + } + } + } + } + ] + } + +Example of a Model Configuration File Using a Custom Dependency Package +----------------------------------------------------------------------- + +The following example defines the NumPy 1.16.4 dependency environment. 
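+In pip terms, the declared dependency is roughly equivalent to the following install constraint (illustrative only; the full configuration file follows):
+
+.. code-block::
+
+   pip install numpy==1.16.4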
+ ++-----------------------------------+------------------------------------------------------------+ +| :: | :: | +| | | +| 1 | { | +| 2 | "model_algorithm": "image_classification", | +| 3 | "model_type": "TensorFlow", | +| 4 | "runtime": "python3.6", | +| 5 | "apis": [{ | +| 6 | "procotol": "http", | +| 7 | "url": "/", | +| 8 | "method": "post", | +| 9 | "request": { | +| 10 | "Content-type": "multipart/form-data", | +| 11 | "data": { | +| 12 | "type": "object", | +| 13 | "properties": { | +| 14 | "images": { | +| 15 | "type": "file" | +| 16 | } | +| 17 | } | +| 18 | } | +| 19 | }, | +| 20 | "response": { | +| 21 | "Content-type": "applicaton/json", | +| 22 | "data": { | +| 23 | "type": "object", | +| 24 | "properties": { | +| 25 | "mnist_result": { | +| 26 | "type": "array", | +| 27 | "item": [{ | +| 28 | "type": "string" | +| 29 | }] | +| 30 | } | +| 31 | } | +| 32 | } | +| 33 | } | +| 34 | } | +| 35 | ], | +| 36 | "metrics": { | +| 37 | "f1": 0.124555, | +| 38 | "recall": 0.171875, | +| 39 | "precision": 0.0023493892851938493, | +| 40 | "accuracy": 0.00746268656716417 | +| 41 | }, | +| 42 | "dependencies": [{ | +| 43 | "installer": "pip", | +| 44 | "packages": [{ | +| 45 | "restraint": "EXACT", | +| 46 | "package_version": "1.16.4", | +| 47 | "package_name": "numpy" | +| 48 | } | +| 49 | ] | +| 50 | }] | +| 51 | } | ++-----------------------------------+------------------------------------------------------------+ + + diff --git a/umn/source/model_templates/index.rst b/umn/source/model_templates/index.rst new file mode 100644 index 0000000..e124219 --- /dev/null +++ b/umn/source/model_templates/index.rst @@ -0,0 +1,10 @@ +=============== +Model Templates +=============== + +.. toctree:: + :maxdepth: 1 + + introduction_to_model_templates + template_description/index + input_and_output_modes/index diff --git a/umn/source/model_templates/input_and_output_modes/built-in_image_processing_mode.rst b/umn/source/model_templates/input_and_output_modes/built-in_image_processing_mode.rst new file mode 100644 index 0000000..2a4f9da --- /dev/null +++ b/umn/source/model_templates/input_and_output_modes/built-in_image_processing_mode.rst @@ -0,0 +1,25 @@ +Built-in Image Processing Mode +============================== + +Input +----- + +The built-in image processing input and output mode can be applied to models such as image classification, object detection, and image semantic segmentation. The prediction request path is **/**, the request protocol is **HTTPS**, the request method is **POST**, **Content-Type** is **multipart/form-data**, **key** is **images**, and **type** is **file**. Before selecting this mode, ensure that your model can process the input data whose **key** is **images**. + +Output +------ + +The inference result is returned in JSON format. The specific fields are determined by the model. + +Sample Request +-------------- + +In this mode, input an image to be processed in the inference request. The response in JSON format varies according to the model. The following are examples: + +- Performing prediction on the console + +- Using Postman to call a RESTful API for prediction + + After a model is deployed as a service, you can obtain the API URL on the **Usage Guides** tab page of the service details page. On the **Body** tab page, set the request body. Set **key** to **images**, select **File**, select the image to be processed, and click **send** to send your prediction request. 
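+
+The request described above can also be sent programmatically. The following is a minimal sketch using the Python requests library; the service URL, token, and image path are placeholders that you replace with the values obtained for your own service:
+
+.. code-block::
+
+   import requests
+
+   # Placeholders: API URL from the Usage Guides tab page and a valid token.
+   url = "https://<inference-endpoint>/"
+   headers = {"X-Auth-Token": "<your-token>"}
+
+   # The built-in image processing mode expects multipart/form-data with key "images" and type "file".
+   with open("flower.jpg", "rb") as f:
+       resp = requests.post(url, headers=headers, files={"images": f})
+
+   # The inference result is returned in JSON format; the fields depend on the model.
+   print(resp.json())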
+ + diff --git a/umn/source/model_templates/input_and_output_modes/built-in_object_detection_mode.rst b/umn/source/model_templates/input_and_output_modes/built-in_object_detection_mode.rst new file mode 100644 index 0000000..475918d --- /dev/null +++ b/umn/source/model_templates/input_and_output_modes/built-in_object_detection_mode.rst @@ -0,0 +1,85 @@ +Built-in Object Detection Mode +============================== + +Input +----- + +This is a built-in input and output mode for object detection. The models using this mode are identified as object detection models. The prediction request path is **/**, the request protocol is **HTTP**, the request method is **POST**, **Content-Type** is **multipart/form-data**, **key** is **images**, and **type** is **file**. Before selecting this mode, ensure that your model can process the input data whose **key** is **images**. + +Output +------ + +The inference result is returned in JSON format. For details about the fields, see `Table 1 <#modelarts230100enustopic0172873542table101531747125712>`__. + + + +.. _modelarts230100enustopic0172873542table101531747125712: + +.. table:: **Table 1** Parameters + + +-------------------+--------------+-----------------------------------------------------------------------------------------+ + | Field | Type | Description | + +===================+==============+=========================================================================================+ + | detection_classes | String array | List of detected objects, for example, **["flowers","cat"]** | + +-------------------+--------------+-----------------------------------------------------------------------------------------+ + | detection_boxes | Float array | Coordinates of the bounding box, in the format of |image2| | + +-------------------+--------------+-----------------------------------------------------------------------------------------+ + | detection_scores | Float array | Confidence scores of detected objects, which are used to measure the detection accuracy | + +-------------------+--------------+-----------------------------------------------------------------------------------------+ + +The **JSON Schema** of the inference result is as follows: + +.. code-block:: + + { + "type": "object", + "properties": { + "detection_classes": { + "items": { + "type": "string" + }, + "type": "array" + }, + "detection_boxes": { + "items": { + "minItems": 4, + "items": { + "type": "number" + }, + "type": "array", + "maxItems": 4 + }, + "type": "array" + }, + "detection_scores": { + "items": { + "type": "string" + }, + "type": "array" + } + } + } + +Sample Request +-------------- + +In this mode, input an image to be processed in the inference request. The inference result is returned in JSON format. The following are examples: + +- Performing prediction on the console + + On the **Prediction** tab page of the service details page, upload an image and click **Predict** to obtain the prediction result. + +- Using Postman to call a RESTful API for prediction + + After a model is deployed as a service, you can obtain the API URL on the **Usage Guides** tab page of the service details page. + + - On the **Headers** tab page, set **Content-Type** to **multipart/form-data** and **X-Auth-Token** to the actual token obtained. + + - On the **Body** tab page, set the request body. Set **key** to **images**, select **File**, select the image to be processed, and click **send** to send your prediction request. + + + +.. 
|image1| image:: /_static/images/en-us_image_0000001110761158.png + +.. |image2| image:: /_static/images/en-us_image_0000001110761158.png + diff --git a/umn/source/model_templates/input_and_output_modes/built-in_predictive_analytics_mode.rst b/umn/source/model_templates/input_and_output_modes/built-in_predictive_analytics_mode.rst new file mode 100644 index 0000000..0c09158 --- /dev/null +++ b/umn/source/model_templates/input_and_output_modes/built-in_predictive_analytics_mode.rst @@ -0,0 +1,126 @@ +Built-in Predictive Analytics Mode +================================== + +Input +----- + +This is a built-in input and output mode for predictive analytics. The models using this mode are identified as predictive analytics models. The prediction request path is **/**, the request protocol is **HTTP**, the request method is **POST**, and **Content-Type** is **application/json**. The request body is in JSON format. For details about the JSON fields, see `Table 1 <#modelarts230102enustopic0172873544table101531747125712>`__. Before selecting this mode, ensure that your model can process the input data in **JSON Schema** format. + + + +.. _modelarts230102enustopic0172873544table101531747125712: + +.. table:: **Table 1** JSON field description + + +-------+----------------+------------------------------------------------------------------------------------------------------+ + | Field | Type | Description | + +=======+================+======================================================================================================+ + | data | Data structure | Inference data. For details, see `Table 2 <#modelarts230102enustopic0172873544table159187574436>`__. | + +-------+----------------+------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230102enustopic0172873544table159187574436: + +.. table:: **Table 2** **Data** description + + ======== ============= ====================== + Field Type Description + ======== ============= ====================== + req_data ReqData array List of inference data + ======== ============= ====================== + +**ReqData** is of the **Object** type and indicates the inference data. The data structure is determined by the application scenario. For models using this mode, the preprocessing logic in the custom model inference code should be able to correctly process the data inputted in the format defined by the mode. + +The **JSON Schema** of a prediction request is as follows: + +.. code-block:: + + { + "type": "object", + "properties": { + "data": { + "type": "object", + "properties": { + "req_data": { + "items": [{ + "type": "object", + "properties": {} + }], + "type": "array" + } + } + } + } + } + +Output +------ + +The inference result is returned in JSON format. For details about the JSON fields, see `Table 3 <#modelarts230102enustopic0172873544table49621346461>`__. + + + +.. _modelarts230102enustopic0172873544table49621346461: + +.. table:: **Table 3** JSON field description + + +-------+----------------+------------------------------------------------------------------------------------------------------+ + | Field | Type | Description | + +=======+================+======================================================================================================+ + | data | Data structure | Inference data. For details, see `Table 4 <#modelarts230102enustopic0172873544table196311344469>`__. 
| + +-------+----------------+------------------------------------------------------------------------------------------------------+ + + + +.. _modelarts230102enustopic0172873544table196311344469: + +.. table:: **Table 4** **Data** description + + ========= ============== ========================== + Field Type Description + ========= ============== ========================== + resp_data RespData array List of prediction results + ========= ============== ========================== + +Similar to **ReqData**, **RespData** is also of the **Object** type and indicates the prediction result. Its structure is determined by the application scenario. For models using this mode, the postprocessing logic in the custom model inference code should be able to correctly output data in the format defined by the mode. + +The **JSON Schema** of a prediction result is as follows: + +.. code-block:: + + { + "type": "object", + "properties": { + "data": { + "type": "object", + "properties": { + "resp_data": { + "type": "array", + "items": [{ + "type": "object", + "properties": {} + }] + } + } + } + } + } + +Sample Request +-------------- + +In this mode, input the data to be predicted in JSON format. The prediction result is returned in JSON format. The following are examples: + +- Performing prediction on the console + + On the **Prediction** tab page of the service details page, enter inference code and click **Predict** to obtain the prediction result. + +- Using Postman to call a RESTful API for prediction + + After a model is deployed as a service, you can obtain the API URL on the **Usage Guides** tab page of the service details page. + + - On the **Headers** tab page, set **Content-Type** to **application/json** and **X-Auth-Token** to the actual token obtained. + - On the **Body** tab page, edit the data to be predicted and click **send** to send your prediction request. + + diff --git a/umn/source/model_templates/input_and_output_modes/index.rst b/umn/source/model_templates/input_and_output_modes/index.rst new file mode 100644 index 0000000..ba2ed95 --- /dev/null +++ b/umn/source/model_templates/input_and_output_modes/index.rst @@ -0,0 +1,11 @@ +====================== +Input and Output Modes +====================== + +.. toctree:: + :maxdepth: 1 + + built-in_object_detection_mode + built-in_image_processing_mode + built-in_predictive_analytics_mode + undefined_mode diff --git a/umn/source/model_templates/input_and_output_modes/undefined_mode.rst b/umn/source/model_templates/input_and_output_modes/undefined_mode.rst new file mode 100644 index 0000000..a23f347 --- /dev/null +++ b/umn/source/model_templates/input_and_output_modes/undefined_mode.rst @@ -0,0 +1,24 @@ +Undefined Mode +============== + +Description +----------- + +The undefined mode does not define the input and output mode. The input and output mode is determined by the model. Select this mode only when the existing input and output mode is not applicable to the application scenario of the model. The models imported in undefined mode cannot be deployed as batch services. In addition, the service prediction page may not be displayed properly. New modes are coming soon for more application scenarios. + +Input +----- + +No limit. + +Output +------ + +No limit. + +Sample Request +-------------- + +The undefined mode has no specific sample request because the input and output of the request are entirely determined by the model. 
+ + diff --git a/umn/source/model_templates/introduction_to_model_templates.rst b/umn/source/model_templates/introduction_to_model_templates.rst new file mode 100644 index 0000000..8906e38 --- /dev/null +++ b/umn/source/model_templates/introduction_to_model_templates.rst @@ -0,0 +1,44 @@ +Introduction to Model Templates +=============================== + +Because the configurations of models with the same functions are similar, ModelArts integrates the configurations of such models into a common template. By using this template, you can easily and quickly import models without compiling the **config.json** configuration file. In simple terms, a template integrates AI engine and model configurations. Each template corresponds to a specific AI engine and inference mode. With the templates, you can quickly import models to ModelArts. + +Using a Template +---------------- + +The following uses the template described in `TensorFlow-py36 General Template <../model_templates/template_description/tensorflow-py36_general_template.html>`__ as an example. Upload the TensorFlow model package to OBS before using the template. Store the model files in the **model** directory. When creating a model using this template, you need to select the **model** directory. + +#. On the **Import Model** page, set **Meta Model Source** to **Template**. + +#. In the **Template** area, select **TensorFlow-py36 general template**. + + ModelArts also provides three filter criteria: **Type**, **Engine**, and **Environment**, helping you quickly find the desired template. If the three filter criteria cannot meet your requirements, you can enter keywords to search for the target template. + +#. For **Model Folder**, select the **model** directory where the model files reside. For details, see `Template Description <../model_templates/index.html>`__. + + .. note:: + + If a training job is executed for multiple times, different version directories are generated, such as V001 and V002, and the generated models are stored in the **model** folder in different version directories. When selecting model files, specify the **model** folder in the corresponding version directory. + +#. If the default input and output mode of the selected template can be overwritten, you can select an input and output mode based on the model function or application scenario. **Input and Output Mode** is an abstract of the API in **config.json**. It describes the interface provided by the model for external inference. An input and output mode describes one or more APIs, and corresponds to a template. + + For details about the supported input and output modes, see `Input and Output Modes <../model_templates/index.html>`__. 
+ +Supported Templates +------------------- + +- `TensorFlow-py36 General Template <../model_templates/template_description/tensorflow-py36_general_template.html>`__ +- `MXNet-py36 General Template <../model_templates/template_description/mxnet-py36_general_template.html>`__ +- `PyTorch-py36 General Template <../model_templates/template_description/pytorch-py36_general_template.html>`__ +- `Caffe-CPU-py36 General Template <../model_templates/template_description/caffe-cpu-py36_general_template.html>`__ +- `Caffe-GPU-py36 General Template <../model_templates/template_description/caffe-gpu-py36_general_template.html>`__ + +Supported Input and Output Modes +-------------------------------- + +- `Built-in Object Detection Mode <../model_templates/input_and_output_modes/built-in_object_detection_mode.html>`__ +- `Built-in Image Processing Mode <../model_templates/input_and_output_modes/built-in_image_processing_mode.html>`__ +- `Built-in Predictive Analytics Mode <../model_templates/input_and_output_modes/built-in_predictive_analytics_mode.html>`__ +- `Undefined Mode <../model_templates/input_and_output_modes/undefined_mode.html>`__ + + diff --git a/umn/source/model_templates/template_description/arm-ascend_template.rst b/umn/source/model_templates/template_description/arm-ascend_template.rst new file mode 100644 index 0000000..ce9d548 --- /dev/null +++ b/umn/source/model_templates/template_description/arm-ascend_template.rst @@ -0,0 +1,50 @@ +Arm-Ascend Template +=================== + +Introduction +------------ + +AI engine: MindSpore; Environment: Python 3.5; Input and output mode: undefined mode. Select an appropriate input and output mode based on the model function or application scenario. When using the template to import a model, select the **model** directory containing the model files. + +Template Input +-------------- + +The template input is the OM-based model package stored on OBS. Ensure that the OBS directory you use and ModelArts are in the same region. For details about model package requirements, see `Model Package Example <#model-package-example>`__. + +Input and Output Mode +--------------------- + +`Undefined Mode <../../model_templates/input_and_output_modes/undefined_mode.html>`__ cannot be overwritten. That is, you cannot select another input and output mode during model creation. + +Model Package Specifications +---------------------------- + +- The model package must be stored in the OBS folder named **model**. Model files and the model inference code file are stored in the **model** folder. +- The model inference code file is optional. If the file exists, the file name must be **customize_service.py**. Only one inference code file can exist in the **model** folder. For details about how to compile the model inference code file, see `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The structure of the model package imported using the template is as follows: + + .. code-block:: + + model/ + │ + ├── Model file //(Mandatory) The model file format varies according to the engine. For details, see the model package example. + ├── Custom Python package //(Optional) User's Python package, which can be directly referenced in the model inference code + ├── customize_service.py //(Optional) Model inference code file. The file name must be customize_service.py. Otherwise, the code is not considered as inference code.
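+
+For illustration only, the layout rules above can be checked on a local copy of the package before it is uploaded to OBS. The following sketch uses only the Python standard library; the path is a placeholder:
+
+.. code-block::
+
+   import os
+
+   def check_model_package(path):
+       """Basic checks for the package rules described above."""
+       # The folder selected during model import must be named "model".
+       if os.path.basename(os.path.normpath(path)) != "model":
+           raise ValueError("The model package folder must be named 'model'.")
+       entries = os.listdir(path)
+       # The folder must contain the model file(s).
+       if not entries:
+           raise ValueError("The 'model' folder must not be empty.")
+       # The inference code file is optional; if present, it must be named customize_service.py.
+       if "customize_service.py" in entries:
+           print("Inference code file found: customize_service.py")
+       else:
+           print("No inference code file (optional).")
+
+   check_model_package("./model")  # placeholder: local copy of the model folder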
+ +Model Package Example +--------------------- + +**Structure of the OM-based model package** + +When publishing the model, you only need to specify the **model** directory. + +.. code-block:: + + OBS bucket/directory name + |── model (Mandatory) The folder must be named model and is used to store model-related files. + ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + ├── model.om (Mandatory) Protocol buffer file, which contains the diagram description of the model + ├──customize_service.py (Optional) Model inference code file. The file must be named customize_service.py. Only one inference code file exists. The .py file on which customize_service.py depends can be directly put in the model directory. + + diff --git a/umn/source/model_templates/template_description/caffe-cpu-py36_general_template.rst b/umn/source/model_templates/template_description/caffe-cpu-py36_general_template.rst new file mode 100644 index 0000000..d999709 --- /dev/null +++ b/umn/source/model_templates/template_description/caffe-cpu-py36_general_template.rst @@ -0,0 +1,51 @@ +Caffe-CPU-py36 General Template +=============================== + +Introduction +------------ + +AI engine: CPU-based Caffe 1.0; Environment: Python 3.6; Input and output mode: undefined mode. Select an appropriate input and output mode based on the model function or application scenario. When using the template to import a model, select the **model** directory containing the model files. + +Template Input +-------------- + +The template input is the Caffe-based model package stored on OBS. Ensure that the OBS directory you use and ModelArts are in the same region. For details about model package requirements, see `Model Package Example <#model-package-example>`__. + +Input and Output Mode +--------------------- + +`Undefined Mode <../../model_templates/input_and_output_modes/undefined_mode.html>`__ can be overwritten. That is, you can select another input and output mode during model creation. + +Model Package Specifications +---------------------------- + +- The model package must be stored in the OBS folder named **model**. Model files and the model inference code file are stored in the **model** folder. +- The model inference code file is optional. If the file exists, the file name must be **customize_service.py**. Only one inference code file can exist in the **model** folder. For details about how to compile the model inference code file, see `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The structure of the model package imported using the template is as follows: + + .. code-block:: + + model/ + │ + ├── Model file //(Mandatory) The model file format varies according to the engine. For details, see the model package example. + ├── Custom Python package //(Optional) User's Python package, which can be directly referenced in the model inference code + ├── customize_service.py //(Optional) Model inference code file. The file name must be customize_service.py. Otherwise, the code is not considered as inference code. + +Model Package Example +--------------------- + +**Structure of the Caffe-based model package** + +When publishing the model, you only need to specify the **model** directory. + +.. code-block:: + + OBS bucket/directory name + |── model (Mandatory) The folder must be named model and is used to store model-related files. 
+ |── <> (Optional) User's Python package, which can be directly referenced in the model inference code + |── deploy.prototxt (Mandatory) Caffe model file, which contains information such as the model network structure + |── resnet.caffemodel (Mandatory) Caffe model file, which contains variable and weight information + |── customize_service.py (Optional) Model inference code file. The file must be named customize_service.py. Only one inference code file exists. The .py file on which customize_service.py depends can be directly put in the model directory. + + diff --git a/umn/source/model_templates/template_description/caffe-gpu-py36_general_template.rst b/umn/source/model_templates/template_description/caffe-gpu-py36_general_template.rst new file mode 100644 index 0000000..1c3590f --- /dev/null +++ b/umn/source/model_templates/template_description/caffe-gpu-py36_general_template.rst @@ -0,0 +1,51 @@ +Caffe-GPU-py36 General Template +=============================== + +Introduction +------------ + +AI engine: GPU-based Caffe 1.0; Environment: Python 3.6; Input and output mode: undefined mode. Select an appropriate input and output mode based on the model function or application scenario. When using the template to import a model, select the **model** directory containing the model files. + +Template Input +-------------- + +The template input is the Caffe-based model package stored on OBS. Ensure that the OBS directory you use and ModelArts are in the same region. For details about model package requirements, see `Model Package Example <#model-package-example>`__. + +Input and Output Mode +--------------------- + +`Undefined Mode <../../model_templates/input_and_output_modes/undefined_mode.html>`__ can be overwritten. That is, you can select another input and output mode during model creation. + +Model Package Specifications +---------------------------- + +- The model package must be stored in the OBS folder named **model**. Model files and the model inference code file are stored in the **model** folder. +- The model inference code file is optional. If the file exists, the file name must be **customize_service.py**. Only one inference code file can exist in the **model** folder. For details about how to compile the model inference code file, see `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The structure of the model package imported using the template is as follows: + + .. code-block:: + + model/ + │ + ├── Model file //(Mandatory) The model file format varies according to the engine. For details, see the model package example. + ├── Custom Python package //(Optional) User's Python package, which can be directly referenced in the model inference code + ├── customize_service.py //(Optional) Model inference code file. The file name must be customize_service.py. Otherwise, the code is not considered as inference code. + +Model Package Example +--------------------- + +**Structure of the Caffe-based model package** + +When publishing the model, you only need to specify the **model** directory. + +.. code-block:: + + OBS bucket/directory name + |── model (Mandatory) The folder must be named model and is used to store model-related files. 
+ |── <> (Optional) User's Python package, which can be directly referenced in the model inference code + |── deploy.prototxt (Mandatory) Caffe model file, which contains information such as the model network structure + |── resnet.caffemodel (Mandatory) Caffe model file, which contains variable and weight information + |── customize_service.py (Optional) Model inference code file. The file must be named customize_service.py. Only one inference code file exists. The .py file on which customize_service.py depends can be directly put in the model directory. + + diff --git a/umn/source/model_templates/template_description/index.rst b/umn/source/model_templates/template_description/index.rst new file mode 100644 index 0000000..e23be45 --- /dev/null +++ b/umn/source/model_templates/template_description/index.rst @@ -0,0 +1,13 @@ +==================== +Template Description +==================== + +.. toctree:: + :maxdepth: 1 + + tensorflow-py36_general_template + mxnet-py36_general_template + pytorch-py36_general_template + caffe-cpu-py36_general_template + caffe-gpu-py36_general_template + arm-ascend_template diff --git a/umn/source/model_templates/template_description/mxnet-py36_general_template.rst b/umn/source/model_templates/template_description/mxnet-py36_general_template.rst new file mode 100644 index 0000000..adcc6e4 --- /dev/null +++ b/umn/source/model_templates/template_description/mxnet-py36_general_template.rst @@ -0,0 +1,51 @@ +MXNet-py36 General Template +=========================== + +Introduction +------------ + +AI engine: MXNet 1.2.1; Environment: Python 3.6; Input and output mode: undefined mode. Select an appropriate input and output mode based on the model function or application scenario. When using the template to import a model, select the **model** directory containing the model files. + +Template Input +-------------- + +The template input is the MXNet-based model package stored on OBS. Ensure that the OBS directory you use and ModelArts are in the same region. For details about model package requirements, see `Model Package Example <#model-package-example>`__. + +Input and Output Mode +--------------------- + +`Undefined Mode <../../model_templates/input_and_output_modes/undefined_mode.html>`__ can be overwritten. That is, you can select another input and output mode during model creation. + +Model Package Specifications +---------------------------- + +- The model package must be stored in the OBS folder named **model**. Model files and the model inference code file are stored in the **model** folder. +- The model inference code file is optional. If the file exists, the file name must be **customize_service.py**. Only one inference code file can exist in the **model** folder. For details about how to compile the model inference code file, see `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The structure of the model package imported using the template is as follows: + + .. code-block:: + + model/ + │ + ├── Model file //(Mandatory) The model file format varies according to the engine. For details, see the model package example. + ├── Custom Python package //(Optional) User's Python package, which can be directly referenced in the model inference code + ├── customize_service.py //(Optional) Model inference code file. The file name must be customize_service.py. Otherwise, the code is not considered as inference code. 
+ +Model Package Example +--------------------- + +**Structure of the MXNet-based model package** + +When publishing the model, you only need to specify the **model** directory. + +.. code-block:: + + OBS bucket/directory name + |── model (Mandatory) The folder must be named model and is used to store model-related files. + ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + ├── resnet-50-symbol.json (Mandatory) Model definition file, which contains the neural network description of the model + ├── resnet-50-0000.params (Mandatory) Model variable parameter file, which contains parameter and weight information + ├──customize_service.py (Optional) Model inference code file. The file must be named customize_service.py. Only one inference code file exists. The .py file on which customize_service.py depends can be directly put in the model directory. + + diff --git a/umn/source/model_templates/template_description/pytorch-py36_general_template.rst b/umn/source/model_templates/template_description/pytorch-py36_general_template.rst new file mode 100644 index 0000000..610aea3 --- /dev/null +++ b/umn/source/model_templates/template_description/pytorch-py36_general_template.rst @@ -0,0 +1,50 @@ +PyTorch-py36 General Template +============================= + +Introduction +------------ + +AI engine: PyTorch 1.0; Environment: Python 3.6; Input and output mode: undefined mode. Select an appropriate input and output mode based on the model function or application scenario. When using the template to import a model, select the **model** directory containing the model files. + +Template Input +-------------- + +The template input is the PyTorch-based model package stored on OBS. Ensure that the OBS directory you use and ModelArts are in the same region. For details about model package requirements, see `Model Package Example <#model-package-example>`__. + +Input and Output Mode +--------------------- + +`Undefined Mode <../../model_templates/input_and_output_modes/undefined_mode.html>`__ can be overwritten. That is, you can select another input and output mode during model creation. + +Model Package Specifications +---------------------------- + +- The model package must be stored in the OBS folder named **model**. Model files and the model inference code file are stored in the **model** folder. +- The model inference code file is optional. If the file exists, the file name must be **customize_service.py**. Only one inference code file can exist in the **model** folder. For details about how to compile the model inference code file, see `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The structure of the model package imported using the template is as follows: + + .. code-block:: + + model/ + │ + ├── Model file //(Mandatory) The model file format varies according to the engine. For details, see the model package example. + ├── Custom Python package //(Optional) User's Python package, which can be directly referenced in the model inference code + ├── customize_service.py //(Optional) Model inference code file. The file name must be customize_service.py. Otherwise, the code is not considered as inference code. + +Model Package Example +--------------------- + +**Structure of the PyTorch-based model package** + +When publishing the model, you only need to specify the **model** directory. + +.. 
code-block:: + + OBS bucket/directory name + |── model (Mandatory) The folder must be named model and is used to store model-related files. + ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + ├── resnet50.pth (Mandatory) PyTorch model file, which contains variable and weight information + ├──customize_service.py (Optional) Model inference code file. The file must be named customize_service.py. Only one inference code file exists. The .py file on which customize_service.py depends can be directly put in the model directory. + + diff --git a/umn/source/model_templates/template_description/tensorflow-py36_general_template.rst b/umn/source/model_templates/template_description/tensorflow-py36_general_template.rst new file mode 100644 index 0000000..5987e11 --- /dev/null +++ b/umn/source/model_templates/template_description/tensorflow-py36_general_template.rst @@ -0,0 +1,53 @@ +TensorFlow-py36 General Template +================================ + +Introduction +------------ + +AI engine: TensorFlow 1.8; Environment: Python 3.6; Input and output mode: undefined mode. Select an appropriate input and output mode based on the model function or application scenario. When using the template to import a model, select the **model** directory containing the model files. + +Template Input +-------------- + +The template input is the TensorFlow-based model package stored on OBS. Ensure that the OBS directory you use and ModelArts are in the same region. For details about model package requirements, see `Model Package Example <#model-package-example>`__. + +Input and Output Mode +--------------------- + +`Undefined Mode <../../model_templates/input_and_output_modes/undefined_mode.html>`__ can be overwritten. That is, you can select another input and output mode during model creation. + +Model Package Specifications +---------------------------- + +- The model package must be stored in the OBS folder named **model**. Model files and the model inference code file are stored in the **model** folder. +- The model inference code file is optional. If the file exists, the file name must be **customize_service.py**. Only one inference code file can exist in the **model** folder. For details about how to compile the model inference code file, see `Specifications for Compiling Model Inference Code <../../model_package_specifications/specifications_for_compiling_model_inference_code.html>`__. + +- The structure of the model package imported using the template is as follows: + + .. code-block:: + + model/ + │ + ├── Model file //(Mandatory) The model file format varies according to the engine. For details, see the model package example. + ├── Custom Python package //(Optional) User's Python package, which can be directly referenced in the model inference code + ├── customize_service.py //(Optional) Model inference code file. The file name must be customize_service.py. Otherwise, the code is not considered as inference code. + +Model Package Example +--------------------- + +**Structure of the TensorFlow-based model package** + +When publishing the model, you only need to specify the **model** directory. + +.. code-block:: + + OBS bucket/directory name + |── model (Mandatory) The folder must be named model and is used to store model-related files. 
+ ├── <> (Optional) User's Python package, which can be directly referenced in the model inference code + ├── saved_model.pb (Mandatory) Protocol buffer file, which contains the diagram description of the model + ├── variables Mandatory for the main file of the *.pb model. The folder must be named variables and contains the weight deviation of the model. + ├── variables.index Mandatory + ├── variables.data-00000-of-00001 Mandatory + ├──customize_service.py (Optional) Model inference code file. The file must be named customize_service.py. Only one inference code file exists. The .py file on which customize_service.py depends can be directly put in the model directory. + + diff --git a/umn/source/monitoring/index.rst b/umn/source/monitoring/index.rst new file mode 100644 index 0000000..47b793b --- /dev/null +++ b/umn/source/monitoring/index.rst @@ -0,0 +1,10 @@ +========== +Monitoring +========== + +.. toctree:: + :maxdepth: 1 + + modelarts_metrics + setting_alarm_rules + viewing_monitoring_metrics diff --git a/umn/source/monitoring/modelarts_metrics.rst b/umn/source/monitoring/modelarts_metrics.rst new file mode 100644 index 0000000..86d94d6 --- /dev/null +++ b/umn/source/monitoring/modelarts_metrics.rst @@ -0,0 +1,137 @@ +ModelArts Metrics +================= + +Description +----------- + +The cloud service platform provides Cloud Eye to help you better understand the status of your ModelArts real-time services and models. You can use Cloud Eye to automatically monitor your ModelArts real-time services and models in real time and manage alarms and notifications, so that you can keep track of performance metrics of ModelArts and models. + +Namespace +--------- + +SYS.ModelArts + +Monitoring Metrics +------------------ + + + +.. _modelarts230187enustopic0198064686table3293914123812: + +.. 
table:: **Table 1** ModelArts metrics + + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | Metric ID | Metric Name | Meaning | Value Range | Measurement Object & Dimension | Monitoring Interval | + +=============================================================================================================================================================================================================================+============================+===================================================+=============+================================+=====================+ + | cpu_usage | CPU Usage | CPU usage of ModelArts | ≥ 0% | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: % | | ModelArts models | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | mem_usage | Memory Usage | Memory usage of ModelArts | ≥ 0% | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: % | | ModelArts models | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | gpu_util | GPU Usage | GPU usage of ModelArts | ≥ 0% | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: % | | ModelArts models | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | gpu_mem_usage | GPU Memory Usage | GPU memory usage of ModelArts | ≥ 0% | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: % | | ModelArts models | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | successfully_called_times | Number of Successful Calls | Times that ModelArts has been successfully called | ≥Count/min | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: 
Times/min | | ModelArts models | | + | | | | | | | + | | | | | ModelArts real-time services | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id, | | + | | | | | | | + | | | | | service_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | failed_called_times | Number of Failed Calls | Times that ModelArts failed to be called | ≥Count/min | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: Times/min | | ModelArts models | | + | | | | | | | + | | | | | ModelArts real-time services | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id, | | + | | | | | | | + | | | | | service_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | total_called_times | API Calls | Times that ModelArts is called | ≥Count/min | Measurement object: | 1 minute | + | | | | | | | + | | | Unit: Times/min | | ModelArts models | | + | | | | | | | + | | | | | ModelArts real-time services | | + | | | | | | | + | | | | | Dimension: | | + | | | | | | | + | | | | | model_id, | | + | | | | | | | + | | | | | service_id | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + | If a measurement object has multiple measurement dimensions, all the measurement dimensions are mandatory when you use an API to query monitoring metrics. 
| | | | | | + | | | | | | | + | - The following provides an example of using the multi-dimensional **dim** to query a single monitoring metric: dim.0=service_id,530cd6b0-86d7-4818-837f-935f6a27414d&dim.1="model_id,3773b058-5b4f-4366-9035-9bbd9964714a | | | | | | + | | | | | | | + | - The following provides an example of using the multi-dimensional **dim** to query monitoring metrics in batches: | | | | | | + | | | | | | | + | "dimensions": [ | | | | | | + | | | | | | | + | { | | | | | | + | | | | | | | + | "name": "service_id", | | | | | | + | | | | | | | + | "value": "530cd6b0-86d7-4818-837f-935f6a27414d" | | | | | | + | | | | | | | + | } | | | | | | + | | | | | | | + | { | | | | | | + | | | | | | | + | "name": "model_id", | | | | | | + | | | | | | | + | "value": "3773b058-5b4f-4366-9035-9bbd9964714a" | | | | | | + | | | | | | | + | } | | | | | | + | | | | | | | + | ], | | | | | | + +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+---------------------------------------------------+-------------+--------------------------------+---------------------+ + +Dimensions +---------- + + + +.. _modelarts230187enustopic0198064686table130310173915: + +.. table:: **Table 2** Dimension description + + ========== ==================== + Key Value + ========== ==================== + service_id Real-time service ID + model_id Model ID + ========== ==================== + + diff --git a/umn/source/monitoring/setting_alarm_rules.rst b/umn/source/monitoring/setting_alarm_rules.rst new file mode 100644 index 0000000..8696488 --- /dev/null +++ b/umn/source/monitoring/setting_alarm_rules.rst @@ -0,0 +1,26 @@ +Setting Alarm Rules +=================== + +Scenario +-------- + +Setting alarm rules allows you to customize the monitored objects and notification policies so that you can know the status of ModelArts real-time services and models in a timely manner. + +An alarm rule includes the alarm rule name, monitored object, metric, threshold, monitoring interval, and whether to send a notification. This section describes how to set alarm rules for ModelArts services and models. + +Prerequisites +------------- + +You have created an ModelArts real-time service. + +Procedure +--------- + +#. Log in to the management console. +#. Click **Service List**. Under **Management & Deployment**, click **Cloud Eye**. +#. In the left navigation pane, choose **Cloud Service Monitoring > ModelArts**. +#. Select a real-time service for which you want to create an alarm rule and click **Create Alarm Rule** in the **Operation** column. +#. On the **Create Alarm Rule** page, create an alarm rule for ModelArts real-time services and models as prompted. +#. After the setting is complete, click **Create**. When an alarm that meets the rule is generated, the system automatically sends a notification. + + diff --git a/umn/source/monitoring/viewing_monitoring_metrics.rst b/umn/source/monitoring/viewing_monitoring_metrics.rst new file mode 100644 index 0000000..013ba7a --- /dev/null +++ b/umn/source/monitoring/viewing_monitoring_metrics.rst @@ -0,0 +1,45 @@ +Viewing Monitoring Metrics +========================== + +Scenario +-------- + +Cloud Eye on the cloud service platform monitors the status of ModelArts real-time services and model loads. 
You can obtain the monitoring metrics of each ModelArts real-time service and model loads on the management console. Monitored data requires a period of time for transmission and display. The status of ModelArts displayed on the Cloud Eye console is usually the status obtained 5 to 10 minutes before. You can view the monitored data of a newly created real-time service 5 to 10 minutes later. + +Prerequisites +------------- + +- The ModelArts real-time service is running properly. + +- Alarm rules have been configured on the Cloud Eye page. For details, see `Setting Alarm Rules <../monitoring/setting_alarm_rules.html>`__. +- The real-time service has been properly running for at least 10 minutes. +- The monitoring data and graphics are available for a new real-time service after the service runs for at least 10 minutes. + +- Cloud Eye does not display the metrics of a faulty or deleted real-time service. The monitoring metrics can be viewed after the real-time service starts or recovers. + +Monitoring data is unavailable without alarm rules configured on Cloud Eye. For details, see `Setting Alarm Rules <../monitoring/setting_alarm_rules.html>`__. + +Procedure +--------- + +#. Log in to the management console. + +#. Click **Service List**. Under **Management & Deployment**, click **Cloud Eye**. + +#. In the left navigation pane, choose **Cloud Service Monitoring > ModelArts**. + +#. View monitoring graphs. + + - Viewing monitoring graphs of the real-time service: Click **View Graph** in the **Operation** column. + - Viewing monitoring graphs of the model loads: Click |image1| next to the target real-time service, and select **View Graph** from the drop-down list for model loads in the **Operation** column. + +#. In the monitoring area, you can select a duration to view the monitoring data. + + You can view the monitoring data in the recent 1 hour, 3 hours, or 12 hours. To view the monitoring curve of a longer time range, click |image2| to enlarge the graph. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110920964.png + +.. |image2| image:: /_static/images/en-us_image_0000001110761062.png + diff --git a/umn/source/permissions_management/basic_concepts.rst b/umn/source/permissions_management/basic_concepts.rst new file mode 100644 index 0000000..1969c19 --- /dev/null +++ b/umn/source/permissions_management/basic_concepts.rst @@ -0,0 +1,11 @@ +Basic Concepts +============== + +A fine-grained policy is a set of permissions defining which operations on which cloud services can be performed. Each policy can define multiple permissions. After a policy is granted to a user group, users in the group can obtain all permissions defined by the policy. IAM implements fine-grained permissions management based on the permissions defined by policies. + +IAM supports two types of policies: + +- Default policies: Define the common permissions preset in the system, which are typically read-only or management permissions for cloud services such as ModelArts. Default policies can be used only for authorization and cannot be edited or modified. +- Custom policies: Define the permissions created and managed by users and are the extension and supplement of default policies. 
+ + diff --git a/umn/source/permissions_management/creating_a_custom_policy.rst b/umn/source/permissions_management/creating_a_custom_policy.rst new file mode 100644 index 0000000..abd8848 --- /dev/null +++ b/umn/source/permissions_management/creating_a_custom_policy.rst @@ -0,0 +1,100 @@ +Creating a Custom Policy +======================== + +If default policies cannot meet the requirements on fine-grained access control, you can create custom policies and assign the policies to the user group. + +You can create custom policies in either of the following ways: + +- Visual editor: Select cloud services, actions, resources, and request conditions. This does not require knowledge of policy syntax. +- JSON: Edit JSON policies from scratch or based on an existing policy. + +For details about how to create a custom policy, see section "Creating a Custom Policy" in the *Identity and Access Management User Guide*. This section describes `example custom policies of OBS (a dependent service of ModelArts) <#example-custom-policies-of-obs>`__ and `ModelArts <#example-custom-policies-of-modelarts>`__. + +Precautions +----------- + +- The permissions to use ModelArts depend on OBS authorization. Therefore, you need to grant OBS system permissions to users. +- A custom policy can contain actions of multiple services that are globally accessible or accessible through region-specific projects. +- To define permissions required to access both global and project-level services, create two custom policies and specify the scope as **Global services** and **Project-level services**. Then grant the two policies to the users. + +Example Custom Policies of OBS +------------------------------ + +ModelArts is a project-level service, and OBS is a global service. Therefore, you need to create custom policies for the two services respectively and grant them to users. The permissions to use ModelArts depend on OBS authorization. The following example shows the minimum permissions for OBS, including the permissions for OBS buckets and objects. After being granted the minimum permissions for OBS, users can access OBS from ModelArts without restrictions. + +.. code-block:: + + { + "Version": "1.1", + "Statement": [ + { + "Action": [ + "obs:bucket:ListAllMybuckets", + "obs:bucket:HeadBucket", + "obs:bucket:ListBucket", + "obs:bucket:GetBucketLocation", + "obs:object:GetObject", + "obs:object:GetObjectVersion", + "obs:object:PutObject", + "obs:object:DeleteObject", + "obs:object:DeleteObjectVersion", + "obs:object:ListMultipartUploadParts", + "obs:object:AbortMultipartUpload", + "obs:object:GetObjectAcl", + "obs:object:GetObjectVersionAcl", + "obs:bucket:PutBucketAcl" + ], + "Effect": "Allow" + } + ] + } + +Example Custom Policies of ModelArts +------------------------------------ + +- Example: Denying ExeML project deletion + + A deny policy must be used in conjunction with other policies to take effect. If the permissions assigned to a user contain both Allow and Deny actions, the Deny actions take precedence over the Allow actions. + + The following method can be used if you need to assign permissions of the **ModelArts FullAccess** policy to a user but also forbid the user from deleting ExeML projects. Create a custom policy for denying ExeML project deletion, and assign both policies to the group the user belongs to. Then the user can perform all operations on ModelArts except deleting ExeML projects. The following is an example deny policy: + + .. 
code-block:: + + { + "Version": "1.1", + "Statement": [ + { + "Effect": "Deny", + "Action": [ + "modelarts:exemlProject:delete" + ] + } + ] + } + +- Example: Allowing users to use only development environments + + The following is a policy configuration example for this user: + + .. code-block:: + + { + "Version": "1.1", + "Statement": [ + + { + "Effect": "Allow", + "Action": [ + "modelarts:notebook:list", + "modelarts:notebook:create" , + "modelarts:notebook:get" , + "modelarts:notebook:update" , + "modelarts:notebook:delete" , + "modelarts:notebook:action" , + "modelarts:notebook:access" + ] + } + ] + } + + diff --git a/umn/source/permissions_management/creating_a_user_and_granting_permissions.rst b/umn/source/permissions_management/creating_a_user_and_granting_permissions.rst new file mode 100644 index 0000000..25b1851 --- /dev/null +++ b/umn/source/permissions_management/creating_a_user_and_granting_permissions.rst @@ -0,0 +1,91 @@ +Creating a User and Granting Permissions +======================================== + +Policy Content +-------------- + +A fine-grained policy consists of the policy version (the **Version** field) and statement (the **Statement** field). + +- **Version**: Distinguishes between role-based access control (RBAC) and fine-grained policies. + + - **1.0**: RBAC policies, which are preset in the system and used to grant permissions of each service as a whole. After such a policy is granted to a user, the user has all permissions of the corresponding service. + - **1.1**: Fine-grained policies. A fine-grained policy consists of API-based permissions for operations on specific resource types. Fine-grained policies, as the name suggests, allow for more fine-grained control than RBAC policies. Users granted permissions of such a policy can only perform specific operations on the corresponding service. Fine-grained policies are classified into default and custom policies. + + - Default policies: Preset common permission sets to control read and administrator permissions of different services. + - Custom policies: Permission sets created and managed by users as an extension and supplement to system-defined policies. For example, a custom policy can be created to allow users only to modify ECS specifications. + +- **Statement**: Detailed information about a policy, containing the **Effect** and **Action** elements. + + - Effect + + Valid values for **Effect** include **Allow** and **Deny**. In a custom policy that contains both Allow and Deny statements, the Deny statements take precedence. + + - Action + + The value can be one or more resource operations. + + The value format is *Service name*:*Resource type*:*Action*, for example, **modelarts:exemlProject:create**. + + .. note:: + + - *Service name*: service name. Only lowercase letters are supported, for example, **modelarts**. + - *Resource type* and *Action*: The values are case-insensitive, and the wildcard (*) are allowed. A wildcard (*) can represent all or part of information about resource types and actions for the specific service. + +Example Policies +---------------- + +- A policy can define a single permission, such as the permission to deny ExeML project deletion. + + .. code-block:: + + { + "Version": "1.1", + "Statement": [ + { + "Effect": "Deny", + "Action": [ + "modelarts:exemlProject:delete" + ] + } + ] + } + +- A policy can define multiple permissions, such as the permissions to delete an ExeML version and an ExeML project. + + .. 
code-block:: + + { + "Version": "1.1", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "modelarts:exemlProjectVersion:delete", + "modelarts:exemlProject:delete" + ] + } + ] + } + +Authentication Logic +-------------------- + +IAM authenticates users according to the permissions that the users have been granted. The authentication logic is as follows: + +.. figure:: /_static/images/en-us_image_0000001110920802.png + :alt: **Figure 1** Authentication logic + + + **Figure 1** Authentication logic + +.. note:: + + The actions in each policy bear the OR relationship. + +#. A user accesses the system and makes an operation request. +#. The system evaluates all the permissions policies assigned to the user. +#. In these policies, the system looks for explicit deny permissions. If the system finds an explicit deny that applies, it returns a decision of **Deny**, and the authentication ends. +#. If no explicit deny is found, the system looks for allow permissions that would apply to the request. If the system finds an explicit allow permission that applies, it returns a decision of **Allow**, and the authentication ends. +#. If no explicit allow permission is found, the system returns a decision of **Deny**, and the authentication ends. + + diff --git a/umn/source/permissions_management/index.rst b/umn/source/permissions_management/index.rst new file mode 100644 index 0000000..9dc45d9 --- /dev/null +++ b/umn/source/permissions_management/index.rst @@ -0,0 +1,10 @@ +====================== +Permissions Management +====================== + +.. toctree:: + :maxdepth: 1 + + basic_concepts + creating_a_user_and_granting_permissions + creating_a_custom_policy diff --git a/umn/source/preparations/configuring_access_authorization_(global_configuration)/configuring_access_key_authorization.rst b/umn/source/preparations/configuring_access_authorization_(global_configuration)/configuring_access_key_authorization.rst new file mode 100644 index 0000000..619196c --- /dev/null +++ b/umn/source/preparations/configuring_access_authorization_(global_configuration)/configuring_access_key_authorization.rst @@ -0,0 +1,30 @@ +Configuring Access Key Authorization +==================================== + +To use an access key pair for authorization, you need to obtain the access key pair first and then add the access key pair on the ModelArts management console. If your access key pair changes, you need to add a new one. + +Obtaining an Access Key +----------------------- + +#. On the ModelArts management console, hover over the username in the upper right corner and choose **My Credentials** from the drop-down list. +#. On the **My Credentials** page, choose **Access Keys** > **Create Access Key**. +#. In the **Create Access Key** dialog box that is displayed, enter the verification code received by SMS or email. +#. Click **OK** and save the access key file as prompted. The access key file is saved in the default download folder of the browser. Open the **credentials.csv** file to view the access key (**Access Key Id** and **Secret Access Key**). + +Adding an Access Key +-------------------- + +#. Log in to the ModelArts management console. In the navigation pane, choose **Settings**. The **Settings** page is displayed. + +#. Click **Add Authorization**. + +#. In the **Add Authorization** dialog box that is displayed, set **Authorization Method** to **AK/SK**. The username is fixed. Enter the obtained access key pair. + + - **AK**: Enter the value of the **Access Key Id** field in the key file. 
+ - **SK**: Enter the value of the **Secret Access Key** field in the key file. + +#. Select **I have read and agree to the ModelArts Service Statement** and click **Agree**. + + After the configuration is complete, you can view the access key configurations of an account or IAM user on the **Settings** page. + + diff --git a/umn/source/preparations/configuring_access_authorization_(global_configuration)/configuring_agency_authorization_(recommended).rst b/umn/source/preparations/configuring_access_authorization_(global_configuration)/configuring_agency_authorization_(recommended).rst new file mode 100644 index 0000000..1bdf445 --- /dev/null +++ b/umn/source/preparations/configuring_access_authorization_(global_configuration)/configuring_agency_authorization_(recommended).rst @@ -0,0 +1,79 @@ +Configuring Agency Authorization (Recommended) +============================================== + +An agency is used to delegate ModelArts the operation permissions for dependent services such as OBS and SWR. Before using ModelArts, you need to complete agency authorization. + +.. note:: + + If you have used ModelArts before, click **Delete Authorization** in the global configurations area and then create an agency. + +Before You Start +---------------- + +- account + + - Only a cloud account can perform agency authorization to authorize the current account or all IAM users under the current account. + - Multiple IAM users or accounts can use the same agency. + - A maximum of 50 agencies can be created under an account. + +- IAM user + + - If the agency has been authorized, you can view the authorization information on the **Settings** page. + - If an IAM user has not obtained the authorization, ModelArts will display a message indicating that the user has not been authorized when the user accesses the **Add Authorization** page. In this case, contact the administrator of the IAM user to add authorization. Alternatively, you can `use access keys for authorization <../../preparations/configuring_access_authorization_(global_configuration)/configuring_access_key_authorization.html>`__. + +- When configuring an agency, you can use an automatically created agency. For details, see `Automatically Creating an IAM Agency <#automatically-creating-an-iam-agency>`__. You can also configure an agency. For example, you can configure an IAM user with the agency valid for only one day. + +Configuring Authorization +------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, click **Settings**. The **Settings** page is displayed. + +#. Click **Add Authorization**. + +#. In the **Add Authorization** dialog box that is displayed, set **Authorization Method** to **Agency**, and select the username and agency to be authorized. + +.. _modelarts080007enustopic0284258827enustopic0256240291table11954142119154: + + .. 
table:: **Table 1** Parameters + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=====================================================================================================================================================================================================================================================================================================================================================================================+ + | Authorization Method | Select **Agency**. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Username | Select an account from the drop-down list on the right. By default, **All IAM users (including logged-in account)** is selected, which indicates that authorization will be performed for the logged-in account and all IAM users under the account. All IAM users under the logged-in account are displayed in the drop-down list. You can configure an agency for an IAM user. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Agency | - **Auto Create** (recommended): When you use ModelArts **Settings** for the first time, no agency is available. In this case, you can click **Auto Create** to automatically create an agency for the user selected in **Username**. For details about the automatically created agency, see `Automatically Creating an IAM Agency <#automatically-creating-an-iam-agency>`__. | + | | - Select an existing agency: If you have created agencies in IAM, you can select an available agency from the drop-down list to authorize the selected user. | + | | - **Create on IAM**: If the automatically created agency cannot meet your requirements, you can click **Create on IAM** to manually create an agency on the IAM management console. If you choose **Create on IAM**, configure at least the **ModelArts CommonOperation** and **OBS Operate Access** permissions. Otherwise, the basic functions of ModelArts will be unavailable. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. 
After configuring the username and agency, select **I have read and agree to the ModelArts Service Statement** and click **Agree**. + + After the configuration is complete, you can view the agency configurations of an account or IAM user on the **Settings** page. + +Automatically Creating an IAM Agency +------------------------------------ + +The following provides the details about the agency automatically created by ModelArts. + +- **Agency Name**: For a cloud account, the agency name is **modelarts_agency**. For an IAM user, the agency name is **ma_agency_**. +- **Agency Type**: Select **Cloud service**. +- **Cloud Service**: Select **ModelArts**. +- **Validity Period**: Select **Unlimited**. +- **Permissions**: The **ModelArts CommonOperations**, **OBS OperateAccess**, and **Tenant Administrator** (required for using other dependent services) permissions are automatically added for this agency to use all ModelArts functions. + +Deleting Authorizations +----------------------- + +To better manage your authorization, you can delete the authorization of an IAM user or delete the authorizations of all users in batches. + +- **Deleting the authorization of a user** + + On the **Settings** page, the authorizations configured for IAM users under the current account are displayed. You can click **Delete** in the **Operation** column to delete the authorization of a user. After the deletion takes effect, the user cannot use ModelArts functions. + +- **Deleting authorizations in batches** + + On the **Settings** page, click **Delete Authorization** above the authorization list to delete all authorizations of the current account. After the deletion, the account and all IAM users under the account cannot use ModelArts functions. + + diff --git a/umn/source/preparations/configuring_access_authorization_(global_configuration)/index.rst b/umn/source/preparations/configuring_access_authorization_(global_configuration)/index.rst new file mode 100644 index 0000000..1639845 --- /dev/null +++ b/umn/source/preparations/configuring_access_authorization_(global_configuration)/index.rst @@ -0,0 +1,10 @@ +======================================================= +Configuring Access Authorization (Global Configuration) +======================================================= + +.. toctree:: + :maxdepth: 1 + + overview + configuring_agency_authorization_(recommended) + configuring_access_key_authorization diff --git a/umn/source/preparations/configuring_access_authorization_(global_configuration)/overview.rst b/umn/source/preparations/configuring_access_authorization_(global_configuration)/overview.rst new file mode 100644 index 0000000..3de1564 --- /dev/null +++ b/umn/source/preparations/configuring_access_authorization_(global_configuration)/overview.rst @@ -0,0 +1,23 @@ +Overview +======== + +When you use ExeML, data management, notebook instances, training jobs, models, and services, ModelArts may need to access dependent services such as OBS and Software Repository for Container (SWR). If ModelArts is not authorized to access the services, these functions cannot be used. + +You can configure access authorization in either of the following ways: + +- **Using an agency** (recommended) + + After agency authorization is configured, the dependent service operation permissions are delegated to ModelArts so that ModelArts can use the dependent services and perform operations on resources on your behalf. 
+ +- **Using the access key** + + You can use the obtained access key pair (AK/SK) to authorize ModelArts to access dependent services and perform operations on resources. + +Precautions +----------- + +- Agency authorization grants ModelArts permissions on dependent services, such as OBS and SWR. If the OBS permissions are not configured for an IAM user, the user still cannot perform operations on these services. +- For users who have used ModelArts before, access key authorization has been configured and does not need to be configured again. However, you are advised to switch to agency authorization. +- For new users, use agency authorization. + + diff --git a/umn/source/preparations/creating_an_obs_bucket.rst b/umn/source/preparations/creating_an_obs_bucket.rst new file mode 100644 index 0000000..225a008 --- /dev/null +++ b/umn/source/preparations/creating_an_obs_bucket.rst @@ -0,0 +1,17 @@ +Creating an OBS Bucket +====================== + +ModelArts uses OBS to store data and model backups and snapshots, achieving secure, reliable, and low-cost storage. Therefore, before using ModelArts, create an OBS bucket and folders for storing data. + +Procedure +--------- + +#. Log in to OBS Console and create an OBS bucket. For details, see "Creating a Bucket". For example, create an OBS bucket named **c-flowers**. + + .. note:: + + The created OBS bucket and ModelArts must be in the same region. + +#. Create a folder for storing data. For details, see "Creating a Folder". For example, create a folder named **flowers** in the created **c-flowers** OBS bucket. + + diff --git a/umn/source/preparations/index.rst b/umn/source/preparations/index.rst new file mode 100644 index 0000000..950b937 --- /dev/null +++ b/umn/source/preparations/index.rst @@ -0,0 +1,9 @@ +============ +Preparations +============ + +.. toctree:: + :maxdepth: 1 + + configuring_access_authorization_(global_configuration)/index + creating_an_obs_bucket diff --git a/umn/source/resource_pools.rst b/umn/source/resource_pools.rst new file mode 100644 index 0000000..42bdc91 --- /dev/null +++ b/umn/source/resource_pools.rst @@ -0,0 +1,116 @@ +Resource Pools +============== + +ModelArts Resource Pools +------------------------ + +When using ModelArts for the AI development lifecycle, you can use two different resource pools to train and deploy models. + +- **Public Resource Pool**: provides public large-scale computing clusters, which are allocated based on job parameter settings. Resources are isolated by job. + +- **Dedicated Resource Pool**: provides exclusive compute resources, which can be used for model deployment. It delivers higher efficiency and cannot be shared with other users. + + Create a dedicated resource pool and select it during AI development. For details about dedicated resource pools, see the following: + + `Dedicated Resource Pool <#dedicated-resource-pool>`__ + + `Creating a Dedicated Resource Pool <#creating-a-dedicated-resource-pool>`__ + + `Scaling a Dedicated Resource Pool <#scaling-a-dedicated-resource-pool>`__ + + `Deleting a Dedicated Resource Pool <#deleting-a-dedicated-resource-pool>`__ + +Dedicated Resource Pool +----------------------- + +- Dedicated resource pools can be used in the following jobs and tasks: notebook instances, training, TensorBoard, and deployment. +- Dedicated resource pools are classified into two types: **Dedicated for Development/Training** and **Dedicated for Service Deployment**.
The **Dedicated for Development/Training** type can be used only for notebook instances, training, and TensorBoard. The **Dedicated for Service Deployment** type can be used only for model deployment. +- Dedicated resource pools are available only when they are in the **Running** status. If a dedicated resource pool is unavailable or abnormal, rectify the fault before using it. + +Creating a Dedicated Resource Pool +---------------------------------- + +#. Log in to the ModelArts management console and choose **Dedicated Resource Pools** on the left. + +#. On the **Dedicated Resource Pools** page, select **Dedicated for Development/Training** or **Dedicated for Service Deployment**. + +#. Click **Create** in the upper left corner. The page for creating a dedicated resource pool is displayed. + +#. Set the parameters on the page. For details about how to set parameters, see `Table 1 <#modelarts230076enustopic0143244658table1073325155617>`__ and `Table 2 <#modelarts230076enustopic0143244658table199892206411>`__. + +.. _modelarts230076enustopic0143244658table1073325155617: + + .. table:: **Table 1** Parameters of the **Dedicated for Development/Training** type + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+=======================================================================================================================================================================================================+ + | Resource Type | The default value is and cannot be changed. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Name | Name of a dedicated resource pool. | + | | | + | | The value can contain letters, digits, hyphens (-), and underscores (_). | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of a dedicated resource pool. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Nodes | Select the number of nodes in a dedicated resource pool. More nodes mean higher computing performance. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Specifications | Required specifications. The GPU delivers better performance, and the CPU is more cost-effective. If a flavor is sold out, you can purchase it again only after other users delete the resource pool. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + + +.. 
_modelarts230076enustopic0143244658table199892206411: + + .. table:: **Table 2** Parameters of the **Dedicated for Service Deployment** type + + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+====================================================================================================================================================================================================================================================================================================================================+ + | Resource Type | The default value is **Dedicated for Service Deployment** and cannot be changed. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Name | Name of a dedicated resource pool. | + | | | + | | The value can contain letters, digits, hyphens (-), and underscores (_). | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Description | Brief description of a dedicated resource pool. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Custom Network Configuration | If you enable **Custom Network Configuration**, the service instance runs on the specified network and can communicate with other cloud service resource instances on the network. If you do not enable **Custom Network Configuration**, ModelArts allocates a dedicated network to each user and isolates users from each other. | + | | | + | | If you enable **Custom Network Configuration**, set **VPC**, **Subnet**, and **Security Group**. If no network is available, go to the VPC service and create a network. . | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | AZ | You can select **Random**, **AZ 1**, **AZ 2**, or **AZ 3** based on site requirements. An AZ is a physical region where resources use independent power supplies and networks. AZs are physically isolated but interconnected through an internal network. To enhance workload availability, create nodes in different AZs. 
| + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Nodes | Select the number of nodes in a dedicated resource pool. More nodes mean higher computing performance. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Specifications | Required specifications. The GPU delivers better performance. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +#. After confirming that the specifications are correct, create a dedicated resource pool as prompted. After a dedicated resource pool is created, its status changes to **Running**. + +Scaling a Dedicated Resource Pool +--------------------------------- + +After a dedicated resource pool is used for a period of time, you can scale out or in the capacity of the resource pool by increasing or decreasing the number of nodes. + +The procedure for scaling is as follows: + +#. Go to the dedicated resource pool management page, locate the row that contains the desired dedicated resource pool, and click **Scale** in the **Operation** column. +#. On the scaling page, increase or decrease the number of nodes. Increasing the node quantity scales out the resource pool whereas decreasing the node quantity scales in the resource pool. Scale the capacity based on service requirements. + + - During capacity expansion, + - During capacity reduction, delete the target nodes in the **Operation** column. To reduce one node, you need to switch off the node in **Node List** to delete the node. + +#. Click **Submit**. After the request is submitted, the dedicated resource pool management page is displayed. + +Deleting a Dedicated Resource Pool +---------------------------------- + +If a dedicated resource pool is no longer needed during AI service development, you can delete the resource pool to release resources. + +.. note:: + + - After a dedicated resource pool is deleted, the training jobs, notebook instances, and deployment that depend on the resource pool are unavailable. A dedicated resource pool cannot be restored after being deleted. Exercise caution when deleting a dedicated resource pool. + +#. Go to the dedicated resource pool management page, locate the row that contains the desired dedicated resource pool, and click **Delete** in the **Operation** column. +#. In the dialog box that is displayed, click **OK**. 
diff --git a/umn/source/service_overview/basic_knowledge/basic_concepts_of_ai_development.rst b/umn/source/service_overview/basic_knowledge/basic_concepts_of_ai_development.rst new file mode 100644 index 0000000..89e80b6 --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/basic_concepts_of_ai_development.rst @@ -0,0 +1,40 @@ +Basic Concepts of AI Development +================================ + +Machine learning is classified into supervised, unsupervised, and reinforcement learning. + +- Supervised learning uses labeled samples to adjust the parameters of classifiers to achieve the required performance. It can be considered as learning with a teacher. Common supervised learning tasks include regression and classification. +- Unsupervised learning is used to find hidden structures in unlabeled data. Clustering is a form of unsupervised learning. +- Reinforcement learning is an area of machine learning concerned with how software agents ought to take actions in an environment so as to maximize some notion of cumulative reward. + +Regression +---------- + +Regression reflects the characteristics of data attribute values over time and generates a function that maps data attributes to a real-valued prediction, revealing the dependency between the target variable and the attributes. Regression is mainly used to analyze data and predict values and relationships. Regression can be used for customer development, retention, customer churn prevention, production lifecycle analysis, sales trend prediction, and targeted promotion. + +|image1| + +Classification +-------------- + +Classification involves defining a set of categories based on the common features of objects and identifying which category an object belongs to. Classification can be used for customer classification, customer attribute and feature analysis, customer satisfaction analysis, and customer purchase trend prediction. + +|image2| + +Clustering +---------- + +Clustering involves grouping a set of objects in such a way that objects in the same group are more similar to each other than to those in other groups. Clustering can be used for customer segmentation, customer characteristic analysis, customer purchase trend prediction, and market segmentation. + +|image3| + +Clustering analyzes data objects and produces class labels. Objects are grouped by maximizing the similarity within a cluster and minimizing the similarity between clusters. In this way, objects in the same cluster are more similar to each other than to those in other clusters. + + + +.. |image1| image:: /_static/images/en-us_image_0000001110920858.png + +.. |image2| image:: /_static/images/en-us_image_0000001157080805.png + +.. |image3| image:: /_static/images/en-us_image_0000001110760956.png + diff --git a/umn/source/service_overview/basic_knowledge/common_concepts_of_modelarts.rst b/umn/source/service_overview/basic_knowledge/common_concepts_of_modelarts.rst new file mode 100644 index 0000000..dd9770a --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/common_concepts_of_modelarts.rst @@ -0,0 +1,29 @@ +Common Concepts of ModelArts +============================ + +ExeML +----- + +ExeML is the process of automating model design, parameter tuning, model training, model compression, and model deployment based on labeled data. The process is code-free and does not require developers to have experience in model development. A model can be built in three steps: labeling data, training a model, and deploying the model.
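+
+The classification and clustering concepts described in Basic Concepts of AI Development above can be illustrated with a short scikit-learn sketch. The code is purely illustrative and is not part of ModelArts; ExeML itself requires no coding.
+
+.. code-block:: python
+
+   from sklearn.cluster import KMeans
+   from sklearn.datasets import make_blobs
+   from sklearn.linear_model import LogisticRegression
+
+   # Synthetic two-dimensional data with three underlying groups.
+   X, y = make_blobs(n_samples=300, centers=3, random_state=0)
+
+   # Supervised learning (classification): the labels y are used for training.
+   clf = LogisticRegression(max_iter=1000).fit(X, y)
+   print("classification accuracy:", clf.score(X, y))
+
+   # Unsupervised learning (clustering): the labels are not used at all.
+   clusters = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
+   print("cluster assignments of the first 10 samples:", clusters[:10])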
+ +Inference +--------- + +Inference is the process of deriving a new judgment from a known judgment according to a certain strategy. In AI, machines simulate human intelligence, and complete inference based on neural networks. + +Real-Time Inference +------------------- + +Real-time inference specifies a web service that provides an inference result for each inference request. + +Batch Inference +--------------- + +Batch inference specifies a batch job that processes batch data for inference. + +Resource Pool +------------- + +ModelArts provides large-scale computing clusters for model development, training, and deployment. There are two types of resource pools: public resource pool and dedicated resource pool. The public resource pool is provided by default. Dedicated resource pools are created separately and used exclusively. + + diff --git a/umn/source/service_overview/basic_knowledge/data_management.rst b/umn/source/service_overview/basic_knowledge/data_management.rst new file mode 100644 index 0000000..e6b2b9e --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/data_management.rst @@ -0,0 +1,12 @@ +Data Management +=============== + +During AI development, massive volumes of data need to be processed, and data preparation and labeling usually take more than half of the development time. ModelArts data management provides an efficient data management and labeling framework. It supports various data types such as image, text, audio, and video in a range of labeling scenarios such as image classification, object detection, speech paragraph labeling, and text classification. ModelArts data management can be used in AI projects of computer vision, natural language processing, and audio and video analysis. In addition, it provides functions such as data filtering, data analysis, team labeling, and version management for full-process data labeling. + +Team labeling enables multiple members to label a dataset, improving labeling efficiency. ModelArts allows project-based management for labeling by individual developers, small-scale labeling by small teams, and large-scale labeling by professional teams. + +For large-scale team labeling, ModelArts provides team management, personnel management, and data management to implement the entire process, from project creation, allocation, management, labeling, to acceptance. For small-scale labeling by individuals and small teams, ModelArts provides an easy-to-use labeling tool to minimize project management costs. + +In addition, the labeling platform ensures data security. User data is used only within the authorized scope. The labeling object allocation policy ensures user data privacy and implements data anonymization. + + diff --git a/umn/source/service_overview/basic_knowledge/devenviron.rst b/umn/source/service_overview/basic_knowledge/devenviron.rst new file mode 100644 index 0000000..0aea04c --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/devenviron.rst @@ -0,0 +1,14 @@ +DevEnviron +========== + +It is challenging to set up a development environment, select an AI algorithm framework and algorithm, debug code, install software, and accelerate hardware. To address these challenges, ModelArts provides DevEnviron to simplify the entire development process. + +- **Mainstream AI algorithm frameworks supported** + + In the machine learning and deep learning fields, popular open-source training and inference frameworks include TensorFlow, PyTorch, MXNet, and MindSpore. 
ModelArts supports all popular AI computing frameworks and provides a user-friendly development and debugging environment. It supports traditional machine learning algorithms, such as logistic regression, decision tree, and clustering, as well as multiple types of deep learning algorithms, such as the convolutional neural network (CNN), recurrent neural network (RNN), and long short-term memory (LSTM). + +- **Simplified algorithm development for distributed training** + + Deep learning generally requires large-scale GPU clusters for distributed acceleration. For existing open-source frameworks, algorithm developers need to write a large amount of code for distributed training on different hardware, and the acceleration code varies depending on the framework. To resolve these issues, a distributed lightweight framework or SDK is required. The framework or SDK is built on deep learning engines such as TensorFlow, PyTorch, MXNet, and MindSpore to improve the distributed performance and usability of these engines. ModelArts MoXing perfectly suits the needs. The easy-to-use MoXing API/SDK enables you to develop deep learning at low costs. + + diff --git a/umn/source/service_overview/basic_knowledge/exeml.rst b/umn/source/service_overview/basic_knowledge/exeml.rst new file mode 100644 index 0000000..41f47dd --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/exeml.rst @@ -0,0 +1,18 @@ +ExeML +===== + +To implement AI in various industries, AI model development must be simplified. Currently, only a few algorithm engineers and researchers are capable of AI development and optimization. They find it challenging to develop related prototypes into products and projects. Most service developers, however, face difficulties in developing AI algorithms and optimizing parameters. As a result, most enterprises lack comprehensive AI development capabilities. + +ModelArts provides ExeML for service developers who are not experienced in algorithm development to develop algorithms. It automatically generates models based on transfer learning and Neural Architecture Search (NAS), selects parameters for model training, and tunes models for rapid model training and deployment. Based on the labeled data and application scenario provided by developers, ModelArts automatically generates models that meet precision requirements, without the need for coding. The application scenarios include image classification and object detection. Models can be automatically optimized and generated based on the deployment environment and inference speed requirements. + +.. figure:: /_static/images/en-us_image_0000001214778791.png + :alt: **Figure 1** Process of using ExeML + + + **Figure 1** Process of using ExeML + +ModelArts ExeML also provides the auto learning white-box capabilities. It opens model parameters and implements template-based development. ExeML helps accelerate the development speed. With ExeML, developers can directly optimize the generated model or retrain the model, instead of setting up a new model. + +The key techniques of automatic deep learning are transfer learning (generating high-quality models based on a small amount of data), automatic design of the model architecture in multiple dimensions (neural network search and adaptive model optimization), and fast, accurate automatic tuning of training parameters. 
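+
+Among these key techniques, transfer learning essentially means reusing a pretrained feature extractor and retraining only a small task-specific head on the new labeled data. The following PyTorch-style sketch illustrates that idea only; it is not ModelArts ExeML code, and the tiny backbone stands in for a real pretrained network.
+
+.. code-block:: python
+
+   import torch
+   from torch import nn
+
+   # Stand-in for a pretrained feature extractor (in practice, loaded from a model zoo).
+   backbone = nn.Sequential(
+       nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
+       nn.AdaptiveAvgPool2d(1), nn.Flatten(),
+   )
+
+   # 1. Freeze the pretrained weights so that only the new head is trained.
+   for param in backbone.parameters():
+       param.requires_grad = False
+
+   # 2. Attach a small task-specific head, here for a 5-class image task.
+   model = nn.Sequential(backbone, nn.Linear(16, 5))
+
+   # 3. Optimize only the trainable (head) parameters on the small labeled dataset.
+   optimizer = torch.optim.Adam(
+       (p for p in model.parameters() if p.requires_grad), lr=1e-3)
+   loss_fn = nn.CrossEntropyLoss()
+
+   images = torch.randn(8, 3, 32, 32)        # placeholder batch of labeled images
+   labels = torch.randint(0, 5, (8,))
+   optimizer.zero_grad()
+   loss = loss_fn(model(images), labels)
+   loss.backward()
+   optimizer.step()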
+ + diff --git a/umn/source/service_overview/basic_knowledge/index.rst b/umn/source/service_overview/basic_knowledge/index.rst new file mode 100644 index 0000000..f1c008a --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/index.rst @@ -0,0 +1,15 @@ +=============== +Basic Knowledge +=============== + +.. toctree:: + :maxdepth: 1 + + introduction_to_the_ai_development_lifecycle + basic_concepts_of_ai_development + common_concepts_of_modelarts + data_management + devenviron + model_training + model_deployment + exeml diff --git a/umn/source/service_overview/basic_knowledge/introduction_to_the_ai_development_lifecycle.rst b/umn/source/service_overview/basic_knowledge/introduction_to_the_ai_development_lifecycle.rst new file mode 100644 index 0000000..6c69720 --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/introduction_to_the_ai_development_lifecycle.rst @@ -0,0 +1,51 @@ +Introduction to the AI Development Lifecycle +============================================ + +What Is AI +---------- + +Artificial intelligence (AI) is a technology capable of simulating human cognition through machines. The core capability of AI is to make a judgment or prediction based on a given input. + +What Is the Purpose of AI Development +------------------------------------- + +AI development aims to centrally process and extract information from volumes of data to summarize internal patterns of the study objects. + +Massive volumes of collected data are computed, analyzed, summarized, and organized by using appropriate statistics, machine learning, and deep learning methods to maximize data value. + +Basic Process of AI Development +------------------------------- + +The basic process of AI development includes the following steps: determining an objective, preparing data, and training, evaluating, and deploying a model. + +.. figure:: /_static/images/en-us_image_0000001110921016.png + :alt: **Figure 1** AI development process + + + **Figure 1** AI development process + +#. **Determine an objective.** + + Before starting AI development, determine what to analyze. What problems do you want to solve? What is the business goal? Sort out the AI development framework and ideas based on the business understanding. For example, image classification and object detection. Different projects have different requirements for data and AI development methods. + +#. **Prepare data.** + + Data preparation refers to data collection and preprocessing. + + Data preparation is the basis of AI development. When you collect and integrate related data based on the determined objective, the most important thing is to ensure the authenticity and reliability of the obtained data. Typically, you cannot collect all the data at the same time. In the data labeling phase, you may find that some data sources are missing and then you may need to repeatedly adjust and optimize the data. + +#. **Train a model.** + + Modeling involves analyzing the prepared data to find the causality, internal relationships, and regular patterns, thereby providing references for commercial decision making. After model training, usually one or more machine learning or deep learning models are generated. These models can be applied to new data to obtain predictions and evaluation results. + +#. **Evaluate the model.** + + A model generated by training needs to be evaluated. 
Typically, you cannot obtain a satisfactory model after the first evaluation, and may need to repeatedly adjust algorithm parameters and data to further optimize the model. + + Some common metrics, such as the accuracy, recall, and area under the curve (AUC), help you effectively evaluate and obtain a satisfactory model. + +#. **Deploy the model.** + + Model development and training are based on existing data (which may be test data). After a satisfactory model is obtained, the model needs to be formally applied to actual data or newly generated data for prediction, evaluation, and visualization. The findings can then be reported to decision makers in an intuitive way, helping them develop the right business strategies. + + diff --git a/umn/source/service_overview/basic_knowledge/model_deployment.rst b/umn/source/service_overview/basic_knowledge/model_deployment.rst new file mode 100644 index 0000000..8a40fa8 --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/model_deployment.rst @@ -0,0 +1,16 @@ +Model Deployment +================ + +Generally, AI model deployment and large-scale implementation are complex. + +ModelArts resolves this issue by deploying a trained model on different devices in various scenarios with only a few clicks. This secure and reliable one-stop deployment is available for individual developers, enterprises, and device manufacturers. + +.. figure:: /_static/images/en-us_image_0000001110920824.png + :alt: **Figure 1** Process of deploying a model + + + **Figure 1** Process of deploying a model + +- The real-time inference service features high concurrency, low latency, and elastic scaling. + + diff --git a/umn/source/service_overview/basic_knowledge/model_training.rst b/umn/source/service_overview/basic_knowledge/model_training.rst new file mode 100644 index 0000000..4addbe1 --- /dev/null +++ b/umn/source/service_overview/basic_knowledge/model_training.rst @@ -0,0 +1,38 @@ +Model Training +============== + +In addition to data and algorithms, developers spend a lot of time configuring model training parameters. Model training parameters determine the model's precision and convergence time. Parameter selection is heavily dependent on developers' experience. Improper parameter selection will affect the model's precision or significantly increase the time required for model training. + +To simplify AI development and improve development efficiency and training performance, ModelArts offers visualized job management, resource management, and version management and automatically performs hyperparameter optimization based on machine learning and reinforcement learning. It provides automatic hyperparameter tuning policies such as learning rate and batch size, and integrates common models. + +Currently, when most developers build models, the models usually have dozens of layers or even hundreds of layers and MB-level or GB-level parameters to meet precision requirements. As a result, the specifications of computing resources are extremely high, especially the computing power of hardware resources, memory, and ROM. The resource specifications on the device side are strictly limited. For example, the computing power on the device side is 1 TFLOPS, the memory size is about 2 GB, and the ROM space is about 2 GB, so the model size on the device side must be limited to 100 KB and the inference delay must be limited to 100 milliseconds. 
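+
+A rough back-of-the-envelope calculation (illustrative numbers only) shows how far a full-precision model is from such a device budget, since parameter memory scales directly with numeric precision:
+
+.. code-block:: python
+
+   # Illustrative estimate of parameter memory at different numeric precisions.
+   params = 25_000_000                          # e.g. a ResNet-50-scale model, ~25M parameters
+   bytes_per_param = {"FP32": 4, "FP16": 2, "INT8": 1}
+
+   for dtype, size in bytes_per_param.items():
+       print(f"{dtype}: {params * size / 1024 ** 2:.0f} MiB")
+
+   # FP32 ~95 MiB, FP16 ~48 MiB, INT8 ~24 MiB -- all far above a 100 KB budget,
+   # which is why the compression techniques described next are combined.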
+ +Therefore, compression technologies with lossless or near-lossless model precision, such as pruning, quantization, and knowledge distillation, are used to implement automatic model compression and optimization, and automatic iteration of model compression and retraining to control the loss of model precision. The low-bit quantization technology, which eliminates the need for retraining, converts the model from a high-precision floating point to a fixed-point operation. Multiple compression and optimization technologies are used to meet the lightweight requirements of device and edge hardware resources. The model compression technology reduces the precision by less than 1% in specific scenarios. + +When the training data volume is large, the training of the deep learning model is time-consuming. In computer vision technology, ImageNet-1k (a classification dataset containing 1,000 image classes, referred to as ImageNet) is a commonly used dataset. If you use a P100 GPU to train a ResNet-50 model on the dataset, it will take nearly one week. This hinders rapid development of deep learning applications. Therefore, the acceleration of deep learning training has always been an important concern to the academia and the industry. + +Distributed training acceleration needs to be considered in terms of software and hardware. A single optimization method cannot meet expectations. Therefore, optimization of distributed acceleration is a system project. The distributed training architecture needs to be considered in terms of hardware and chip design. To minimize compute and communication delays, many factors need to be considered, including overall compute specifications, network bandwidth, high-speed cache, power consumption, and heat dissipation of the system, and the relationship between compute and communication throughput. + +The software design needs to combine high-performance hardware features to fully use the high-speed hardware network and implement high-bandwidth distributed communication and efficient local data caching. By using training optimization algorithms, such as hybrid parallel, gradient compression, and convolution acceleration, the software and hardware of the distributed training system can be efficiently coordinated and optimized from end to end, and training acceleration can be implemented in a distributed environment of multiple hosts and cards. ModelArts delivers an industry-leading speedup of over 0.8 for ResNet50 on the ImageNet dataset in the distributed environment with thousands of hosts and cards. + +To measure the acceleration performance of distributed deep learning, the following two key indicators are used: + +- Throughput, that is, the amount of data processed in a unit time +- Convergence time, that is, the time required to achieve certain precision + +The throughput depends on server hardware (for example, more AI acceleration chips with higher FLOPS processing capabilities and higher communication bandwidth achieve higher throughput), data reading and caching, data preprocessing, model computing (for example, convolution algorithm selection), and communication topology optimization. Except low-bit computing and gradient (or parameter) compression, most technologies improve throughput without affecting model precision. To achieve the shortest convergence time, optimize the throughput and adjust the parameters. If the parameters are not adjusted properly, the throughput cannot be optimized. 
If the batch size is set to a small value, the parallel performance of model training will be relatively poor. As a result, the throughput cannot be improved even if the number of compute nodes are increased. + +Users are most concerned about convergence time. The MoXing framework implements full-stack optimization and significantly reduces the training convergence time. For data read and preprocessing, MoXing uses multi-level concurrent input pipelines to prevent data I/Os from becoming a bottleneck. In terms of model computing, MoXing provides hybrid precision calculation, which combines semi-precision and single-precision for the upper layer models and reduces the loss caused by precision calculation through adaptive scaling. Dynamic hyperparameter policies (such as momentum and batch size) are used to minimize the number of epochs required for model convergence. + +ModelArts High-Performance Distributed Training Optimization +------------------------------------------------------------ + +- Automatic hybrid precision to fully utilize hardware computing capabilities +- Dynamic hyperparameter adjustment technologies (dynamic batch size, image size, and momentum) +- Automatic model gradient merging and splitting +- Communication operator scheduling optimization based on BP bubble adaptive computing +- Distributed high-performance communication libraries (NStack and HCCL) +- Distributed data-model hybrid parallel +- Training data compression and multi-level caching + + diff --git a/umn/source/service_overview/functions.rst b/umn/source/service_overview/functions.rst new file mode 100644 index 0000000..3113769 --- /dev/null +++ b/umn/source/service_overview/functions.rst @@ -0,0 +1,30 @@ +Functions +========= + +AI engineers face challenges in the installation and configuration of various AI tools, data preparation, and model training. To address these challenges, the one-stop AI development platform ModelArts is provided. The platform integrates data preparation, algorithm development, model training, and model deployment into the production environment, allowing AI engineers to perform one-stop AI development. + +.. figure:: /_static/images/en-us_image_0000001156920845.png + :alt: **Figure 1** Function overview + + + **Figure 1** Function overview + +ModelArts has the following features: + +- **Data governance** + + Manages data preparation, such as data filtering and labeling, and dataset versions. + +- **Rapid and simplified model training** + + Enables high-performance distributed training and simplifies coding with the self-developed MoXing deep learning framework. + +- **Multi-scenario deployment** + + Deploys models in various production environments, and supports real-time and batch inference. + +- **Auto learning** + + Enables model building without coding and supports image classification, object detection, and predictive analytics. + + diff --git a/umn/source/service_overview/index.rst b/umn/source/service_overview/index.rst new file mode 100644 index 0000000..668e125 --- /dev/null +++ b/umn/source/service_overview/index.rst @@ -0,0 +1,12 @@ +================ +Service Overview +================ + +.. 
toctree:: + :maxdepth: 1 + + what_is_modelarts + functions + basic_knowledge/index + related_services + permissions_management diff --git a/umn/source/service_overview/permissions_management.rst b/umn/source/service_overview/permissions_management.rst new file mode 100644 index 0000000..c4a2a31 --- /dev/null +++ b/umn/source/service_overview/permissions_management.rst @@ -0,0 +1,36 @@ +Permissions Management +====================== + +If you need to assign different permissions to different employees in your enterprise to access ModelArts resources, IAM is a good choice for fine-grained permissions management. + +Granting Permissions to Users +----------------------------- + +.. figure:: /_static/images/en-us_image_0000001156920871.png + :alt: **Figure 1** Authorization model + + + **Figure 1** Authorization model + +#. Plan user groups and grant required permissions to each user group. +#. Add a user to a specific user group so that the user can inherit the permissions of the group. + +When personnel changes occur, you only need to change individual user permissions by changing their user group. User groups make permission management more efficient. + +Granting Permissions to Other Accounts +-------------------------------------- + +You (account A) can create an agency on IAM to grant required permissions to the delegated account (account B). The administrator of account B grants the **Agent Operator** permissions to the user of account B to enable the user to manage resources in your account (account A). + +Granting Permissions to Federated Users +--------------------------------------- + +You can use IAM to create an IdP and create rules for the IdP to convert federated users into IAM users who have specified permissions to access cloud resources. + +.. figure:: /_static/images/en-us_image_0000001157080847.png + :alt: **Figure 2** Principles of identity conversion for federated users + + + **Figure 2** Principles of identity conversion for federated users + + diff --git a/umn/source/service_overview/related_services.rst b/umn/source/service_overview/related_services.rst new file mode 100644 index 0000000..5927ec0 --- /dev/null +++ b/umn/source/service_overview/related_services.rst @@ -0,0 +1,24 @@ +Related Services +================ + +OBS +--- + +ModelArts uses Object Storage Service (OBS) to store data and model backups and snapshots. OBS provides secure, reliable, low-cost storage. For more details, see *Object Storage Service Console Function Overview*. + +CCE +--- + +ModelArts uses Cloud Container Engine (CCE) to deploy models as real-time services. CCE enables high concurrency and provides elastic scaling. For more information about CCE, see *Cloud Container Engine User Guide*. + +SWR +--- + +To use an AI framework that is not supported by ModelArts, use SoftWare Repository for Container (SWR) to customize an image and import the image to ModelArts for training or inference. For more details, see . + +Cloud Eye +--------- + +ModelArts uses Cloud Eye to monitor online services and model loads in real time and send alarms and notifications automatically. For details about Cloud Eye, see *Cloud Eye User Guide*. + + diff --git a/umn/source/service_overview/what_is_modelarts.rst b/umn/source/service_overview/what_is_modelarts.rst new file mode 100644 index 0000000..cc8beaa --- /dev/null +++ b/umn/source/service_overview/what_is_modelarts.rst @@ -0,0 +1,47 @@ +What Is ModelArts? +================== + +ModelArts is a one-stop development platform for AI developers. 
With data preprocessing, semi-automated data labeling, distributed training, automated model building, and model deployment, ModelArts helps AI developers quickly build models and efficiently manage the AI development lifecycle. + +ModelArts covers all stages of AI development, including data processing and model training and deployment. The underlying technologies of ModelArts support various heterogeneous computing resources, allowing developers to flexibly select and use resources. In addition, ModelArts supports popular open-source AI development frameworks such as TensorFlow. Developers can also use self-developed algorithm frameworks to match their usage habits. + +ModelArts aims to simplify AI development. + +Product Architecture +-------------------- + +ModelArts supports the entire development process, including data processing, and model training, management, and deployment. + +ModelArts supports various AI application scenarios, such as image classification and object detection. + +.. figure:: /_static/images/en-us_image_0000001110920880.png + :alt: **Figure 1** ModelArts architecture + + + **Figure 1** ModelArts architecture + +Product Advantages +------------------ + +- **One-stop platform** + + The out-of-the-box and full-lifecycle AI development platform provides one-stop training, management, and deployment of models. + +- **Easy to use** + + - Automatic optimization of hyperparameters + - Code-free development and simplified operations + +- **High performance** + + - The self-developed MoXing deep learning framework accelerates algorithm development and training. + - Optimized GPU utilization accelerates real-time inference. + +- **Flexible** + + - Popular open-source frameworks available, such as TensorFlow, and MindSpore + - Popular GPUs + - Exclusive use of dedicated resources + - Custom images for custom frameworks and operators + + diff --git a/umn/source/training_management/creating_a_training_job/index.rst b/umn/source/training_management/creating_a_training_job/index.rst new file mode 100644 index 0000000..7ac0fc5 --- /dev/null +++ b/umn/source/training_management/creating_a_training_job/index.rst @@ -0,0 +1,10 @@ +======================= +Creating a Training Job +======================= + +.. toctree:: + :maxdepth: 1 + + introduction_to_training_jobs + using_frequently-used_frameworks_to_train_models + using_custom_images_to_train_models diff --git a/umn/source/training_management/creating_a_training_job/introduction_to_training_jobs.rst b/umn/source/training_management/creating_a_training_job/introduction_to_training_jobs.rst new file mode 100644 index 0000000..b226103 --- /dev/null +++ b/umn/source/training_management/creating_a_training_job/introduction_to_training_jobs.rst @@ -0,0 +1,17 @@ +Introduction to Training Jobs +============================= + +ModelArts supports multiple types of training jobs during the entire AI development process. Select a creation mode based on the algorithm source. + +Algorithm Sources of Training Jobs +---------------------------------- + +- **Frequently-used** + + If you have used some frequently-used frameworks to develop algorithms locally, you can select a frequently-used framework and create a training job to build a model. For details, see `Using Frequently-used Frameworks to Train Models <../../training_management/creating_a_training_job/using_frequently-used_frameworks_to_train_models.html>`__. 
+ +- **Custom** + + If the framework used for algorithm development is not a frequently-used framework, you can build an algorithm into a custom image and use the custom image to create a training job. For details about the operation guide to create a training job, see `Using Custom Images to Train Models <../../training_management/creating_a_training_job/using_custom_images_to_train_models.html>`__. For details about the specifications and description of custom images, see `Specifications for Custom Images Used for Training Jobs <../../custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.html>`__. + + diff --git a/umn/source/training_management/creating_a_training_job/using_custom_images_to_train_models.rst b/umn/source/training_management/creating_a_training_job/using_custom_images_to_train_models.rst new file mode 100644 index 0000000..2cbc4e6 --- /dev/null +++ b/umn/source/training_management/creating_a_training_job/using_custom_images_to_train_models.rst @@ -0,0 +1,124 @@ +Using Custom Images to Train Models +=================================== + +If the framework used for algorithm development is not a frequently-used framework, you can build an algorithm into a custom image and use the custom image to create a training job. + +Prerequisites +------------- + +- Data has been prepared. Specifically, you have created an available dataset in ModelArts, or you have uploaded the dataset used for training to the OBS directory. +- If the algorithm source is **Custom**, create an image and upload the image to SWR. For details, see . +- The training script has been uploaded to the OBS directory. +- At least one empty folder has been created on OBS for storing the training output. +- The account is not in arrears because resources are consumed when training jobs are running. +- The OBS directory you use and ModelArts are in the same region. + +Precautions +----------- + +- In the dataset directory specified for a training job, the names of the files (such as the image file, audio file, and label file) containing data used for training contain 0 to 255 characters. If the names of certain files in the dataset directory contain over 255 characters, the training job will ignore these files and use data in the valid files for training. If the names of all files in the dataset directory contain over 255 characters, no data is available for the training job and the training job fails. +- In the training script, the **Data Source** and **Training Output Path** parameters must be set to the OBS path. Use the to perform read and write operations in the path. + +Creating a Training Job +----------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Training Management** > **Training Jobs**. By default, the system switches to the **Training Jobs** page. + +#. In the upper left corner of the training job list, click **Create** to switch to the **Create Training Job** page. + +#. Set related parameters. + + a. Set the basic information, including **Name**, **Version**, and **Description**. The **Version** information is automatically generated by the system and named in an ascending order of **V001**, **V002**, and so on. You cannot manually modify it. + + Specify **Name** and **Description** according to actual requirements. + + b. Set job parameters, including the data source, algorithm source, and more. For details, see `Table 1 <#modelarts230239enustopic0216621184table1819364517144>`__. + +.. 
_modelarts230239enustopic0216621184table1819364517144: + + .. table:: **Table 1** Job parameters + + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Sub-Parameter | Description | + +=========================+=======================+====================================================================================================================================================================================================================================================================================================================================================================================+ + | One-Click Configuration | - | If you have saved job parameter configurations in ModelArts, click **One-Click Configuration** and select an existing job parameter configuration as prompted to quickly complete parameter setting for the job. | + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Algorithm Source | Custom | For details about custom image specifications, see `Specifications for Custom Images Used for Training Jobs <../../custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.html>`__. | + | | | | + | | | - **Image Path**: SWR URL after the image is uploaded to SWR. For details about how to upload an image, see `Creating and Uploading a Custom Image <../../custom_images/creating_and_uploading_a_custom_image.html>`__. | + | | | - **Code Directory**: OBS path for storing the training code file. | + | | | - **Boot Command**: Command to boot the training job after the image is started. Set this parameter based on site requirements. If the custom image is based on a basic ModelArts image, set parameters by referring to `Creating a Training Job Using a Custom Image (GPU) <../../custom_images/for_training_models/creating_a_training_job_using_a_custom_image_(gpu).html>`__. | + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Data Source | Dataset | Select an available dataset and its version from the ModelArts **Data Management** module. | + | | | | + | | | - **Dataset**: Select an existing dataset from the drop-down list. If no dataset is available in ModelArts, no result will be displayed in the drop-down list. | + | | | - **Version**: Select a version according to the **Dataset** setting. 
| + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Data path | Select the training data from your OBS bucket. On the right of the **Data path** text box, click **Select**. In the dialog box that is displayed, select an OBS folder for storing data. | + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Training Output Path | - | Storage path of the training result | + | | | | + | | | .. note:: | + | | | | + | | | To minimize errors, select an empty directory for **Training Output Path**. Do not select the directory used for storing the dataset for **Training Output Path**. | + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Environment Variable | - | Add environment variables based on your image file. This parameter is optional. You can click **Add Environment Variable** to add multiple variable parameters. | + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Job Log Path | - | Select a path for storing log files generated during job running. | + +-------------------------+-----------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + c. Select resources for the training job. + +.. _modelarts230239enustopic0216621184table8958315124918: + + .. 
table:: **Table 2** Resource parameters + + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==============================================================================================================================================================================================================================================================================+ + | Resource Pool | Select resource pools for the job. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Type | If **Resource Pool** is set to **Public resource pools**, select a resource type. Available resource types are **CPU** and **GPU**. | + | | | + | | The GPU resource delivers better performance, and the CPU resource is more cost effective. If the selected algorithm has been defined to use the CPU or GPU, the resource type is automatically displayed on the page. Select the resource type as required. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Specifications | Select a resource flavor based on the resource type. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Compute Nodes | Set the number of compute nodes. If you set **Compute Nodes** to **1**, the standalone computing mode is used. If you set **Compute Nodes** to a value greater than **1**, the distributed computing mode is used. Select a computing mode based on the actual requirements. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + d. Configure **Notification** and select whether to save the parameters of the training job. + +.. _modelarts230239enustopic0216621184table1217141794320: + + .. 
table:: **Table 3** Parameters related to notification and parameter configuration saving + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==================================================================================================================================================================================================================================================================================================================================+ + | Notification | Select the resource pool status to be monitored from the event list, and SMN sends a notification message when the event occurs. | + | | | + | | This parameter is optional. You can choose whether to enable subscription based on actual requirements. If you enable subscription, set the following parameters as required: | + | | | + | | - **Topic**: indicates the topic name. You can create a topic on the SMN console. | + | | - **Event**: indicates the event to be subscribed to. The options are **OnJobRunning**, **OnJobSucceeded**, and **OnJobFailed**, indicating that training is in progress, successful, and failed, respectively. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Saving Training Parameters | If you select this option, the parameter settings of the current job will be saved to facilitate subsequent job creation. | + | | | + | | Select **Save Training Parameters** and specify **Configuration Name** and **Description**. After a training job is created, you can switch to the **Job Parameters** tab page to view your saved job parameter settings. For details, see `Managing Job Parameters <../../training_management/managing_job_parameters.html>`__. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + e. After setting the parameters, click **Next**. + +#. Confirm that the information is correct on the **Confirm** page that is displayed and click **Submit**. Generally, training jobs run for a period of time, which may be several minutes or tens of minutes depending on the amount of your selected data and resources. + + After a custom image job is created, the system authorizes ModelArts to obtain and run the image by default. When you run a custom image job for the first time, ModelArts checks the custom image. For details about the check, see `Specifications for Custom Images Used for Training Jobs <../../custom_images/for_training_models/specifications_for_custom_images_used_for_training_jobs.html>`__. You can view the cause of the check failure in the log and modify the custom image based on the log. 
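When revising the image, it may also help to double-check the entry point that the **Boot Command** invokes. The sketch below shows, purely as an illustration, how such an entry script can accept the OBS paths configured as **Data Source** and **Training Output Path**; the argument names ``--data_url`` and ``--train_url`` and the sample defaults are assumptions for this example, not names mandated by ModelArts.

.. code-block:: python

   # Hypothetical entry script for a custom training image (illustration only).
   # The argument names are assumptions; use whatever interface your Boot Command
   # actually passes to the container.
   import argparse

   def parse_args():
       parser = argparse.ArgumentParser(description="Custom-image training entry point")
       parser.add_argument("--data_url", required=True,
                           help="OBS path configured as the job's Data Source")
       parser.add_argument("--train_url", required=True,
                           help="OBS path configured as the job's Training Output Path")
       parser.add_argument("--train_steps", type=int, default=10000,
                           help="Example running parameter")
       return parser.parse_args()

   if __name__ == "__main__":
       args = parse_args()
       # Real training code would read input data from args.data_url and write
       # checkpoints and the final model under args.train_url.
       print("data source:", args.data_url)
       print("output path:", args.train_url)
       print("train steps:", args.train_steps)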
+ + After the image is checked, the system starts the custom image container in the background to run the custom image training job. You can switch to the training job list to view the basic information about training jobs. In the training job list, **Status** of the newly created training job is **Initializing**. If the status changes to **Successful**, the training job ends and the model generated is stored in the location specified by **Training Output Path**. If the status of a training job changes to **Running failed**, click the name of the training job and view the job logs. Troubleshoot the fault based on the logs. + + .. note:: + + - After an image is checked, it does not need to be checked again when it is used to create training jobs later. + - The default user of a custom image must be the user whose UID is **1101**. + + diff --git a/umn/source/training_management/creating_a_training_job/using_frequently-used_frameworks_to_train_models.rst b/umn/source/training_management/creating_a_training_job/using_frequently-used_frameworks_to_train_models.rst new file mode 100644 index 0000000..747b7c0 --- /dev/null +++ b/umn/source/training_management/creating_a_training_job/using_frequently-used_frameworks_to_train_models.rst @@ -0,0 +1,163 @@ +Using Frequently-used Frameworks to Train Models +================================================ + +If you use frequently-used frameworks, such as TensorFlow and MXNet, to develop algorithms locally, you can select **Frequently-used** to create training jobs and build models. + +Prerequisites +------------- + +- Data has been prepared. Specifically, you have created an available dataset in ModelArts, or you have uploaded the dataset used for training to the OBS directory. +- If you select **Frequently-used** for **Algorithm Source**, prepare the training script and upload it to the OBS directory. +- At least one empty folder has been created on OBS for storing the training output. +- The account is not in arrears because resources are consumed when training jobs are running. +- The OBS directory you use and ModelArts are in the same region. + +Precautions +----------- + +- In the dataset directory specified for a training job, the names of the files (such as the image file, audio file, and label file) containing data used for training contain 0 to 255 characters. If the names of certain files in the dataset directory contain over 255 characters, the training job will ignore these files and use data in the valid files for training. If the names of all files in the dataset directory contain over 255 characters, no data is available for the training job and the training job fails. +- In the training script, the **Data Source** and **Training Output Path** parameters must be set to the OBS path. Use the to perform read and write operations in the path. + +Frequently-used AI Frameworks for Training Management +----------------------------------------------------- + +ModelArts supports the following AI engines and versions. + + + +.. _modelarts230238enustopic0216621183table1106232165220: + +..
table:: **Table 1** AI engines supported by training jobs + + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Environment | Supported Chip | System Architecture | System Version | AI Engine and Version | Supported CUDA or Ascend Version | + +=======================+================+=====================+================+===================================+==================================+ + | TensorFlow | CPU and GPU | x86_64 | Ubuntu 16.04 | TF-1.13.1-python3.6 | CUDA 10.0 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | | | | | TF-2.1.0-python3.6 | CUDA 10.1 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Caffe | CPU and GPU | x86_64 | Ubuntu 16.04 | Caffe-1.0.0-python2.7 | CUDA 8.0 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Spark_MLlib | CPU | x86_64 | Ubuntu 16.04 | Spark-2.3.2-python3.6 | N/A | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | XGBoost-Sklearn | CPU | x86_64 | Ubuntu 16.04 | Scikit_Learn-0.18.1-python3.6 | N/A | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | PyTorch | CPU and GPU | x86_64 | Ubuntu 16.04 | PyTorch-1.3.0-python3.6 | CUDA 10.0 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | | | | | PyTorch-1.4.0-python3.6 | CUDA 10.1 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | Ascend-Powered-Engine | Ascend 910 | AArch64 | EulerOS 2.8 | Mindspore-1.1.1-python3.7-aarch64 | C76 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | | | | | TF-1.15-python3.7-aarch64 | C76 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + | MindSpore-GPU | CPU and GPU | x86_64 | Ubuntu 18.04 | MindSpore-1.1.0-python3.7 | CUDA 10.1 | + +-----------------------+----------------+---------------------+----------------+-----------------------------------+----------------------------------+ + +Creating a Training Job +----------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Training Management** > **Training Jobs**. By default, the system switches to the **Training Jobs** page. + +#. In the upper left corner of the training job list, click **Create** to switch to the **Create Training Job** page. + +#. Set related parameters. + + a. Set the basic information, including **Name**, **Version**, and **Description**. The **Version** information is automatically generated by the system and named in an ascending order of **V001**, **V002**, and so on. You cannot manually modify it. 
+ + Specify **Name** and **Description** according to actual requirements. + + b. Set job parameters, including the data source, algorithm source, and more. For details, see `Table 2 <#modelarts230238enustopic0216621183table1819364517144>`__. + +.. _modelarts230238enustopic0216621183table1819364517144: + + .. table:: **Table 2** Job parameters + + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Sub-Parameter | Description | + +=========================+=======================+=================================================================================================================================================================================================================================================================================================================================================================================================================+ + | One-Click Configuration | - | If you have saved job parameter configurations in ModelArts, click **One-Click Configuration** and select an existing job parameter configuration as prompted to quickly complete parameter setting for the job. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Algorithm Source | Frequently-used | Select an AI engine and its version and specify **Code Directory** and **Boot File**. The framework selected for the AI engine must be the same as the one you select when compiling training code. For example, if TensorFlow is used in your training code, select TensorFlow when you create a training job. | + | | | | + | | | For details about the supported AI engines and versions, see `Frequently-used AI Frameworks for Training Management <#frequently-used-ai-frameworks-for-training-management>`__. | + | | | | + | | | If your model requires Python dependency packages, place the dependency packages and their configuration files in the code directory based on the requirements defined in ModelArts. For details, see `How Do I Create a Training Job When a Dependency Package Is Referenced in a Model? <../../faqs/training_jobs/how_do_i_create_a_training_job_when_a_dependency_package_is_referenced_in_a_model.html>`__. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Data Source | Dataset | Select an available dataset and its version from the ModelArts **Data Management** module. 
| + | | | | + | | | - **Dataset**: Select an existing dataset from the drop-down list. If no dataset is available in ModelArts, no result will be displayed in the drop-down list. | + | | | - **Version**: Select a version according to the **Dataset** setting. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | Data path | Select the training data from your OBS bucket. On the right of the **Data path** text box, click **Select**. In the dialog box that is displayed, select an OBS folder for storing data. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Training Output Path | - | Select a path for storing the training result. | + | | | | + | | | .. note:: | + | | | | + | | | To minimize errors, select an empty directory for **Training Output Path**. Do not select the directory used for storing the dataset for **Training Output Path**. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Running Parameter | - | Set the command line parameters in the code based on the algorithm code logic. Make sure that the parameter names are the same as those in the code. | + | | | | + | | | For example, **train_steps = 10000**, where **train_steps** is a passing parameter in code. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Job Log Path | - | Select a path for storing log files generated during job running. | + +-------------------------+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + c. Select resources for the training job. + +.. _modelarts230238enustopic0216621183table1110144413718: + + .. 
table:: **Table 3** Resource parameters + + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==========================================================================================================================================================================================================================================================================================+ + | Resource Pool | Select resource pools for the job. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Type | If **Resource Pool** is set to **Public resource pools**, select a resource type. Available resource types are **CPU** and **GPU**. | + | | | + | | The GPU resource delivers better performance, and the CPU resource is more cost effective. If the selected algorithm has been defined to use the CPU or GPU, the resource type is automatically displayed on the page. Select the resource type as required. | + | | | + | | .. note:: | + | | | + | | If GPU resources are used in training code, you must select a GPU cluster when selecting a resource pool. Otherwise, the training job may fail. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Specifications | Select a resource flavor based on the resource type. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Compute Nodes | Set the number of compute nodes. If you set **Compute Nodes** to **1**, the standalone computing mode is used. If you set **Compute Nodes** to a value greater than 1, the distributed computing mode is used. Select a computing mode based on the actual requirements. | + | | | + | | When **Frequently-used** of **Algorithm Source** is set to **Caffe**, only standalone training is supported, that is, **Compute Nodes** must be set to **1**. For other options of **Frequently-used**, you can select the standalone or distributed mode based on service requirements. | + +-----------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + d. Configure **Notification** and select whether to save the parameters of the training job. + +.. _modelarts230238enustopic0216621183table1217141794320: + + .. 
table:: **Table 4** Parameters related to notification and parameter configuration saving + + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +===================================+==================================================================================================================================================================================================================================================================================================================================+ + | Notification | Select the resource pool status to be monitored from the event list, and SMN sends a notification message when the event occurs. | + | | | + | | This parameter is optional. You can choose whether to enable subscription based on actual requirements. If you enable subscription, set the following parameters as required: | + | | | + | | - **Topic**: indicates the topic name. You can create a topic on the SMN console. | + | | - **Event**: indicates the event to be subscribed to. The options are **OnJobRunning**, **OnJobSucceeded**, and **OnJobFailed**, indicating that training is in progress, successful, and failed, respectively. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Saving Training Parameters | If you select this option, the parameter settings of the current job will be saved to facilitate subsequent job creation. | + | | | + | | Select **Save Training Parameters** and specify **Configuration Name** and **Description**. After a training job is created, you can switch to the **Job Parameters** tab page to view your saved job parameter settings. For details, see `Managing Job Parameters <../../training_management/managing_job_parameters.html>`__. | + +-----------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + e. After setting the parameters, click **Next**. + +#. Confirm that the information is correct on the **Confirm** page that is displayed and click **Submit**. Generally, training jobs run for a period of time, which may be several minutes or tens of minutes depending on the amount of your selected data and resources. + + .. note:: + + After a training job is created, it is started immediately. + + You can switch to the training job list to view the basic information about training jobs. In the training job list, **Status** of the newly created training job is **Initializing**. If the status changes to **Successful**, the training job ends and the model generated is stored in the location specified by **Training Output Path**. 
If the status of a training job changes to **Running failed**, click the name of the training job and view the job logs. Troubleshoot the fault based on the logs. + + diff --git a/umn/source/training_management/index.rst b/umn/source/training_management/index.rst new file mode 100644 index 0000000..779e078 --- /dev/null +++ b/umn/source/training_management/index.rst @@ -0,0 +1,14 @@ +=================== +Training Management +=================== + +.. toctree:: + :maxdepth: 1 + + introduction_to_model_training + creating_a_training_job/index + stopping_or_deleting_a_job + managing_training_job_versions + viewing_job_details + managing_job_parameters + managing_visualization_jobs diff --git a/umn/source/training_management/introduction_to_model_training.rst b/umn/source/training_management/introduction_to_model_training.rst new file mode 100644 index 0000000..84309e8 --- /dev/null +++ b/umn/source/training_management/introduction_to_model_training.rst @@ -0,0 +1,29 @@ +Introduction to Model Training +============================== + +ModelArts provides model training for you to view the training effect, based on which you can adjust your model parameters. You can select resource pools (CPU or GPU) with different instance flavors for model training. In addition to the models developed by users, ModelArts also provides built-in algorithms. You can directly adjust parameters of the built-in algorithms, instead of developing a model by yourself, to obtain a satisfactory model. + +Description of the Model Training Function +------------------------------------------ + + + +.. _modelarts230044enustopic0129633060table138422031155511: + +.. table:: **Table 1** Function description + + +------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+ + | Function | Description | Reference | + +==============================+==============================================================================================================================================================================================================================================================================================================================================================+=================================================================================================+ + | Training job management | You can create training jobs, manage training job versions, and view details of training jobs, and evaluation details. 
| `Creating a Training Job <../training_management/index.html>`__ | + | | | | + | | | `Managing Training Job Versions <../training_management/managing_training_job_versions.html>`__ | + | | | | + | | | `Viewing Job Details <../training_management/viewing_job_details.html>`__ | + +------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+ + | Job parameter management | You can save the parameter settings of a training job (including the data source, algorithm source, running parameters, resource pool parameters, and more) as a job parameter, which can be directly used when you create a training job, eliminating the need to set parameters one by one. As such, the configuration efficiency can be greatly improved. | `Managing Job Parameters <../training_management/managing_job_parameters.html>`__ | + +------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+ + | Model training visualization | TensorBoard and MindInsight effectively display the computational graph of a model in the running process, the trend of all metrics in time, and the data used in the training. | `Managing Visualization Jobs <../training_management/managing_visualization_jobs.html>`__ | + +------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/training_management/managing_job_parameters.rst b/umn/source/training_management/managing_job_parameters.rst new file mode 100644 index 0000000..4eb0fff --- /dev/null +++ b/umn/source/training_management/managing_job_parameters.rst @@ -0,0 +1,41 @@ +Managing Job Parameters +======================= + +You can store the parameter settings in ModelArts during job creation so that you can use the stored settings to create follow-up training jobs, which makes job creation more efficient. + +During the operations of creating, editing, and viewing training jobs, the saved job parameter settings are displayed on the **Job Parameter Mgmt** page. + +Using a Job Parameter Configuration +----------------------------------- + +- Method 1: Using a job parameter configuration on the **Job Parameter Mgmt** page + + Log in to the ModelArts management console. In the left navigation pane, choose **Training Management** > **Training Jobs**. On the displayed page, click the **Job Parameter Mgmt** tab. 
In the job parameter list, click **Creating Training Job** for a job parameter configuration to create a training job based on the job parameter configuration. + +- Method 2: Using a job parameter configuration on the **Creating Training Job** page + + On the **Creating Training Job** page, click **One-Click Configuration**. In the displayed dialog box, select the required job parameter configuration to quickly create an available training job. + +Editing a Job Parameter Configuration +------------------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Training Management** > **Training Jobs**. On the displayed page, click the **Job Parameter Mgmt** tab. + +#. In the job parameter configuration list, click **Edit** in the **Operation** column in a row. + +#. On the displayed page, modify related parameters by referring to "Creating a Training Job" and click **OK** to save the job parameter settings. + + In the existing job parameter settings, the job name cannot be changed. + +Deleting a Training Job Parameter Configuration +----------------------------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Training Management** > **Training Jobs**. On the displayed page, click the **Job Parameter Mgmt** tab. +#. In the job parameter list, click **Delete** in the **Operation** column in a row. +#. In the displayed dialog box, click **OK**. + + .. note:: + + Deleted job parameter configurations cannot be recovered. Therefore, exercise caution when performing this operation. + + diff --git a/umn/source/training_management/managing_training_job_versions.rst b/umn/source/training_management/managing_training_job_versions.rst new file mode 100644 index 0000000..d059aae --- /dev/null +++ b/umn/source/training_management/managing_training_job_versions.rst @@ -0,0 +1,55 @@ +Managing Training Job Versions +============================== + +During model building, you may need to frequently tune the data, training parameters, or the model based on the training results to obtain a satisfactory model. ModelArts allows you to manage training job versions so that you can train your model effectively after tuning. Specifically, ModelArts generates a version each time a training job is run. You can quickly check the differences between versions. + +Viewing Training Job Versions +----------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Training Management** > **Training Jobs**. By default, the system switches to the **Training Jobs** page. + +#. In the training job list, click the name of a training job. + + By default, the basic information about the latest version is displayed. If there are multiple available versions, click **Select Version** in the upper left corner to view a specific version. Click the downward arrow to the left of the version to display job details. + +Comparing Versions of a Training Job +------------------------------------ + +On the **Version Manager** page, click **View Comparison Result** to view the comparison of all or selected versions of the current training job. The comparison result involves the following information: **Running Parameter**, **F1 Score**, **Recall**, **Precision**, and **Accuracy**. + +.. note:: + + The **F1 Score**, **Recall**, **Precision**, and **Accuracy** parameters of a training job are displayed only when the job is created using a built-in algorithm.
For training jobs created using frequently-used frameworks or custom images, define the output of these parameters in your training script code. These parameters cannot be viewed on the GUI. + +Shortcut Operations Based on Training Job Versions +-------------------------------------------------- + +On the **Version Manager** page, ModelArts provides certain shortcut operation buttons for you to quickly enter the subsequent steps after model training is complete. + + + +.. _modelarts230047enustopic0171858285table545322619177: + +.. table:: **Table 1** Shortcut operation button description + + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Shortcut Operation Button | Description | + +===================================+=================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================+ + | Creating Visualization Job | Creates a visualization job (TensorBoard) for the current training version. For details, see `Managing Visualization Jobs <../training_management/managing_visualization_jobs.html>`__. | + | | | + | | .. note:: | + | | | + | | TensorBoard supports only the TensorFlow and MXNet engines. Therefore, you can create the TensorBoard jobs only when the AI engine is TensorFlow or MXNet. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Create Model | Creates a model for the current training version. For details about how to create a model, see `Importing a Model <../model_management/index.html>`__. You can only create models for training jobs in the **Running** status. 
| + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Modify | If the training result of the current version does not meet service requirements or the training job fails, click **Modify** to switch to the page where you can modify the job parameter settings. For details about the parameters of the training job, see `Creating a Training Job <../training_management/creating_a_training_job/introduction_to_training_jobs.html>`__. After modifying the job parameter settings as required, click **OK** to start the training job of a new version. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Save Training Parameters | You can save the job parameter settings of this version as a job parameter configuration, which will be displayed on the **Job Parameter Mgmt** page. Click **More > Save Training Parameters** to switch to the **Training Parameter** page. After confirming that the settings are correct, click **OK**. For details about training parameter management, see `Managing Job Parameters <../training_management/managing_job_parameters.html>`__. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Stop | Click **More > Stop** to stop the training job of the current version. Only training jobs in the **Running** state can be stopped. | + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Delete | Click **More > Delete** to delete the training job of the current version. 
| + +-----------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + + diff --git a/umn/source/training_management/managing_visualization_jobs.rst b/umn/source/training_management/managing_visualization_jobs.rst new file mode 100644 index 0000000..f59d146 --- /dev/null +++ b/umn/source/training_management/managing_visualization_jobs.rst @@ -0,0 +1,63 @@ +Managing Visualization Jobs +=========================== + +You can create visualization jobs of TensorBoard and MindInsight types on ModelArts. + +TensorBoard supports training jobs based on the TensorFlow engine, and MindInsight supports training jobs based on the MindSpore engine. + +TensorBoard and MindInsight can effectively display the change trend of a training job and the data used in the training. + +You can use the **summary** file generated during model training to create a visualization job. + +Prerequisites +------------- + +To ensure that the **summary** file is generated in the training result, you need to add the related code to the training script. + +- Using the TensorFlow engine: + + When using the TensorFlow-based MoXing, in **mox.run**, set **save_summary_steps>0** and **summary_verbosity≥1**. + + If you want to display other metrics, add tensors to **log_info** in the return value **mox.ModelSpec** of **model_fn**. Only the rank-0 tensors (scalars) are supported. The added tensors are written into the **summary** file. If you want to write tensors of higher ranks in the **summary** file, use the native **tf.summary** of TensorFlow in **model_fn**. + +- Using the MindSpore engine: + + MindSpore allows you to save data to the **summary** log file and display the data on the GUI. For details, see the `MindSpore official website `__. + +Creating a Visualization Job +---------------------------- + +#. Log in to the ModelArts management console. In the left navigation pane, choose **Training Jobs**. On the displayed page, click the **Visualization Jobs** tab. + +#. In the upper left corner of the visualization job list, click **Create** to switch to the **Create Visualization Job** page. + +#. Set **Job Type** to **TensorBoard** and **MindInsight**. Enter the visualization job name and description as required, set the **Training Output Path** and **Auto Stop** parameters. + + - **Training Output Path**: Select the training output path specified when the training job is created. + - **Auto Stop**: Enable or disable the auto stop function. The options are **1 hour later**, **2 hours later**, **4 hours later**, **6 hours later**, and **Custom**. If you select **Custom**, you can enter any integer within 1 to 24 hours in the textbox on the right. + +#. After confirming the specifications, click **Next**. + + In the visualization job list, when the status changes to **Running**, the virtualization job has been created. You can click the name of the visualization job to view its details. + +Opening a Visualization Job +--------------------------- + +In the visualization job list, click the name of the target visualization job. The **TensorBoard** page is displayed. 
Only visualization jobs in the **Running** status can be opened. + +Running or Stopping a Visualization Job +--------------------------------------- + +- **Stopping a visualization job**: In the visualization job list, click **Stop** in the **Operation** column to stop the visualization job. +- **Running a visualization job**: You can run a visualization job in the **Canceled** status again and continue using it. In the visualization job list, click **Run** in the **Operation** column to run the visualization job. + +Deleting a Visualization Job +---------------------------- + +If your visualization job is no longer used, you can delete it to release resources. In the visualization job list, click **Delete** in the **Operation** column to delete the visualization job. + +.. note:: + + A deleted visualization job cannot be recovered. If you want to use it again, you need to create a new visualization job. Exercise caution when performing this operation. + + diff --git a/umn/source/training_management/stopping_or_deleting_a_job.rst b/umn/source/training_management/stopping_or_deleting_a_job.rst new file mode 100644 index 0000000..0cff381 --- /dev/null +++ b/umn/source/training_management/stopping_or_deleting_a_job.rst @@ -0,0 +1,22 @@ +Stopping or Deleting a Job +========================== + +Stopping a Training Job +----------------------- + +In the training job list, click **Stop** in the **Operation** column for a training job in the **Running** state to stop it. + +If you have selected **Save Training Parameters** for a stopped training job, the job's parameter settings will be saved to the **Job Parameter Mgmt** page. + +You cannot stop a training job that has already stopped running, for example, a job in the **Successful** or **Running failed** state. Only training jobs in the **Running** state can be stopped. + +Deleting a Training Job +----------------------- + +If an existing training job is no longer used, you can delete it. + +For a training job in the **Running**, **Successful**, **Running failed**, **Canceled**, or **Deploying** state, click **Delete** in the **Operation** column to delete it. + +If you have selected **Save Training Parameters** for a deleted training job, the job's parameter settings will be saved to the **Job Parameter Mgmt** page. + + diff --git a/umn/source/training_management/viewing_job_details.rst b/umn/source/training_management/viewing_job_details.rst new file mode 100644 index 0000000..3fb5a1c --- /dev/null +++ b/umn/source/training_management/viewing_job_details.rst @@ -0,0 +1,35 @@ +Viewing Job Details +=================== + +After a training job finishes, you can manage the training job versions and check whether the training result of the job is satisfactory by viewing the `job details <#training-job-details>`__. + +Training Job Details +-------------------- + +In the left navigation pane of the ModelArts management console, choose **Training Management** > **Training Jobs** to switch to the **Training Jobs** page. In the training job list, click a job name to view the job details. + +`Table 1 <#modelarts230048enustopic0171858286table43451384323>`__ lists the parameters of each version of the training job. + + + +.. _modelarts230048enustopic0171858286table43451384323: + +.. 
table:: **Table 1** Training job details + + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Parameter | Description | + +=================+====================================================================================================================================================================================================================================+ + | Version | Version of a training job, which is automatically defined by the system, for example, **V0001** and **V0002**. | + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Status | Status of a training job, | + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Duration | Running duration of a training job | + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Configurations | Details about the parameters of the current training job version | + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Logs | Logs of the current training job version. If you set **Log Output Path** when creating a training job, you can click the download button on the **Logs** tab page to download the logs stored in the OBS bucket to the local host. | + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Resource Usages | Usage of resources of the current training version, including the CPU, GPU, and memory. | + +-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +
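The visualization-job prerequisites above describe when a training script produces the **summary** file that a TensorBoard-type visualization job reads, but they do not show the code itself. The following is a minimal sketch of one way to write scalar summaries with TensorFlow's native **tf.summary** API (not the MoXing wrapper). The output directory and the loss values are placeholders for illustration only and are not values mandated by ModelArts; in a real job, the writer would point at the training output path configured for the training job.

.. code-block:: python

   import tensorflow as tf

   # Placeholder directory for illustration; in a real ModelArts training job,
   # this would be the training output path configured when creating the job.
   summary_dir = "/tmp/training_output/summary"

   writer = tf.summary.create_file_writer(summary_dir)

   for step in range(100):
       loss = 1.0 / (step + 1)  # dummy scalar metric, for illustration only
       with writer.as_default():
           # Rank-0 (scalar) values such as loss or accuracy are what the
           # visualization job plots as trend curves on the TensorBoard page.
           tf.summary.scalar("loss", loss, step=step)

   writer.flush()

If a script like this runs as a training job, creating a TensorBoard visualization job with **Training Output Path** set to the same directory should make the recorded scalars available once the visualization job reaches the **Running** status.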